@article{Li2017,
abstract = {In this paper, from the perspective of human ergonomics, we analyze the movement of
the joints in the process of human body movements, and we establish a dynamic model according to
the human skeleton structure. On this basis, from the rigid body dynamics point of view, combined
with the principle of inertial navigation, a body sensor network based on MEMS inertial sensors is
built to capture human body motion in real time. On the basis of space trajectory of human body
movement and traditional human motion solution strategy, a human motion solution strategy based
on particle filter fusion solution is proposed to realize the prediction of human motion analysis.
Therefore, we evaluate the performance of the designed system by comparing with the real motion.
Finally, in order to verify the human motion data, the motion capture data verification platforms are
established. Experimental results show that the proposed joint attitude solution algorithm can
achieve a relatively smooth tracking effect and provides a certain reference value.},
author = {Li, Jie and Wang, Zhe Long and Zhao, Hongyu and Gravina, Raffaele and Fortino, Giancarlo
and Jiang, Yongmei and Tang, Kai},
doi = {10.1145/0000000.0000000},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1.1.736.9072.pdf:pdf},
issn = {23103582},
number = {212},
pages = {1--33},
volume = {1},
year = {2017},
internal-note = {Entry header was corrupted ("Untitled" lines); key reconstructed from first author and year. Title and journal are missing and the DOI looks like a placeholder (10.1145/0000000.0000000) -- verify against the publisher record.},
}
@article{Kafura1981,
abstract = {Structured design methodologies provide a disciplined and organized guide to the
construction of software systems. However, while the methodology structures and documents the
points at which design decisions are made, it does not provide a specific, quantitative basis for
making these decisions. Typically, the designers' only guidelines are qualitative, perhaps even vague,
principles such as “functionality,” “data transparency,” or “clarity.” This paper, like several recent
publications, defines and validates a set of software metrics which are appropriate for evaluating the
structure of large-scale systems. These metrics are based on the measurement of information flow
between system components. Specific metrics are defined for procedure complexity, module
complexity, and module coupling. The validation, using the source code for the UNIX operating
system, shows that the complexity measures are strongly correlated with the occurrence of changes.
Further, the metrics for procedures and modules can be interpreted to reveal various types of
structural flaws in the design and implementation. Copyright {\textcopyright} 1981 by The Institute
of Electrical and Electronics Engineers, Inc.},
author = {Henry, Sallie and Kafura, Dennis},
doi = {10.1109/TSE.1981.231113},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/henry1981.pdf:pdf},
issn = {00985589},
journal = {IEEE Transactions on Software Engineering},
number = {5},
pages = {510--518},
title = {{Software Structure Metrics Based on Information Flow}},
volume = {SE-7},
year = {1981},
internal-note = {Repaired: first author Henry was missing (attached file is henry1981.pdf) and "D{\'{e}}nnis" is a typo for Dennis; title and journal recovered from DOI 10.1109/TSE.1981.231113 -- verify.},
}
@inproceedings{Schall2008,
abstract = {Novel forms of collaboration increasingly distribute control among e-workers, thereby
allowing agile and autonomous collaboration. However, this requires a novel blend of infrastructure
and algorithms for self-adaptation of collaboration services. We present VieCAR (Vienna
Collaborative Activity and Resource Management Framework), a framework that addresses the
requirements of new collaborative service-oriented environments. Self-adaptive collaboration
services depend on the user's context. VieCAR combines service-oriented architectures with activity-
centric computing enabling people to interact and collaborate regardless of their location and across
organizational boundaries. Based on VieCAR's activity model, we present a ranking algorithm
determining the relevant input for service adaptation. {\textcopyright} 2008 IEEE.},
author = {Schall, Daniel and Dorn, Christoph and Dustdar, Schahram and Dadduzio, Ignazio},
doi = {10.1109/SEAA.2008.25},
file = {:C$\backslash$:/Users/Asus/Downloads/seaa.2008.25.pdf:pdf},
isbn = {9780769532769},
pages = {285--292},
year = {2008},
internal-note = {Required fields title and booktitle are missing; DOI 10.1109/SEAA.2008.25 points to the SEAA 2008 proceedings -- recover them from the DOI record.},
}
@article{B,
author = {Pitt, Jeremy and Milanovic, Kristina and Coupland, Alexander and Allan, Tim and Davies,
Alun and Lane, Tristan and Malagoni, Anna Maria and Thapar, Ankur and Shalhoub, Joseph},
isbn = {9783030034245},
pages = {63--78},
title = {{A Collective Adaptive Socio-Technical System for Remote- and Self-supervised Claudication}},
volume = {1},
internal-note = {First author repaired from garbled "B, Jeremy Pitt". Year and container title are missing; the ISBN identifies a Springer volume, so this is probably an @incollection chapter -- verify. The citation key "B" is uninformative and should be renamed (e.g. Pitt2019) once callers are checked.},
}
@article{Zhang2010a,
abstract = {Modern computer systems for distributed service computing become highly complex and
difficult to manage. A self-adaptive approach that integrates monitoring, analyzing, and actuation
functionalities has the potential to accommodate to a dynamically changing environment. The main
objective of this paper is to develop an architecture-based self-adaptive framework to improve
performance and resource efficiency of a server while maintaining reliable services. The target
problem is distributed and concurrent systems. This paper proposes a Self-Adaptive Framework for
Concurrency Architecture (SAFCA) that includes multiple concurrency architectural patterns or
alternatives. The framework has monitoring and managing capabilities that can invoke another
architectural alternative at run-time to cope with increasing demands or for reliability purpose. Two
control mechanisms have been developed: SAFCA-Q and SAFCA-R. With SAFCA-Q, the system does
not need to be statically configured for the highest workloads; hence, resource usage becomes more
efficient in normal conditions and the system still is able to handle busty demands. SAFCA-R is used
to improve reliability in the case of a failure by conducting a switchover to another software
architecture. Experiment results demonstrate that the performance of SAFCA-Q is better than
systems using only standalone concurrency architecture and resources are also better utilized.
SAFCA-R also shows fast recovery in the face of a failure. {\textcopyright} 2010 IEEE.},
doi = {10.1109/COMPSAC.2010.68},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/zhang2010.pdf:pdf},
isbn = {9780769540856},
issn = {07303157},
pages = {72--81},
year = {2010},
internal-note = {Required fields author and title are missing; the DOI points to the COMPSAC 2010 proceedings, so this should probably be @inproceedings with a booktitle -- recover from the DOI record.},
}
@article{Li2016,
abstract = {We present a multi-level formation model for complex software systems. The previous
works extract the software systems to software networks for further studies, but usually investigate
the software networks at the class level. In contrast to these works, our treatment of software
systems as multi-level networks is more realistic. In particular, the software networks are organized
by three levels of granularity, which represents the modularity and hierarchy in the formation
process of real-world software systems. More importantly, simulations based on this model have
generated more realistic structural properties of software networks, such as power-law, clustering
and modularization. On the basis of this model, how the structure of software systems effects
software design principles is then explored, and it could be helpful for understanding software
evolution and software engineering practices.},
doi = {10.3390/e18050178},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/li2016.pdf:pdf},
issn = {10994300},
journal = {Entropy},
number = {5},
pages = {1--25},
volume = {18},
year = {2016},
internal-note = {Required fields author and title are missing -- recover from DOI 10.3390/e18050178.},
}
@article{Ghamisi2014,
author = {Ghamisi, Pedram and Couceiro, Micael S. and Martins, Fernando M.L. and Benediktsson,
Jon Atli},
doi = {10.1109/TGRS.2013.2260552},
isbn = {0196-2892},
issn = {01962892},
journal = {IEEE Transactions on Geoscience and Remote Sensing},
number = {5},
pages = {2382--2394},
title = {{Multilevel Image Segmentation Based on Fractional-Order Darwinian Particle Swarm Optimization}},
volume = {52},
year = {2014},
internal-note = {Journal and title recovered from DOI 10.1109/TGRS.2013.2260552 (ISSN matches TGRS) -- verify title wording against the DOI record.},
}
@article{Xie2018a,
author = {Xie, Bing and Gu, Xueqiang and Chen, Jing and Shen, Lin Cheng},
doi = {10.1177/1729881418813037},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1729881418813037.pdf:pdf},
issn = {17298814},
journal = {International Journal of Advanced Robotic Systems},
number = {6},
pages = {1--15},
title = {{A multi-responsibility–oriented coalition formation framework for dynamic task allocation in
mobile–distributed multi-agent systems}},
volume = {15},
year = {2018},
internal-note = {Journal added from ISSN 1729-8814 / DOI prefix 10.1177/17298814 -- verify.},
}
@article{Bures2020,
author = {Bures, Tomas and Gerostathopoulos, Ilias and Hnetynka, Petr and Plasil, Frantisek and Krijt,
Filip and Vinarek, Jiri and Kofron, Jan},
doi = {10.1007/s10009-020-00558-z},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Bures2020{\_}Article{\
_}ALanguageAndFrameworkForDynami.pdf:pdf},
isbn = {1000902000558},
issn = {14332787},
journal = {International Journal on Software Tools for Technology Transfer},
title = {{A language and framework for dynamic component ensembles in smart systems}},
url = {https://doi.org/10.1007/s10009-020-00558-z},
year = {2020},
internal-note = {Journal added from DOI journal id 10.1007/s10009 (STTT) -- verify. The isbn value looks like export junk; the url merely duplicates the DOI.},
}
@article{Hooda2014,
isbn = {9781479939145},
number = {978},
pages = {1621--1626},
title = {{Brain Tumor Segmentation: A Performance Analysis using K-Means, Fuzzy C-Means and Region Growing Algorithm}},
year = {2014},
internal-note = {Title punctuation repaired (stray spaces around ":" and ","). Required fields author and journal/booktitle are missing; the ISBN suggests IEEE conference proceedings -- recover from the publisher record.},
}
@article{Clarke1996a,
author = {Clarke, Edmund M. and Wing, Jeannette M. and others},
journal = {ACM Computing Surveys},
number = {4},
pages = {626--643},
title = {{Formal Methods: State of the Art and Future Directions}},
volume = {28},
year = {1996},
internal-note = {Author list was export-garbled ("Al, E T", "Co-chair, Edmund Clarke", committee members listed as authors); reduced to the credited authors plus "and others". Title/journal/number recovered from the well-known ACM Computing Surveys 28(4) survey -- verify.},
}
@article{Sayagh2018,
author = {Sayagh, Mohammed and Kerzazi, Noureddine and Adams, Bram and Petrillo, Fabio},
doi = {10.1109/TSE.2018.2867847},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/sayagh2018.pdf:pdf},
journal = {IEEE Transactions on Software Engineering},
number = {8},
pages = {1},
publisher = {IEEE},
volume = {PP},
year = {2018},
internal-note = {Journal added from DOI 10.1109/TSE.2018.2867847. Title is missing, and volume "PP" / pages "1" indicate an early-access record -- update once the final volume/pages are assigned.},
}
@article{Chen2016,
abstract = {In this work we address the task of semantic image segmentation with Deep Learning and
make three main contributions that are experimentally shown to have substantial practical merit.
First, we highlight convolution with upsampled filters, or 'atrous convolution', as a powerful tool in
dense prediction tasks. Atrous convolution allows us to explicitly control the resolution at which
feature responses are computed within Deep Convolutional Neural Networks. It also allows us to
effectively enlarge the field of view of filters to incorporate larger context without increasing the
number of parameters or the amount of computation. Second, we propose atrous spatial pyramid
pooling (ASPP) to robustly segment objects at multiple scales. ASPP probes an incoming
convolutional feature layer with filters at multiple sampling rates and effective fields-of-views, thus
capturing objects as well as image context at multiple scales. Third, we improve the localization of
object boundaries by combining methods from DCNNs and probabilistic graphical models. The
commonly deployed combination of max-pooling and downsampling in DCNNs achieves invariance
but has a toll on localization accuracy. We overcome this by combining the responses at the final
DCNN layer with a fully connected Conditional Random Field (CRF), which is shown both qualitatively
and quantitatively to improve localization performance. Our proposed "DeepLab" system sets the
new state-of-art at the PASCAL VOC-2012 semantic image segmentation task, reaching 79.7{\%}
mIOU in the test set, and advances the results on three other datasets: PASCAL-Context, PASCAL-
Person-Part, and Cityscapes. All of our code is made publicly available online.},
archivePrefix = {arXiv},
arxivId = {1606.00915},
author = {Chen, Liang-Chieh and Papandreou, George and Kokkinos, Iasonas and Murphy, Kevin and
Yuille, Alan L.},
doi = {10.1109/TPAMI.2017.2699184},
eprint = {1606.00915},
issn = {0162-8828},
pages = {1--14},
pmid = {28463186},
title = {{DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution,
and Fully Connected CRFs}},
url = {http://arxiv.org/abs/1606.00915},
year = {2016},
internal-note = {Entry mixes the 2016 arXiv preprint (eprint/url/year) with the 2017 TPAMI DOI; pick one version and fill in journal/volume accordingly.},
}
@article{Bouyer,
author = {Bouyer, Patricia and Fahrenberg, Uli and Larsen, Kim G and Markey, Nicolas and Worrell,
James},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/hmc2018-BFLMOW.pdf:pdf},
internal-note = {Title, year, and container are missing; the attached file name (hmc2018-BFLMOW) suggests a chapter of the Handbook of Model Checking (2018) -- verify and complete, then give the key a year suffix.},
}
@article{Mathew2015,
doi = {10.17148/IJARCCE.2015.41117},
journal = {International Journal of Advanced Research in Computer and Communication Engineering},
number = {11},
pages = {73--76},
title = {{Image Compression by using Morphological Operations and Edge-Based Segmentation
Technique}},
volume = {4},
year = {2015},
internal-note = {Journal added from DOI prefix 10.17148/IJARCCE. Required field author is missing -- recover from the DOI record.},
}
@article{Coronato2012,
doi = {10.1109/SASOW.2012.40},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Formal{\_}Modeling{\_}of{\_}Socio-
technical{\_}Collective{\_}Adap.pdf:pdf},
number = {September},
title = {{Formal Modeling of Socio-technical Collective Adaptive Systems}},
year = {2012},
internal-note = {Title recovered from the attached file name -- verify against DOI 10.1109/SASOW.2012.40. Author is missing, number = {September} is not an issue number, and the SASOW DOI suggests this should be @inproceedings with a booktitle.},
}
@article{Alsaryrah,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/PID5155529.pdf:pdf},
internal-note = {Entry contains only a local file path; all required fields (author, title, journal, year) are missing -- complete or delete this entry.},
}
@article{Nigro2018a,
abstract = {This paper proposes a novel approach to modelling and analysis of complex multi-agent
systems. The approach is based on actors and asynchronous message passing, and exploits the
UPPAAL statistical model checker (SMC) for the experiments. UPPAAL SMC is interesting because it
automates simulations by predicting the number of executions capable of ensuring a required
output accuracy, it uses statistical techniques (Monte Carlo-like simulations and sequential
hypothesis testing) for extracting quantitative measures from the simulation runs, and it offers a
temporal logic query language to express property queries tailored to the application needs. The
paper describes the approach, clarifies its structural translation on top of UPPAAL SMC, and
demonstrates its practical usefulness through modelling and analysis of a large scale and adaptive
version of the Iterated Prisoner's Dilemma (IPD) problem. The case study confirms known properties,
namely the emergence of cooperation under context preservation, that is when the player
interaction links are preserved during the game, but it also suggests some new quantitative
measures about the temporal behaviour which were not previously pointed out.},
author = {Nigro, Christian and Nigro, Libero and Sciammarella, Paolo F.},
doi = {10.1504/ijspm.2018.090275},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/nigro2018.pdf:pdf},
issn = {17402131},
journal = {International Journal of Simulation and Process Modelling},
number = {1},
pages = {73--87},
volume = {13},
year = {2018},
internal-note = {Journal added from DOI 10.1504/ijspm (ISSN matches IJSPM). Title is missing -- recover from the DOI record.},
}
@article{Kwiatkowskab,
title = {{PRISM-games 2.0: A Tool for Multi-Objective Strategy Synthesis for Stochastic Games}},
internal-note = {Title punctuation repaired ("2 . 0 :" was extraction garbage). Author, year, and venue are missing (this is a tool paper, likely conference proceedings) -- complete the entry and fix the key's trailing "b".},
}
@article{Collins2018,
abstract = {Most often, researchers model crowds as individuals rather than groups with social
cohesion. This approach diminishes the impact of altruism and other group-supporting behaviors.
For example, in real evacuation scenarios, some people will move counterintuitively towards danger
to help friends and loved ones. Current modeling approaches to capture group formation and
dynamics lack the strategic element required to model the complexity of human decision-making.
Game Theory provides a mechanism to introduce this strategic behavior. This paper shows the
investigation into strategic group formation through the introduction of cooperative Game Theory
techniques into an agent-based model (ABM). This means looking at the core, nucleolus, and the
Shapley value as opposed to Nash Equilibrium. This paper shows some analytical results as well as
empirical results from introducing the cooperative group formation into a simple agent-based
model. The results indicate the formation of large dominant groups— much like mobs in the real-
world. Additionally, heterogeneity can radically change the dynamics of group formation. Empirical
results demonstrate that the system does not always reach equilibrium, implying an inherent
unstabilizability in the formation of groups. Policymakers could benefit from this research through
the greater understanding of how humans navigate the social environment through strategic
interactions.},
doi = {10.1177/0037549717732408},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/collins{\_}frydenlund-
strategicABMSgroups-springsim216.pdf:pdf},
issn = {17413133},
journal = {Simulation},
number = {3},
pages = {179--193},
volume = {94},
year = {2018},
internal-note = {Required fields author and title are missing; the attached file name suggests authors Collins and Frydenlund -- recover from DOI 10.1177/0037549717732408.},
}
@article{Liu,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/liu2019.pdf:pdf},
isbn = {9781450371},
keywords = {multi-mode mobile parallel mechanism, planar 4R single-loop kinematic chains, spatial geometry, type synthesis},
title = {{Type Synthesis of Multi-mode Mobile Parallel Mechanism Based on Planar 4R Single-loop
Kinematic Chains}},
internal-note = {Title and keywords de-garbled (stray spaces around hyphens from PDF extraction). Author, year, and venue are missing and the ISBN is truncated (9 digits) -- complete the entry; file name suggests 2019.},
}
@inproceedings{Scekic2016,
abstract = {{\textcopyright} 2015 IEEE. Hybrid Diversity-aware Collective Adaptive Systems (HDA-
CAS) is a new generation of socio-technical systems where both humans and machine peers
complement each other and operate collectively to achieve their goals. These systems are
characterized by the fundamental properties of hybridity and collectiveness, hiding from users the
complexities associated with managing the collaboration and coordination of hybrid
human/machine teams. In this paper we present the key programming elements of the Smart
Society HDA-CAS platform. We first describe the overall platform's architecture and functionality and
then present concrete programming model elements - Collective-based Tasks (CBTs) and Collectives,
describe their properties and show how they meet the hybridity and collectiveness requirements.
We also describe the associated Java language constructs, and show how concrete use-cases can be
encoded with the introduced constructs.},
author = {Scekic, Ognjen and Schiavinotto, Tommaso and Diochnos, Dimitrios I. and Rovatsos,
Michael and Truong, Hong Linh and Carreras, Iacopo and Dustdar, Schahram},
booktitle = {Proceedings - 2015 IEEE Conference on Collaboration and Internet Computing, CIC 2015},
doi = {10.1109/CIC.2015.17},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/truong-cic2015-smartsoc.pdf:pdf},
isbn = {9781509000890},
keywords = {CAS,Collective adaptive systems,Crowdsourcing,Programming,Programming
model,Social computing,Socio-technical systems},
pages = {278--287},
year = {2016},
internal-note = {Retyped @article -> @inproceedings and moved the proceedings name from journal to booktitle. Title is missing -- recover from DOI 10.1109/CIC.2015.17 (note the DOI/year mismatch: CIC 2015 vs year 2016).},
}
@article{Gelaye2016,
author = {Gelaye, Bizu and Rondon, Marta and Araya, Ricardo and A, Prof Michelle},
doi = {10.1016/S2215-0366(16)30284-X},
isbn = {0002-9297},
issn = {2045-2322},
journal = {The Lancet Psychiatry},
number = {10},
pages = {973--982},
pmid = {28642624},
volume = {3},
year = {2016},
internal-note = {DOI repaired (trailing ".Epidemiology" was export junk) and journal added from the 2215-0366 journal id; removed bogus arXiv fields (archivePrefix/arxivId/eprint duplicated a numeric id). Last author "A, Prof Michelle" is still garbled and the honorific "Prof" does not belong in name fields -- verify the full author list; title is missing.},
}
@book{Hutchison1973,
abstract = {This book fills this void by including a collection of representative articles, which gives an
up-to-date and comprehensive snapshot of the Peer-to-Peer field. One of the main challenges that
faces any book covering such a vast and relatively new territory is how to structure the material. This
book resolves this conundrum by dividing the material into roughly three parts. The first part of the
book covers the basics of Peer-to-Peer designs, un- structured and structured systems, and presents
a variety of applications in- cluding e-mail, multicast, Grid computing, andWeb services. The book
then goes beyond describing traditional systems, by discussing general aspects of the Peer-to-Peer
systems, namely the self-organization nature of the Peer- to-Peer systems, and the all-important
topic of evaluating these systems. In addition, the book illustrates the broad applicability of Peer-to-
Peer by dis- cussing the impact of the Peer-to-Peer technologies in two computer-science areas,
namely searching and information retrieval, and mobile computing. No Peer-to-Peer book would be
complete without discussing the business model, accounting, and security. This book touches on
these topics in the last part.},
archivePrefix = {arXiv},
arxivId = {9780201398298},
doi = {10.1016/0020-7101(78)90038-7},
eprint = {9780201398298},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/formal-methods-for-industrial-critical-
systems-2012.pdf:pdf},
isbn = {9783642206290},
issn = {03029743},
number = {3},
pages = {242--242},
pmid = {4520227},
url = {http://www.mendeley.com/research/lecture-notes-computer-science-2/},
volume = {9},
year = {1973}
internal-note = {Record is an incoherent auto-merge: P2P-book abstract, a 1978 Elsevier DOI, a 1973 year, an ISBN-13 in the arXiv fields, an LNCS ISSN, and an unrelated attached PDF. No title or author. Re-import this reference from scratch.},
}
@inproceedings{Deshpande2014,
abstract = {The Domain Name System (DNS) is an Internet-wide, hierarchical naming system used to
translate domain names into numeric IP addresses. Any disruption of DNS service can have serious
consequences. We present a formal game-theoretic analysis of a notable threat to DNS, namely the
bandwidth amplification attack (BAA), and the countermeasures designed to defend against it. We
model the DNS BAA as a two-player, turn-based, zero-sum stochastic game between an attacker and
a defender. The attacker attempts to flood a victim DNS server with malicious traffic by choosing an
appropriate number of zombie machines with which to attack. In response, the defender chooses
among five BAA countermeasures, each of which seeks to increase the amount of legitimate traffic
the victim server processes. To simplify the model and optimize the analysis, our model does not
explicitly track the handling of each packet. Instead, our model is based on calculations of the rates
at which the relevant kinds of events occur in each state. We use our game-based model of DNS BAA
to generate optimal attack strategies, which vary the number of zombies, and optimal defense
strategies, which aim to enhance the utility of the BAA countermeasures by combining them in
advantageous ways. The goal of these strategies is to optimize the attacker's and defender's payoffs,
which are defined using probabilistic reward-based properties, and are measured in terms of the
attacker's ability to minimize the volume of legitimate traffic that is processed, and the defender's
ability to maximize the volume of legitimate traffic that is processed. {\textcopyright} 2014 IEEE.},
author = {Deshpande, Tushar and Katsaros, Panagiotis and Smolka, Scott A. and Stoller, Scott D.},
booktitle = {Proceedings - 2014 10th European Dependable Computing Conference, EDCC 2014},
doi = {10.1109/EDCC.2014.37},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/EDCC-2014.pdf:pdf},
isbn = {9781479938032},
pages = {226--237},
title = {{Stochastic game-based analysis of the DNS bandwidth amplification attack using probabilistic
model checking}},
year = {2014},
internal-note = {Retyped @article -> @inproceedings and moved the proceedings name from journal to booktitle (EDCC 2014 is a conference).},
}
@inproceedings{Kit2015,
abstract = {Recent advances in embedded devices capabilities and wireless networks paved the way
for creating ubiquitous Cyber-Physical Systems (CPS) grafted with self-configuring and self-adaptive
capabilities. As these systems need to strike a balance between dependability, open-endedness and
adaptability, and operate in dynamic and opportunistic environments, their design and development
is particularly challenging. We take an architecture-based approach to this problem and advocate
the use of component-based abstractions and related machinery to engineer self-adaptive CPS. Our
approach is structured around DEECo - a component framework that introduces the concept of
component ensembles to deal with the dynamicity of CPS at the middleware level. DEECo provides
the architecture abstractions of autonomous components and component ensembles on top of
which different adaptation techniques can be deployed. This makes DEECo a vehicle for seamless
experiments with self-adaptive systems where the physical distribution and mobility of nodes, and
the limited data availability play an important role.},
author = {Kit, Michal and Gerostathopoulos, Ilias and Bures, Tomas and Hnetynka, Petr and Plasil,
Frantisek},
booktitle = {Proceedings - 10th International Symposium on Software Engineering for Adaptive and
Self-Managing Systems, SEAMS 2015},
doi = {10.1109/SEAMS.2015.28},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]@2821374.pdf:pdf},
isbn = {9781479919345},
year = {2015},
internal-note = {Retyped @article -> @inproceedings and moved the proceedings name from journal to booktitle (SEAMS 2015 is a symposium). Title and pages are missing -- recover from DOI 10.1109/SEAMS.2015.28.},
}
@article{Pa1992,
author = {Clarke, Edmund M. and Grumberg, Orna and Long, David E.},
isbn = {0897914538},
pages = {343--354},
year = {1992},
internal-note = {Author list was extraction garbage ("Pa, Computer Mellon", "Computer, Orna", "Science, Grumberg"); the fragments, ISBN 0-89791-453-8 (POPL '92 proceedings) and pages 343-354 match Clarke, Grumberg and Long, "Model Checking and Abstraction" -- verify before relying on this reconstruction, then add the title and retype as @inproceedings.},
}
@book{Wing1995,
doi = {10.1145/222132.222148},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/model-checking-software-2018.pdf:pdf},
isbn = {9783319941103},
issn = {01635948},
number = {4},
pages = {128--139},
volume = {20},
year = {1995},
internal-note = {Record is inconsistent: a 1995 ACM DOI and ISSN (ACM SIGSOFT) combined with a 2018 Springer ISBN and PDF, typed @book but carrying volume/number/pages. No title or author. Re-import this reference from scratch.},
}
@article{Beek2018,
abstract = {The FORmal methods for the quantitative Evaluation of Collective Adaptive
SysTems(FORECAST), held in Vienna, Austria, on July 8, 2016 as a satellite event of the 4th federated
event on Software Technologies: Applications and Foundations (STAF 2016) was organized to raise
awareness in the software engineering and formal methods communities of the particularities of
CAS, and the design and control problems that they bring. The guest editors of this special issue
served as co-chairs of the workshop's Program Committee and were responsible for its proceedings.
FORECAST was sponsored by the FP7-ICT-600708 European project a Quantitative Approach to
Management and Design of Collective and Adaptive Behaviors (QUANTICOL) that ran from 2013 to
2017.},
doi = {10.1145/3177772},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/TOMACS18.pdf:pdf},
issn = {10493301},
journal = {ACM Transactions on Modeling and Computer Simulation},
number = {2},
pages = {1--4},
title = {{Guest Editorial for the Special Issue on FORmal methods for the quantitative Evaluation of
Collective Adaptive SysTems (FORECAST)}},
volume = {28},
year = {2018},
internal-note = {Journal added from ISSN 1049-3301 and the attached TOMACS18 file. Author (guest editors) is missing -- recover from DOI 10.1145/3177772.},
}
@article{Gaggioli2016,
author = {Gaggioli, Andrea and Ferscha, Alois and Riva, Giuseppe and Dunne, Stephen and Viaud-
Delmon, Isabelle},
doi = {10.1515/9783110471137-001},
year = {2016},
internal-note = {Removed duplicated author "Ferscha, Alois" (listed twice). Title and container are missing; the De Gruyter chapter DOI suggests this is an @incollection/@inbook -- recover from DOI 10.1515/9783110471137-001.},
}
@article{Wang2012,
doi = {10.1016/j.jmsy.2011.11.001},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/wang2012.pdf:pdf},
issn = {02786125},
journal = {Journal of Manufacturing Systems},
number = {2},
pages = {83--91},
url = {http://dx.doi.org/10.1016/j.jmsy.2011.11.001},
volume = {31},
year = {2012},
internal-note = {Journal added from DOI journal id 10.1016/j.jmsy (ISSN matches). Author and title are missing -- recover from the DOI record; the url merely duplicates the DOI.},
}
@article{Camara2018,
author = {C{\'{a}}mara, Javier and Schmerl, Bradley and Moreno, Gabriel A. and Garlan, David},
doi = {10.1007/s10515-018-0234-9},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {15737535},
journal = {Automated Software Engineering},
number = {3},
pages = {595--626},
title = {{MOSAICO: offline synthesis of adaptation strategy repertoires with flexible trade-offs}},
url = {https://doi.org/10.1007/s10515-018-0234-9},
volume = {25},
year = {2018},
internal-note = {Journal added from DOI journal id 10.1007/s10515 (ISSN matches Automated Software Engineering) -- verify.},
}
@article{Kravets2020,
abstract = {The problem of incentive training of multi-agent systems in the game formulation for
collective decision making under uncertainty is considered. Methods of incentive training do not
require a mathematical model of the environment and enable decision making directly in the
training process. Markov model of stochastic game is constructed and the criteria for its solution are
formulated. An iterative Q-method for solving a stochastic game based on the numerical
identification of a characteristic function of a dynamic system in space of state-action is described.
Players' current gains are determined by the method of randomization of payment Q-matrix
elements. Mixed player strategies are calculated using the Boltzmann method. Pure strategies are
determined on the basis of discrete random distributions given by mixed player strategies. The
algorithm for stochastic game solving is developed and results of computer implementation of game
Q-method are analyzed.},
author = {Kravets, Petro and Lytvyn, Vasyl and Vysotska, Victoria and Burov, Yevhen},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/paper28.pdf:pdf},
issn = {16130073},
journal = {CEUR Workshop Proceedings},
pages = {364--378},
volume = {2608},
year = {2020},
internal-note = {Journal added from ISSN 1613-0073 (CEUR Workshop Proceedings; volume 2608 is the CEUR volume number) -- verify. Title is missing.},
}
@article{Moraglio2010,
doi = {10.1162/EVCO},
isbn = {1063-6560},
issn = {1530-9304},
number = {4},
pages = {591--624},
pmid = {23270388},
url = {http://www.cs.kent.ac.uk/pubs/2010/2993},
volume = {21},
year = {2010},
internal-note = {DOI is truncated (10.1162/EVCO is only the journal stub for Evolutionary Computation, MIT Press); author, title, and journal are missing, and year 2010 conflicts with volume 21 (published 2013) -- re-import from the publisher record.},
}
@article{Taormina2015,
abstract = {Selecting an adequate set of inputs is a critical step for successful data-driven streamflow
prediction. In this study, we present a novel approach for Input Variable Selection (IVS) that employs
Binary-coded discrete Fully Informed Particle Swarm optimization (BFIPS) and Extreme Learning
Machines (ELM) to develop fast and accurate IVS algorithms. A scheme is employed to encode the
subset of selected inputs and ELM specifications into the binary particles, which are evolved using
single objective and multi-objective BFIPS optimization (MBFIPS). The performances of these ELM-
based methods are assessed using the evaluation criteria and the datasets included in the
comprehensive IVS evaluation framework proposed by Galelli et al. (2014). From a comparison with
4 major IVS techniques used in their original study it emerges that the proposed methods compare
very well in terms of selection accuracy. The best performers were found to be (1) a MBFIPS-ELM
algorithm based on the concurrent minimization of an error function and the number of selected
inputs, and (2) a BFIPS-ELM algorithm based on the minimization of a variant of the Akaike
Information Criterion (AIC). The first technique is arguably the most accurate overall, and is able to
reach an almost perfect specification of the optimal input subset for a partially synthetic rainfall-
runoff experiment devised for the Kentucky River basin. In addition, MBFIPS-ELM allows for the
determination of the relative importance of the selected inputs. On the other hand, the BFIPS-ELM is
found to consistently reach high accuracy scores while being considerably faster. By extrapolating
the results obtained on the IVS test-bed, it can be concluded that the proposed techniques are
particularly suited for rainfall-runoff modeling applications characterized by high nonlinearity in the
catchment dynamics.},
doi = {10.1016/j.jhydrol.2015.08.022},
isbn = {0022-1694},
issn = {00221694},
journal = {Journal of Hydrology},
pages = {1617--1632},
title = {{Data-driven input variable selection for rainfall-runoff modeling using binary-coded particle
swarm optimization and Extreme Learning Machines}},
url = {http://dx.doi.org/10.1016/j.jhydrol.2015.08.022},
volume = {529},
year = {2015},
internal-note = {Journal added from DOI journal id 10.1016/j.jhydrol (ISSN matches). Author is missing -- recover from the DOI record.},
}
@article{Oliva2013,
abstract = {In this paper, a multilevel thresholding (MT) algorithm based on the harmony search
algorithm (HSA) is introduced. HSA is an evolutionary method which is inspired in musicians
improvising new harmonies while playing. Different to other evolutionary algorithms, HSA exhibits
interesting search capabilities still keeping a low computational overhead. The proposed algorithm
encodes random samples from a feasible search space inside the image histogram as candidate
solutions, whereas their quality is evaluated considering the objective functions that are employed
by the Otsu's or Kapur's methods. Guided by these objective values, the set of candidate solutions
are evolved through the HSA operators until an optimal solution is found. Experimental results
demonstrate the high performance of the proposedmethod for the segmentation of digital images.},
author = {Oliva, Diego and Cuevas, Erik and Pajares, Gonzalo and Zaldivar, Daniel and Perez-Cisneros,
Marco},
doi = {10.1155/2013/575414},
issn = {1110757X},
journal = {Journal of Applied Mathematics},
volume = {2013},
year = {2013},
internal-note = {Journal added from ISSN 1110-757X / Hindawi DOI prefix 10.1155 -- verify. Title is missing -- recover from DOI 10.1155/2013/575414.},
}
@article{Khatkar2014,
number = {9},
pages = {468--472},
volume = {3},
year = {2014},
internal-note = {All identifying fields (author, title, journal, doi) are missing; only volume/number/pages remain -- complete from the original source or delete this entry.},
}
@book{Baier2008,
abstract = {Our growing dependence on increasingly complex computer and software systems
necessitates the development of formalisms, techniques, and tools for assessing functional
properties of these systems. One such technique that has emerged in the last twenty years is model
checking, which systematically (and automatically) checks whether a model of a given system
satisfies a desired property such as deadlock freedom, invariants, and request-response properties.
This automated technique for verification and debugging has developed into a mature and widely
used approach with many applications. Principles of Model Checking offers a comprehensive
introduction to model checking that is not only a text suitable for classroom use but also a valuable
reference for researchers and practitioners in the field. The book begins with the basic principles for
modeling concurrent and communicating systems, introduces different classes of properties
(including safety and liveness), presents the notion of fairness, and provides automata-based
algorithms for these properties. It introduces the temporal logics LTL and CTL, compares them, and
covers algorithms for verifying these logics, discussing real-time systems as well as systems subject
to random phenomena. Separate chapters treat such efficiency-improving techniques as abstraction
and symbolic manipulation. The book includes an extensive set of examples (most of which run
through several chapters) and a complete set of basic results accompanied by detailed proofs. Each
chapter concludes with a summary, bibliographic notes, and an extensive list of exercises of both
practical and theoretical nature.Christel Baier is Professor and Chair for Algebraic and Logical
Foundations of Computer Science in the Faculty of Computer Science at the Technical University of
Dresden. Joost-Pieter Katoen is Professor at the RWTH Aachen University and leads the Software
Modeling and Verification Group within the Department of Computer Science.},
address = {Cambridge, MA},
author = {Baier, Christel and Katoen, Joost-Pieter},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/{\_}principles{\_}of{\_}model{\_}checking.pdf:pdf},
isbn = {978-0-262-02649-9},
publisher = {MIT Press},
title = {{Principles of Model Checking}},
year = {2008}
}
@article{Talcott2016,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/talcott.pdf:pdf},
number = {June},
year = {2016}
}
@inproceedings{Bondi2000,
abstract = {Scalability is a desirable attribute of a network, system, or process. Poor scalability can
result in poor system performance, necessitating the reengineering or duplication of systems. While
scalability is valued, its characteristics and the characteristics that undermine it are usually only
apparent from the context. Here, we attempt to define different aspects of scalability, such as
structural scalability and load scalability. Structural scalability is the ability of a system to expand in a
chosen dimension without major modifications to its architecture. Load scalability is the ability of a
system to perform gracefully as the offered traffic increases. It is argued that systems with poor load
scalability may exhibit it because they repeatedly engage in wasteful activity, because they are
encumbered with poor scheduling algorithms, because they cannot fully take advantage of
parallelism, or because they are algorithmically inefficient. We qualitatively illustrate these concepts
with classical examples from the literature of operating systems and local area networks, as well as
an example of our own. Some of these are accompanied by rudimentary delay analysis.},
author = {Bondi, Andr{\'{e}} B.},
booktitle = {Proceedings Second International Workshop on Software and Performance WOSP 2000},
doi = {10.1145/350391.350432},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/350391.350432.pdf:pdf},
isbn = {158113195X},
pages = {195--203},
title = {{Characteristics of Scalability and Their Impact on Performance}},
year = {2000}
}
@inproceedings{Ng2001,
author = {Ng, H. P. and Ong, S. H. and Foong, K. W. C. and Goh, P. S. and Nowinski, W. L.},
doi = {10.1109/SSIAI.2006.1633722},
isbn = {1424400694},
pages = {61--65},
title = {{Medical Image Segmentation Using K-Means Clustering and Improved Watershed
Algorithm}},
year = {2006}
}
@inproceedings{Calinescu2009,
abstract = {The software underpinning today's IT systems needs to adapt dynamically and
predictably to rapid changes in system workload, environment and objectives. We describe a
software framework that achieves such adaptiveness for IT systems whose components can be
modelled as Markov chains. The framework comprises (i) an autonomic architecture that uses
Markov-chain quantitative analysis to dynamically adjust the parameters of an IT system in line with
its state, environment and objectives; and (ii) a method for developing instances of this architecture
for real-world systems. Two case studies are presented that use the framework successfully for the
dynamic power management of disk drives, and for the adaptive management of cluster availability
within data centres, respectively.},
author = {Calinescu, Radu and Kwiatkowska, Marta},
booktitle = {Proceedings of the 31st International Conference on Software Engineering (ICSE 2009)},
doi = {10.1109/ICSE.2009.5070512},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/calinescu2009.pdf:pdf},
isbn = {9781424434527},
issn = {02705257},
pages = {100--110},
title = {{Using Quantitative Analysis to Implement Autonomic IT Systems}},
year = {2009}
}
@article{February2015,
title = {{NANOS / AGS Collaborative Session Glaucoma: The Other Optic Neuropathy}},
year = {2015}
}
@inproceedings{Baresi2011,
abstract = {Distributed systems comprise a significant number of entities that must be properly
coordinated to reach a goal. These systems present high turnover of elements, and demand for
solutions that keep their coordination as decentralized as possible to avoid bottlenecks. The paper
discusses why it is important to address these characteristics from a system's conception and
proposes A-3, an innovative architectural solution that adopts the concept of group as an abstraction
for organizing an application into semi-independent slices, providing a single and coherent view of
these aggregates, and coordinating the interactions inside and among groups. The paper presents
the A-3 model and defines it as an innovative architectural style, describes a Java-based framework
that supports A-3 and provides users with the proper means to exploit the style, and exemplifies all
the main concepts on a simple scenario where autonomous robotic vacuum cleaners are
coordinated to properly clean a museum.},
booktitle = {Proceedings - 9th Working IEEE/IFIP Conference on Software Architecture, WICSA 2011},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/baresi2011.pdf:pdf},
isbn = {9780769543512},
pages = {161--170},
year = {2011}
}
@article{Kwiatkowska2011,
pages = {585--591},
year = {2011}
}
@article{Milletari2016,
abstract = {Convolutional Neural Networks (CNNs) have been recently employed to solve problems
from both the computer vision and medical image analysis fields. Despite their popularity, most
approaches are only able to process 2D images while most medical data used in clinical practice
consists of 3D volumes. In this work we propose an approach to 3D image segmentation based on a
volumetric, fully convolutional, neural network. Our CNN is trained end-to-end on MRI volumes
depicting prostate, and learns to predict segmentation for the whole volume at once. We introduce
a novel objective function, that we optimise during training, based on Dice coefficient. In this way we
can deal with situations where there is a strong imbalance between the number of foreground and
background voxels. To cope with the limited number of annotated volumes available for training, we
augment the data applying random non-linear transformations and histogram matching. We show in
our experimental evaluation that our approach achieves good performances on challenging test data
while requiring only a fraction of the processing time needed by other previous methods.},
archivePrefix = {arXiv},
arxivId = {1606.04797},
doi = {10.1109/3DV.2016.79},
eprint = {1606.04797},
pages = {1--11},
title = {{V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation}},
url = {http://arxiv.org/abs/1606.04797},
year = {2016}
}
@article{Maheshwari2017,
author = {Maheshwari, Shishir and Pachori, Ram Bilas and Acharya, U. Rajendra},
doi = {10.1109/JBHI.2016.2544961},
issn = {21682194},
journal = {IEEE Journal of Biomedical and Health Informatics},
number = {3},
pages = {803--813},
title = {{Automated Diagnosis of Glaucoma Using Empirical Wavelet Transform and Correntropy
Features Extracted from Fundus Images}},
volume = {21},
year = {2017}
}
@article{Chang1999,
abstract = {Glaucomas are a major cause of blindness. Visual loss typically involves retinal ganglion
cell death and optic nerve atrophy subsequent to a pathologic elevation of intraocular pressure
(IOP). Some human glaucomas are associated with anterior segment abnormalities such as pigment
dispersion syndrome (PDS) and iris atrophy with associated synechiae. The primary causes of these
abnormalities are unknown, and their aetiology is poorly understood. We recently characterized a
mouse strain (DBA/2J) that develops glaucoma subsequent to anterior segment changes including
pigment dispersion and iris atrophy. Using crosses between mouse strains DBA/2J (D2) and C57BL/6J
(B6), we now show there are two chromosomal regions that contribute to the anterior segment
changes and glaucoma. Progeny homozygous for the D2 allele of one locus on chromosome 6 (called
ipd) develop an iris pigment dispersion phenotype similar to human PDS. ipd resides on a region of
mouse chromosome 6 with conserved synteny to a region of human chromosome 7q that is
associated with human PDS. Progeny homozygous for the D2 allele of a different locus on
chromosome 4 (called isa) develop an iris stromal atrophy phenotype (ISA). The Tyrpl gene is a
candidate for isa and likely causes ISA via a mechanism involving pigment production. Progeny
homozygous for the D2 alleles of both ipd and isa develop an earlier onset and more severe disease
involving pigment dispersion and iris stromal atrophy.},
author = {Chang, B and Smith, R S and Hawes, N L and Anderson, M G and Zabaleta, A and Savinova,
O and Roderick, T H and Heckenlively, J R and Davisson, M T and John, S W},
doi = {10.1038/7741},
issn = {1061-4036},
journal = {Nature Genetics},
number = {4},
pages = {405--409},
pmid = {10192392},
title = {{Interacting loci cause severe iris atrophy and glaucoma in DBA/2J mice.}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/10192392},
volume = {21},
year = {1999}
}
@article{Abo-zahhad2014,
abstract = {Edge detection is the process of determining where boundaries of objects fall within an
image. So far, several standard operators-based methods have been widely used for edge detection.
However, due to inherent quality of images, these methods prove ineffective if they are applied
without any preprocessing. In this paper, an image preprocessing approach has been adopted in
order to get certain parameters that are useful to perform better edge detection with the standard
operators-based edge detection methods. The proposed preprocessing approach involves
computation of the histogram, finding out the total number of peaks and suppressing irrelevant
peaks. From the intensity values corresponding to relevant peaks, threshold values are obtained.
From these threshold values, optimal multilevel thresholds are calculated using the Otsu method,
then multilevel image segmentation is carried out. Finally, a standard edge detection method can be
applied to the resultant segmented image. Simulation results are presented to show that our
preprocessed approach when used with a standard edge detection method enhances its
performance. It has been also shown that applying wavelet edge detection method to the
segmented images, generated through our preprocessing approach, yields the superior performance
among other standard edge detection methods.},
author = {Abo-zahhad, Mohamed and Gharieb, Reda Ragab and Ahmed, Sabah M},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Article For FYP/JSIP{\_}2014101511513442.pdf:pdf},
number = {November},
pages = {123--134},
year = {2014}
}
@article{Mwanza2017,
author = {Mwanza, Jean-claude and Oakley, Jonathan D and Budenz, Donald L and Chang, Robert T
and Knight, O Rese J and Feuer, William J},
doi = {10.1167/iovs.11-7962},
journal = {Investigative Ophthalmology {\&} Visual Science},
number = {11},
title = {{Macular Ganglion Cell--Inner Plexiform Layer: Automated Detection and Thickness
Reproducibility with Spectral Domain--Optical Coherence Tomography in Glaucoma}},
volume = {52},
year = {2011}
}
@article{Sabatucci2018,
doi = {10.1007/978-3-319-59480-4},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ims17-029.pdf:pdf},
isbn = {9783319594804},
number = {May},
title = {{The Four Types of Self-Adaptive Systems: A Metamodel}},
year = {2018}
}
@article{Berthomieu2015,
abstract = {We describe a model-checking toolchain for the behavioral verification of AADL models
that takes into account the realtime semantics of the language and that is compatible with the AADL
Behavioral Annex. We give a high-level view of the tools and transformations involved in the
verification process and focus on the support offered by our framework for checking user-defined
properties. We also describe the experimental results obtained on a significant avionic
demonstrator, that models a network protocol in charge of data communications between an
airplane and ground stations.},
archivePrefix = {arXiv},
arxivId = {1503.00493},
author = {Berthomieu, B. and Bodeveix, J.-P. and {Dal Zilio}, S. and Filali, M. and {Le Botlan}, D. and
Verdier, G. and Vernadat, F.},
eprint = {1503.00493},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1503.00493.pdf:pdf},
url = {http://arxiv.org/abs/1503.00493},
year = {2015}
}
@article{Viroli2018a,
abstract = {Collective adaptive systems are an emerging class of networked computational systems
particularly suited for application domains such as smart cities, complex sensor networks, and the
Internet of Things. These systems tend to feature large-scale, heterogeneity of communication
model (including opportunistic peer-to-peer wireless interaction) and require inherent self-
adaptiveness properties to address unforeseen changes in operating conditions. In this context, it is
extremely difficult (if not seemingly intractable) to engineer reusable pieces of distributed behaviour
to make them provably correct and smoothly composable. Building on the field calculus, a
computational model (and associated toolchain) capturing the notion of aggregate network-level
computation, we address this problem with an engineering methodology coupling formal theory and
computer simulation. On the one hand, functional properties are addressed by identifying the
largest-to-date field calculus fragment generating self-stabilising behaviour, guaranteed to
eventually attain a correct and stable final state despite any transient perturbation in state or
topology and including highly reusable building blocks for information spreading, aggregation, and
time evolution. On the other hand, dynamical properties are addressed by simulation, empirically
evaluating the different performances that can be obtained by switching between implementations
of building blocks with provably equivalent functional properties. Overall, our methodology sheds
light on how to identify core building blocks of collective behaviour and how to select
implementations that improve system performance while leaving overall system function and
resiliency properties unchanged.},
archivePrefix = {arXiv},
arxivId = {1711.08297},
author = {Viroli, Mirko and Audrito, Giorgio and Beal, Jacob and Damiani, Ferruccio and Pianini,
Danilo},
doi = {10.1145/3177774},
eprint = {1711.08297},
file = {:C$\backslash$:/Users/Asus/Downloads/viroli2018.pdf:pdf},
issn = {15581195},
journal = {ACM Transactions on Modeling and Computer Simulation},
number = {2},
title = {{Engineering Resilient Collective Adaptive Systems by Self-Stabilisation}},
volume = {28},
year = {2018}
}
@article{Lopez-Benitez2011,
abstract = {The spectrum occupancy models widely used to date in dynamic spectrum
access/cognitive radio (DSA/CR) research frequently rely on assumptions and oversimplifications
that have not been validated with empirical measurement data. In this context, this paper presents
an empirical time-dimension model of spectrum use that is appropriate for DSA/CR studies.
Concretely, a two-state discrete-time Markov chain with novel deterministic and stochastic duty
cycle models is proposed as an adequate mean to accurately describe spectrum occupancy in the
time domain. The validity and accuracy of the proposed modeling approach is evaluated and
corroborated with extensive empirical data from a multiband spectrum measurement campaign. The
obtained results demonstrate that the proposed approach is able to accurately capture and
reproduce the relevant statistical properties of spectrum use observed in real-world channels of
various radio technologies. The importance of accurately modeling spectrum use in the design and
evaluation of novel DSA/CR techniques is highlighted with a practical case study. {\textcopyright}
2011 IEEE.},
author = {L{\'{o}}pez-Ben{\'{i}}tez, Miguel and Casadevall, Fernando},
doi = {10.1109/TVT.2011.2157372},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/lopez-benitez2011.pdf:pdf},
issn = {00189545},
journal = {IEEE Transactions on Vehicular Technology},
number = {6},
pages = {2519--2533},
title = {{Empirical time-dimension model of spectrum use based on a discrete-time Markov chain
with deterministic and stochastic duty cycle models}},
volume = {60},
year = {2011}
}
@article{Xu2019,
abstract = {This paper presents a novel design approach to systematically synthesize available
configurations for dedicated hybrid transmission (DHT) systems subject to design constraints and
required operation modes by using simple planetary gear sets (PGSs). The configuration synthesis
process includes two main steps. The first step is the synthesis of the PGSs by synthesizing all the
components to a simple PGS subject to the design constraints. The second step is to combine the
structural and shift elements into all configurations and detect those meeting the requirements with
the mechanical and operation mode constraints. By applying the proposed design approach, the
configurations of the Toyota's hybrid systems (THSs) and Voltec-II prove the feasibility of the
method. Furthermore, several new DHT configurations are synthesized under the new design
conditions. The proposed design approach is capable of systematically synthesizing new DHT
systems with multiple PGSs, variable design constraints, and expected modes.},
author = {Xu, Xiangyang and Sun, Hanqiao and Liu, Yanfang and Dong, Peng},
doi = {10.1115/1.4042846},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/xu2019.pdf:pdf},
issn = {10500472},
journal = {Journal of Mechanical Design},
number = {9},
pages = {1--14},
title = {{Automatic Enumeration of Feasible Configuration for the Dedicated Hybrid Transmission
with Multi-Degree-of-Freedom and Multiplanetary Gear Set}},
volume = {141},
year = {2019}
}
@inproceedings{Medvidovic2002,
abstract = {Software architectures provide high-level abstractions for representing the structure,
behavior, and key properties of software systems. Various architecture description languages, styles,
tools, and technologies have emerged over the past decade. At the same time, there has been
comparatively little focus on techniques and technologies for transforming architectural models into
running systems. This often results in significant differences between conceptual and concrete
architectures, rendering system evolution and maintenance difficult. Furthermore, it calls into
question the ability of developers to consistently transfer the key architectural properties into
system implementations. One solution to this problem is to employ architectural frameworks.
Architectural frameworks provide support for implementing, deploying, executing, and evolving
software architectures. This paper describes the design of and our experience with a family of
architectural frameworks that support implementation of systems in a specific architectural style-C2.
To date, the C2 frameworks have been used in the development of over 100 applications by several
academic and industrial organizations. The paper discusses the issues we have encountered in
implementing and using the frameworks, as well as the approaches adopted to resolve these
issues.},
doi = {10.1007/978-0-387-35607-5_14},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Medvidovic2002{\_}Chapter{\_}AFamilyOfSoftwareArchitectureI.pdf:pdf},
isbn = {9780387356075},
pages = {221--235},
title = {{A Family of Software Architecture Implementation Frameworks}},
year = {2002}
}
@article{Marin2011,
author = {Mar{\'{i}}n, Diego and Aquino, Arturo and Geg{\'{u}}ndez-Arias, Manuel Emilio and Bravo,
Jos{\'{e}} Manuel},
journal = {IEEE Transactions on Medical Imaging},
number = {1},
pages = {146--158},
title = {{A New Supervised Method for Blood Vessel Segmentation in Retinal Images by Using
Gray-Level and Moment Invariants-Based Features}},
volume = {30},
year = {2011}
}
@article{Kulkarni1995,
number = {94},
volume = {2217},
year = {1995}
}
@article{Brunner2018,
abstract = {We present a formally verified and executable on-the-fly LTL model checker that uses
ample set partial order reduction. The verification is done using the proof assistant Isabelle/HOL
and covers everything from the abstract correctness proof down to the generated SML code.
Building on Doron Peled's paper ``Combining Partial Order Reductions with On-the-Fly
Model-Checking'', we formally prove abstract correctness of ample set partial order reduction. This
theorem is independent of the actual reduction algorithm. We then verify a reduction algorithm for
a simple but expressive fragment of Promela. We use static partial order reduction, which allows
separating the partial order reduction and the model checking algorithms regarding both the
correctness proof and the implementation. Thus, the Cava model checker that we verified in
previous work can be used as a back end with only minimal changes. Finally, we generate executable
SML code using a stepwise refinement approach. We test our model checker on some examples,
observing the effectiveness of the partial order reduction algorithm.},
doi = {10.1007/s10817-017-9418-4},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/JAR17{\_}POR.pdf:pdf},
issn = {15730670},
journal = {Journal of Automated Reasoning},
number = {1},
pages = {3--21},
title = {{Formal Verification of an Executable LTL Model Checker with Partial Order Reduction}},
volume = {60},
year = {2018}
}
@article{Hensel2007,
archivePrefix = {arXiv},
arxivId = {arXiv:2002.07080v1},
author = {Hensel, Christian and Junges, Sebastian and Feb, S E and Clarke, Edmund M and Emerson,
E Allen},
eprint = {arXiv:2002.07080v1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2002.07080.pdf:pdf},
year = {2007}
}
@article{Figus2016,
author = {Figus, Michele and Melamed, Shlomo and Ferreras, Antonio and Marchini, Giorgio and
Costa, Vital P},
pages = {2--4},
volume = {2016},
year = {2016}
}
@article{Johnson2016,
abstract = {The rise of automation in many systems; and technology ubiquity in general, present
some complex operational environments that require highly collaborative Complex Adaptive
Systems of Systems (CASoS) solutions. This paper describes the need to engineer CASoS and explores
how they may be applied to address complex problems. This effort builds on a developing body of
knowledge in complex systems and focuses on understanding characteristics and measures of CASoS
with an ultimate goal on developing engineered CASoS. The implications of deeper CASoS
understanding hold potential for more effective future responses to naturally-occurring and
adversarial CASoS. Thus, there stands much to gain in increasing this body of knowledge.},
doi = {10.1016/j.procs.2016.09.293},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S1877050916324656-main.pdf:pdf},
issn = {18770509},
journal = {Procedia Computer Science},
pages = {58--65},
url = {http://dx.doi.org/10.1016/j.procs.2016.09.293},
volume = {95},
year = {2016}
}
@article{Svore2016,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/223c9497b91718e83e28006b9fa2403c.pdf:pdf},
pages = {3580},
title = {{Quantitative verification and strategy synthesis for stochastic games}},
volume = {30},
year = {2016}
}
@article{Jagustovic2019,
abstract = {Climate-smart agriculture (CSA) conceptually has the potential to contribute to the
sustainable development goals of achieving zero hunger, reducing land degradation, eliminating
poverty, tackling climate change, and promoting gender equality. The scaling-up needed to achieve
goals of CSA represents a challenge, as it entails understanding synergies between often opposing
socioeconomic and environmental priorities and trade-offs over temporal and spatial scales. In this
paper, we tested new approaches to support scaling-up of sustainable food production through
investigating the contribution of systems thinking as a conceptual approach and complex adaptive
system (CAS) attributes as a framework for analysis of CSA. This was done through examining (i) to
what extent CSA represents a CAS and (ii) what contribution systems thinking and CAS attributes can
make to understanding and scaling-up sustainable food production systems through CSA. The CSA
situation was conceptualized through systems thinking sessions with women farmers in the climate-
smart village (CSV) of Doggoh-Jirapa, northern Ghana, and was guided by the Distinctions, Systems,
Relationships and Perspectives (DSRP) framework. Systems thinking, and CAS attributes provide
system-wide understanding of elements, dynamics and trade-offs over temporal and spatial scale in
selected agri-food systems. As such it could aid horizontal and vertical scaling-up by informing policy
development and selection of a context-specific portfolio of technologies and practices at
landscape and farm levels to achieve synergies between goals. In this study, systems thinking
enabled women farmers in the CSV to identify income-generating and tree planting activities, with
desirable simultaneous system-wide impact. The paper calls for further testing of tools, approaches,
and methods that enable dynamic systems thinking to inform scaling-up efforts, while embracing the
transdisciplinary nature and complexity of CSA as a constituent of the food production system.},
author = {Jagustovi{\'{c}}, Renata and Zougmor{\'{e}}, Robert B. and Kessler, Aad and Ritsema, Coen
J. and Keesstra, Saskia and Reynolds, Martin},
doi = {10.1016/j.agsy.2018.12.008},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/PDF{\_}58925.pdf:pdf},
issn = {0308521X},
journal = {Agricultural Systems},
pages = {65--75},
title = {{Contribution of systems thinking and complex adaptive system attributes to sustainable food
production: Example from a climate-smart village}},
volume = {171},
year = {2019}
}
@article{Liu2016,
doi = {10.1007/s11042-016-4265-6},
file = {:C$\backslash$:/Users/Asus/Downloads/RGR.pdf:pdf},
issn = {1380-7501},
journal = {Multimedia Tools and Applications},
title = {{Hand gesture recognition based on concentric circular scan lines and weighted K-nearest
neighbor algorithm}},
url = {http://dx.doi.org/10.1007/s11042-016-4265-6},
year = {2016}
}
@article{Abdelsamea2011,
abstract = {In this paper, an automatic seeded region growing algorithm is proposed for cellular
image segmentation. First, the regions of interest (ROIs) extracted from the preprocessed image.
Second, the initial seeds are automatically selected based on ROIs extracted from the image. Third,
the most representative seeds are selected using a machine learning algorithm. Finally, the cellular
image is segmented into regions where each region corresponds to a seed. The aim of the proposed
algorithm is to automatically extract the Region of Interests (ROI) from the cellular images in terms of
overcoming the explosion, under segmentation and over segmentation problems. Experimental
results show that the proposed algorithm can improve the segmented image and the segmented
results are less noisy as compared to some existing algorithms.},
pages = {1--5},
title = {{An Automatic Seeded Region Growing for 2D Biomedical Image Segmentation}},
volume = {21},
year = {2011}
}
@article{Tribastone2016,
doi = {10.4204/eptcs.217.8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1607.02966.pdf:pdf},
journal = {Electronic Proceedings in Theoretical Computer Science},
pages = {62--68},
volume = {217},
year = {2016}
}
@article{Ciller2017,
abstract = {Retinoblastoma and uveal melanoma are fast spreading eye tumors usually diagnosed by
using 2D Fundus Image Photography (Fundus) and 2D Ultrasound (US). Diagnosis and treatment
planning of such diseases often require additional complementary imaging to confirm the tumor
extend via 3D Magnetic Resonance Imaging (MRI). In this context, having automatic segmentations
to estimate the size and the distribution of the pathological tissue would be advantageous towards
tumor characterization. Until now, the alternative has been the manual delineation of eye
structures, a rather time consuming and error-prone task, to be conducted in multiple MRI
sequences simultaneously. This situation, and the lack of tools for accurate eye MRI analysis, reduces
the interest in MRI beyond the qualitative evaluation of the optic nerve invasion and the
confirmation of recurrent malignancies below calcified tumors. In this manuscript, we propose a new
framework for the automatic segmentation of eye structures and ocular tumors in multi-sequence
MRI. Our key contribution is the introduction of a pathological eye model from which Eye Patient-
Specific Features (EPSF) can be computed. These features combine intensity and shape information
of pathological tissue while embedded in healthy structures of the eye. We assess our work on a
dataset of pathological patient eyes by computing the Dice Similarity Coefficient (DSC) of the sclera,
the cornea, the vitreous humor, the lens and the tumor. In addition, we quantitatively show the
superior performance of our pathological eye model as compared to the segmentation obtained by
using a healthy model (over 4{\%} DSC) and demonstrate the relevance of our EPSF, which improve
the final segmentation regardless of the classifier employed.},
author = {Ciller, Carlos and {De Zanet}, Sandro and Kamnitsas, Konstantinos and Maeder, Philippe
and Glocker, Ben and Munier, Francis L. and Rueckert, Daniel and Thiran, Jean Philippe and Cuadra,
Meritxell Bach and Sznitman, Raphael},
doi = {10.1371/journal.pone.0173900},
issn = {19326203},
journal = {PLoS ONE},
number = {3},
pages = {1--14},
pmid = {28350816},
title = {{Multi-channel MRI segmentation of eye structures and tumors using patient-specific
features}},
volume = {12},
year = {2017}
}
@inproceedings{Misra2012,
abstract = {Internet of Things (IOT) is a wireless ad-hoc network of everyday objects collaborating
and cooperating with one other in order to accomplish some shared objectives. The envisioned high
degrees of association of humans with IOT nodes require equally high degrees of reliability of the
network. In order to render this reliability to IOT networks, it is necessary to make them tolerant to
faults. In this paper, we propose mixed cross-layered and learning automata (LA)-based fault-
tolerant routing protocol for IOTs, which assures successful delivery of packets even in the presence
of faults between a pair of source and destination nodes. As this work concerns IOT, the algorithm
designed should be highly scalable and should be able to deliver high degrees of performance in a
heterogeneous environment. The LA and cross-layer concepts adopted in the proposed approach
endow this flexibility to the algorithm so that the same standard can be used across the network. It
dynamically adopts itself to the changing environment and, hence, chooses the optimal action. Since
energy is a major concern in IOTs, the algorithm performs energy-aware fault-tolerant routing. To
save on energy, all the nodes lying in the unused path are put to sleep. Again this sleep scheduling is
dynamic and adaptive. The simulation results of the proposed strategy shows an increase in the
overall energy-efficiency of the network and decrease in overhead, as compared to the existing
protocols we have considered as benchmarks in this study. {\textcopyright} 2012 IEEE.},
author = {Misra, Sudip and Gupta, Anshima and Krishna, P. Venkata and Agarwal, Harshit and
Obaidat, Mohammad S.},
booktitle = {2012 IEEE Wireless Communications and Networking Conference (WCNC)},
doi = {10.1109/WCNC.2012.6214484},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}4.pdf:pdf},
isbn = {9781467304375},
issn = {15253511},
pages = {815--819},
title = {{An adaptive learning approach for fault-tolerant routing in Internet of Things}},
year = {2012}
}
@article{Rojec2019,
abstract = {Designing an analog electrical circuit requires substantial effort and time by a highly-
skilled designer. Many tools have been devised to aid engineers in the design procedure, from
simulators to parameter optimization tools, which largely assist in fine-tuning the parameter values
of circuits with predetermined topologies. In this paper, we propose a new approach to circuit
optimization that is capable of searching not only for the parameter but also the topological space.
Starting from a high-level statement describing a desired circuit functionality, the proposed method
can either improve a given circuit, or create a novel topology with given resources, based on the
Darwinian theory of evolutionary selection. Our method uses a global parameter optimization
method PSADE to speed up evolutionary convergence. We use the proposed algorithm (both single-
and multi-objective) to successfully evolve passive, active, and BiCMOS circuits.},
doi = {10.1016/j.engappai.2019.01.012},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/rojec2019.pdf:pdf},
issn = {09521976},
number = {January},
pages = {48--65},
url = {https://doi.org/10.1016/j.engappai.2019.01.012},
volume = {80},
year = {2019}
}
@article{Basavaprasad2014,
keywords = {classification,clusters,comparative,edge-based,graph based,histogram,pixel-based,segmentation,techniques,thresholding},
pages = {310--315},
title = {{A Comparative Study on Classification of Image Segmentation Methods With a Focus on
Graph Based Techniques}},
year = {2014}
}
@article{Priebe2012,
archivePrefix = {arXiv},
arxivId = {NIHMS150003},
author = {Priebe, Sarah J and Keenan, Janice M and Miller, Amanda C},
doi = {10.1007/s11145-010-9260-0},
eprint = {NIHMS150003},
isbn = {1110301197},
issn = {00092665},
number = {6},
pages = {581--586},
pmid = {19772347},
volume = {121},
year = {2012}
}
@article{Caldas2020,
abstract = {Control theoretical techniques have been successfully adopted as methods for self-
adaptive systems design to provide formal guarantees about the effectiveness and robustness of
adaptation mechanisms. However, the computational effort to obtain guarantees poses severe
constraints when it comes to dynamic adaptation. In order to solve these limitations, in this paper,
we propose a hybrid approach combining software engineering, control theory, and AI to design for
software self-adaptation. Our solution proposes a hierarchical and dynamic system manager with
performance tuning. Due to the gap between high-level requirements specification and the internal
knob behavior of the managed system, a hierarchically composed components architecture seek the
separation of concerns towards a dynamic solution. Therefore, a two-layered adaptive manager was
designed to satisfy the software requirements with parameters optimization through regression
analysis and evolutionary meta-heuristic. The optimization relies on the collection and processing of
performance, effectiveness, and robustness metrics w.r.t control theoretical metrics at the offline
and online stages. We evaluate our work with a prototype of the Body Sensor Network (BSN) in the
healthcare domain, which is largely used as a demonstrator by the community. The BSN was
implemented under the Robot Operating System (ROS) architecture, and concerns about the system
dependability are taken as adaptation goals. Our results reinforce the necessity of performing well
on such a safety-critical domain and contribute with substantial evidence on how hybrid approaches
that combine control and AI-based techniques for engineering self-adaptive systems can provide
effective adaptation.},
archivePrefix = {arXiv},
arxivId = {2004.11793},
author = {Caldas, Ricardo Diniz and Rodrigues, Arthur and Gil, Eric Bernd and Rodrigues,
Gena{\'{i}}na Nunes and Vogel, Thomas and Pelliccione, Patrizio},
doi = {10.1145/3387939.3391595},
eprint = {2004.11793},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2004.11793.pdf:pdf},
isbn = {9781450379625},
title = {{A Hybrid Approach Combining Control Theory and AI for Engineering Self-Adaptive
Systems}},
url = {http://arxiv.org/abs/2004.11793},
year = {2020}
}
@article{Badrinarayanan2015,
abstract = {We present a novel and practical deep fully convolutional neural network architecture for
semantic pixel-wise segmentation termed SegNet. This core trainable segmentation engine consists
of an encoder network, a corresponding decoder network followed by a pixel-wise classification
layer. The architecture of the encoder network is topologically identical to the 13 convolutional
layers in the VGG16 network. The role of the decoder network is to map the low resolution encoder
feature maps to full input resolution feature maps for pixel-wise classification. The novelty of SegNet
lies is in the manner in which the decoder upsamples its lower resolution input feature map(s).
Specifically, the decoder uses pooling indices computed in the max-pooling step of the
corresponding encoder to perform non-linear upsampling. This eliminates the need for learning to
upsample. The upsampled maps are sparse and are then convolved with trainable filters to produce
dense feature maps. We compare our proposed architecture with the widely adopted FCN and also
with the well known DeepLab-LargeFOV, DeconvNet architectures. This comparison reveals the
memory versus accuracy trade-off involved in achieving good segmentation performance. SegNet
was primarily motivated by scene understanding applications. Hence, it is designed to be efficient
both in terms of memory and computational time during inference. It is also significantly smaller in
the number of trainable parameters than other competing architectures. We also performed a
controlled benchmark of SegNet and other architectures on both road scenes and SUN RGB-D indoor
scene segmentation tasks. We show that SegNet provides good performance with competitive
inference time and more efficient inference memory-wise as compared to other architectures. We
also provide a Caffe implementation of SegNet and a web demo at
http://mi.eng.cam.ac.uk/projects/segnet/.},
archivePrefix = {arXiv},
arxivId = {1511.00561},
author = {Badrinarayanan, Vijay and Kendall, Alex and Cipolla, Roberto},
doi = {10.1109/TPAMI.2016.2644615},
eprint = {1511.00561},
isbn = {9783319464879},
issn = {0162-8828},
pages = {1--14},
pmid = {28060704},
url = {http://arxiv.org/abs/1511.00561},
year = {2015}
}
@article{Greenstreet1999a,
doi = {10.1007/3-540-48983-5_12},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/3-540-48983-5{\_}12.pdf:pdf},
pages = {103--116},
year = {1999}
}
@article{Lee2005,
abstract = {Conventional adaptive systems have common well-known constraints when attempting
to normalize environment. An adaptive system must contain a certain number of rules allowing such
a system to adapt to specific situations. If there is an absence of a rule in a new situation, the system
cannot take appropriate action. Building and managing such complex static adaptive systems places
an enormous burden on system developers. In this paper, we propose a multi-agent based
intelligent adaptive system with a self-growing engine. In this system, the inference agent evaluates
input context with specific factors and analyzes the results. The decision agent selects the most
appropriate action among alternatives available for a specific context and intelligently evolves and
adapts by means of a self-growing engine (SGE). The SGE can evaluate actions and generate new
rules by applying it to a practical situation using remote video conferencing with mobile devices such
as PDAs, and PCs. {\textcopyright} Springer-Verlag Berlin Heidelberg 2005.},
doi = {10.1007/11508069_64},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/lee2005.pdf:pdf},
issn = {03029743},
pages = {494--500},
title = {{An architecture for multi-agent based self-adaptive system in mobile environment}},
volume = {3578},
year = {2005}
}
@article{Liu2013,
author = {Liu, Jiang and Zhang, Zhuo and Wong, Damon Wing Kee and Xu, Yanwu and Yin, Fengshou
and Cheng, Jun and Tan, Ngan Meng and Kwoh, Chee Keong and Xu, Dong and Tham, Yih Chung and
Aung, Tin and Wong, Tien Yin},
doi = {10.1136/amiajnl-2012-001336},
isbn = {1527-974X},
issn = {1527-974X},
journal = {Jamia},
number = {6},
pages = {1021--1027},
pmid = {23538725},
url = {http://www.ncbi.nlm.nih.gov/pubmed/23538725},
volume = {20},
year = {2013}
}
@book{Marti-oliet,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/logic-rewriting-and-concurrency-
2015.pdf:pdf},
isbn = {9783319231648},
}
@article{Parker2019,
author = {Parker, David and Klauck, Michaela and Klein, Joachim and K{\v{r}}et{\'{i}}nsk{\'{y}}, Jan},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1007978-3-030-17502-3.pdf:pdf},
isbn = {9783030175023},
pages = {69--92},
title = {{The 2019 Comparison of Tools for the Analysis of Quantitative ( QComp 2019 Competition
Report )}},
volume = {3},
year = {2019}
}
@article{Feng2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/iccps15.pdf:pdf},
isbn = {9781450334556},
pages = {70--79},
title = {{Controller Synthesis for Autonomous Systems Interacting Categories and Subject
Descriptors}},
year = {2015}
}
@article{Kravets2018,
abstract = {The stochastic game method of coalitions formation in multi-agent systems is offered. An
adaptive algorithm for solving a stochastic game is developed. Computer modeling of the stochastic
game is executed. The parameter influences on the convergence of stochastic game method of
coalition formation are studied. The analysis of received results is realized.},
doi = {10.1109/STC-CSIT.2018.8526610},
file = {:C$\backslash$:/Users/Asus/Downloads/kravets2018.pdf:pdf},
isbn = {9781538664636},
journal = {2018 IEEE 13th International Scientific and Technical Conference on Computer Sciences
and Information Technologies, CSIT 2018 - Proceedings},
pages = {1--4},
publisher = {IEEE},
volume = {1},
year = {2018}
}
@book{InnovationsBayesianNetworks2008,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/innovations-in-bayesian-networks-
2008.pdf:pdf},
isbn = {9783540793540},
}
@article{Vardi2001,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Vardi{\_}automata-theoretic-and-
LTL.pdf:pdf},
journal = {Esslli'01},
url = {http://www.helsinki.fi/esslli/courses/readers/K12.pdf},
year = {2001}
}
@article{Rabin1963,
doi = {10.1016/S0019-9958(63)90290-0},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0019995863902900-
main.pdf:pdf},
issn = {00199958},
number = {3},
pages = {230--245},
volume = {6},
year = {1963}
}
@article{Chandrashekar,
number = {4},
pages = {28--32},
volume = {3}
}
@article{Nair2019,
abstract = {This paper presents a conceptual model for a renewed consideration of the complex
adaptive systems (CAS) perspective in operations and supply chain management research. A
literature review identifies the approaches taken in published research to examine issues such as
complexity, adaptation, and emergent behavior. We present a revised conceptual framework that
offers directions for embracing key tenets from CAS research so as to gain deeper insights into
pertinent issues within the field. We introduce the articles that are part of this special issue and
highlight how these articles relate to the conceptual framework proposed in the paper. We also
propose some methodological directions that can help in undertaking rigorous investigations of
some important aspects that have theoretical and managerial significance.},
doi = {10.1002/joom.1022},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/NairReed-Tsochas2019.pdf:pdf},
issn = {02726963},
number = {2},
pages = {80--92},
title = {{Revisiting the complex adaptive systems paradigm: Leading perspectives for researching
operations and supply chain management issues}},
volume = {65},
year = {2019}
}
@article{Nerurkar2017,
number = {4},
volume = {4},
year = {2017}
}
@book{Cordeiro2019,
abstract = {JBMC is a bounded model checking tool for verifying Java bytecode. It is built on top of
the CPROVER framework. JBMC processes Java bytecode together with a model of the standard Java
libraries. It checks a set of desired properties, such as assertions and absence of uncaught
exceptions, under given bounds on loops, recursion and data structures. Internally, it uses the same
bounded model checking engine as its sibling tool CBMC and discharges the generated verification
conditions with the help of MiniSAT 2.2.1.},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
doi = {10.1007/978-3-030-17502-3_17},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030175016},
issn = {16113349},
pages = {219--223},
title = {{JBMC: Bounded Model Checking for Java Bytecode: (Competition Contribution)}},
url = {http://dx.doi.org/10.1007/978-3-030-17502-3{\_}17},
year = {2019}
}
@article{Sutopo2009,
archivePrefix = {arXiv},
arxivId = {arXiv:1303.7288v1},
author = {Sutopo, Hadi},
doi = {10.1109/MSN.2009.48},
eprint = {arXiv:1303.7288v1},
isbn = {0300300301},
number = {1},
pages = {22--35},
volume = {3},
year = {2009}
}
@article{Hamzei2018,
doi = {10.1109/JIOT.2018.2861742},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/towards-efficient-service-composition-
techniques-in-the-internet-2018.pdf:pdf},
number = {5},
pages = {3774--3787},
publisher = {IEEE},
volume = {5},
year = {2018}
}
@article{Katz2017,
abstract = {We show here how the use of genetic programming in combination of model checking
provides a powerful way to synthesize programs. Whereas classical algorithmic synthesis provides
alarming high complexity and undecidability results, the genetic approach provides a surprisingly
successful heuristics. We describe several versions of a method for synthesizing sequential and
concurrent systems. We show several examples where we used our approach to synthesize, improve
and correct code.},
author = {Katz, Gal and Peled, Doron},
doi = {10.1007/s10009-016-0418-1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/katz2016.pdf:pdf},
isbn = {9783319030760},
issn = {14332787},
number = {4},
pages = {449--464},
title = {{Synthesizing, correcting and improving code, using model checking-based genetic
programming}},
volume = {19},
year = {2017}
}
@article{Tan2017a,
abstract = {Screening for vision threatening diabetic retinopathy by grading digital retinal images
reduces the risk of blindness in people with diabetes. Computer-aided diagnosis can aid human
graders to cope with this mounting problem. We propose to use a 10-layer convolutional neural
network to automatically, simultaneously segment and discriminate exudates, haemorrhages and
micro-aneurysms. Input image is normalized before segmentation. The net is trained in two stages to
improve performance. On average, our net on 30,275,903 effective points achieved a sensitivity of
0.8758 and 0.7158 for exudates and dark lesions on the CLEOPATRA database. It also achieved a
sensitivity of 0.6257 and 0.4606 for haemorrhages and micro-aneurysms. This study shows that it is
possible to get a single convolutional neural network to segment these pathological features on a
wide range of fundus images with reasonable accuracy.},
author = {Tan, Jen Hong and Fujita, Hamido and Sivaprasad, Sobha and Bhandary, Sulatha V. and
Rao, A. Krishna and Chua, Kuang Chua and Acharya, U. Rajendra},
doi = {10.1016/j.ins.2017.08.050},
issn = {00200255},
pages = {66--76},
title = {{Automated segmentation of exudates, haemorrhages, microaneurysms using single
convolutional neural network}},
volume = {420},
year = {2017}
}
@article{Sharma2013,
abstract = {The overall aim of the software industry is to ensure delivery of high quality software to
the end user. To ensure high quality software, it is required to test software. Testing ensures that
software meets user specifications and requirements. However, the field of software testing has a
number of underlying issues like effective generation of test cases, prioritisation of test cases etc
which need to be tackled. These issues demand on effort, time and cost of the testing. Different
techniques and methodologies have been proposed for taking care of these issues. Use of
evolutionary algorithms for automatic test generation has been an area of interest for many
researchers. Genetic Algorithm (GA) is one such form of evolutionary algorithms. In this research
paper, we present a survey of GA approach for addressing the various issues encountered during
software testing.},
archivePrefix = {arXiv},
arxivId = {1411.1154},
eprint = {1411.1154},
issn = {1694-0784},
number = {1},
pages = {381--393},
volume = {10},
year = {2013}
}
@article{Goldberg2001,
abstract = {This paper addresses the problem of combinational equivalence checking (CEC) which
forms one of the key components of the current verification methodology for digital systems. A
number of recently proposed BDD based approaches have met with considerable success in this
area. However, the growing gap between the capability of current solvers and the complexity of
verification instances necessitates the exploration of alternative, better solutions. This paper revisits
the application of Satisfiability (SAT) algorithms to the combinational equivalence checking (CEC)
problem. We argue that SAT is a more robust and flexible engine of Boolean reasoning for the CEC
application than BDDs, which have traditionally been the method of choice. Preliminary results on a
simple framework for SAT based CEC show a speedup of up to two orders of magnitude compared to
state-of-the-art SAT based methods for CEC and also demonstrate that even with this simple
algorithm and untuned prototype implementation it is only moderately slower and sometimes faster
than a state-of-the-art BDD based mixed engine commercial CEC tool. While SAT based CEC methods
need further research and tuning before they can surpass almost a decade of research in BDD based
CEC, the recent progress is very promising and merits continued research},
author = {Goldberg, Evgueni I. and Prasad, Mukul R. and Brayton, Robert K.},
doi = {10.1109/DATE.2001.915010},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}11.pdf:pdf},
issn = {15301591},
pages = {114--121},
year = {2001}
}
@article{Ho2019,
abstract = {A systematic framework based on a creative mechanism design methodology and the
lever analogy method is proposed for the synthesis of two-mode hybrid transmission systems
comprising 3 planetary gears, 9 links and 14 joints. In the proposed approach, the topological
characteristics of existing hybrid transmissions are examined and processes of generalization and
number synthesis are performed to construct an atlas of generalized kinematic chains. The atlas of
feasible kinematic chains is then obtained by applying the specified design requirements and
constraints. Based on the required operating modes of the hybrid transmission, the lever analogy
method is employed to determine all possible placements of the inputs, output, clutches and brakes
of the synthesized configurations. Finally, existing configurations are eliminated in order to obtain
the final atlas of new hybrid transmission designs. Utilizing the GM hybrid transmission system for
existing hybrid transmissions, a total of 17 novel two-mode hybrid transmissions are obtained. The
feasibility of the proposed methodology is demonstrated by performing a detailed kinematic analysis
of one of the synthesized transmissions.},
doi = {10.1016/j.mechmachtheory.2019.103615},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0094114X19312571-
main.pdf:pdf},
issn = {0094114X},
pages = {103615},
url = {https://doi.org/10.1016/j.mechmachtheory.2019.103615},
volume = {142},
year = {2019}
}
@article{Holzl2016,
doi = {10.1007/978-3-319-46508-1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {978-3-319-46507-4},
pages = {242--256},
url = {http://link.springer.com/10.1007/978-3-319-46508-1},
volume = {9960},
year = {2016}
}
@article{Behrmann2004,
abstract = {This is a tutorial paper on the tool UPPAAL. Its goal is to be a short introduction on the
flavor of timed automata implemented in the tool, to present its interface, and to explain how to use
the tool. The contribution of the paper is to provide reference examples and modeling patterns.
{\textcopyright} Springer-Verlag 2004.},
author = {Behrmann, Gerd and David, Alexandre and Larsen, Kim G.},
doi = {10.1007/978-3-540-30080-9_7},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-540-30080-9{\_}7.pdf:pdf},
isbn = {9783540230687},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {200--236},
volume = {3185},
year = {2004}
}
@article{Mishra2011,
author = {Mishra, Madhusudan and Nath, Malaya Kumar and Dandapat, Samarendra},
number = {Vi},
pages = {4--7},
year = {2011}
}
@article{Langdon2016,
doi = {10.1109/CEC.2016.7744177},
isbn = {9781509006229},
pages = {3068--3075},
year = {2016}
}
@article{Schlingloff2018,
doi = {10.1109/EITEC.2018.00006},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/EITEC.2018.00006.pdf:pdf},
isbn = {9781538674680},
journal = {Proceedings - 2018 4th International Workshop on Emerging Ideas and Trends in the
Engineering of Cyber-Physical Systems, EITEC 2018},
pages = {3--8},
publisher = {IEEE},
year = {2018}
}
@article{Bortolussi,
author = {Bortolussi, Luca and Cabri, Giacomo and Di, Giovanna and Serugendo, Marzo and Galpin,
Vashti and Hillston, Jane and Massink, Mieke and Tribastone, Mirco and Weyns, Danny},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2015CASVerification.pdf:pdf},
pages = {1--11},
}
@article{Khan2009,
abstract = {As semiconductor manufacturing enters advanced nanometer design paradigm, aging
and device wear-out related degradation is becoming a major concern. Negative Bias Temperature
Instability (NBTI) is one of the main sources of device lifetime degradation. The severity of such
degradation depends on the operation history of a chip in the field, including such characteristics as
temperature and workloads. In this paper, we propose a system level reliability management
scheme where a chip dynamically adjusts its own operating frequency and supply voltage over time
as the device ages. Major benefits of the proposed approach are (i) increased performance due to
reduced frequency guard banding in the factory and (ii) continuous field adjustments that take
environmental operating conditions such as actual room temperature and the power supply
tolerance into account. The greatest challenge in implementing such a scheme is to perform
calibration without a tester. Much of this work is performed by a hypervisor like software with very
little hardware assistance. This keeps both the hardware overhead and the system complexity low.
This paper describes the entire system architecture including hardware and software components.
Our simulation data indicates that under aggressive wear-out conditions, scheduling interval of days
or weeks is sufficient to reconfigure and keep the system operational, thus the run time overhead
for such adjustments is of no consequence at all. {\textcopyright} 2009 EDAA.},
doi = {10.1109/date.2009.5090637},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/khan2009.pdf:pdf},
isbn = {9783981080155},
issn = {15301591},
pages = {81--86},
year = {2009}
}
@book{Ghanbari,
isbn = {9789492863},
title = {{The Role of MicroRNAs in Age-Related Disorders From population-based genetic studies to
experimental validation}}
}
@article{Ye2017,
author = {Ye, Dayong and Zhang, Minjie and Vasilakos, Athanasios V},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/IEEE-self-organisation-
survey2015.pdf:pdf},
year = {2017}
}
@article{Camara2015b,
author = {C{\'{a}}mara, Javier and Garlan, David and Schmerl, Bradley and Pandey, Ashutosh},
doi = {10.1145/2695664.2695680},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/DADS2014.pdf:pdf},
isbn = {9781450331968},
pages = {428--435},
title = {{Optimal planning for architecture-based self-adaptation via model checking of stochastic
games}},
volume = {13-17-Apri},
year = {2015}
}
@article{Althoff2010,
title = {{Reachability Analysis and its Application to the Safety Assessment of Autonomous Cars}},
year = {2010}
}
@book{Beyer2019,
doi = {10.1007/978-3-030-17502-3},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030175016},
year = {2019}
}
@article{Longchamps2017,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/adaptive{\_}2017{\_}1{\_}20{\_}50006.pdf:pdf},
isbn = {9781612085326},
number = {c},
pages = {8--15},
title = {{Design Patterns for Addition of Adaptive Behavior in Graphical User Interfaces}},
year = {2017}
}
@article{Laure2018,
author = {Laure, Ngassima Fanny and Mung, Joseph K and Martin, Mbelebidima},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/4641{\_}pdf.pdf:pdf},
number = {2},
pages = {1--18},
title = {{Discrete- Time Markov ' s Chain for a Multivariate Stochastic Volatility}},
volume = {7},
year = {2018}
}
@article{Fu2017,
abstract = {With the development of software-defined networking (SDN), its scalability has become
one of the most important issues of SDN. The features of SDN was studied which lead to its
scalability problem when SDN was applied to large-scale network. The three main causes leading to
scalability problem were discussed: control plane and data plane separation, logical centralized
control and fine-grained flow control. Meanwhile, the studies which focus on the scalability of SDN
from three aspects: scalability of performance, scalability of geographic and scalability of control was
presented. Further, the studies on the performance evaluation of the scalability of SDN were
introduced. Finally, the future work was discussed.},
author = {Fu, Yong Hong and Bi, Jun and Zhang, Ke Yao and Wu, Jian Ping},
doi = {10.11959/j.issn.1000-436x.2017137},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/yeganeh2013.pdf:pdf},
issn = {1000436X},
number = {7},
pages = {141--154},
volume = {38},
year = {2017}
}
@article{Umamaheswari2018,
doi = {10.20532/cit.2018.1004123},
file = {:C$\backslash$:/Users/Asus/Downloads/4123-13502-1-PB.pdf:pdf},
number = {2},
pages = {131--140},
title = {{A Framework for Efficient Recognition and Classification of Acute Lymphoblastic Leukemia
with a Novel Customized-KNN Classifier}},
volume = {26},
year = {2018}
}
@article{Icer2016,
abstract = {This paper presents a time-efficient, task-based configuration synthesis algorithm for
modular robot manipulators. One of the main challenges in modular manipulators is to find possible
combinations of modules that are able to complete given tasks while avoiding obstacles in the
environment. Most studies on modular robots focus on obtaining combinations of modules to
achieve a given task without considering the required path planning in an environment with
obstacles. In contrast to previous works, we present a configuration synthesis method for modular
manipulators, considering collision detection and path planning in task space. Our simulations show
that our approach finds possible combinations with reduced computational time compared to
previous techniques.},
doi = {10.1109/ICRA.2016.7487727},
file = {:C$\backslash$:/Users/Asus/Downloads/icer2016.pdf:pdf},
isbn = {9781467380263},
issn = {10504729},
pages = {5203--5209},
volume = {2016-June},
year = {2016}
}
@book{Siekmann2011,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/automated-deduction--cade23-
2011.pdf:pdf},
isbn = {9783642224379},
year = {2011}
}
@article{Pace,
pages = {2--3},
title = {{SIEMENS MAGNETOM Skyra {\_} fit Properties Resolution - Common Resolution - iPAT
Resolution - Filter Image Resolution - Filter Rawdata Geometry - Common Contrast - Common
Geometry - AutoAlign Contrast - Dynamic Resolution - Common Geometry - Saturation SIEME}}
}
@book{Cook2018,
doi = {10.1007/978-3-319-96145-3},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2018{\_}Book{\_}ComputerAidedVerification.pdf:pdf},
isbn = {9783319961453},
pages = {38--47},
url = {http://dx.doi.org/10.1007/978-3-319-96145-3{\_}3},
volume = {2},
year = {2018}
}
@book{Hrabia,
file = {:C$\backslash$:/Users/Asus/Downloads/hrabia{\_}christopher-eyk.pdf:pdf},
isbn = {0000000252201},
keywords = {Entscheindungsfindung,Informatik,Koordination,Multi-
Agentensysteme,Planung,Robotik,Selbstadaption,Selbstorganisation,computer
science,coordination,decision-making,multi-agent systems,planning,reinforcement
learning,robotics,self-adapation,self-organisation},
title = {{Self-Adaptive and Self-Organised Planning and Decision-Making for Multi-Robot Systems}}
}
@article{Klarl2014,
abstract = {Ensembles are collections of autonomic entities which collaborate to perform certain
tasks. They show typically a complex dynamic behavior which is difficult to implement with state of
the art development techniques. In this paper, we present a systematic methodology for the design
and implementation of ensemble-based systems which goes beyond component-based
development. A conceptual key point of our approach (elaborated in [1]) is that components can
adopt different roles and that they can participate (under certain roles) in several, possibly
concurrently running ensembles. In this paper, we present a novel developer framework that
extends the component-based approach by explicitly taking into account roles and ensembles. The
framework implementation follows rigorous rules formalized in terms of ensemble-structures and
ensemble automata. Its application is demonstrated by a peer-2-peer file system network. {\
textcopyright} 2014 IEEE.},
doi = {10.1109/ASWEC.2014.26},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/klarl2014.pdf:pdf},
isbn = {9781479931491},
pages = {15--24},
title = {{Design and implementation of dynamically evolving ensembles with the helena framework}},
year = {2014}
}
@article{Alur,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/hmch13.pdf:pdf},
}
@article{Hadka2013,
abstract = {This study introduces the Borg multi-objective evolutionary algorithm (MOEA) for many-
objective, multimodal optimization. The Borg MOEA combines $\epsilon$-dominance, a measure of
convergence speed named $\epsilon$-progress, randomized restarts, and auto-adaptive
multioperator recombination into a unified optimization framework. A comparative study on 33
instances of 18 test problems from the DTLZ, WFG, and CEC 2009 test suites demonstrates Borg
meets or exceeds six state of the art MOEAs on the majority of the tested problems. The
performance for each test problem is evaluated using a 1,000 point Latin hypercube sampling of
each algorithm's feasible parameterization space. The statistical performance of every sampled
MOEA parameterization is evaluated using 50 replicate random seed trials. The Borg MOEA is not a
single algorithm; instead it represents a class of algorithms whose operators are adaptively selected
based on the problem. The adaptive discovery of key operators is of particular importance for
benchmarking how variation operators enhance search for complex many-objective problems.},
doi = {10.1162/EVCO_a_00075},
isbn = {1063-6560},
issn = {1063-6560},
number = {2},
pages = {231--259},
pmid = {22385134},
url = {http://www.mitpressjournals.org/doi/10.1162/EVCO{\_}a{\_}00075},
volume = {21},
year = {2013}
}
@article{Ben2020,
author = {{Ben Mahfoudh}, Houssem and {Di Marzo Serugendo}, Giovanna and Naja, Nabil},
doi = {10.1007/s10009-020-00557-0},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
internal-note = {NOTE(review): author list reconstructed from a mangled export ("Ben, Houssem and Giovanna, Mahfoudh and Marzo, Di and Nabil, Serugendo and Nabil, Naja"); title still missing -- verify both against the PDF},
issn = {1433-2787},
journal = {International Journal on Software Tools for Technology Transfer},
url = {https://doi.org/10.1007/s10009-020-00557-0},
year = {2020}
}
@article{Stollenga2015,
abstract = {Convolutional Neural Networks (CNNs) can be shifted across 2D images or 3D videos to
segment them. They have a fixed input size and typically perceive only small local contexts of the
pixels to be classified as foreground or background. In contrast, Multi-Dimensional Recurrent NNs
(MD-RNNs) can perceive the entire spatio-temporal context of each pixel in a few sweeps through all
pixels, especially when the RNN is a Long Short-Term Memory (LSTM). Despite these theoretical
advantages, however, unlike CNNs, previous MD-LSTM variants were hard to parallelize on GPUs.
Here we re-arrange the traditional cuboid order of computations in MD-LSTM in pyramidal fashion.
The resulting PyraMiD-LSTM is easy to parallelize, especially for 3D data such as stacks of brain slice
images. PyraMiD-LSTM achieved best known pixel-wise brain image segmentation results on
MRBrainS13 (and competitive results on EM-ISBI12).},
archivePrefix = {arXiv},
arxivId = {1506.07452},
author = {Stollenga, Marijn F. and Byeon, Wonmin and Liwicki, Marcus and Schmidhuber, Juergen},
eprint = {1506.07452},
issn = {10495258},
journal = {Advances in Neural Information Processing Systems},
pages = {1--9},
title = {{Parallel Multi-Dimensional LSTM, With Application to Fast Biomedical Volumetric Image
Segmentation}},
url = {http://arxiv.org/abs/1506.07452},
year = {2015}
}
@article{Yu2017,
abstract = {It is interesting and of significant importance to investigate how network structures co-
evolve with opinions. The existing models of such co-evolution typically lead to the final states
where network nodes either reach a global consensus or break into separated communities, each of
which holding its own community consensus. Such results, however, can hardly explain the richness
of real-life observations that opinions are always diversified with no global or even community
consensus, and people seldom, if not never, totally cut off themselves from dissenters. In this article,
we show that, a simple model integrating consensus formation, link rewiring and opinion change
allows complex system dynamics to emerge, driving the system into a dynamic equilibrium with co-
existence of diversified opinions. Specifically, similar opinion holders may form into communities yet
with no strict community consensus; and rather than being separated into disconnected
communities, different communities remain to be interconnected by non-trivial proportion of inter-
community links. More importantly, we show that the complex dynamics may lead to different
numbers of communities at steady state with a given tolerance between different opinion holders.
We construct a framework for theoretically analyzing the co-evolution process. Theoretical analysis
and extensive simulation results reveal some useful insights into the complex co-evolution process,
including the formation of dynamic equilibrium, the phase transition between different steady states
with different numbers of communities, and the dynamics between opinion distribution and
network modularity, etc.},
archivePrefix = {arXiv},
arxivId = {arXiv:1703.02223v2},
author = {Yu, Y. and Xiao, G. and Li, G. and Tay, W. P. and Teoh, H. F.},
doi = {10.1063/1.4989668},
eprint = {arXiv:1703.02223v2},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1703.02223.pdf:pdf},
issn = {10541500},
journal = {Chaos},
number = {10},
pages = {1--12},
title = {Opinion diversity and community formation in adaptive networks},
volume = {27},
year = {2017}
}
@article{Jeyashree2014,
number = {5},
pages = {235--240},
title = {{Combined Approach on Analysis of Retinal Blood Vessel Segmentation for Diabetic
Retinopathy and Glaucoma Diagnosis}},
volume = {5},
year = {2014}
}
@article{Hanssen2019,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/34913.pdf:pdf},
internal-note = {NOTE(review): title below is a repository banner, not the paper title -- replace with the real title from the PDF},
pages = {8--10},
title = {{PDF hosted at the Radboud Repository of the Radboud University Nijmegen Article
information :}},
year = {2019}
}
@book{Langton1995,
abstract = {Artificial life, a field that seeks to increase the role of synthesis in the study of biological
phenomena, has great potential, both for unlocking the secrets of life and for raising a host of
disturbing issues - scientific and technical as well as philosophical and ethical. This book brings
together a series of overview articles that appeared in the first three issues of the groundbreaking
journal Artificial Life, along with a new introduction by Christopher Langton, Editor-in-Chief of
Artificial Life, founder of the discipline, and Director of the Artificial Life Program at the Santa Fe
Institute.},
doi = {10.2307/1575317},
editor = {Langton, Christopher G.},
isbn = {0262121891},
issn = {0024094X},
pages = {344},
pmid = {3099},
publisher = {MIT Press},
title = {Artificial Life: An Overview},
url = {http://www.amazon.com/dp/0262621126},
volume = {1},
year = {1995}
}
@article{Sarikaya2017,
abstract = {We have long envisioned that one day computers will understand natural language and
anticipate what we need, when and where we need it, and proactively complete tasks on our behalf.
As computers get smaller and more pervasive, how humans interact with them is becoming a crucial
issue. Despite numerous attempts over the past 30 years to make language understanding (LU) an
effective and robust natural user interface for computer interaction, success has been limited and
scoped to applications that were not particularly central to everyday use. However, speech
recognition and machine learning have continued to be refined, and structured data served by
applications and content providers has emerged. These advances, along with increased
computational power, have broadened the application of natural LU to a wide spectrum of everyday
tasks that are central to a user's productivity. We believe that as computers become smaller and
more ubiquitous [e.g., wearables and Internet of Things (IoT)], and the number of applications
increases, both system-initiated and user-initiated task completion across various applications and
web services will become indispensable for personal life management and work productivity. In this
article, we give an overview of personal digital assistants (PDAs); describe the system architecture,
key components, and technology behind them; and discuss their future potential to fully redefine
human-computer interaction.},
author = {Sarikaya, Ruhi},
doi = {10.1109/MSP.2016.2617341},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/sarikaya2017.pdf:pdf},
issn = {10535888},
journal = {IEEE Signal Processing Magazine},
number = {1},
pages = {67--81},
title = {{The technology behind personal digital assistants: An overview of the system architecture
and key components}},
volume = {34},
year = {2017}
}
@article{Kaur2014,
abstract = {The Image segmentation is referred to as one of the most important processes of image
processing. Image segmentation is the technique of dividing or partitioning an image into parts,
called segments. It is mostly useful for applications like image compression or object recognition,
because for these types of applications, it is inefficient to process the whole image. So, image
segmentation is used to segment the parts from image for further processing. There exist several
image segmentation techniques, which partition the image into several parts based on certain image
features like pixel intensity value, color, texture, etc. These all techniques are categorized based on
the segmentation method used. In this paper the various image segmentation techniques are
reviewed, discussed and finally a comparison of their advantages and disadvantages is listed},
internal-note = {NOTE(review): author, title, and journal missing -- recover from the cited survey},
number = {5},
volume = {3},
year = {2014}
}
@article{Gu2016,
author = {Gu, Chunyan and Yang, Ye and Sompallae, Ramakrishna and Xu, Hongwei and Tompkins,
Van S. and Holman, Carol and Hose, Dirk and Goldschmidt, Hartmut and Tricot, Guido and Janz,
Siegfried},
doi = {10.1038/leu.2015.334},
internal-note = {NOTE(review): removed affiliation fragments ("City, Iowa", "Carver, Lucille A") that the export had parsed as authors, and stripped the garbage ".FOXM1" suffix from the DOI; title still missing -- verify against the article},
isbn = {7135634217},
journal = {Leukemia},
number = {4},
pages = {873--882},
volume = {30},
year = {2016}
}
@article{Oreski2014,
abstract = {In this paper, an advanced novel heuristic algorithm is presented, the hybrid genetic
algorithm with neural networks (HGA-NN), which is used to identify an optimum feature subset and
to increase the classification accuracy and scalability in credit risk assessment. This algorithm is
based on the following basic hypothesis: the high-dimensional input feature space can be
preliminarily restricted to only the important features. In this preliminary restriction, fast algorithms
for feature ranking and earlier experience are used. Additionally, enhancements are made in the
creation of the initial population, as well as by introducing an incremental stage in the genetic
algorithm. The performances of the proposed HGA-NN classifier are evaluated using a real-world
credit dataset that is collected at a Croatian bank, and the findings are further validated on another
real-world credit dataset that is selected in a UCI database. The classification accuracy is compared
with that presented in the literature. Experimental results that were achieved using the proposed
novel HGA-NN classifier are promising for feature selection and classification in retail credit risk
assessment and indicate that the HGA-NN classifier is a promising addition to existing data mining
techniques. {\textcopyright} 2013 Elsevier Ltd. All rights reserved.},
author = {Oreski, Stjepan and Oreski, Goran},
doi = {10.1016/j.eswa.2013.09.004},
isbn = {0957-4174},
issn = {09574174},
journal = {Expert Systems with Applications},
pages = {2052--2064},
title = {{Genetic algorithm-based heuristic for feature selection in credit risk assessment}},
url = {http://dx.doi.org/10.1016/j.eswa.2013.09.004},
volume = {41},
year = {2014}
}
@article{RidlerT.W.Calvard1978,
abstract = {An object may be extracted from its background in a picture by theshold selection.
Ideally, if the object has a different average gray level from that of its surrounding, the effect of
thresholding will produce a white object with a black background or vice versa. In practice, it is often
difficult, however, to select an appropriate threshold, and a technique is described whereby an
optimum threshold may be chosen automatically as a result of an iterative process, successive
iterations providing increasingly cleaner extractions of the object region. An application to low
contrast images of handwritten text is discussed.},
author = {Ridler, T. W. and Calvard, S.},
doi = {10.1109/TSMC.1978.4310039},
isbn = {0018-9472},
issn = {00189472},
journal = {IEEE Transactions on Systems, Man, and Cybernetics},
number = {8},
pages = {630--632},
title = {Picture Thresholding Using an Iterative Selection Method},
volume = {8},
year = {1978}
}
@article{Abid2012,
abstract = {An issue limiting the adoption of model checking technologies by the industry is the
ability, for non-experts, to express their requirements using the property languages supported by
verification tools. This has motivated the definition of dedicated assertion languages for expressing
temporal properties at a higher level. However, only a limited number of these formalisms support
the definition of timing constraints. In this paper, we propose a set of specification patterns that can
be used to express real-time requirements commonly found in the design of reactive systems. We
also provide an integrated model checking tool chain for the verification of timed requirements on
TTS, an extension of Time Petri Nets with data variables and priorities. {\textcopyright} 2012
Springer-Verlag.},
archivePrefix = {arXiv},
arxivId = {1301.7534},
author = {Abid, Nouha and {Dal Zilio}, Silvano and {Le Botlan}, Didier},
doi = {10.1007/978-3-642-32469-7_1},
eprint = {1301.7534},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-642-32469-7{\_}1.pdf:pdf},
isbn = {9783642324680},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {1--15},
title = {Real-Time Specification Patterns and Tools},
volume = {7437},
year = {2012}
}
@article{Nigro2017,
abstract = {This paper proposes an original approach to modelling and simulation of multi-agent
systems which is based on statistical model checking (SMC). The approach is prototyped in the
context of the popular UPPAAL SMC toolbox. Usefulness and validation of the approach are checked
by applying it to a known complex and adaptive model of the Iterated Prisoner's Dilemma (IPD)
game, by studying the emergence of cooperation in the presence of different social interaction
structures.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/0011-abs{\_}ECMS2017{\
_}0059.pdf:pdf},
internal-note = {NOTE(review): author and title missing -- recover from the ECMS 2017 paper},
isbn = {9780993244049},
journal = {Proceedings - 31st European Conference on Modelling and Simulation, ECMS 2017},
number = {Cd},
pages = {11--17},
volume = {6},
year = {2017}
}
@article{Klarl2015,
abstract = {When engineering self-adaptive systems, separating adaptation and application logic was
proven beneficial to avoid interdependencies between adaptation strategy and standard behaviour.
Several engineering methods support this separation in different phases of the classical
development process, but none addresses it consistently in all of them. We propose a holistic model-
driven engineering process with systematic transitions between all phases to develop self-adaptive
systems. Adaptation is achieved by changing the behavioral mode of a component in response to
perceptions. We realize behavioral modes by roles which a component can dynamically adopt. For
specification, we propose adaptation automata which allow to specify complex adaptation
behaviour by hierarchical structure and history of states. Furthermore, we propose the Helena
Adaptation Manager pattern to derive a role-based model from a specification. Due to its formal
foundation, the model can be analyzed with Spin and executed with the Java framework jHelena.},
author = {Klarl, Annabelle},
doi = {10.1109/WETICE.2015.32},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/klarl2015.pdf:pdf},
internal-note = {NOTE(review): title missing -- recover from the WETICE 2015 paper},
isbn = {9781467376921},
number = {5},
pages = {3--8},
volume = {2},
year = {2015}
}
@article{Holland1999,
author = {Holland, Owen and Melhuish, Chris},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/holland99stigmergy.pdf:pdf},
internal-note = {NOTE(review): bibliographic fields reconstructed from the attached PDF's name and the page range; original export had junk volume = {202} -- verify against the published article},
journal = {Artificial Life},
number = {2},
pages = {173--202},
title = {Stigmergy, Self-Organization, and Sorting in Collective Robotics},
volume = {5},
year = {1999}
}
@article{Elrakaiby2020,
abstract = {Many software systems have become too large and complex to be managed efficiently by
human administrators, particularly when they operate in uncertain and dynamic environments and
require frequent changes. Requirements-driven adaptation techniques have been proposed to
endow systems with the necessary means to autonomously decide ways to satisfy their
requirements. However, many current approaches rely on general-purpose languages, models
and/or frameworks to design, develop and analyze autonomous systems. Unfortunately, these tools
are not tailored towards the characteristics of adaptation problems in autonomous systems. In this
paper, we present Optimal by Design (ObD ), a framework for model-based requirements-driven
synthesis of optimal adaptation strategies for autonomous systems. ObD proposes a model (and a
language) for the high-level description of the basic elements of self-adaptive systems, namely the
system, capabilities, requirements and environment. Based on those elements, a Markov Decision
Process (MDP) is constructed to compute the optimal strategy or the most rewarding system
behaviour. Furthermore, this defines a reflex controller that can ensure timely responses to changes.
One novel feature of the framework is that it benefits both from goal-oriented techniques,
developed for requirement elicitation, refinement and analysis, and synthesis capabilities and
extensive research around MDPs, their extensions and tools. Our preliminary evaluation results
demonstrate the practicality and advantages of the framework.},
archivePrefix = {arXiv},
arxivId = {2001.08525},
eprint = {2001.08525},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2001.08525.pdf:pdf},
internal-note = {NOTE(review): author and title missing -- recover from arXiv:2001.08525},
url = {http://arxiv.org/abs/2001.08525},
year = {2020}
}
@article{Joshi2010,
author = {Joshi, Gopal Datt and Sivaswamy, Jayanthi and Karan, Kundan and Krishnadas, S R},
title = {{Optic Disk and Cup Boundary Detection Using Regional Information}},
year = {2010}
}
@article{Hennicker2014a,
doi = {10.1007/978-3-642-54624-2_18},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-642-54624-2{\_}18.pdf:pdf},
internal-note = {NOTE(review): author and title missing -- recover from the DOI landing page},
pages = {359--381},
year = {2014}
}
@article{Keller1998,
author = {Keller, Rudolf K. and Schauer, Reinhard},
doi = {10.1109/icse.1998.671356},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/design-components-towards-software-
composition-at-the-design-lev.pdf:pdf},
isbn = {0818683686},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
pages = {302--311},
title = {Design Components: Towards Software Composition at the Design Level},
year = {1998}
}
@article{Mahmood2019,
abstract = {The discipline of component-based modeling and simulation offers promising gains in
reducing cost, time, and the complexity of model development through the (re)use of modular
components. Model-driven development suggests 1) the realization of a complex system using a
conceptual model; 2) its automatic transformation into an executable form using transformation
rules, and; 3) its automatic verification using a formal analysis technique for an accurate assessment
of its correctness. Both approaches have numerous complementary benefits in rapid prototyping of
complex systems using model reuse. In this paper, we propose a framework grounded in a
combination of component-based and model-driven approaches to promote rapid prototyping of
complex systems through the effective reuse of the simulation models. Our proposed process allows
developers to 1) build or select existing components and compose them to formulate the conceptual
models of complex systems; 2) automatically transform the conceptual models for the rapid
implementation and simulation, and; 3) automatically verify them as per the requirement
specifications. We propose the use of the extended finite-state machine (EFSM) as conceptual
modeling formalism, anylogic simulation platform for the implementation, and probabilistic model
checking technique using communicating sequential process (CSP) formalism for the verification.
Finally, we present a case study of a real-time adaptive cruise control system to demonstrate the
functionality of our framework. Our proposed component-based model-driven approach facilitates
rapid prototyping and effective meaningful reuse of complex system models, which further
accelerates the modeling, simulation, and analysis process of real-time systems and aids in complex
engineering designs and implementations.},
author = {Mahmood, Imran and Kausar, Tameen and Sarjoughian, Hessam S. and Malik, Asad Waqar
and Riaz, Naveed},
doi = {10.1109/ACCESS.2019.2917652},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/mahmood2019.pdf:pdf},
issn = {21693536},
journal = {IEEE Access},
pages = {67497--67514},
publisher = {IEEE},
title = {{An Integrated Modeling, Simulation and Analysis Framework for Engineering Complex
Systems}},
volume = {7},
year = {2019}
}
@book{Kounev2017e,
abstract = {This book provides formal and informal definitions and taxonomies for self-aware
computing systems, and explains how self-aware computing relates to many existing subfields of
computer science, especially software engineering. It describes architectures and algorithms for self-
aware systems as well as the benefits and pitfalls of self-awareness, and reviews much of the latest
relevant research across a wide array of disciplines, including open research challenges. The
chapters of this book are organized into five parts: Introduction, System Architectures, Methods and
Algorithms, Applications and Case Studies, and Outlook. Part I offers an introduction that defines
self-aware computing systems from multiple perspectives, and establishes a formal definition, a
taxonomy and a set of reference scenarios that help to unify the remaining chapters. Next, Part II
explores architectures for self-aware computing systems, such as generic concepts and notations
that allow a wide range of self-aware system architectures to be described and compared with both
isolated and interacting systems. It also reviews the current state of reference architectures,
architectural frameworks, and languages for self-aware systems. Part III focuses on methods and
algorithms for self-aware computing systems by addressing issues pertaining to system design, like
modeling, synthesis and verification. It also examines topics such as adaptation, benchmarks and
metrics. Part IV then presents applications and case studies in various domains including cloud
computing, data centers, cyber-physical systems, and the degree to which self-aware computing
approaches have been adopted within those domains. Lastly, Part V surveys open challenges and
future research directions for self-aware computing systems. It can be used as a handbook for
professionals and researchers working in areas related to self-aware computing, and can also serve
as an advanced textbook for lecturers and postgraduate students studying subjects like advanced
software engineering, autonomic computing, self-adaptive systems, and data-center resource
management. Each chapter is largely self-contained, and offers plenty of references for anyone
wishing to pursue the topic more deeply.},
author = {Kounev, Samuel and Kephart, Jeffrey O. and Milenkoski, Aleksandar and Zhu, Xiaoyun},
doi = {10.1007/978-3-319-47474-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/diaconescu2017.pdf:pdf},
isbn = {9783319474748},
pages = {1--722},
publisher = {Springer},
title = {Self-Aware Computing Systems},
year = {2017}
}
@article{Naz2014,
pages = {51--58},
title = {{Glaucoma Detection in Color Fundus Images Using Cup to Disc Ratio}},
year = {2014}
}
@article{Camara2020,
abstract = {Two of the main paradigms used to build adaptive software employ different types of
properties to capture relevant aspects of the system's run-time behavior. On the one hand, control
systems consider properties that concern static aspects like stability, as well as dynamic properties
that capture the transient evolution of variables such as settling time. On the other hand, self-
adaptive systems consider mostly non-functional properties that capture concerns such as
performance, reliability, and cost. In general, it is not easy to reconcile these two types of properties
or identify under which conditions they constitute a good fit to provide run-time guarantees. There
is a need of identifying the key properties in the areas of control and self-adaptation, as well as of
characterizing and mapping them to better understand how they relate and possibly complement
each other. In this paper, we take a first step to tackle this problem by: (1) identifying a set of key
properties in control theory, (2) illustrating the formalization of some of these properties employing
temporal logic languages commonly used to engineer self-adaptive software systems, and (3)
illustrating how to map key properties that characterize self-adaptive software systems into control
properties, leveraging their formalization in temporal logics. We illustrate the different steps of the
mapping on an exemplar case in the cloud computing domain and conclude with identifying open
challenges in the area.},
archivePrefix = {arXiv},
arxivId = {2004.11846},
author = {C{\'{a}}mara, Javier and Papadopoulos, Alessandro V. and Vogel, Thomas and Weyns,
Danny and Garlan, David and Huang, Shihong and Tei, Kenji},
doi = {10.1145/3387939.3391568},
eprint = {2004.11846},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2004.11846.pdf:pdf},
isbn = {9781450379625},
title = {{Towards Bridging the Gap between Control and Self-Adaptive System Properties}},
url = {http://arxiv.org/abs/2004.11846},
year = {2020}
}
@article{Sims2003,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1.1.88.3263.pdf:pdf},
internal-note = {NOTE(review): author, title, and venue missing -- verify against the attached PDF},
isbn = {4962003100},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.307.7770{\&}rep=rep1{\
&}type=pdf},
year = {2003}
}
@article{Miiller2011,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Mueller{\_}272322.pdf:pdf},
internal-note = {NOTE(review): entry lacks author, title, and venue; the key looks like a mangled "Mueller2011" (attached file is Mueller{\_}272322.pdf) -- verify against the PDF},
isbn = {9781441154231},
year = {2011}
}
@article{Clements2003,
author = {Clements, Paul and Garlan, David and Little, Reed and Nord, Robert and Stafford, Judith},
doi = {10.1109/icse.2003.1201264},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/clements2003.pdf:pdf},
issn = {02705257},
journal = {Proceedings - International Conference on Software Engineering},
pages = {740--741},
title = {Documenting Software Architectures: Views and Beyond},
volume = {6},
year = {2003}
}
@article{Calinescu2011,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/calinescu2011.pdf:pdf},
internal-note = {NOTE(review): author, title, and venue missing -- verify against the attached PDF},
pages = {122--135},
year = {2011}
}
@article{Afzal2019,
abstract = {Cognitive radio (CR) arises as an important technique for wireless network with the aim
to provide better utilization of radio spectrum, which is being utilized sporadically in some areas.
IEEE 802.22 wireless regional area network (WRAN) is the first standard for CR technology designed
to opportunistically utilize the unused or under-utilized TV bands. A WRAN cell normally consists of a
number of customer premises equipments (CPEs)/CR users and a base station (BS) having
master/slave architecture. When a CPE is powered on, it first attempts to associate with the BS. In
this paper, a discrete time Markov chain model (DTMC) is presented to show the association process
of CPEs with the BS. To the best of our knowledge, this paper is the first in which DTMC Model is
analyzed and investigated for WRAN. Using this model, various parameters such as association time,
expected return time to the respective backoff stage, first passage time, etc., are derived. Finally, the
evaluation results are provided to analyze the system's performance.},
author = {Afzal, Humaira and Mufti, Muhammad Rafiq and Awan, Irfan and Yousaf, Muhammad},
doi = {10.1016/j.jss.2019.01.053},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0164121219300068-
main.pdf:pdf},
issn = {01641212},
journal = {Journal of Systems and Software},
pages = {1--7},
title = {{Performance analysis of radio spectrum for cognitive radio wireless networks using discrete
time Markov chain}},
url = {https://doi.org/10.1016/j.jss.2019.01.053},
volume = {151},
year = {2019}
}
@article{FelixSolano2019,
abstract = {Goals are first-class entities in a self-adaptive system (SAS) as they guide the self-
adaptation. A SAS often operates in dynamic and partially unknown environments, which cause
uncertainty that the SAS has to address to achieve its goals. Moreover, besides the environment,
other classes of uncertainty have been identified. However, these various classes and their sources
are not systematically addressed by current approaches throughout the life cycle of the SAS. In
general, uncertainty typically makes the assurance provision of SAS goals exclusively at design time
not viable. This calls for an assurance process that spans the whole life cycle of the SAS. In this work,
we propose a goal-oriented assurance process that supports taming different sources (within
different classes) of uncertainty from defining the goals at design time to performing self-adaptation
at runtime. Based on a goal model augmented with uncertainty annotations, we automatically
generate parametric symbolic formulae with parameterized uncertainties at design time using
symbolic model checking. These formulae and the goal model guide the synthesis of adaptation
policies by engineers. At runtime, the generated formulae are evaluated to resolve the uncertainty
and to steer the self-adaptation using the policies. In this paper, we focus on reliability and cost
properties, for which we evaluate our approach on the Body Sensor Network (BSN) implemented in
OpenDaVINCI. The results of the validation are promising and show that our approach is able to
systematically tame multiple classes of uncertainty, and that it is effective and efficient in providing
assurances for the goals of self-adaptive systems.},
author = {{Felix Solano}, Gabriela and {Diniz Caldas}, Ricardo and {Nunes Rodrigues}, Genaina and
Vogel, Thomas and Pelliccione, Patrizio},
doi = {10.1109/SEAMS.2019.00020},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/felixsolano2019.pdf:pdf},
isbn = {9781728133683},
issn = {21567891},
journal = {ICSE Workshop on Software Engineering for Adaptive and Self-Managing Systems},
keywords = {Self-adaptive systems,adaptation policy,goal modelling,markov decision
process,symbolic model checking,uncertainty},
pages = {89--99},
title = {Taming Uncertainty in the Assurance Process of Self-Adaptive Systems: A Goal-Oriented Approach},
volume = {2019-May},
year = {2019}
}
@article{Rahman2020,
abstract = {The traditional classroom has been evolving with the implementation of technology in
and outside the classroom environment. There has been a significant implementation of technology
for learning mathematics. Augmented Reality stands out as part of the visualization of spatiality
related contents. Technology implementation has caused changes in interactivity of students with
system and also with their fellow peers and teachers. This paper provides a systemic literature
review of studies that analyzed the effects of human-human and human-system interactivity in
students' learning experiences and learning performances and proposes a research framework of an
interactive learning system for learning calculus through the implementation of augmented reality.},
author = {Rahman, Md Asifur and Ling, Lew Sook and Yin, Ooi Shih},
doi = {10.1007/978-981-15-0058-9_47},
file = {:C$\backslash$:/Users/Asus/Downloads/978-981-15-0058-9{\_}47.pdf:pdf},
isbn = {9789811500572},
issn = {18761119},
pages = {491--499},
title = {{Augmented Reality for Learning Calculus: A Research Framework of Interactive Learning
System}},
volume = {603},
year = {2020}
}
@article{Amorim2014,
author = {Amorim, Silva and De, Eduardo Santana and Mcgregor, John D},
doi = {10.1109/WICSA.2014.36},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/amorim2014.pdf:pdf},
internal-note = {NOTE(review): author names look mis-parsed (e.g. "De, Eduardo Santana") and title is missing -- verify against the WICSA 2014 paper},
isbn = {9781479934126},
pages = {49--52},
year = {2014}
}
@inproceedings{Zhang2010,
abstract = {Retinal fundus image is an important modality to document the health of the retina and
is widely used to diagnose ocular diseases such as glaucoma, diabetic retinopathy and age-related
macular degeneration. However, the enormous amount of retinal data obtained nowadays mostly
stored locally; and the valuable embedded clinical knowledge is not efficiently exploited. In this
paper we present an online depository, ORIGA(-light), which aims to share clinical groundtruth
retinal images with the public; provide open access for researchers to benchmark their computer-
aided segmentation algorithms. An in-house image segmentation and grading tool is developed to
facilitate the construction of ORIGA(-light). A quantified objective benchmarking method is
proposed, focusing on optic disc and cup segmentation and Cup-to-Disc Ratio (CDR). Currently,
ORIGA(-light) contains 650 retinal images annotated by trained professionals from Singapore Eye
Research Institute. A wide collection of image signs, critical for glaucoma diagnosis, are annotated.
We will update the system continuously with more clinical ground-truth images. ORIGA(-light) is
available for online access upon request.},
author = {Zhang, Zhuo and Yin, Feng Shou and Liu, Jiang and Wong, Wing Kee and Tan, Ngan Meng
and Lee, Beng Hai and Cheng, Jun and Wong, Tien Yin},
booktitle = {2010 Annual International Conference of the IEEE Engineering in Medicine and Biology
Society, EMBC'10},
doi = {10.1109/IEMBS.2010.5626137},
isbn = {9781424441235},
issn = {1557-170X},
pages = {3065--3068},
pmid = {21095735},
title = {{ORIGA-light: An online retinal fundus image database for glaucoma analysis and research}},
year = {2010}
}
@article{Hoos2005,
doi = {10.1016/b978-155860872-6/50023-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/sat+csp-suling.pdf:pdf},
internal-note = {NOTE(review): author and title missing; the DOI points to a book chapter, so the entry type may need to become @incollection -- verify},
pages = {257--312},
year = {2005}
}
@article{Diment2009,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Diment-complexity.pdf:pdf},
url = {http://www.uow.edu.au/{~}kd21/uploads/Diment-complexity.pdf},
year = {2009}
}
@article{Fraser2020,
abstract = {We show how detailed simulation models and abstract Markov models can be developed
collaboratively to generate and implement effective controllers for autonomous agent search and
retrieve missions. We introduce a concrete simulation model of an Unmanned Aerial Vehicle (UAV).
We then show how the probabilistic model checker PRISM is used for optimal strategy synthesis for a
sequence of scenarios relevant to UAVs and potentially other autonomous agent systems. For each
scenario we demonstrate how it can be modelled using PRISM, give model checking statistics and
present the synthesised optimal strategies. We then show how our strategies can be returned to
the controller for the simulation model and provide experimental results to demonstrate the
effectiveness of one such strategy. Finally we explain how our models can be adapted, using
symmetry, for use on larger search areas, and demonstrate the feasibility of this approach.},
author = {Fraser, Douglas and Giaquinta, Ruben and Hoffmann, Ruth and Ireland, Murray and Miller,
Alice and Norman, Gethin},
doi = {10.1007/s00165-020-00508-1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Fraser2020{\_}Article{\
_}CollaborativeModelsForAutonomo.pdf:pdf},
issn = {1433299X},
number = {2-3},
pages = {157--186},
url = {https://doi.org/10.1007/s00165-020-00508-1},
volume = {32},
year = {2020}
}
@article{Lichtenstein2006,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/fulltext.pdf:pdf},
year = {2006}
}
@article{Sarhosis2016,
doi = {10.1504/IJMRI.2016.074735},
issn = {2056-9459},
journal = {International Journal of Masonry Research and Innovation},
number = {1},
pages = {48},
title = {{Optimisation procedure for material parameter identification for masonry constitutive
models}},
url = {http://www.inderscience.com/link.php?id=74735},
volume = {1},
year = {2016}
}
@article{Krupitzer2015,
abstract = {The complexity of information systems is increasing in recent years, leading to increased
effort for maintenance and configuration. Self-adaptive systems (SASs) address this issue. Due to
new computing trends, such as pervasive computing, miniaturization of IT leads to mobile devices
with the emerging need for context adaptation. Therefore, it is beneficial that devices are able to
adapt context. Hence, we propose to extend the definition of SASs and include context adaptation.
This paper presents a taxonomy of self-adaptation and a survey on engineering SASs. Based on the
taxonomy and the survey, we motivate a new perspective on SAS including context adaptation.},
author = {Krupitzer, Christian and Roth, Felix Maximilian and Vansyckel, Sebastian and Schiele,
Gregor and Becker, Christian},
doi = {10.1016/j.pmcj.2014.09.009},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/krupitzer2015.pdf:pdf},
issn = {15741192},
number = {PB},
pages = {184--206},
url = {http://dx.doi.org/10.1016/j.pmcj.2014.09.009},
volume = {17},
year = {2015}
}
@article{Alrahman2016,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ISOLA.pdf:pdf},
year = {2016}
}
@article{Xie2018,
file = {:C$\backslash$:/Users/Asus/Downloads/De{\_}Xie{\_}2018{\_}J.{\_}Phys.{\%}3A{\_}Conf.{\
_}Ser.{\_}1087{\_}062030.pdf:pdf},
title = {{Multi-pixels Classification for nuclei segmentation in digital pathology based on deep
machine learning}},
year = {2018}
}
@article{Tom2015,
author = {Tom, Manu and Ramakrishnan, Vignesh and van Oterendorp, Christian and Deserno,
Thomas M.},
doi = {10.1117/12.2082513},
isbn = {9781628415049},
issn = {16057422},
number = {February},
pages = {941430},
url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2082513},
volume = {51},
year = {2015}
}
@article{Aldini2016,
abstract = {{\textcopyright} A. Aldini. Trust and reputation models for distributed, collaborative
systems have been studied and applied in several domains, in order to stimulate cooperation while
preventing selfish and malicious behaviors. Nonetheless, such models have received less attention in
the process of specifying and analyzing formally the functionalities of the systems mentioned above.
The objective of this paper is to define a process algebraic framework for the modeling of systems
that use (i) trust and reputation to govern the interactions among nodes, and (ii) communication
models characterized by a high level of adaptiveness and flexibility. Hence, we propose a formalism
for verifying, through model checking techniques, the robustness of these systems with respect to
the typical attacks conducted against webs of trust.},
author = {Aldini, Alessandro},
doi = {10.4204/eptcs.217.4},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1607.02232.pdf:pdf},
pages = {19--30},
title = {{A Formal Framework for Modeling Trust and Reputation in Collective Adaptive Systems}},
volume = {217},
year = {2016}
}
@article{Spichkova2014,
abstract = {This paper presents an approach for modeling and verification of components controlling
behaviour of safety-critical systems in their physical environment. In particular, we introduce the
modeling language FocusST that is centred on specifying time and space aspects. Verifications can
be carried out using the interactive semi-automatic proof assistant Isabelle. The approach is
exemplified by means of a railway system scenario.},
author = {Spichkova, Maria and Blech, Jan Olaf and Herrmann, Peter and Schmidt, Heinz},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/paper-07.pdf:pdf},
issn = {16130073},
pages = {49--58},
volume = {1235},
year = {2014}
}
@article{Lim2015a,
abstract = {Collective adaptive systems (CAS) consist of multiple agents that adapt to changing
system and environmental conditions in order to satisfy system goals and quality requirements. As
more applications involve using CAS in a critical context, ensuring the correct and safe adaptive
behaviors of quality-driven CAS has become more important. In this paper, we propose Collective
Adaptive System Testing (CAST), a scalable and efficient approach to testing self-adaptive behaviors
of CAS. We propose a selective method to instantiate and execute test cases relevant to the current
adaptation context. This enables testers to focus testing on key self-adaptive behaviors while dealing
with the scale and dynamicity of the system. An experimental evaluation using a traffic monitoring
system is performed to validate its scalability, efficiency, and fault-detection effectiveness. The
experimental results provide insights into how CAST can serve as a feasible and effective assurance
technique for CAS.},
author = {Lim, Yoo Jin and Jee, Eunkyoung and Shin, Donghwan and Bae, Doo Hwan},
doi = {10.1109/COMPSAC.2015.131},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/lim2015.pdf:pdf},
isbn = {9781467365635},
issn = {07303157},
journal = {Proceedings - International Computer Software and Applications Conference},
pages = {216--221},
volume = {2},
year = {2015}
}
@article{Nigro2018,
abstract = {This paper proposes a method for modelling and analysis of knowledge and
commitments in multi-agent systems. The approach is based on an actors model and its reduction
onto UPPAAL. A key factor of the approach is the possibility of exploiting the same UPPAAL model
for exhaustive verification or, when state explosion problems forbid model checking, for quantitative
evaluation of system properties through statistical model checking. The article describes the
method, shows its application to modelling the NetBill protocol, proposes a translation into the
terms of the timed automata language of UPPAAL and demonstrates the analysis of the NetBill
protocol together with some experimental results.},
author = {Nigro, Christian and Nigro, Libero and Sciammarella, Paolo F.},
doi = {10.7148/2018-0136},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/0136{\_}is{\_}ecms2018{\
_}0856.pdf:pdf},
isbn = {9780993244063},
issn = {25222414},
pages = {136--142},
title = {{Model checking knowledge and commitments in multi-agent systems using actors and
UPPAAL}},
year = {2018}
}
@article{Kwiatkowska,
author = {Kwiatkowska, Marta and Norman, Gethin and Parker, David and Santos, Gabriel},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1007978-3-030-53291-8{\
_}2.pdf:pdf},
isbn = {9783030532918},
pages = {475--487},
title = {{PRISM-games 3.0: Stochastic Game Verification with Concurrency, Equilibria and Time}},
volume = {3},
year = {2020}
}
@book{Hutchison2020,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {978-3-642-22109-5},
volume = {23},
year = {2020}
}
@article{Emerson2008,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Emerson.pdf:pdf},
pages = {1--19},
year = {2008}
}
@article{Brous2019,
abstract = {Organizations are increasingly looking to adopt the Internet of Things (IoT) to collect the
data required for data-driven decision-making. IoT might yield many benefits for asset management
organizations engaged in infrastructure asset management, yet not all organizations are equipped to
handle this data. IoT data is collected, stored, and analyzed within data infrastructures and there are
many changes over time, resulting in the evolution of the data infrastructure and the need to view
data infrastructures as complex adaptive systems (CAS). Such data infrastructures represent
information about physical reality, in this case about the underlying physical infrastructure. Physical
infrastructures are often described and analyzed in literature as CASs, but their underlying data
infrastructures are not yet systematically analyzed, whereas they can also be viewed as CAS. Current
asset management data models tend to view the system from a static perspective, posing
constraints on the extensibility of the system, and making it difficult to adopt new data sources such
as IoT. The objective of the research is therefore to develop an extensible model of asset
management data infrastructures which helps organizations implement data infrastructures which
are capable of evolution and aids the successful adoption of IoT. Systematic literature review and an
IoT case study in the infrastructure management domain are used as research methods. By adopting
a CAS lens in the design, the resulting data infrastructure is extendable to deal with evolution of
asset management data infrastructures in the face of new technologies and new requirements and
to steadily exhibit new forms of emergent behavior. This paper concludes that asset management
data infrastructures are inherently multilevel, consisting of subsystems, links, and nodes, all of which
are interdependent in several ways.},
doi = {10.1155/2019/5415828},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/brous2019.pdf:pdf},
issn = {1076-2787},
journal = {Complexity},
number = {2},
pages = {1--17},
title = {{Next Generation Data Infrastructures: Towards an Extendable Model of the Asset
Management Data Infrastructure as Complex Adaptive System}},
volume = {2019},
year = {2019}
}
@article{Camara2015a,
abstract = {{\textcopyright} 2015 IEEE. Self-adaptive systems overcome many of the limitations of
human supervision in complex software-intensive systems by endowing them with the ability to
automatically adapt their structure and behavior in the presence of runtime changes. However,
adaptation in some classes of systems (e.g., Safety-critical) can benefit by receiving information from
humans (e.g., Acting as sophisticated sensors, decision-makers), or by involving them as system-level
effectors to execute adaptations (e.g., When automation is not possible, or as a fallback mechanism).
However, human participants are influenced by factors external to the system (e.g., Training level,
fatigue) that affect the likelihood of success when they perform a task, its duration, or even if they
are willing to perform it in the first place. Without careful consideration of these factors, it is unclear
how to decide when to involve humans in adaptation, and in which way. In this paper, we investigate
how the explicit modeling of human participants can provide a better insight into the trade-offs of
involving humans in adaptation. We contribute a formal framework to reason about human
involvement in self-adaptation, focusing on the role of human participants as actors (i.e., Effectors)
during the execution stage of adaptation. The approach consists of: (i) a language to express
adaptation models that capture factors affecting human behavior and its interactions with the
system, and (ii) a formalization of these adaptation models as stochastic multiplayer games (SMGs)
that can be used to analyze human-system-environment interactions. We illustrate our approach in
an adaptive industrial middleware used to monitor and manage sensor networks in renewable
energy production plants.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/a614218.pdf:pdf},
isbn = {9781479919345},
journal = {Proceedings - 10th International Symposium on Software Engineering for Adaptive and
Self-Managing Systems, SEAMS 2015},
number = {i},
pages = {146--156},
year = {2015}
}
@article{Webster2018,
abstract = {The Internet of Things (IoT) promises a revolution in the monitoring and control of a wide
range of applications, from urban water supply networks and precision agriculture food production,
to vehicle connectivity and healthcare monitoring. For applications in such critical areas, control
software and protocols for IoT systems must be verified to be both robust and reliable. Two of the
largest obstacles to robustness and reliability in IoT systems are effects on the hardware caused by
environmental conditions, and the choice of parameters used by the protocol. In this paper we use
probabilistic model checking to verify that a synchronisation and dissemination protocol for Wireless
Sensor Networks (WSNs) is correct with respect to its requirements, and is not adversely affected by
the environment. We show how the protocol can be converted into a logical model and then
analysed using the probabilistic model-checker, Prism. Using this approach we prove under which
circumstances the protocol is guaranteed to synchronise all nodes and disseminate new information
to all nodes. We also examine the bounds on synchronisation as the environment changes the
performance of the hardware clock, and investigate the scalability constraints of this approach.},
author = {Webster, Matt and Breza, Michael and Dixon, Clare and Fisher, Michael and Mccann, Julie},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/proceedings{\_}paper{\_}798.pdf:pdf},
title = {{Formal Verification of Synchronisation , Gossip and Environmental Effects for Critical IoT
Systems}},
volume = {076},
year = {2018}
}
@article{Lian2014,
number = {2},
pages = {626--633},
title = {{A Maximum Power Point Tracking Method Based on Perturb-and-Observe Combined With
Particle Swarm Optimization}},
volume = {4},
year = {2014}
}
@article{Holland2015,
number = {1},
pages = {17--30},
volume = {121},
year = {2015}
}
@book{Margaria2014,
editor = {Margaria, Tiziana and Steffen, Bernhard},
publisher = {Springer},
title = {{Leveraging Applications of Formal Methods, Verification and Validation}},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/leveraging-applications-of-formal-
methods-verification-and-valid-2014.pdf:pdf},
isbn = {9783662452332},
volume = {1},
year = {2014}
}
@article{Abbas2018,
abstract = {As the gap between digital and physical worlds getting dwindled as a result of the
dramatic advance getting achieved in information and communication technology (ICT), feasible,
efficient, reliable, and secure smart cities are becoming a reality. Future smart cities will be
characterized by their high distribution, openness, heterogeneity, complexity,
unpredictable/uncertain/dynamic work environments, and their large-scale nature. These
challenging characteristics require a transition from the traditional parts thinking paradigm which
studies systems by breaking them down into their separate elements to the emerging systems
thinking paradigm which represents a holistic approach focuses on the way that a system's
constituent parts interrelate and how systems work over time and within the context of larger
systems. In this article, we first study smart cities from systems thinking perspective and then
introduce self-regulating agent systems and fog computing as promising technological paradigms for
developing future large-scale complex smart cities applications. Preliminary simulation results to test
the performance of the proposed framework are provided. The results show that self-regulated
agent systems can give high performance if an appropriate self-regulation model is used. A complete
architecture for building future complex smart cities based on the systems thinking paradigm and
using self-regulating MAS integrated with fog computing for implementation is currently under
preparation.},
author = {Abbas, Hosny and Shaheen, Samir and Elhoseny, Mohamed and Singh, Amit Kumar and
Alkhambashi, Majid},
doi = {10.1016/j.suscom.2018.05.005},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}3.pdf:pdf},
issn = {22105379},
pages = {204--213},
title = {{Systems thinking for developing sustainable complex smart cities based on self-regulated
agent systems and fog computing}},
url = {https://doi.org/10.1016/j.suscom.2018.05.005},
volume = {19},
year = {2018}
}
@article{Janez2015,
title = {{Diagnostic Ability of Optic Nerve Head Examination, Heidelberg Retina Tomograph's
Moorfield's Regression Analysis, and Glaucoma Probability Score}},
volume = {2015},
year = {2015}
}
@article{Daneshmand2013,
archivePrefix = {arXiv},
arxivId = {arXiv:1405.2936v1},
eprint = {arXiv:1405.2936v1},
pages = {793--801},
volume = {32},
year = {2013}
}
@article{Kephart2003,
author = {Kephart, Jeffrey O. and Chess, David M.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}22.pdf:pdf},
journal = {Computer},
number = {1},
pages = {41--50},
title = {{The Vision of Autonomic Computing}},
volume = {36},
year = {2003}
}
@book{Niaki2014,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/FULLTEXT01{\_}3.pdf:pdf},
isbn = {9789175952864},
title = {{Managing the Complexity in Embedded and Cyber-Physical System Design}},
year = {2014}
}
@article{NayakSeetanadi2019,
author = {{Nayak Seetanadi}, Gautham and Arzen, Karl-Erik and Maggio, Martina},
doi = {10.1109/icac.2019.00021},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Conf-ICAC2019.pdf:pdf},
isbn = {9781728124117},
pages = {95--104},
year = {2019}
}
@article{Prakash2015,
number = {5},
pages = {98--102},
volume = {2},
year = {2015}
}
@article{Brings2020,
abstract = {Context: Emergent behavior cannot be attributed to one individual system alone but
arises in the interplay of various systems, components etc. Ensuring the correctness of emergent
behavior is a well-known challenge that has been addressed by research in various subfields of
software engineering. Objective: This paper aims at providing a unified view on the research
activities conducted and research contributions made on verification and validation of emergent
behavior. Methods: We have conducted a systematic mapping study on the topic of verification and
validation of emergent behavior. We applied a combined search strategy using manual, database,
and snowball search. In total we investigated 7211 papers, from these 168 relevant papers have
been included and classified. Results: Results show an increasing interest in the topic of verification
and validation of emergent behavior. As only little validation and evaluation research has been
conducted, the field can be considered still immature. There exist different verification and
validation techniques used in the various solution approaches such as model checking, simulation, or
runtime monitoring. It stands out that even though research is published in different software
engineering fields and subfields no verification or validation technique can be attributed solely to a
single field.},
author = {Brings, Jennifer and Daun, Marian and Keller, Kevin and {Aluko Obe}, Patricia and Weyer,
Thorsten},
doi = {10.1016/j.future.2020.06.049},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {0167739X},
pages = {1010--1037},
title = {{A systematic map on verification and validation of emergent behavior in software
engineering research}},
url = {https://doi.org/10.1016/j.future.2020.06.049},
volume = {112},
year = {2020}
}
@article{Hugh2012,
pages = {1--7},
title = {{Barriers to using Agile Software Development Practices within the Medical Device Industry}},
year = {2012}
}
@article{Chen2019,
abstract = {The use of high performance computing (HPC) has been generating influential scientific
breakthroughs since the twentieth century. Yet there have been few studies of the complex socio-
technical systems formed by these supercomputers and the humans who operate and use them. In
this paper, we describe the first complex adaptive systems (CAS) analysis of the dynamics of HPC
ecosystems. We conducted an 18-month ethnographic study that included scientific collaborations
that use an HPC research center and examined the processes in HPC socio-technical systems via CAS
theory to devise organizational designs and strategies that take advantage of system complexity. We
uncovered several significant mismatches in the variation and adaptation processes within
subsystems and conclude with three potential design directions for management and organization of
HPC socio-technical ecosystems.},
author = {Chen, Nan-Chen and Ramakrishnan, Lavanya and Poon, Sarah S. and Aragon, Cecilia},
doi = {10125/60065},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/0625.pdf:pdf},
isbn = {978-0-9981331-2-6},
pages = {6311--6320},
url = {https://scholarspace.manoa.hawaii.edu/handle/10125/60065},
volume = {6},
year = {2019}
}
@article{Konur2012,
abstract = {An alternative to deploying a single robot of high complexity can be to utilise robot
swarms comprising large numbers of identical, and much simpler, robots. Such swarms have been
shown to be adaptable, fault-tolerant and widely applicable. However, designing individual robot
algorithms to ensure effective and correct overall swarm behaviour is actually very difficult. While
mechanisms for assessing the effectiveness of any swarm algorithm before deployment are
essential, such mechanisms have traditionally involved either computational simulations of swarm
behaviour, or experiments with robot swarms themselves. However, such simulations or
experiments cannot, by their nature, analyse all possible swarm behaviours. In this paper, we will
develop and apply the use of automated probabilistic formal verification techniques to robot
swarms, involving an exhaustive mathematical analysis, in order to assess whether swarms will
indeed behave as required. In particular we consider a foraging robot scenario to which we apply
probabilistic model checking. {\textcopyright} 2011 Elsevier B.V. All rights reserved.},
doi = {10.1016/j.robot.2011.10.005},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/konur2012.pdf:pdf},
issn = {09218890},
number = {2},
pages = {199--213},
url = {http://dx.doi.org/10.1016/j.robot.2011.10.005},
volume = {60},
year = {2012}
}
@article{Klein2015,
abstract = {BACKGROUND: To investigate the effect of cataract on the ability of spatial and temporal
contrast sensitivity tests used to detect early glaucoma. METHODS:
Twenty-seven glaucoma subjects with early cataract (mean age 60 ± 10.2 years) which constituted
the test group were recruited together with twenty-seven controls (cataract only) matched for age
and cataract type from a primary eye care setting. Contrast sensitivity to flickering gratings at 20 Hz
and stationary gratings with and without glare, were measured for 0.5, 1.5 and 3 cycles per degree
(cpd) in central vision. Perimetry and structural measurements with the Heidelberg Retinal
Tomograph (HRT) were also performed. RESULTS: After considering the
effect of cataract, contrast sensitivity to stationary gratings was reduced in the test group compared
with controls with a statistically significant mean difference of 0.2 log units independent of spatial
frequency. The flicker test showed a significant difference between test and control group at 1.5 and
3 cpd (p = 0.019 and p = 0.011 respectively). The percentage of glaucoma patients who could not see
the temporal modulation was much higher compared with their cataract only counterparts. A
significant correlation was found between the reduction of contrast sensitivity caused by glare and
the Glaucoma Probability Score (GPS) as measured with the HRT (p{\textless}0.005).
CONCLUSIONS: These findings indicate that both spatial and temporal contrast
sensitivity tests are suitable for distinguishing between vision loss as a consequence of glaucoma and
vision loss caused by cataract only. The correlation between glare factor and GPS suggests that there
may be an increase in intraocular stray light in glaucoma.},
author = {Klein, Johann and Pierscionek, Barbara K. and Lauritzen, Jan and Derntl, Karin and
Grzybowski, Andrzej and Zlatkova, Margarita B.},
doi = {10.1371/journal.pone.0128681},
issn = {19326203},
number = {6},
pages = {1--17},
pmid = {26053793},
title = {{The effect of cataract on early stage glaucoma detection using spatial and temporal contrast
sensitivity tests}},
volume = {10},
year = {2015}
}
@article{Hnetynka2020a,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/hnetynka{\_}using{\_}2020.pdf:pdf},
isbn = {9781450379625},
title = {{Using Component Ensembles for Modeling Autonomic Component Collaboration in Smart
Farming}},
year = {2020}
}
@article{Afshar2015,
abstract = {Among the emerged metaheuristic optimization techniques, ant colony optimization
(ACO) has received considerable attentions in water resources and environmental planning and
management during last decade. Different versions of ACO have proved to be flexible and powerful
in solving number of spatially and temporally complex water resources problems in discrete and
continuous domains with single and/or multiple objectives. Reviewing large number of peer
reviewed journal papers and few valuable conference papers, we intend to touch the characteristics
of ant algorithms and critically review their state-of- the-art applications in water resources and
environmental management problems, both in discrete and continuous domains. The paper seeks to
promote Opportunities, advantages and disadvantages of the algorithm as applied to different areas
of water resources problems both in research and practice. It also intends to identify and present
the major and seminal contributions of ant algorithms and their findings in organized areas of
reservoir operation and surface water management, water distribution systems, urban drainage and
sewer systems, groundwater managements, environmental and watershed management. Current
trends and challenges in ACO algorithms are discussed and called for increased attempts to carry out
convergence analysis as an active area of interest.},
author = {Afshar, Abbas and Massoumi, Fariborz and Afshar, Amin and Mari{\~{n}}o, Miquel A.},
doi = {10.1007/s11269-015-1016-9},
issn = {15731650},
number = {11},
pages = {3891--3904},
title = {{State of the Art Review of Ant Colony Optimization Applications in Water Resource
Management}},
volume = {29},
year = {2015}
}
@article{Schuld2015,
abstract = {Machine learning algorithms learn a desired input-output relation from examples in
order to interpret new inputs. This is important for tasks such as image and speech recognition or
strategy optimisation, with growing applications in the IT industry. In the last couple of years,
researchers investigated if quantum computing can help to improve classical machine learning
algorithms. Ideas range from running computationally costly algorithms or their subroutines
efficiently on a quantum computer to the translation of stochastic methods into the language of
quantum theory. This contribution gives a systematic overview of the emerging field of quantum
machine learning. It presents the approaches as well as technical details in an accessible way, and
discusses the potential of a future theory of quantum learning.},
archivePrefix = {arXiv},
arxivId = {1409.3097},
doi = {10.1080/00107514.2014.964942},
eprint = {1409.3097},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/schuld2014.pdf:pdf},
issn = {13665812},
number = {2},
pages = {172--185},
volume = {56},
year = {2015}
}
@article{Konnov2017,
abstract = {Counter abstraction is a powerful tool for parameterized model checking, if the number
of local states of the concurrent processes is relatively small. In recent work, we introduced
parametric interval counter abstraction that allowed us to verify the safety and liveness of
threshold-based fault-tolerant distributed algorithms (FTDA). Due to state space explosion, applying
this technique to distributed algorithms with hundreds of local states is challenging for state-of-the-
art model checkers. In this paper, we demonstrate that reachability properties of FTDAs can be
verified by bounded model checking. To ensure completeness, we need an upper bound on the
distance between states. We show that the diameters of accelerated counter systems of FTDAs, and
of their counter abstractions, have a quadratic upper bound in the number of local transitions. Our
experiments show that the resulting bounds are sufficiently small to use bounded model checking
for parameterized verification of reachability properties of several FTDAs, some of which have not
been automatically verified before.},
doi = {10.1016/j.ic.2016.03.006},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/konnov2016.pdf:pdf},
isbn = {9783662445839},
issn = {10902651},
pages = {95--109},
title = {{On the completeness of bounded model checking for threshold-based distributed
algorithms: Reachability}},
url = {http://dx.doi.org/10.1016/j.ic.2016.03.006},
volume = {252},
year = {2017}
}
@article{Le2009,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/CLeGuernic{\_}thesis.pdf:pdf},
year = {2009}
}
@article{Jeong2016,
abstract = {PURPOSE To evaluate the effect of multiple covariates on the diagnostic performance of
the Cirrus high-definition optical coherence tomography (HD-OCT) for glaucoma detection.
METHODS A prospective case-control study was performed and included 173 recently diagnosed
glaucoma patients and 63 unaffected individuals from the Macular Ganglion Cell Imaging Study.
Regression analysis of receiver operating characteristic were conducted to evaluate the influence of
age, spherical equivalent, axial length, optic disc size, and visual field index on the macular ganglion
cell-inner plexiform layer (GCIPL) and peripapillary retinal nerve fiber layer (RNFL) measurements.
RESULTS Disease severity, as measured by visual field index, had a significant effect on the diagnostic
performance of all Cirrus HD-OCT parameters. Age, axial length and optic disc size were significantly
associated with diagnostic accuracy of average peripapillary RNFL thickness, whereas axial length
had a significant effect on the diagnostic accuracy of average GCIPL thickness. CONCLUSIONS
Diagnostic performance of the Cirrus HD-OCT may be more accurate in the advanced stages of
glaucoma than at earlier stages. A smaller optic disc size was significantly associated with improved
the diagnostic ability of average RNFL thickness measurements; however, GCIPL thickness may be
less affected by age and optic disc size.},
author = {Jeong, Jae Hoon and Choi, Yun Jeong and Park, Ki Ho and Kim, Dong Myung and Jeoung, Jin
Wook},
doi = {10.1371/journal.pone.0160448},
isbn = {1404039570},
issn = {19326203},
number = {8},
pages = {1--13},
pmid = {27490718},
title = {{Macular ganglion cell imaging study: Covariate effects on the spectral domain optical
coherence tomography for glaucoma diagnosis}},
volume = {11},
year = {2016}
}
@book{Epstein2014,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/wijethilake2018.pdf:pdf},
isbn = {1474787120180},
keywords = {Corporate sustainability pressures,institutional theory,management control
systems,proactive strategic responses,resource-based view,sustainability control systems},
pages = {iii},
year = {2014}
}
@article{Prema2016,
number = {3},
pages = {21--29},
title = {{BRAIN CANCER FEATURE EXTRACTION USING OTSU'S THRESHOLDING SEGMENTATION}},
volume = {6},
year = {2016}
}
@book{Holzl2015,
author = {Holzl, Matthias and Koch, Nora and Puviani, Mariachiara and Wirsing, Martin},
doi = {10.1007/978-3-319-16310-9},
file =
{:C$\backslash$:/Users/Asus/Downloads/Documents/HKPWZEnsembleLifeCycle2015Draft.pdf:pdf},
isbn = {978-3-319-16309-3},
number = {January},
title = {{The Ensemble Development Life Cycle and Best Practices for Collective Autonomic Systems}},
url = {http://link.springer.com/10.1007/978-3-319-16310-9},
volume = {8998},
year = {2015}
}
@article{Zheng2012,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/50199.pdf:pdf},
journal = {School of Enviromental Sciences},
title = {{We are IntechOpen, the world's leading publisher of Open Access books Built by scientists,
for scientists TOP 1 {\%}}},
year = {2012}
}
@article{Chasmer2017,
file = {:C$\backslash$:/Users/Asus/Downloads/JSME-19-0290{\_}Proof{\_}hi.pdf:pdf},
pages = {1--5},
year = {2017}
}
@book{Loreti2016,
doi = {10.1007/978-3-319-34096-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/main{\_}3.pdf:pdf},
isbn = {9783319340968},
title = {{Modelling and Analysis of Collective Adaptive Systems with CARMA and its Tools}},
year = {2016}
}
@article{Syahputra2015,
number = {April},
pages = {293--304},
volume = {10},
year = {2015}
}
@article{Madhusudhan2011,
author = {Madhusudhan, Mishra and Malay, Nath and Nirmala, S R and Samerendra, Dandapat},
pages = {365--373},
year = {2011}
}
@article{Zurita2005,
number = {January},
year = {2005}
}
@article{Salama2019,
abstract = {Cloud-based software systems are increasingly becoming complex and operating in highly
dynamic environments. Self-adaptivity and self-awareness have recently emerged to cope with such
level of dynamicity and scalability. Meanwhile, designing and testing such systems have proven to be
a challenging task, as well as research benchmarking. Despite the influx of research in both self-
adaptivity and cloud computing, as well as the various simulations environments proposed so far,
there is a general lack of modelling and simulation environments of self-adaptive and self-aware
cloud architectures. To aid researchers and practioners in overcoming such challenges, this paper
presents a novel modelling and simulation environment for self-adaptive and self-aware cloud
architectures. The environment provides significant benefits for designing self-adaptive and self-
aware cloud architectures, as well as testing adaptation and awareness mechanisms. The toolkit is
also beneficial as a symbiotic simulator during runtime to support adaptation decisions. We
experimentally validated and evaluated the implementation using benchmarks and evaluation use
cases.},
archivePrefix = {arXiv},
arxivId = {1912.05058},
author = {Salama, Maria and Bahsoon, Rami and Buyya, Rajkumar},
eprint = {1912.05058},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1912.05058.pdf:pdf},
keywords = {cloud,modelling,self-adaptation,self-awareness,simulation},
title = {{Modelling and Simulation Environment for Self-Adaptive and Self-Aware Cloud
Architectures}},
url = {http://arxiv.org/abs/1912.05058},
year = {2019}
}
@article{Papadopoulos2016,
abstract = {Much of the development of model-based design and dependability analysis in the
design of dependable systems, including software intensive systems, can be attributed to the
application of advances in formal logic and its application to fault forecasting and verification of
systems. In parallel, work on bio-inspired technologies has shown potential for the evolutionary
design of engineering systems via automated exploration of potentially large design spaces. We have
not yet seen the emergence of a design paradigm that effectively combines these two techniques,
schematically founded on the two pillars of formal logic and biology, from the early stages of, and
throughout, the design lifecycle. Such a design paradigm would apply these techniques
synergistically and systematically to enable optimal refinement of new designs which can be driven
effectively by dependability requirements. The paper sketches such a model-centric paradigm for
the design of dependable systems, presented in the scope of the HiP-HOPS tool and technique, that
brings these technologies together to realise their combined potential benefits. The paper begins by
identifying current challenges in model-based safety assessment and then overviews the use of
meta-heuristics at various stages of the design lifecycle covering topics that span from allocation of
dependability requirements, through dependability analysis, to multi-objective optimisation of
system architectures and maintenance schedules.},
author = {Papadopoulos, Yiannis and Walker, Martin and Parker, David and Sharvia, Septavera and
Bottaci, Leonardo and Kabir, Sohag and Azevedo, Luis and Sorokos, Ioannis},
doi = {10.1016/j.arcontrol.2016.04.008},
issn = {13675788},
pages = {170--182},
title = {{A synthesis of logic and bio-inspired techniques in the design of dependable systems}},
volume = {41},
year = {2016}
}
@article{DeSousa2019,
abstract = {Self-adaptive systems (SAS) can adapt their behavior to suit user preferences or contexts,
as well as monitor their performance and adjust it if necessary. In addition to adaptation operations,
self-adaptive systems communicate with sensors, actuators, and other devices. Due to the
complexity and dynamism of SAS, many situations can compromise the functioning of the system,
such as faults in adaptations, low performance to execute tasks, and context inconsistencies. To
prevent the system of these problems, it is essential to ensure high levels of quality. However, due
to the peculiarities of these systems, there are still challenges to perform quality evaluations in these
systems. In this sense, this paper proposes a discussion about the quality evaluation of self-adaptive
systems in the last years. As a result, we identify challenges, limitations and research opportunities
related to SAS quality evaluation.},
author = {{De Sousa}, Amanda Oliveira and Andrade, Rossana M.C. and Bezerra, Carla I.M. and Filho,
Jos{\'{e}} M.S.M.},
doi = {10.1145/3350768.3352455},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/3350768.3352455.pdf:pdf},
isbn = {9781450376518},
pages = {213--218},
year = {2019}
}
@article{Geihs2019,
doi = {10.20944/preprints201911.0385.v1},
file = {:C$\backslash$:/Users/Asus/Downloads/preprints201911.0385.v1.pdf:pdf},
number = {November},
pages = {1--16},
title = {{Teamwork for Multi-Robot Systems in Dynamic Environments Requirements and Solutions}},
year = {2019}
}
@article{Liu2015,
abstract = {This paper addresses semantic image segmentation by incorporating rich information
into Markov Random Field (MRF), including high-order relations and mixture of label contexts.
Unlike previous works that optimized MRFs using iterative algorithm, we solve MRF by proposing a
Convolutional Neural Network (CNN), namely Deep Parsing Network (DPN), which enables
deterministic end-to-end computation in a single forward pass. Specifically, DPN extends a
contemporary CNN architecture to model unary terms and additional layers are carefully devised to
approximate the mean field algorithm (MF) for pairwise terms. It has several appealing properties.
First, different from the recent works that combined CNN and MRF, where many iterations of MF
were required for each training image during back-propagation, DPN is able to achieve high
performance by approximating one iteration of MF. Second, DPN represents various types of
pairwise terms, making many existing works as its special cases. Third, DPN makes MF easier to be
parallelized and speeded up in Graphical Processing Unit (GPU). DPN is thoroughly evaluated on the
PASCAL VOC 2012 dataset, where a single DPN model yields a new state-of-the-art segmentation
accuracy.},
archivePrefix = {arXiv},
arxivId = {1509.02634},
author = {Liu, Ziwei and Li, Xiaoxiao and Luo, Ping and Loy, Chen Change and Tang, Xiaoou},
doi = {10.1109/ICCV.2015.162},
eprint = {1509.02634},
isbn = {9781467383912},
issn = {15505499},
pages = {1377--1385},
year = {2015}
}
@article{Bartocci2016,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/grosu{\_}bartocci.pdf:pdf},
year = {2016}
}
@article{Pfeffer2019,
author = {Pfeffer, Avi and Wu, Curt and Fry, Gerald and Lu, Kenny and Marotta, Steve and Reposa,
Mike and Shi, Yuan and Kumar, T. K.Satish and Knoblock, Craig A. and Parker, David and Muhammad,
Irfan and Novakovic, Chris},
doi = {10.1109/MS.2018.2886815},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ieeesw-princess.pdf:pdf},
issn = {19374194},
number = {2},
pages = {91--96},
volume = {36},
year = {2019}
}
@article{DoNascimento2017,
abstract = {Billions of resources, such as cars, clothes, household appliances and even food are being
connected to the Internet forming the Internet of Things (IoT). Subsets of these resources can work
together to create new self-regulating IoT applications such as smart health, smart communities and
smart homes. However, several challenging issues need to be addressed before this vision of
applications based on IoT concepts becomes a reality. Because many IoT applications will be
distributed over a large number of interacting devices, centralized control will not be possible and so
open problems will need to be solved that relate to building locally operating self-organizing and
self-adaptive systems. As an initial step in creating IoT applications with these features, this paper
presents a Framework for IoT (FIoT). The approach is based on Multi-Agent Systems (MAS) and
Machine Learning Techniques, such as neural networks and evolutionary algorithms. To illustrate the
use of FIoT, the paper contains two different IoT applications: (i) Quantified Things and (ii) Smart
traffic control. We show how flexible points of our framework are instantiated to generate these IoT
application.},
author = {do Nascimento, Nathalia Moraes and de Lucena, Carlos Jos{\'{e}} Pereira},
doi = {10.1016/j.ins.2016.10.031},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/donascimento2017.pdf:pdf},
issn = {00200255},
title = {{FIoT: An agent-based framework for self-adaptive and self-organizing applications based on
the Internet of Things}},
volume = {378},
year = {2017}
}
@article{Angelo2008,
doi = {10.1016/j.jcss.2007.07.010},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0022000007001080-
main.pdf:pdf},
pages = {1082--1093},
title = {{A framework for adaptive collective communications for heterogeneous hierarchical
computing systems}},
volume = {74},
year = {2008}
}
@article{Tan2017,
abstract = {We have developed and trained a convolutional neural network to automatically and
simultaneously segment optic disc, fovea and blood vessels. Fundus images were normalized before
segmentation was performed to enforce consistency in background lighting and contrast. For every
effective point in the fundus image, our algorithm extracted three channels of input from the point's
neighbourhood and forwarded the response across the 7-layer network. The output layer consists of
four neurons, representing background, optic disc, fovea and blood vessels. In average, our
segmentation correctly classified 92.68{\%} of the ground truths (on the testing set from Drive
database). The highest accuracy achieved on a single image was 94.54{\%}, the lowest 88.85{\%}. A
single convolutional neural network can be used not just to segment blood vessels, but also optic
disc and fovea with good accuracy.},
archivePrefix = {arXiv},
arxivId = {1702.00509},
author = {Tan, Jen Hong and Acharya, U. Rajendra and Bhandary, Sulatha V. and Chua, Kuang Chua
and Sivaprasad, Sobha},
doi = {10.1016/j.jocs.2017.02.006},
eprint = {1702.00509},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Article For FYP/1702.00509.pdf:pdf},
issn = {18777503},
pages = {70--79},
title = {{Segmentation of optic disc, fovea and retinal vasculature using a single convolutional neural
network}},
volume = {20},
year = {2017}
}
@article{Kandan2016,
abstract = {This Paper provides the various analyses of Image Segmentation techniques for any field
of image processing based applications. Segmentation is considered as a basic need in image
processing for find the lines, curves, boundaries, etc in an image. In order to classify the
segmentation techniques such as GA, Neural Network, Soft Computing and various image
segmentation techniques and their performances analysis is done. Based on the performance
analysis of segmentation techniques has been analyzed and conclude that each technique as best
under the various field.},
number = {10},
pages = {975--8887},
url = {https://pdfs.semanticscholar.org/e6a5/ed3dbedcbced6fa371a3dbdac2afa4f21606.pdf},
volume = {153},
year = {2016}
}
@article{Chen2013a,
author = {Chen, Taolue and Kwiatkowska, Marta and Simaitis, Aistis and Wiltsche, Clemens},
year = {2013}
}
@article{Casadei2019,
doi = {10.1109/FMEC.2019.8795355},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/casadei2019.pdf:pdf},
isbn = {9781728117966},
journal = {2019 Fourth International Conference on Fog and Mobile Edge Computing (FMEC)},
pages = {60--67},
publisher = {IEEE},
year = {2019}
}
@article{Belzner2016,
author = {Belzner, Lenz and Matthias, H and Koch, Nora and B, Martin Wirsing},
doi = {10.1007/978-3-319-46508-1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/belzner-et-al-collective-trans16.pdf:pdf},
isbn = {978-3-319-46507-4},
pages = {180--200},
title = {{Collective Autonomic Systems: Towards Engineering Principles and Their Foundation}},
url = {http://link.springer.com/10.1007/978-3-319-46508-1},
volume = {9960},
year = {2016}
}
@article{Ciancia2018,
abstract = {We present the use of a novel spatio-temporal model checker to detect problems in the
data and operation of a collective adaptive system. Data correctness is important to ensure
operational correctness in systems which adapt in response to data. We illustrate the theory with
several concrete examples, addressing both the detection of errors in vehicle location data for buses
in the city of Edinburgh and the undesirable phenomenon of “clumping” which occurs when there is
not enough separation between subsequent buses serving the same route. Vehicle location data are
visualised symbolically on a street map, and categories of problems identified by the spatial part of
the model checker are rendered by highlighting the symbols for vehicles or other objects that satisfy
a property of interest. Behavioural correctness makes use of both the spatial and temporal aspects
of the model checker to determine from a series of observations of vehicle locations whether the
system is failing to meet the expected quality of service demanded by system regulators.},
author = {Ciancia, Vincenzo and Gilmore, Stephen and Grilletti, Gianluca and Latella, Diego and
Loreti, Michele and Massink, Mieke},
doi = {10.1007/s10009-018-0483-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ciancia2018.pdf:pdf},
issn = {14332787},
number = {3},
pages = {289--311},
url = {https://doi.org/10.1007/s10009-018-0483-8},
volume = {20},
year = {2018}
}
@article{Diaconescu2017,
author = {Diaconescu, Ada and Bellman, Kirstie L and Esterle, Lukas and Giese, Holger and G{\"{o}}tz,
Sebastian and Lewis, Peter and Zisman, Andrea},
doi = {10.1007/978-3-319-47474-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-319-47474-8{\_}7{\_}2.pdf:pdf},
isbn = {9783319474748},
pages = {191--192},
year = {2017}
}
@article{Umrigar1983,
author = {Umrigar, Zerksis D},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
pages = {221--227},
volume = {0},
year = {1983}
}
@book{Yang2019,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9789813293427},
title = {{Advances in Intelligent Systems and Computing 1027 Fourth International Congress on
Information and Communication Technology}},
volume = {2},
year = {2019}
}
@article{Garlan2004,
author = {Garlan, David and Cheng, Shang Wen and Huang, An Cheng and Schmerl, Bradley and
Steenkiste, Peter},
doi = {10.1109/MC.2004.175},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/autonomic-web.pdf:pdf},
issn = {00189162},
journal = {Computer},
number = {10},
pages = {46--54},
volume = {37},
year = {2004}
}
@article{Zeigler2018,
author = {Zeigler, Bernard P and Mittal, Saurabh and Traore, Mamadou K.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
keywords = {complex adaptive systems,healthcare systems,system engineering,systems of systems},
year = {2018}
}
@article{Svorenova2016,
abstract = {Design and control of computer systems that operate in uncertain, competitive or
adversarial, environments can be facilitated by formal modelling and analysis. In this paper, we focus
on analysis of complex computer systems modelled as turn-based 2 1/2-player games, or stochastic
games for short, that are able to express both stochastic and non-stochastic uncertainties. We offer
a systematic overview of the body of knowledge and algorithmic techniques for verification and
strategy synthesis for stochastic games with respect to a broad class of quantitative properties
expressible in temporal logic. These include probabilistic linear-time properties, expected total,
discounted and average reward properties, and their branching-time extensions and multi-objective
combinations. To demonstrate applicability of the framework as well as its practical implementation
in a tool called PRISM-games, we describe several case studies that rely on analysis of stochastic
games, from areas such as robotics, and networked and distributed systems.},
doi = {10.1016/j.ejcon.2016.04.009},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0947358016300292-
main.pdf:pdf},
issn = {09473580},
pages = {15--30},
volume = {30},
year = {2016}
}
@article{J.2017,
doi = {10.1007/s40846-017-0233-5},
issn = {2199-4757},
number = {3},
pages = {386--394},
url = {http://www.embase.com/search/results?subaction=viewrecord{\&}from=export{\&}id=L615956151{\%}0Ahttp://dx.doi.org/10.1007/s40846-017-0233-5},
volume = {37},
year = {2017}
}
@article{Holland2010,
file =
{:C$\backslash$:/Users/Asus/Downloads/Documents/47051db0431ab350a3ff7a6852cba7e7ac01.pd
f:pdf},
number = {1},
pages = {17--30},
volume = {121},
year = {2010}
}
@article{Lin2002,
abstract = {Residuals have long been used for graphical and numerical examinations of the adequacy
of regression models. Conventional residual analysis based on the plots of raw residuals or their
smoothed curves is highly subjective, whereas most numerical goodness-of-fit tests provide little
information about the nature of model misspecification. In this paper, we develop objective and
informative model-checking techniques by taking the cumulative sums of residuals over certain
coordinates (e.g., covariates or fitted values) or by considering some related aggregates of residuals,
such as moving sums and moving averages. For a variety of statistical models and data structures,
including generalized linear models with independent or dependent observations, the distributions
of these stochastic processes under the assumed model can be approximated by the distributions of
certain zero-mean Gaussian processes whose realizations can be easily generated by computer
simulation. Each observed process can then be compared, both graphically and numerically, with a
number of realizations from the Gaussian process. Such comparisons enable one to assess
objectively whether a trend seen in a residual plot reflects model misspecification or natural
variation. The proposed techniques are particularly useful in checking the functional form of a
covariate and the link function. Illustrations with several medical studies are provided.},
doi = {10.1111/j.0006-341X.2002.00001.x},
file =
{:C$\backslash$:/Users/Asus/Downloads/Documents/4ec409632d70461ab0da85d28be24211121dd
b919bf7deddb1840cb78b602f4f.pdf:pdf},
issn = {0006341X},
journal = {Biometrics},
number = {1},
pages = {1--12},
pmid = {11890304},
volume = {58},
year = {2002}
}
@book{Civicioglu2013,
abstract = {In this paper, the algorithmic concepts of the Cuckoo-search (CK), Particle swarm
optimization (PSO), Differential evolution (DE) and Artificial bee colony (ABC) algorithms have been
analyzed. The numerical optimization problem solving successes of the mentioned algorithms have
also been compared statistically by testing over 50 differ- ent benchmark functions. Empirical results
reveal that the problem solving success of the CK algorithm is very close to the DE algorithm. The
run-time complexity and the required function-evaluation number for acquiring globalminimizer by
theDE algorithm is generally smaller than the comparison algorithms.The performances of
theCKandPSOalgorithms are statistically closer to the performance of the DE algorithm than the ABC
algorithm. The CK andDE algorithms supplymore robust and precise results than the PSO
andABCalgorithms.},
doi = {10.1007/s10462-011-9276-0},
isbn = {0000000000000},
issn = {02692821},
number = {4},
pages = {315--346},
title = {{A conceptual comparison of the Cuckoo-search, particle swarm optimization, differential
evolution and artificial bee colony algorithms}},
volume = {39},
year = {2013}
}
@article{Brihaye2016,
author = {Brihaye, Thomas and Dhar, Amit Kumar and Geeraerts, Gilles and Haddad, Axel and
Monmege, Benjamin},
doi = {10.4204/EPTCS.220.1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1608.00652.pdf:pdf},
issn = {20752180},
pages = {1--12},
volume = {220},
year = {2016}
}
@book{Nawaz2019,
abstract = {The recent introduction of the big data paradigm and advancements in machine learning
and deep mining techniques have made proof guidance and automation in interactive theorem
provers (ITPs) an important research topic. In this paper, we provide a learning approach based on
sequential pattern mining (SPM) for proof guidance in the PVS proof assistant. Proofs in a PVS theory
are first abstracted to a computer-processable corpus. SPM techniques are then used on the corpus
to discover frequent proof steps and proof patterns, relationships of proof steps / patterns with each
other, dependency of new conjectures on already proved facts and to predict the next proof step(s).
Obtained results suggest that the integration of SPM in proof assistants can be used to guide the
proof process and in the development of proof tactics/strategies.},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
doi = {10.1007/978-3-030-31517-7_4},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030315160},
issn = {16113349},
pages = {45--60},
year = {2019}
}
@article{Systems,
author = {Systems, Complex and Fontana, Walter Los and Mexi, New Nonlinear and Alamos, Studies
and Submitted, National and Stein, Lectures D L Santa and Le, Sciences},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/6135641.pdf:pdf},
}
@article{Gong2012,
number = {c},
pages = {1--16},
title = {{Complex Network Clustering by Multiobjective Discrete Particle Swarm Optimization Based
on Decomposition}},
year = {2012}
}
@article{Rodonaia2015,
number = {1},
pages = {41--47},
title = {{Statistic complexity metrics as a basis for formal probabilistic model checking}},
volume = {4},
year = {2015}
}
@article{Smedlund2018,
abstract = {We present a typology of strategies employed by firms using the Internet of Things (IoT).
The IoT is a distributed network of connected physical objects. As these devices exchange data with
each other instead of through an intermediary, the IoT increases complexity of business ecosystems,
and opens up new business opportunities. When the platform owner does not own the data and
technology is mostly open source, other actors can use and build on them. In addition to platform
owner's strategy, we propose a framework with three additional strategies, based on whether the
firms' offering integrates into the specific industrial value chain or contributes to the IoT ecosystem,
and whether the firm offering is by nature stand-alone or systemic. With a multiple case study
design, we explore this framework in the setting of 23 firms in a large research project context. The
descriptions of the identified IoT strategies support our framework.},
doi = {10.24251/hicss.2018.199},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/paper0199.pdf:pdf},
isbn = {9780998133119},
pages = {1591--1600},
title = {{Firm Strategies in Open Internet of Things Business Ecosystems: Framework and Case
Study}},
volume = {9},
year = {2018}
}
@article{Zhang2017,
abstract = {Study objective A stable and readily accessible work surface for bedside medical
procedures represents a valuable tool for acute care providers. In emergency department (ED)
settings, the design and implementation of traditional Mayo stands and related surface devices often
limit their availability, portability, and usability, which can lead to suboptimal clinical practice
conditions that may affect the safe and effective performance of medical procedures and delivery of
patient care. We designed and built a novel, open-source, portable, bedside procedural surface
through an iterative development process with use testing in simulated and live clinical
environments. Methods The procedural surface development project was conducted between
October 2014 and June 2016 at an academic referral hospital and its affiliated simulation facility. An
interdisciplinary team of emergency physicians, mechanical engineers, medical students, and design
students sought to construct a prototype bedside procedural surface out of off-the-shelf hardware
during a collaborative university course on health care design. After determination of end-user
needs and core design requirements, multiple prototypes were fabricated and iteratively modified,
with early variants featuring undermattress stabilizing supports or ratcheting clamp mechanisms.
Versions 1 through 4 underwent 2 hands-on usability-testing simulation sessions; version 5 was
presented at a design critique held jointly by a panel of clinical and industrial design faculty for
expert feedback. Responding to select feedback elements over several surface versions,
investigators arrived at a near-final prototype design for fabrication and use testing in a live clinical
setting. This experimental procedural surface (version 8) was constructed and then deployed for
controlled usability testing against the standard Mayo stands in use at the study site ED. Clinical
providers working in the ED who opted to participate in the study were provided with the prototype
surface and just-in-time training on its use when performing bedside procedures. Subjects
completed the validated 10-point System Usability Scale postshift for the surface that they had used.
The study protocol was approved by the institutional review board. Results Multiple prototypes and
recursive design revisions resulted in a fully functional, portable, and durable bedside procedural
surface that featured a stainless steel tray and intuitive hook-and-lock mechanisms for attachment
to ED stretcher bed rails. Forty-two control and 40 experimental group subjects participated and
completed questionnaires. The median System Usability Scale score (out of 100; higher scores
associated with better usability) was 72.5 (interquartile range [IQR] 51.3 to 86.3) for the Mayo stand;
the experimental surface was scored at 93.8 (IQR 84.4 to 97.5 for a difference in medians of 17.5
(95{\%} confidence interval 10 to 27.5). Subjects reported several usability challenges with the Mayo
stand; the experimental surface was reviewed as easy to use, simple, and functional. In accordance
with experimental live environment deployment, questionnaire responses, and end-user
suggestions, the project team finalized the design specification for the experimental procedural
surface for open dissemination. Conclusion An iterative, interdisciplinary approach was used to
generate, evaluate, revise, and finalize the design specification for a new procedural surface that
met all core end-user requirements. The final surface design was evaluated favorably on a validated
usability tool against Mayo stands when use tested in simulated and live clinical settings.},
author = {Zhang, Xiao C. and Bermudez, Ana M. and Reddy, Pranav M. and Sarpatwari, Ravi R. and
Chheng, Darin B. and Mezoian, Taylor J. and Schwartz, Victoria R. and Simmons, Quinneil J. and Jay,
Gregory D. and Kobayashi, Leo},
doi = {10.1016/j.annemergmed.2016.08.436},
isbn = {0196-0644},
issn = {10976760},
number = {3},
pages = {275--283},
pmid = {27856021},
url = {http://dx.doi.org/10.1016/j.annemergmed.2016.08.436},
volume = {69},
year = {2017}
}
@article{Lamani2014,
pages = {158--163},
title = {{Early Detection of Glaucoma Through Retinal Nerve Fiber Layer Analysis Using Fractal
Dimension and Texture Feature}},
year = {2014}
}
@article{Graefenstein2020,
author = {Graefenstein, Julian and Winkels, Jan and Lenz, Lisa and Weist, Kai and Krebil, Kevin and
Gralla, Mike},
doi = {10.24251/hicss.2020.806},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/0650.pdf:pdf},
isbn = {9780998133133},
pages = {6580--6588},
title = {{A Hybrid Approach of Modular Planning – Synchronizing Factory and Building Planning by
Using Component based Synthesis}},
volume = {3},
year = {2020}
}
@article{Camacho2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/cam-mci-icaps19.pdf:pdf},
number = {2011},
year = {2015}
}
@article{Theodoropoulos2016,
author = {Theodoropoulos, D and {\'{A}}lvarez, C and Ayguad{\'{e}}, E and Bueno, J and Filgueras, A},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/auj-37-4.pdf:pdf},
title = {{USER}},
year = {2016}
}
@article{Zhang2015a,
abstract = {Particle swarm optimization is a stochastic population-based algorithm based on social
interaction of bird flocking or fish schooling. In this paper, a new adaptive inertia weight adjusting
approach is proposed based on Bayesian techniques in PSO, which is used to set up a sound tradeoff
between the exploration and exploitation characteristics. It applies the Bayesian techniques to
enhance the PSO's searching ability in the exploitation of past particle positions and uses the cauchy
mutation for exploring the better solution. A suite of benchmark functions are employed to test the
performance of the proposed method. The results demonstrate that the new method exhibits higher
accuracy and faster convergence rate than other inertia weight adjusting methods in multimodal and
unimodal functions. Furthermore, to show the generalization ability of BPSO method, it is compared
with other types of improved PSO algorithms, which also performs well.},
author = {Zhang, Limin and Tang, Yinggan and Hua, Changchun and Guan, Xinping},
doi = {10.1016/j.asoc.2014.11.018},
issn = {15684946},
pages = {138--149},
title = {{A new particle swarm optimization algorithm with adaptive inertia weight based on Bayesian
techniques}},
url = {http://dx.doi.org/10.1016/j.asoc.2014.11.018},
volume = {28},
year = {2015}
}
@book{Michele2016,
doi = {10.1007/978-3-319-34096-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/main{\_}2.pdf:pdf},
isbn = {9783319340968},
title = {{Modelling and Analysis of Collective Adaptive Systems with CARMA and its Tools}},
year = {2016}
}
@article{Koldehofe2018,
url = {https://www.kuvs.de/fg/fogcomputing/},
year = {2018}
}
@article{Basu2011,
abstract = {Rigorous system design requires the use of a single powerful component framework
allowing the representation of the designed system at different detail levels, from application
software to its implementation. A single framework allows the maintenance of the overall coherency
and correctness by comparing different architectural solutions and their properties. The authors
present the BIP (behavior, interaction, priority) component framework, which encompasses an
expressive notion of composition for heterogeneous components by combining interactions and
priorities. This allows description at different abstraction levels from application software to mixed
hardware/software systems. A rigorous design flow that uses BIP as a unifying semantic model
derives a correct implementation from an application software, a model of the target architecture,
and a mapping. Implementation correctness is ensured by applying source-to-source
transformations that preserve correctness of essential design properties. The design is fully
automated and supported by a toolset including a compiler, the D-Finder verification tool, and
model transformers. The authors present an autonomous robot case study to illustrate BIP's use as a
modeling formalism as well as crucial aspects of the design flow for ensuring correctness.
{\textcopyright} 2011 IEEE.},
author = {Basu, Ananda and Bensalem, Saddek and Bozga, Marius and Combaz, Jacques and Jaber,
Mohamad and Nguyen, Thanh Hung and Sifakis, Joseph},
doi = {10.1109/MS.2011.27},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/basu2011.pdf:pdf},
issn = {07407459},
journal = {IEEE Software},
number = {3},
pages = {41--48},
title = {{Rigorous component-based system design using the {BIP} framework}},
volume = {28},
year = {2011}
}
@article{Souri2019,
abstract = {Model checking is an influential method to verify complex interactions, concurrent and
distributed systems. Model checking constructs a behavioral model of the system using formal
concepts such as operations, states, events and actions. The model checkers suffer some
weaknesses such as state space explosion problem that has high memory consumption and time
complexity. Also, automating temporal logic is the main challenge to define critical specification
rules in the model checking. To improve the model checking weaknesses, this paper presents
Graphical Symbolic Modeling Toolkit (GSMT) to design and verify the behavioral models of
distributed systems. A behavioral modeling framework is presented to design the system behavior in
the forms of Kripke structure (KS) and Labeled Transition System (LTS). The behavioral models are
created and edited using a graphical user interface platform in four layers that include a design layer,
a modeling layer, a logic layer and a symbolic code layer. The GSMT generates a graphical modeling
diagram visually for creating behavioral models of the system. Also, the temporal logic formulas are
constructed according to some functional properties automatically. The executable code is
generated according to the symbolic model verifier that user can choose the original model or
reduced model with respect to a recursive reduced model. Finally, the generated code is executed
using the NuSMV model checker for evaluating the constructed temporal logic formulas. The code
generation time for transforming the behavioral model is compared to other model checking
platforms. The proposed GSMT platform has outperformed evaluation than other platforms.},
author = {Souri, Alireza and Rahmani, Amir Masoud and Navimipour, Nima Jafari and Rezaei, Reza},
doi = {10.1186/s13673-019-0165-x},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/s13673-019-0165-x.pdf:pdf},
issn = {21921962},
journal = {Human-centric Computing and Information Sciences},
number = {1},
title = {{A symbolic model checking approach in formal verification of distributed systems}},
url = {https://doi.org/10.1186/s13673-019-0165-x},
volume = {9},
year = {2019}
}
@article{Chen2015,
abstract = {Deep convolutional neural networks (CNNs) are the backbone of state-of-art semantic
image segmentation systems. Recent work has shown that complementing CNNs with fully-
connected conditional random fields (CRFs) can significantly enhance their object localization
accuracy, yet dense CRF inference is computationally expensive. We propose replacing the fully-
connected CRF with domain transform (DT), a modern edge-preserving filtering method in which the
amount of smoothing is controlled by a reference edge map. Domain transform filtering is several
times faster than dense CRF inference and we show that it yields comparable semantic segmentation
results, accurately capturing object boundaries. Importantly, our formulation allows learning the
reference edge map from intermediate CNN features instead of using the image gradient magnitude
as in standard DT filtering. This produces task-specific edges in an end-to-end trainable system
optimizing the target semantic segmentation quality.},
archivePrefix = {arXiv},
arxivId = {1511.03328},
author = {Chen, Liang-Chieh and Barron, Jonathan T. and Papandreou, George and Murphy, Kevin
and Yuille, Alan L.},
doi = {10.1109/CVPR.2016.492},
eprint = {1511.03328},
isbn = {978-1-4673-8851-1},
issn = {10636919},
pages = {4545--4554},
title = {{Semantic Image Segmentation with Task-Specific Edge Detection Using CNNs and a
Discriminatively Trained Domain Transform}},
url = {http://arxiv.org/abs/1511.03328},
year = {2015}
}
@article{Menze2015,
abstract = {In this paper we report the set-up and results of the Multimodal Brain Tumor Image
Segmentation (BRATS) benchmark organized in conjunction with the MICCAI 2012 and 2013
conferences. Twenty state-of-the-art tumor segmentation algorithms were applied to a set of 65
multi-contrast MR scans of low- and high-grade glioma patients - manually annotated by up to four
raters - and to 65 comparable scans generated using tumor simulation software. Quantitative
evaluations revealed considerable disagreement between the human raters in segmenting various
tumor sub-regions (Dice scores in the range 74-85{\%}), illustrating the difficulty of this task. We
found that different algorithms worked best for different sub-regions (reaching performance
comparable to human inter-rater variability), but that no single algorithm ranked in the top for all
subregions simultaneously. Fusing several good algorithms using a hierarchical majority vote yielded
segmentations that consistently ranked above all individual algorithms, indicating remaining
opportunities for further methodological improvements. The BRATS image data and manual
annotations continue to be publicly available through an online evaluation system as an ongoing
benchmarking resource.},
archivePrefix = {arXiv},
arxivId = {15334406},
author = {Menze, Bjoern H. and Jakab, Andras and Bauer, Stefan and Kalpathy-Cramer, Jayashree
and Farahani, Keyvan and Kirby, Justin and Burren, Yuliya and Porz, Nicole and Slotboom, Johannes
and Wiest, Roland and Lanczi, Levente and Gerstner, Elizabeth and Weber, Marc Andr{\'{e}} and
Arbel, Tal and Avants, Brian B. and Ayache, Nicholas and Buendia, Patricia and Collins, D. Louis and
Cordier, Nicolas and Corso, Jason J. and Criminisi, Antonio and Das, Tilak and Delingette, Herv{\'{e}}
and Demiralp, {\c{C}}ağatay and Durst, Christopher R. and Dojat, Michel and Doyle, Senan and Festa,
Joana and Forbes, Florence and Geremia, Ezequiel and Glocker, Ben and Golland, Polina and Guo,
Xiaotao and Hamamci, Andac and Iftekharuddin, Khan M. and Jena, Raj and John, Nigel M. and
Konukoglu, Ender and Lashkari, Danial and Mariz, Jos{\'{e}} Ant{\'{o}}nio and Meier, Raphael and
Pereira, S{\'{e}}rgio and Precup, Doina and Price, Stephen J. and Raviv, Tammy Riklin and Reza, Syed
M.S. and Ryan, Michael and Sarikaya, Duygu and Schwartz, Lawrence and Shin, Hoo Chang and
Shotton, Jamie and Silva, Carlos A. and Sousa, Nuno and Subbanna, Nagesh K. and Szekely, Gabor
and Taylor, Thomas J. and Thomas, Owen M. and Tustison, Nicholas J. and Unal, Gozde and Vasseur,
Flor and Wintermark, Max and Ye, Dong Hye and Zhao, Liang and Zhao, Binsheng and Zikic, Darko
and Prastawa, Marcel and Reyes, Mauricio and {Van Leemput}, Koen},
doi = {10.1109/TMI.2014.2377694},
eprint = {15334406},
issn = {1558254X},
journal = {IEEE Transactions on Medical Imaging},
number = {10},
pages = {1993--2024},
pmid = {25494501},
title = {{The Multimodal Brain Tumor Image Segmentation Benchmark ({BRATS})}},
volume = {34},
year = {2015}
}
@book{Weigl2020,
abstract = {Reactive software is often deployed in long-running systems with high dependability
requirements. Despite their safety- and mission-critical use, their functionalities must occasionally
be adapted, for example to support new features or regulations. But software evolution bears the
risk of introducing new malfunctions. Regression verification helps preventing the introduction of
unintended, faulty behaviour. In this paper we present a novel approach for modular regression
verification proofs for reactive systems based on the idea of relational regression verification
contracts. The approach allows the decomposition of a larger regression verification proof into
smaller proofs on its subcomponents. We embedded the decomposition rule in a new algorithm for
regression verification, which orchestrates several light- and heavyweight techniques. We
implemented our approach for software used by Programmable Logic Controllers (PLC) written in
Structured Text (IEC 611131-3) and show the potential of the approach with selected scenarios of a
Pick-and-Place-Unit case study.},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
doi = {10.1007/978-3-030-61470-6_3},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030614690},
issn = {16113349},
pages = {25--43},
year = {2020}
}
@article{Gadelha2017,
abstract = {The first attempts to apply the k-induction method to software verification are only
recent. In this paper, we present a novel proof by induction algorithm, which is built on the top of a
symbolic context-bounded model checker and uses an iterative deepening approach to verify, for
each step k up to a given maximum, whether a given safety property ϕ holds in the program. The
proposed k-induction algorithm consists of three different cases, called base case, forward condition,
and inductive step. Intuitively, in the base case, we aim to find a counterexample with up to k loop
unwindings; in the forward condition, we check whether loops have been fully unrolled and that ϕ
holds in all states reachable within k unwindings; and in the inductive step, we check that whenever
ϕ holds for k unwindings, it also holds after the next unwinding of the system. The algorithm was
implemented in two different ways, a sequential and a parallel one, and the results were compared.
Experimental results show that both forms of the algorithm can handle a wide variety of safety
properties extracted from standard benchmarks, ranging from reachability to time constraints. And
by comparison, the parallel algorithm solves more verification tasks in less time. This paper marks
the first application of the k-induction algorithm to a broader range of C programs; in particular, we
show that our k-induction method outperforms CPAChecker in terms of correct results, which is a
state-of-the-art k-induction-based verification tool for C programs.},
author = {Gadelha, Mikhail Y.R. and Ismail, Hussama I. and Cordeiro, Lucas C.},
doi = {10.1007/s10009-015-0407-9},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/gadelha2015.pdf:pdf},
issn = {14332787},
journal = {International Journal on Software Tools for Technology Transfer},
number = {1},
pages = {97--114},
title = {{Handling loops in bounded model checking of {C} programs via k-induction}},
volume = {19},
year = {2017}
}
@article{Buzhinsky2019,
abstract = {Model checking is an established technique to formally verify automation systems which
are required to be trusted. However, for sufficiently complex systems model checking becomes
computationally infeasible. On the other hand, testing, which offers less reliability, often does not
present a serious computational challenge. Searching for synergies between these two approaches,
this paper proposes a framework to ensure reliability of industrial automation systems by means of
hybrid use of model checking and testing. This framework represents a way to achieve a trade-off
between verification reliability and computational complexity which has not yet been explored in
other approaches. Instead of undergoing usual model checking, system requirements are checked
only on particular system behaviors which represent a test suite achieving coverage for both the
system and the requirements. Then, all stages of the framework support the case of a closed-loop
model, where not only the controller, but also the plant is modeled.},
archivePrefix = {arXiv},
arxivId = {1907.11895},
eprint = {1907.11895},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1907.11895.pdf:pdf},
title = {{Combining closed-loop test generation and execution by means of model checking}},
url = {http://arxiv.org/abs/1907.11895},
year = {2019}
}
@article{He2006,
abstract = {Alloy is a new modeling language for software design, while Unified Modeling Language
(UML) is a standard modeling language widely used in industry. This paper analyzes the similarities
and the differences between Alloy and UML. It focuses on the complexity differences, accuracy
differences, and the expression differences between these two languages. Both Alloy and UML can
be used to specify the requirements to design complex software systems. The syntax of Alloy is
largely compatible with UML. UML is more complicated, while Alloy is more concise. UML is more
ambiguous, while Alloy is more accurate. UML is more expressive, while Alloy is more abstract. It is
promising that part of UML can be formalized and transmitted to Alloy to allow automatic model
validation analyzing in order to reduce errors in the requirement and design stages.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1.1.89.2147.pdf:pdf},
pages = {671--677},
url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.89.2147{\&}rep=rep1{\&}type=pdf},
volume = {2},
year = {2006}
}
@article{Smistad2015,
abstract = {Segmentation of anatomical structures, from modalities like computed tomography (CT),
magnetic resonance imaging (MRI) and ultrasound, is a key enabling technology for medical
applications such as diagnostics, planning and guidance. More efficient implementations are
necessary, as most segmentation methods are computationally expensive, and the amount of
medical imaging data is growing. The increased programmability of graphic processing units (GPUs)
in recent years have enabled their use in several areas. GPUs can solve large data parallel problems
at a higher speed than the traditional CPU, while being more affordable and energy efficient than
distributed systems. Furthermore, using a GPU enables concurrent visualization and interactive
segmentation, where the user can help the algorithm to achieve a satisfactory result. This review
investigates the use of GPUs to accelerate medical image segmentation methods. A set of criteria for
efficient use of GPUs are defined and each segmentation method is rated accordingly. In addition,
references to relevant GPU implementations and insight into GPU optimization are provided and
discussed. The review concludes that most segmentation methods may benefit from GPU processing
due to the methods' data parallel structure and high thread count. However, factors such as
synchronization, branch divergence and memory usage can limit the speedup.},
author = {Smistad, Erik and Falch, Thomas L. and Bozorgi, Mohammadmehdi and Elster, Anne C. and
Lindseth, Frank},
doi = {10.1016/j.media.2014.10.012},
isbn = {13618415},
issn = {13618423},
journal = {Medical Image Analysis},
keywords = {GPU,Image,Medical,Parallel,Segmentation},
number = {1},
pages = {1--18},
pmid = {25534282},
title = {{Medical image segmentation on {GPUs} -- A comprehensive review}},
url = {http://dx.doi.org/10.1016/j.media.2014.10.012},
volume = {20},
year = {2015}
}
@article{Kwiatkowska2016,
abstract = {We are witnessing a huge growth of cyber-physical systems, which are autonomous,
mobile, endowed with sensing, controlled by software, and often wirelessly connected and Internet-
enabled. They include factory automation systems, robotic assistants, self-driving cars, and wearable
and implantable devices. Since they are increasingly often used in safety- or business-critical
contexts, to mention invasive treatment or biometric authentication, there is an urgent need for
modelling and verification technologies to support the design process, and hence improve the
reliability and reduce production costs. This paper gives an overview of quantitative verification and
synthesis techniques developed for cyber-physical systems, summarising recent achievements and
future challenges in this important field.},
doi = {10.1109/SOSCYPS.2016.7579999},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Marta+Kwiatkowska,
+Advances+and+challenges+of+quantitative+verification+and+synthesis+for+cyber-
physical+systems.pdf:pdf},
isbn = {9781509043040},
journal = {2016 Science of Security for Cyber-Physical Systems Workshop, SOSCYPS 2016},
title = {{Advances and challenges of quantitative verification and synthesis for cyber-physical
systems}},
year = {2016}
}
@article{Tumuluru2015,
abstract = {A new hybrid genetic algorithm was developed which combines a stochastic evolutionary
algorithm with a deterministic adaptive step steepest descent hill climbing algorithm in order to
optimize complex multivariate problems. By combining both algorithms computational resources are
conserved and the solution converges rapidly as compared to either algorithm alone. In genetic
algorithms natural selection is mimicked by random events such as breeding and mutation. In the
adaptive step steepest descent algorithm the solution moves toward the lowest surrounding point.
Step sizes start big and get progressively smaller, increasing computational efficiency. The genetic
algorithm ensures the solution samples the entire global search space, thus a global minimum is
found. The steepest descent method tine tunes the solution by moving it to the nearest local
minimum. The code was developed, including a graphical user interface, in MATLAB. Additional
features such as bounding the input, weighting the objective functions individually, and constraining
the output are also built into the interface. The algorithm developed was used to optimize the
response surface models which use process variables (feedstock moisture content, die speed, and
preheating temperature) to predict pellet properties (pellet moisture content, unit, bulk and tapped
density, durability, and specific energy consumption). The solution found by the hybrid algorithm
was validated experimentally. Execution times were decreased by approximately 40{\%}, based on 1,
0000 trials with each method, using the new hybrid algorithm as compared to using a genetic
algorithm alone with the same parameters, both developed at INL Performance of the hybrid
algorithm versus the commercial Matlab genetic algorithm is investigated. Results show that the
hybrid genetic algorithm converged to the global maximum for bulk density in one iteration,
whereas the commercial genetic algorithm took twenty nine iterations to converge.},
doi = {10.13031/aim.20152188606},
isbn = {9781510810501},
journal = {American Society of Agricultural and Biological Engineers Annual International Meeting
2015},
number = {October},
title = {{A new hybrid genetic algorithm for optimizing the single and multivariate objective
functions}},
volume = {3},
year = {2015}
}
@book{DeLemos2017a,
abstract = {In the feedback control loop, uncertainty is associated to different sources (e.g., the
environment), and appears in different forms (e.g., as noise in variables or imperfections in
techniques being used). In the MAPE-K control loop, uncertainty is normally handled by the decision
maker at the Plan stage. However, depending on the complexity of the stages of the MAPE-K control
loop, uncertainties need also to be handled at other stages, depending on the uncertainties
associated with that stage. Moreover, uncertainties may also propagate between the stages of the
control loop, which might affect how uncertainties are handled. In this position chapter, we claim
that uncertainties should be identified and handled at the different stages of the feedback control
loop. We propose an approach for the identification of internal and external sources of uncertainty
for a given stage, and we promote error propagation analysis as a method for analyzing the
propagation of uncertainties between stages. In terms of trade-off analysis, which can take place at
any stage of the MAPE-K control loop, such an approach provides a clear benefit since it leads to a
more accurate estimation of the system quality attributes because uncertainties are handled in the
context where they arise.},
doi = {10.1016/b978-0-12-802855-1.00014-9},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/delemos2017.pdf:pdf},
isbn = {9780128028551},
number = {i},
pages = {353--367},
url = {http://dx.doi.org/10.1016/B978-0-12-802855-1.00014-9},
year = {2017}
}
@article{Kaisler2009,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/CAS-Briefing.pdf:pdf},
title = {{Complex Adaptive Systems : Emergence and Self-Organization Tutorial Presented at HICSS-
42 Big Island , HI Greg Madey}},
year = {2009}
}
@article{Souri2018,
abstract = {Cloud computing as a new internet-based computing model provides different resources
as a service dynamically. Today, cloud computing is actually one of the main improvements in the
computing procedure. However, by raising user interactions, the complexity of cloud processes is
increasing with the advancement of technology. To evaluate the cloud computing challenges, the
simulation experiments just satisfy the non-functional properties with a limited majority in forms of
QoS factors. In addition, using simulation approaches have not been sufficient for developed
complex cloud services that omit some critical test cases in the state space of the model. On the
other hand, formal verification is an essential section in the complex information systems
development that satisfies both functional and non-functional properties. Therefore, it is essential
that the cloud systems use formal verification approaches for increasing the correctness of the
system quality in all of the state space of the model. Despite the importance of the formal
verification approaches in the cloud environments, to the best of our knowledge, there is not any
systematic, comprehensive and detailed survey and review in the field of formal verification
approaches and standards in the cloud computing. This paper provides a Systematic Literature
Review (SLR) method to examine the current technical studies (published between 2011 and July
2017) in formal verification of the cloud computing. Also, this paper categorizes the formal
verification approaches in three classic fields: specification and process algebra, model checking, and
theorem proving. The verification approaches are compared with each other according to some
technical properties such as specification methods, modeling approaches, verification tools and
verification methods. The advantages and disadvantages of each selected study as well as some
hints are discussed for solving their problems. The brief contributions of this paper are as follows: (1)
providing a comprehensive literature review of the formal verification approaches in the cloud
computing, (2) designing a technical taxonomy for the verification approaches in various modeling
and specification methods, (3) presenting a technical analysis and comparison for the main
challenges of the formal verification in the cloud and (4) highlighting the future open issues in the
recent topics.},
author = {Souri, Alireza and Navimipour, Nima Jafari and Rahmani, Amir Masoud},
doi = {10.1016/j.csi.2017.11.007},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/souri2017.pdf:pdf},
issn = {09205489},
journal = {Computer Standards {\&} Interfaces},
pages = {1--22},
title = {{Formal verification approaches and standards in the cloud computing: A comprehensive and
systematic review}},
url = {https://doi.org/10.1016/j.csi.2017.11.007},
volume = {58},
year = {2018}
}
@article{Yahav,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Synth.pdf:pdf},
url = {http://cs.tau.ac.il/{~}msagiv/courses/sp/Synth.pdf}
}
@article{Gao2018,
author = {Gao, Weinan and Jiang, Zhong Ping and Lewis, Frank L. and Wang, Yebin},
doi = {10.1109/TAC.2018.2799526},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/TR2018-013.pdf:pdf},
issn = {15582523},
number = {10},
pages = {3581--3587},
volume = {63},
year = {2018}
}
@article{Sap2016,
doi = {10.5772/711},
isbn = {9789537619343},
issn = {9789533070865},
journal = {Intech},
pages = {111--133},
title = {{World ' s largest Science , Technology {\&} Medicine Open Access book publisher c}},
volume = {6},
year = {2016}
}
@article{Awodele2012,
author = {Awodele, Oludele and Onuiri, Ernest E and Edom, Stanley N and Obomighie, Oshomeghie
G and Aloba, A},
number = {3},
pages = {1698--1712},
title = {{A Secure E-Commerce System for a Campus Mall : A View on the Business to Consumer
Relationship}},
volume = {3},
year = {2012}
}
@article{Hashemi2013,
abstract = {Lung cancer is distinguished by presenting one of the highest incidences and one of
the highest rates of mortality among all other types of cancers. Detecting and curing the disease in
the early stages provides the patients with a high chance of survival. This work aims at detecting lung
nodules automatically through computerized tomography (CT) image. Accordingly, this article aim at
presenting a method to improve the efficiency of the lung cancer diagnosis system, through
proposing a region growing segmentation method to segment CT scan lung images. Afterwards,
cancer recognition are presenting by Fuzzy Inference System (FIS) for differentiating between
malignant, benign and advanced lung nodules. In the following, this paper is testing the diagnostic
performances of FIS system by using artificial neural networks (ANNs). Our experiments show that
the average sensitivity of the proposed method is 95{\%}.},
author = {Hashemi, Atiyeh and {Hamid Pilevar}, Abdol and Rafeh, Reza},
doi = {10.5815/ijigsp.2013.06.03},
issn = {20749074},
number = {6},
pages = {16--24},
title = {{Mass Detection in Lung CT Images Using Region Growing Segmentation and Decision Making
Based on Fuzzy Inference System and Artificial Neural Network}},
url = {http://www.mecs-press.org/ijigsp/ijigsp-v5-n6/IJIGSP-V5-N6-3.pdf},
volume = {6},
year = {2013}
}
@article{Villegas2011,
abstract = {Over the past decade the dynamic capabilities of self-adaptive software-intensive
systems have proliferated and improved significantly. To advance the field of self-adaptive and self-
managing systems further and to leverage the benefits of self-adaptation, we need to develop
methods and tools to assess and possibly certify adaptation properties of self-adaptive systems, not
only at design time but also, and especially, at run-time. In this paper we propose a framework for
evaluating quality-driven self-adaptive software systems. Our framework is based on a survey of self-
adaptive system papers and a set of adaptation properties derived from control theory properties.
We also establish a mapping between these properties and software quality attributes. Thus,
corresponding software quality metrics can then be used to assess adaptation properties.
{\textcopyright} 2011 ACM.},
author = {Villegas, Norha M. and M{\"{u}}ller, Hausi A. and Tamura, Gabriel and Duchien, Laurence
and Casallas, Rubby},
doi = {10.1145/1988008.1988020},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1988008.1988020.pdf:pdf},
isbn = {9781450305754},
issn = {02705257},
pages = {80--89},
year = {2011}
}
@article{Bulychev2012,
abstract = {This paper offers a survey of UPPAAL-SMC, a major extension of the real-Time verification
tool UPPAAL. UPPAAL-SMC allows for the efficient analysis of performance properties of networks of
priced timed automata under a natural stochastic semantics. In particular, UPPAAL-SMC relies on a
series of extensions of the statistical model checking approach generalized to handle real-Time
systems and estimate undecidable problems. UPPAAL-SMC comes together with a friendly user
interface that allows a user to specify complex problems in an efficient manner as well as to get
feedback in the form of probability distributions and compare probabilities to analyze performance
aspects of systems. The focus of the survey is on the evolution of the tool - including modeling and
specification formalisms as well as techniques applied -Together with applications of the tool to case
studies.},
author = {Bulychev, Peter and David, Alexandre and Larsen, Kim Guldstrand and Miku{\v{c}}ionis,
Marius and Poulsen, Danny B{\o}gsted and Legay, Axel and Wang, Zheng},
doi = {10.4204/EPTCS.85.1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1207.1272.pdf:pdf},
issn = {20752180},
number = {Qapl},
pages = {1--16},
volume = {85},
year = {2012}
}
@article{Liang2004,
doi = {10.1109/AINA.2004.1283982},
title = {{A Web-Based Dual Mode Virtual Laboratory Supporting Cooperative Learning}},
year = {2004}
}
@article{Nenzi2015,
abstract = {We address the specification and verification of spatiotemporal behaviours of complex
systems, extending Signal Spatio-Temporal Logic (SSTL) with a spatial operator capable of specifying
topological properties in a discrete space. The latter is modelled as a weighted graph, and provided
with a boolean and a quantitative semantics. Furthermore, we define efficient monitoring algorithms
for both the boolean and the quantitative semantics. These are implemented in a Java tool available
online. We illustrate the expressiveness of SSTL and the effectiveness of the monitoring procedures
on the formation of patterns in a Turing reaction-diffusion system.},
author = {Nenzi, Laura and Bortolussi, Luca and Ciancia, Vincenzo and Loreti, Michele and Massink,
Mieke},
doi = {10.1007/978-3-319-23820-3_2},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1706.09334.pdf:pdf},
isbn = {9783319238197},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
keywords = {Boolean semantics,Monitoring algorithms,Quantitative semantics,Signal spatio-temporal logic,Turing patterns,Weighted graphs},
pages = {21--37},
title = {{Qualitative and Quantitative Monitoring of Spatio-Temporal Properties}},
volume = {9333},
year = {2015}
}
@article{Ciancia2015a,
abstract = {In this paper we explore the combination of novel spatio-temporal model-checking
techniques, and of a recently developed model-based approach to the study of bike sharing systems,
in order to detect, visualize and investigate potential problems with bike sharing system
configurations. In particular the formation and dynamics of clusters of full stations is explored. Such
clusters are likely to be related to the difficulties of users to find suitable parking places for their
hired bikes and show up as surprisingly long cycling trips in the trip duration statistics of real bike
sharing systems of both small and large cities. Spatio-temporal analysis of the pattern formation may
help to explain the phenomenon and possibly lead to alternative bike repositioning strategies aiming
at the reduction of the size of such clusters and improving the quality of service.},
author = {Ciancia, Vincenzo and Latella, Diego and Massink, Mieke and Pakauskas, Rytis},
doi = {10.1109/SASOW.2015.17},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ciancia2015.pdf:pdf},
isbn = {9781467384391},
journal = {Proceedings - 2015 IEEE 9th International Conference on Self-Adaptive and Self-Organizing
Systems Workshops, SASOW 2015},
pages = {74--79},
year = {2015}
}
@article{Mohanty2013,
number = {2},
pages = {42--48},
title = {{Analysis of Color Images using Cluster based Segmentation Techniques}},
volume = {79},
year = {2013}
}
@article{Majeed2014,
number = {3},
pages = {233--240},
volume = {5},
year = {2014}
}
@article{Hillston2016,
doi = {10.1007/978-3-319-43425-4},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/CARMA-Eclipse-Plug-in.pdf:pdf},
isbn = {9783319434254},
pages = {167--171},
title = {{CARMA Eclipse Plug-in: A Tool Supporting Design and Analysis of Collective Adaptive Systems}},
volume = {2},
year = {2016}
}
@article{Sanderson2019,
abstract = {Adaptive production systems are a key trend in modern advanced manufacturing. This stems from the requirement for the system to respond to disruption, either in the form of product changes or changes to other operational parameters. The design and reconfiguration of these systems are therefore a unique challenge for the community. One approach to systems design is based on functional and behavioural modelling, drawn from the field of design theory. Existing approaches suffer from lack of focus on the adaptive properties of the system. While traditional production systems design focusses on the physical system structure and associated processes, new approaches based on functional and behavioural models are particularly suited to addressing the challenges of disruptive production environments resulting from Industry 4.0 and similar trends. We therefore present a Function-Behaviour-Structure (FBS) methodology for Evolvable Assembly Systems (EAS), a class of self-adaptive reconfigurable production systems, comprising an ontology model and design process. The ontology model provides definitions for Function, Structure, and Behaviour of an adaptive production system. This model is used as the input to a functional modelling design process for EAS-like systems, where the design process must be integrated into the system control behaviour. The framework is illustrated with an example taken from a real EAS instantiation using industrial hardware.},
doi = {10.1007/s00170-019-03823-x},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/sanderson2019.pdf:pdf},
issn = {14333015},
journal = {The International Journal of Advanced Manufacturing Technology},
number = {9},
pages = {3731--3742},
volume = {105},
year = {2019}
}
@article{Dooley1997,
abstract = {The study of complex adaptive systems has yielded great insight into how complex, organic-like structures can evolve order and purpose over time. Business organizations, typified by semi-autonomous organizational members interacting at many levels of cognition and action, can be portrayed by the generic constructs and driving mechanisms of complex adaptive systems theory. The purpose of this paper is to forge a unified description of complex adaptive systems from several sources, and then investigate the issue of change in a business organization via the framework of complex adaptive systems. The theory of complex adaptive systems uses components from three paradigms of management thought: systems theory, population ecology, and information processing. Specific propositions regarding the nature of dynamical change will be developed, driven by the complex adaptive systems model. Supporting evidence for these propositions is then sought within the existing management theory literature. In doing so, the complex adaptive systems approach to understanding organization change will be better grounded in domain-specific theory, and new insights and research areas will come to light.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/A{\_}complex{\_}adaptive{\_}systems{\_}model{\_}of{\_}orga.pdf:pdf},
isbn = {1011961725},
issn = {10900578},
keywords = {Embryo,Orchid,Pollination},
number = {1},
pages = {69--97},
title = {{A Complex Adaptive Systems Model of Organization Change}},
url = {http://link.springer.com/10.1023/A:1022375910940},
volume = {1},
year = {1997}
}
@article{Fernandez2013,
abstract = {The OpenFlow architecture is a proposal from the Clean Slate initiative to define a new Internet architecture where the network devices are simple, and the control and management plane is performed by a centralized controller. The simplicity and centralization architecture makes it reliable and inexpensive, but the centralization causes problems concerning controller scalability. An OpenFlow controller has two operations paradigm: reactive and proactive. This paper proposes a methodology to evaluate OpenFlow controller performance. The performances of both paradigms were analyzed in different known controllers. The performance evaluation was done in a real environment and emulation. It was tested distinct OpenFlow controller and using a different amount of OpenFlow devices. Finally, we will present some conclusions about the controller scalability. {\textcopyright} 2013 IEEE.},
doi = {10.1109/AINA.2013.113},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/fernandez2013.pdf:pdf},
isbn = {9780769549538},
issn = {1550445X},
pages = {1009--1016},
year = {2013}
}
@article{Zurita2005a,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/14b718bc1bce1bfae2d0417aa2ba0256854b.pdf:pdf},
pages = {149--161},
volume = {8},
year = {2005}
}
@article{Coit2019,
abstract = {System reliability optimization is a living problem, with solutions methodologies that have evolved with the advancements of mathematics, development of new engineering technology, and changes in management perspectives. In this paper, we consider the different types of system reliability optimization problems, including as examples, the redundancy allocation problem (RAP), the reliability allocation problem and the reliability-redundancy allocation problem (RRAP), and provide a flow of discussion and analysis on the evolution of the approaches for their solutions. We consider the development and advancement in the fields of operations research and optimization theory, which have allowed the formalization and continuous improvement of the methods and techniques to address reliability design problems of even very complex systems in different technological domains. Technological advances have naturally brought changes of perspectives in response to the needs, interests and priorities of the practical engineering world. The flow is organized in a structure of successive “Eras of Evolution,” namely the Era of Mathematical Programming, the Era of Pragmatism, the Era of Active Reliability Improvement. Insights, challenges and opportunities are highlighted.},
doi = {10.1016/j.ress.2018.09.008},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0951832018306021-main.pdf:pdf},
issn = {09518320},
journal = {Reliability Engineering and System Safety},
pages = {106259},
publisher = {Elsevier Ltd},
url = {https://doi.org/10.1016/j.ress.2018.09.008},
volume = {192},
year = {2019}
}
@inproceedings{Tsiropoulou2017,
abstract = {In this paper, the problem of coalition formation among Machine-to-Machine (M2M) communication type devices and the resource management problem is addressed. Each M2M device is characterized by its energy availability, as well as by differentiated interests to communicate with other devices based on the Internet of Things (IoT) application that they jointly serve. Physical ties among devices also exist based on their physical distance proximity and communication channel quality. Those three factors: energy availability, interest and physical ties, are considered into the coalition formation process and the coalition-head selection. Each M2M device is associated with a holistic utility function, which appropriately represents its degree of satisfaction with respect to Quality of Service (QoS) prerequisites fulfillment. Given the created coalitions among the M2M devices, a distributed power control framework is proposed towards determining each M2M device's optimal transmission power in order to fulfill its QoS prerequisites. The performance of the proposed approach is evaluated via modeling and simulation and its superiority compared to other state of the art approaches is illustrated. {\textcopyright} 2017 IEEE.},
author = {Tsiropoulou, Eirini Eleni and Paruchuri, Surya Teja and Baras, John S.},
booktitle = {2017 51st Annual Conference on Information Sciences and Systems, CISS 2017},
doi = {10.1109/CISS.2017.7926111},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Tsiropoulou{\_}CISS{\_}2016.pdf:pdf},
isbn = {9781509047802},
title = {{Interest, energy and physical-aware coalition formation and resource allocation in smart IoT applications}},
year = {2017}
}
@article{Bartocci2016a,
doi = {10.1371/journal.pcbi.1004591},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Computational{\_}Modeling{\_}Formal{\_}Analysis{\_}and{\_}Tools{\_}f.pdf:pdf},
journal = {PLoS Computational Biology},
number = {January},
title = {{Computational Modeling, Formal Analysis, and Tools for Systems Biology}},
year = {2016}
}
@article{Rauf2016,
abstract = {In the Region Growing Algorithm (RGA) results of segmentation are totally dependent on the selection of seed point, as an inappropriate seed point may lead to poor segmentation. However, the majority of MRA (Magnetic Resonance Angiography) datasets do not contain required region (vessels) in starting slices. An Enhanced Region Growing Algorithm (ERGA) is proposed for blood vessel segmentation. The ERGA automatically calculates the threshold value on the basis of maximum intensity values of all the slices and selects an appropriate starting slice of the image which has a appropriate seed point. We applied our proposed technique on different patients of MRA datasets of different resolutions and have got improved segmented images with reduction of noise as compared to tradition RGA.},
author = {Rauf, Sonia and Qureshi, Kalim and Kazmi, Jawad and Sarfraz, Muhammad},
doi = {10.1016/j.aci.2015.06.002},
isbn = {2210-8327},
issn = {22108327},
journal = {Applied Computing and Informatics},
number = {2},
pages = {128--133},
url = {http://dx.doi.org/10.1016/j.aci.2015.06.002},
volume = {12},
year = {2016}
}
@article{Lu2005,
author = {Koelbl, Alfred and Mathur, Anmol},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}13.pdf:pdf},
isbn = {078039254X},
title = {{Embedded Tutorial: Formal Equivalence Checking Between System-Level Models and RTL}},
year = {2005}
}
@book{Vilhelm2019,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/PriorVol1{\_}eBook.pdf:pdf},
isbn = {9788772100081},
title = {{Logic and Philosophy of Time: Themes from Prior}},
volume = {1},
year = {2019}
}
@article{Vidal2013,
abstract = {The paper presents an efficient Hybrid Genetic Search with Advanced Diversity Control for a large class of time-constrained vehicle routing problems, introducing several new features to manage the temporal dimension. New move evaluation techniques are proposed, accounting for penalized infeasible solutions with respect to time-window and duration constraints, and allowing to evaluate moves from any classical neighbourhood based on arc or node exchanges in amortized constant time. Furthermore, geometric and structural problem decompositions are developed to address efficiently large problems. The proposed algorithm outperforms all current state-of-the-art approaches on classical literature benchmark instances for any combination of periodic, multi-depot, site-dependent, and duration-constrained vehicle routing problem with time windows.},
author = {Vidal, Thibaut and Crainic, Teodor Gabriel and Gendreau, Michel and Prins, Christian},
doi = {10.1016/j.cor.2012.07.018},
isbn = {03050548},
issn = {03050548},
journal = {Computers and Operations Research},
number = {1},
pages = {475--489},
title = {{A hybrid genetic algorithm with adaptive diversity management for a large class of vehicle routing problems with time-windows}},
url = {http://dx.doi.org/10.1016/j.cor.2012.07.018},
volume = {40},
year = {2013}
}
@article{Kwiatkowska2020a,
abstract = {Concurrent stochastic games (CSGs) are an ideal formalism for modelling probabilistic systems that feature multiple players or components with distinct objectives making concurrent, rational decisions. Examples include communication or security protocols and multi-robot navigation. Verification methods for CSGs exist but are limited to scenarios where agents or players are grouped into two coalitions, with those in the same coalition sharing an identical objective. In this paper, we propose multi-coalitional verification techniques for CSGs. We use subgame-perfect social welfare (or social cost) optimal Nash equilibria, which are strategies where there is no incentive for any coalition to unilaterally change its strategy in any game state, and where the total combined objectives are maximised (or minimised). We present an extension of the temporal logic rPATL (probabilistic alternating-time temporal logic with rewards) to specify equilibria-based properties for any number of distinct coalitions, and a corresponding model checking algorithm for a variant of stopping games. We implement our techniques in the PRISM-games tool and apply them to several case studies, including a secret sharing protocol and a public good game.},
archivePrefix = {arXiv},
arxivId = {2007.03365},
author = {Kwiatkowska, Marta and Norman, Gethin and Parker, David and Santos, Gabriel},
eprint = {2007.03365},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2007.03365.pdf:pdf},
url = {http://arxiv.org/abs/2007.03365},
year = {2020}
}
@article{Aslam2015,
author = {Aslam, Asra and Khan, Ekram and Beg, M. M. Sufyan},
doi = {10.1016/j.procs.2015.08.057},
issn = {18770509},
journal = {Procedia Computer Science},
pages = {430--437},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1877050915021687},
volume = {58},
year = {2015}
}
@article{Burch1992,
abstract = {Many different methods have been devised for automatically verifying finite state systems by examining state-graph models of system behavior. These methods all depend on decision procedures that explicitly represent the state space using a list or a table that grows in proportion to the number of states. We describe a general method that represents the state space symbolically instead of explicitly. The generality of our method comes from using a dialect of the Mu-Calculus as the primary specification language. We describe a model checking algorithm for Mu-Calculus formulas that uses Bryant's Binary Decision Diagrams (Bryant, R. E., 1986, IEEE Trans. Comput.C-35) to represent relations and formulas. We then show how our new Mu-Calculus model checking algorithm can be used to derive efficient decision procedures for CTL model checking, satisfiability of linear-time temporal logic formulas, strong and weak observational equivalence of finite transition systems, and language containment for finite $\omega$-automata. The fixed point computations for each decision procedure are sometimes complex, but can be concisely expressed in the Mu-Calculus. We illustrate the practicality of our approach to symbolic model checking by discussing how it can be used to verify a simple synchronous pipeline circuit. {\textcopyright} 1992.},
author = {Burch, J. R. and Clarke, E. M. and McMillan, K. L. and Dill, D. L. and Hwang, L. J.},
doi = {10.1016/0890-5401(92)90017-A},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-089054019290017A-main.pdf:pdf},
isbn = {0818620730},
issn = {10902651},
journal = {Information and Computation},
number = {2},
pages = {142--170},
title = {{Symbolic Model Checking: $10^{20}$ States and Beyond}},
volume = {98},
year = {1992}
}
@article{Reviewed2014,
doi = {10.1016/j.pharmthera.2016.09.005},
isbn = {9781441183606},
year = {2014}
}
@inproceedings{Deshpande2014a,
abstract = {The Domain Name System (DNS) is an Internet-wide, hierarchical naming system used to translate domain names into numeric IP addresses. Any disruption of DNS service can have serious consequences. We present a formal game-theoretic analysis of a notable threat to DNS, namely the bandwidth amplification attack (BAA), and the countermeasures designed to defend against it. We model the DNS BAA as a two-player, turn-based, zero-sum stochastic game between an attacker and a defender. The attacker attempts to flood a victim DNS server with malicious traffic by choosing an appropriate number of zombie machines with which to attack. In response, the defender chooses among five BAA countermeasures, each of which seeks to increase the amount of legitimate traffic the victim server processes. To simplify the model and optimize the analysis, our model does not explicitly track the handling of each packet. Instead, our model is based on calculations of the rates at which the relevant kinds of events occur in each state. We use our game-based model of DNS BAA to generate optimal attack strategies, which vary the number of zombies, and optimal defense strategies, which aim to enhance the utility of the BAA countermeasures by combining them in advantageous ways. The goal of these strategies is to optimize the attacker's and defender's payoffs, which are defined using probabilistic reward-based properties, and are measured in terms of the attacker's ability to minimize the volume of legitimate traffic that is processed, and the defender's ability to maximize the volume of legitimate traffic that is processed. {\textcopyright} 2014 IEEE.},
author = {Deshpande, Tushar and Katsaros, Panagiotis and Smolka, Scott A. and Stoller, Scott D.},
booktitle = {Proceedings - 2014 10th European Dependable Computing Conference, EDCC 2014},
doi = {10.1109/EDCC.2014.37},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/deshpande2014.pdf:pdf},
isbn = {9781479938032},
title = {{Stochastic game-based analysis of the DNS bandwidth amplification attack using probabilistic model checking}},
year = {2014}
}
@article{Holzmann1997,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ieee97.pdf:pdf},
issn = {00985589},
journal = {IEEE Transactions on Software Engineering},
number = {5},
pages = {279--295},
title = {{The Model Checker SPIN}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=588521},
volume = {23},
year = {1997}
}
@article{Gao2015,
abstract = {Particle swarm optimization (PSO) is a nature-inspired algorithm that has shown outstanding performance in solving many realistic problems. In the original PSO and most of its variants all particles are treated equally, overlooking the impact of structural heterogeneity on individual behavior. Here we employ complex networks to represent the population structure of swarms and propose a selectively-informed PSO (SIPSO), in which the particles choose different learning strategies based on their connections: a densely-connected hub particle gets full information from all of its neighbors while a non-hub particle with few connections can only follow a single yet best-performed neighbor. Extensive numerical experiments on widely-used benchmark functions show that our SIPSO algorithm remarkably outperforms the PSO and its existing variants in success rate, solution quality, and convergence speed. We also explore the evolution process from a microscopic point of view, leading to the discovery of different roles that the particles play in optimization. The hub particles guide the optimization process towards correct directions while the non-hub particles maintain the necessary population diversity, resulting in the optimum overall performance of SIPSO. These findings deepen our understanding of swarm intelligence and may shed light on the underlying mechanism of information exchange in natural swarm and flocking behaviors.},
doi = {10.1038/srep09295},
journal = {Scientific Reports},
number = {1},
pages = {9295},
pmid = {25787315},
url = {http://www.nature.com/articles/srep09295},
volume = {5},
year = {2015}
}
@article{Ali2016,
author = {Ali, Muslihah and Noorakma, Abdullah C W and Yusof, Norliana and Mohamad, W. N F and Soin, N. and Hatta, S. F Wan Muhamad},
doi = {10.1109/SMELEC.2016.7573619},
isbn = {9781509023837},
pages = {173--176},
volume = {2016-September},
year = {2016}
}
@article{Bouyer2008,
abstract = {Metric Interval Temporal Logic (MITL) is a popular formalism for expressing real-time specifications. This logic achieves decidability by restricting the precision of timing constraints, in particular, by banning so-called punctual specifications. In this paper we introduce a significantly more expressive logic that can express a wide variety of punctual specifications, but whose model-checking problem has the same complexity as that of MITL. We conclude that for model checking the most commonly occurring specifications, such as invariance and bounded response, punctuality can be accommodated at no cost. {\textcopyright} 2008 Springer-Verlag.},
author = {Bouyer, Patricia and Markey, Nicolas and Ouaknine, Jo{\"{e}}l and Worrell, James},
doi = {10.1007/978-3-540-70583-3_11},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-540-70583-3{\_}11.pdf:pdf},
isbn = {3540705821},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {124--135},
year = {2008}
}
@article{Donoho2014,
abstract = {An unknown {\$}m{\$} by {\$}n{\$} matrix {\$}X{\_}0{\$} is to be estimated from noisy measurements {\$}Y = X{\_}0 + Z{\$}, where the noise matrix {\$}Z{\$} has i.i.d Gaussian entries. A popular matrix denoising scheme solves the nuclear norm penalization problem {\$}\backslashmin{\_}X || Y - X ||{\_}F{\^{}}2/2 + \backslashlambda ||X||{\_}* {\$}, where {\$} ||X||{\_}*{\$} denotes the nuclear norm (sum of singular values). This is the analog, for matrices, of {\$}\backslashell{\_}1{\$} penalization in the vector case. It has been empirically observed that, if {\$}X{\_}0{\$} has low rank, it may be recovered quite accurately from the noisy measurement {\$}Y{\$}. In a proportional growth framework where the rank {\$}r{\_}n{\$}, number of rows {\$}m{\_}n{\$} and number of columns {\$}n{\$} all tend to {\$}\backslashinfty{\$} proportionally to each other ({\$} r{\_}n/m{\_}n -{\textgreater} \backslashrho{\$}, {\$}m{\_}n/n -{\textgreater}\backslashbeta{\$}), we evaluate the asymptotic minimax MSE {\$}M(\backslashrho, \backslashbeta) = \backslashlim{\_}{\{}m{\_}n,n \backslashgoto \backslashinfty{\}} \backslashinf{\_}\backslashlambda \backslashsup{\_}{\{}rank(X) \backslashleq r{\_}n{\}} MSE(X,\backslashhat{\{}X{\}}{\_}\backslashlambda){\$} Our formulas involve incomplete moments of the quarter- and semi-circle laws ({\$}\backslashbeta = 1{\$}, square case) and the Mar$\backslash$v{\{}c{\}}enko-Pastur law ({\$}\backslashbeta {\textless} 1{\$}, non square case). We also show that any least-favorable matrix {\$}X{\_}0{\$} has norm "at infinity". The nuclear norm penalization problem is solved by applying soft thresholding to the singular values of {\$}Y{\$}. We also derive the minimax threshold, namely the value {\$}\backslashlambda{\^{}}*(\backslashrho){\$} which is the optimal place to threshold the singular values. All these results are obtained for general (non square, non symmetric) real matrices. Comparable results are obtained for square symmetric nonnegative- definite matrices.},
archivePrefix = {arXiv},
arxivId = {arXiv:1304.2085v3},
doi = {10.1214/14-AOS1257},
eprint = {arXiv:1304.2085v3},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Article For FYP/1304.2085.pdf:pdf},
issn = {00905364},
journal = {Annals of Statistics},
number = {6},
pages = {2413--2440},
volume = {42},
year = {2014}
}
@book{Majumdar2018,
abstract = {Model checking is a computer-assisted method for the analysis of dynamical systems that can be modeled by state-transition systems. Drawing from research traditions in mathematical logic, programming languages, hardware design, and theoretical computer science, model checking is now widely used for the verification of hardware and software in industry. The editors and authors of this handbook are among the world's leading researchers in this domain, and the 32 contributed chapters present a thorough view of the origin, theory, and application of model checking. In particular, the editors classify the advances in this domain and the chapters of the handbook in terms of two recurrent themes that have driven much of the research agenda: the algorithmic challenge, that is, designing model-checking algorithms that scale to real-life problems; and the modeling challenge, that is, extending the formalism beyond Kripke structures and temporal logic. The book will be valuable for researchers and graduate students engaged with the development of formal methods and verification tools.},
doi = {10.1007/978-3-319-10575-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/majumdar2018.pdf:pdf},
isbn = {9783319105758},
pages = {1--1210},
title = {{Handbook of Model Checking}},
year = {2018}
}
@article{Wang2014,
author = {Wang, Gai-Ge and {Hossein Gandomi}, Amir and Yang, Xin-She and {Hossein Alavi}, Amir},
doi = {10.1108/EC-10-2012-0232},
issn = {0264-4401},
journal = {Engineering Computations},
number = {7},
pages = {1198--1220},
title = {{A novel improved accelerated particle swarm optimization algorithm for global numerical optimization}},
url = {http://www.emeraldinsight.com/doi/10.1108/EC-10-2012-0232},
volume = {31},
year = {2014}
}
@article{Vig2006,
abstract = {As the community strives towards autonomous multi-robot systems, there is a need for these systems to autonomously form coalitions to complete assigned missions. Numerous coalition formation algorithms have been proposed in the software agent literature. Algorithms exist that form agent coalitions in both super additive and non-super additive environments. The algorithmic techniques vary from negotiation-based protocols in multi-agent system (MAS) environments to those based on computation in distributed problem solving (DPS) environments. Coalition formation behaviors have also been discussed in relation to game theory. Despite the plethora of MAS coalition formation literature, to the best of our knowledge none of the proposed algorithms have been demonstrated with an actual multi-robot system. There exists a discrepancy between the multi-agent algorithms and their applicability to the multi-robot domain. This paper aims to bridge that discrepancy by unearthing the issues that arise while attempting to tailor these algorithms to the multi-robot domain. A well-known multi-agent coalition formation algorithm has been studied in order to identify the necessary modifications to facilitate its application to the multi-robot domain. This paper reports multi-robot coalition formation results based upon simulation and actual robot experiments. A multi-agent coalition formation algorithm has been demonstrated on an actual robot system. {\textcopyright} 2006 IEEE.},
doi = {10.1109/TRO.2006.878948},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/TRO.2006.878948.pdf:pdf},
issn = {15523098},
journal = {IEEE Transactions on Robotics},
number = {4},
pages = {637--649},
volume = {22},
year = {2006}
}
@inproceedings{Zhang2010b,
abstract = {Glaucoma is the second leading cause of permanent blindness worldwide. Glaucoma can be diagnosed through measurement of neuro-retinal optic cup-to-disc ratio (CDR). Correctly determining the optic disc region of interest (ROI) will produce a smaller initial image which takes much lesser time taken to process compared to the entire image. The earlier ROI localization in the ARGALI system used a grid based method. The new algorithm adds a preprocessing step before analyzing the image. This step significantly improves the performance of the ROI detection. A batch of 1564 retinal images from the Singapore Eye Research Centre was used to compare the performance of the two methods. From the results, the earlier and new algorithm detects the ROI correctly for 88{\%} and 96{\%} of the images respectively. The results indicate potential applicability of the method for automated and objective mass screening for early detection of glaucoma.},
author = {Zhang, Zhuo and Lee, Beng Hai and Liu, Jiang and Wong, Damon Wing Kee and Tan, Ngan Meng and Lim, Joo Hwee and Yin, Fengshou and Huang, Weimin and Li, Huiqi and Wong, Tien Yin},
booktitle = {Proceedings of the 2010 5th IEEE Conference on Industrial Electronics and Applications, ICIEA 2010},
doi = {10.1109/ICIEA.2010.5515221},
isbn = {9781424450466},
pages = {1686--1689},
title = {{Optic disc region of interest localization in fundus image for glaucoma detection in ARGALI}},
year = {2010}
}
@article{DeMoura2008,
abstract = {Satisfiability Modulo Theories (SMT) problem is a decision problem for logical first order formulas with respect to combinations of background theories such as: arithmetic, bit-vectors, arrays, and uninterpreted functions. Z3 is a new and efficient SMT Solver freely available from Microsoft Research. It is used in various software verification and analysis applications. {\textcopyright} 2008 Springer-Verlag Berlin Heidelberg.},
author = {{De Moura}, Leonardo and Bj{\o}rner, Nikolaj},
doi = {10.1007/978-3-540-78800-3_24},
isbn = {3540787992},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {337--340},
title = {{Z3: An Efficient SMT Solver}},
year = {2008}
}
@book{Kounev2017b,
abstract = {This book provides formal and informal definitions and taxonomies for self-aware computing systems, and explains how self-aware computing relates to many existing subfields of computer science, especially software engineering. It describes architectures and algorithms for self-aware systems as well as the benefits and pitfalls of self-awareness, and reviews much of the latest relevant research across a wide array of disciplines, including open research challenges. The chapters of this book are organized into five parts: Introduction, System Architectures, Methods and Algorithms, Applications and Case Studies, and Outlook. Part I offers an introduction that defines self-aware computing systems from multiple perspectives, and establishes a formal definition, a taxonomy and a set of reference scenarios that help to unify the remaining chapters. Next, Part II explores architectures for self-aware computing systems, such as generic concepts and notations that allow a wide range of self-aware system architectures to be described and compared with both isolated and interacting systems. It also reviews the current state of reference architectures, architectural frameworks, and languages for self-aware systems. Part III focuses on methods and algorithms for self-aware computing systems by addressing issues pertaining to system design, like modeling, synthesis and verification. It also examines topics such as adaptation, benchmarks and metrics. Part IV then presents applications and case studies in various domains including cloud computing, data centers, cyber-physical systems, and the degree to which self-aware computing approaches have been adopted within those domains. Lastly, Part V surveys open challenges and future research directions for self-aware computing systems. It can be used as a handbook for professionals and researchers working in areas related to self-aware computing, and can also serve as an advanced textbook for lecturers and postgraduate students studying subjects like advanced software engineering, autonomic computing, self-adaptive systems, and data-center resource management. Each chapter is largely self-contained, and offers plenty of references for anyone wishing to pursue the topic more deeply.},
author = {Kounev, Samuel and Kephart, Jeffrey O. and Milenkoski, Aleksandar and Zhu, Xiaoyun},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/kephart2017.pdf:pdf},
isbn = {9783319474748},
pages = {1--722},
title = {{Self-Aware Computing Systems}},
year = {2017}
}
@article{Mehra2019,
author = {Mehra, Agrim and Tripathy, Priyansha and Faridi, Ashhad and Chinmay, Ayes},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/pq3s601594707500.pdf:pdf},
number = {12},
pages = {25--29},
volume = {4},
year = {2019}
}
@article{DSouza2012,
doi = {10.1142/9789814271059_0002},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/tcs-96-2.pdf:pdf},
pages = {45--78},
year = {2012}
}
@article{Antiochos,
archivePrefix = {arXiv},
arxivId = {arXiv:astro-ph/9808199v1},
author = {Antiochos, S K and Macneice, P J and Spicer, D S},
eprint = {astro-ph/9808199v1},
primaryClass = {astro-ph}
}
@article{Shen2018a,
author = {Shen, Di and Lim, Cheng-chew and Shi, Peng and Bujlo, Piotr},
doi = {10.1109/TCST.2018.2878173},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/shen2018.pdf:pdf},
journal = {IEEE Transactions on Control Systems Technology},
pages = {1--13},
publisher = {IEEE},
title = {{Energy Management of Fuel Cell Hybrid Vehicle Based on Partially Observable Markov
Decision Process}},
volume = {PP},
year = {2018}
}
@book{Kounev2017a,
abstract = {This book provides formal and informal definitions and taxonomies for self-aware
computing systems, and explains how self-aware computing relates to many existing subfields of
computer science, especially software engineering. It describes architectures and algorithms for self-
aware systems as well as the benefits and pitfalls of self-awareness, and reviews much of the latest
relevant research across a wide array of disciplines, including open research challenges. The
chapters of this book are organized into five parts: Introduction, System Architectures, Methods and
Algorithms, Applications and Case Studies, and Outlook. Part I offers an introduction that defines
self-aware computing systems from multiple perspectives, and establishes a formal definition, a
taxonomy and a set of reference scenarios that help to unify the remaining chapters. Next, Part II
explores architectures for self-aware computing systems, such as generic concepts and notations
that allow a wide range of self-aware system architectures to be described and compared with both
isolated and interacting systems. It also reviews the current state of reference architectures,
architectural frameworks, and languages for self-aware systems. Part III focuses on methods and
algorithms for self-aware computing systems by addressing issues pertaining to system design, like
modeling, synthesis and verification. It also examines topics such as adaptation, benchmarks and
metrics. Part IV then presents applications and case studies in various domains including cloud
computing, data centers, cyber-physical systems, and the degree to which self-aware computing
approaches have been adopted within those domains. Lastly, Part V surveys open challenges and
future research directions for self-aware computing systems. It can be used as a handbook for
professionals and researchers working in areas related to self-aware computing, and can also serve
as an advanced textbook for lecturers and postgraduate students studying subjects like advanced
software engineering, autonomic computing, self-adaptive systems, and data-center resource
management. Each chapter is largely self-contained, and offers plenty of references for anyone
wishing to pursue the topic more deeply.},
author = {Kounev, Samuel and Kephart, Jeffrey O. and Milenkoski, Aleksandar and Zhu, Xiaoyun},
doi = {10.1007/978-3-319-47474-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783319474748},
pages = {1--722},
publisher = {Springer},
title = {{Self-Aware Computing Systems}},
year = {2017}
}
@article{Roveri2019,
abstract = {Learning under concept drift is a novel and promising research area aiming at designing
learning algorithms able to deal with nonstationary data-generating processes. In this research field,
most of the literature focuses on learning nonstationary probabilistic frameworks, while some
extensions about learning graphs and signals under concept drift exist. For the first time in the
literature, this paper addresses the problem of learning discrete-time Markov chains (DTMCs) under
concept drift. More specifically, following a hybrid active/passive approach, this paper introduces
both a family of change-detection mechanisms (CDMs), differing in the required assumptions and
performance, for detecting changes in DTMCs and an adaptive learning algorithm able to deal with
DTMCs under concept drift. The effectiveness of both the proposed CDMs and the adaptive learning
algorithm has been extensively tested on synthetically generated experiments and real data sets.},
doi = {10.1109/TNNLS.2018.2886956},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/roveri2019.pdf:pdf},
issn = {21622388},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
number = {9},
pages = {2570--2582},
publisher = {IEEE},
year = {2019}
}
@article{Lazzaretto2019,
abstract = {This Special Issue addresses the general problem of a proper match between the
demands of energy users and the units for energy conversion and storage, by means of proper
design and operation of the overall energy system configuration. The focus is either on systems
including single plants or groups of plants, connected or not to one or more energy distribution
networks. In both cases, the optimum design and operation involve decisions about thermodynamic
processes, about the type, number, design parameters of components/plants, and storage
capacities, and about mutual interconnections and the interconnections with the distribution grids.
The problem is very wide, can be tackled with different methodologies and may have several, more
or less valuable and complicated solutions. The twelve accepted papers certainly represent a good
contribution to perceive its difficulty.},
doi = {10.3390/en12203957},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/energies-12-03957.pdf:pdf},
issn = {19961073},
journal = {Energies},
number = {20},
title = {{Optimum choice of energy system configuration and storages for a proper match between
energy conversion and demands}},
volume = {12},
year = {2019}
}
@article{Souri2018a,
abstract = {Today, service composition is emerging paradigm on the communication networks such
as cloud environments, internet of things, wireless sensor network, and software-defined network.
The goal of service composition method is to provide the interactions between user requirements
and smart objects of intelligent communication systems. There have been many efforts to use formal
verification and behavioral modeling methods to evaluate the service composition mechanisms. Up
to now, there is not a comprehensive analysis research on this topic. Therefore, this paper focuses
on several formal verification approaches that are performed to confirm the service composition
correctness in communication networks. The objective of this paper is to comprehensively
categorize and examine current research techniques on formal verification of the service
composition. This research analysis provides an overview of recent service composition approaches
according to structural and functional properties. Comparison results show that most of the
verification approaches in explanation of the service composition correctness are semantic-aware
approach with 43{\%}. The most used verification method for the service composition is model
checking with 69{\%}. The process algebra is used 29{\%}, and some theorem proving methods are
applied in 9{\%} of the investigated mechanism. Moreover, most widely used modeling tools are
NuSMV (22{\%}), SPIN (17{\%}), CPN (12{\%}), UPPAAL (12{\%}), Event-B (10{\%}), and PAT (5{\%}).},
author = {Souri, Alireza and Rahmani, Amir Masoud and Navimipour, Nima},
doi = {10.1002/dac.3808},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/souri2018.pdf:pdf},
issn = {10991131},
journal = {International Journal of Communication Systems},
number = {17},
pages = {1--27},
title = {{Formal verification approaches in the web service composition: A comprehensive analysis of
the current challenges for future research}},
volume = {31},
year = {2018}
}
@article{Mwanza2013,
abstract = {PURPOSE: To create a multivariable predictive model for glaucoma with early visual field
loss using a combination of spectral-domain optical coherence tomography (SD-OCT) parameters,
and to compare the results with single variable models.$\backslash$n$\backslash$nMETHODS: Two
hundred fifty-three subjects (149 healthy controls and 104 with early glaucoma) underwent optic
disc and macular scanning using SD-OCT in one randomly selected eye per subject. Sixteen
parameters (rim area, cup-to-disc area ratio, vertical cup-to-disc diameter ratio, average and
quadrant RNFL thicknesses, average, minimum, and sectoral ganglion cell inner-plexiform layer
[GCIPL] thicknesses) were collected and submitted to an exploratory factor analysis (EFA) followed
by logistic regression with the backward elimination variable selection technique. Area under the
curve (AUC) of the receiver operating characteristic (ROC), sensitivity, specificity, Akaike's
information criterion (AIC), predicted probability, prediction interval length (PIL), and classification
rates were used to determine the performances of the univariable and multivariable models.$\
backslash$n$\backslash$nRESULTS: The multivariable model had an AUC of 0.995 with 98.6{\%}
sensitivity, 96.0{\%} specificity, and an AIC value of 43.29. Single variable models yielded AUCs of
0.943 to 0.987, sensitivities of 82.6{\%} to 95.7{\%}, specificities of 88.0{\%} to 94.0{\%}, and AICs of
113.16 to 59.64 (smaller is preferred). The EFA logistic regression model correctly classified 91.67{\
%} of cases with a median PIL of 0.050 in the validation set. Univariable models correctly classified
80.62{\%} to 90.48{\%} of cases with median PILs 1.9 to 3.0 times larger.$\backslash$n$\
backslash$nCONCLUSIONS: The multivariable model was successful in predicting glaucoma with
early visual field loss and outperformed univariable models in terms of AUC, AIC, PILs, and
classification rates.},
author = {Mwanza, Jean-Claude and Warren, Joshua L and Budenz, Donald L},
doi = {10.1167/iovs.13-12749},
isbn = {9198430297},
issn = {1552-5783},
journal = {Investigative Ophthalmology {\&} Visual Science},
number = {13},
pages = {8393--400},
pmid = {24282232},
title = {{Combining spectral domain optical coherence tomography structural parameters for the
diagnosis of glaucoma with early visual field loss.}},
url = {http://www.ncbi.nlm.nih.gov/pubmed/24282232},
volume = {54},
year = {2013}
}
@article{Wenrong2008,
author = {Wenrong, Si and Junhao, Li and Peng, Yuan and Yanming, Li},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/IEEETDEISCI-378GIEI-
090211850871Digitaldetectiongroupingandclassificationofpartialdischargeatdcvoltage.pdf:pdf},
journal = {IEEE Transactions on Dielectrics and Electrical Insulation},
number = {6},
pages = {1663--1674},
title = {{Digital Detection, Grouping and Classification of Partial Discharge Signals at DC Voltage}},
volume = {15},
year = {2008}
}
@article{Lavaei2020,
abstract = {In this paper, we provide a compositional approach for constructing finite abstractions
(a.k.a. finite Markov decision processes (MDPs)) of interconnected discrete-time stochastic switched
systems. The proposed framework is based on a notion of stochastic simulation functions, using
which one can employ an abstract system as a substitution of the original one in the controller
design process with guaranteed error bounds on their output trajectories. To this end, we first
provide probabilistic closeness guarantees between the interconnection of stochastic switched
subsystems and that of their finite abstractions via stochastic simulation functions. We then leverage
sufficient small-gain type conditions to show compositionality results of this work. Afterwards, we
show that under standard assumptions ensuring incremental input-to-state stability of switched
systems (i.e., existence of common incremental Lyapunov functions, or multiple incremental
Lyapunov functions with dwell-time), one can construct finite MDPs for the general setting of
nonlinear stochastic switched systems. We also propose an approach to construct finite MDPs
together with their corresponding stochastic simulation functions for a particular class of nonlinear
stochastic switched systems. We show that for this class of systems, the aforementioned
incremental stability property can be readily checked by matrix inequalities. To demonstrate the
effectiveness of our proposed results, we first apply our approaches to a road traffic network in a
circular cascade ring composed of 200 cells, and construct compositionally a finite MDP of the
network. We employ the constructed finite abstractions as substitutes to compositionally synthesize
policies keeping the density of the traffic lower than 20 vehicles per cell. We then apply our
proposed techniques to a fully interconnected network of 500 nonlinear subsystems (totally 1000
dimensions), and construct their finite MDPs with guaranteed error bounds. We compare our
proposed results with those available in the literature.},
doi = {10.1016/j.automatica.2020.108827},
file =
{:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {00051098},
journal = {Automatica},
pages = {108827},
url = {https://doi.org/10.1016/j.automatica.2020.108827},
volume = {114},
year = {2020}
}
@article{DeBoer2007,
abstract = {A long and lasting problem in agent research has been to close the gap between agent
logics and agent programming frameworks. The main reason for this problem of establishing a link
between agent logics and agent programming frameworks is identified and explained by the fact
that agent programming frameworks have hardly incorporated the concept of a declarative goal.
Instead, such frameworks have focused mainly on plans or goals-to-do instead of the end goals to be
realised which are also called goals-to-be. In this paper, the programming language GOAL is
introduced which incorporates such declarative goals. The notion of a commitment strategy-one of
the main theoretical insights due to agent logics, which explains the relation between beliefs and
goals-is used to construct a computational semantics for GOAL. Finally, a proof theory for proving
properties of GOAL agents is introduced. Thus, the main contribution of this paper, rather than the
language GOAL itself, is that we offer a complete theory of agent programming in the sense that our
theory provides both for a programming framework and a programming logic for such agents. An
example program is proven correct by using this programming logic. {\textcopyright} 2005 Elsevier
B.V. All rights reserved.},
author = {de Boer, F. S. and Hindriks, K. V. and van der Hoek, W. and Meyer, J. J.Ch},
doi = {10.1016/j.jal.2005.12.014},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/j.jal.2005.12.014.pdf:pdf},
issn = {15708683},
journal = {Journal of Applied Logic},
number = {2},
pages = {277--302},
title = {{A verification framework for agent programming with declarative goals}},
volume = {5},
year = {2007}
}
@article{Jeannet2010,
author = {Jeannet, Bertrand and Argenio, Pedro R D and Larsen, Kim G},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/paper-197.pdf:pdf},
volume = {2},
year = {2002}
}
@article{ImranQureshi2015,
abstract = {Glaucoma is a disease associated with human eyes and second conducting
movementofblindness across the globe if$\backslash$r$\backslash$neyes are not treated at
preliminary stage. Glaucoma normally occurs with increased intra-ocular pressure (IOP) in eyes and
gradually damagesthe vision field of eyes. The term ocular-hypertension is related to those people in
whom IOP increases consistently and does not damage the optic nerve. Glaucoma has different
types such as$\backslash$r$\backslash$nopen-angle, close-angle, congenital, normal tension and
etcetera. Normal tension glaucoma affects vision field and damages optic nerve as well. The term
angle means the distance between iris and cornea; if this distance is large it is referred to as open-
angle glaucoma and similarly if the distance between iris and cornea is short than this is$\
backslash$r$\backslash$ncalled close-angle glaucoma. Open-angle glaucoma is common as
compared to close-angle glaucoma. Close-angle glaucoma is very painful and affects vision field of
eyes quickly as compared to open-angle glaucoma. In this paper, the state of the art CAD systems
and image processing methods are studied and compared systematically in terms of their
classification accuracy, methodology approach, sensitivity and specificity. The comparison results$\
backslash$r$\backslash$nindicate that the accuracy of these CAD systems and image processing
methods is not up to the mark.},
journal = {International Journal of Advanced Networking and Applications},
number = {02},
pages = {2705--2718},
title = {{Glaucoma Detection in Retinal Images Using Image Processing Techniques: A Survey}},
url = {http://www.ijana.in/papers/V7I2-10.pdf},
volume = {7},
year = {2015}
}
@article{Lucia2018,
author = {De Lucia, Andrea and Deufemia, Vincenzo and Gravino, Carmine and Risi, Michele},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/lucia2018.pdf:pdf},
number = {4},
title = {{Detecting the Behavior of Design Patterns through Model Checking and Dynamic Analysis}},
volume = {26},
year = {2018}
}
@article{Sanctis2019,
author = {De Sanctis, Martina and Spalazzese, Romina and Trubiani, Catia},
doi = {10.1007/978-3-030-29983-5},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1007978-3-030-29983-5.pdf:pdf},
isbn = {9783030299835},
pages = {178--194},
url = {http://dx.doi.org/10.1007/978-3-030-29983-5{\_}12},
volume = {1},
year = {2019}
}
@article{Purian2013,
author = {Purian, Fatemeh Khosravi and Farokhi, Fardad and Nadooshan, Reza Sabbaghi},
number = {2},
pages = {29--44},
title = {{Comparing the Performance of Genetic Algorithm and Ant Colony Optimization Algorithm for
Mobile Robot Path Planning in the Dynamic Environments with Different Complexities}},
volume = {3},
year = {2013}
}
@article{Camara2019,
abstract = {Designing software in a way that guarantees run-time behavior while achieving an
acceptable balance among multiple quality attributes is an open problem. Providing guarantees
about the satisfaction of the same requirements under uncertain environments is even more
challenging. Tools and techniques to inform engineers about poorly-understood design spaces in the
presence of uncertainty are needed, so that engineers can explore the design space, especially when
tradeoffs are crucial. To tackle this problem, we describe an approach that combines synthesis of
spaces of system design alternatives from formal specifications of architectural styles with
probabilistic formal verification. The main contribution of this paper is a formal framework for
specification-driven synthesis and analysis of design spaces that provides formal guarantees about
the correctness of system behaviors and satisfies quantitative properties (e.g., defined over system
qualities) subject to uncertainty, which is treated as a first-class entity. We illustrate our approach in
two case studies: a service-based adaptive system and a mobile robotics architecture. Our results
show how the framework can provide useful insights into how average case probabilistic guarantees
can differ from worst case guarantees, emphasizing the relevance of combining quantitative formal
verification methods with structural synthesis, in contrast with techniques based on simulation and
dynamic analysis that can only provide estimates about average case probabilistic properties.},
doi = {10.1016/j.jss.2019.02.055},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0164121219300445-
main.pdf:pdf},
issn = {01641212},
journal = {Journal of Systems and Software},
pages = {33--49},
title = {{Synthesizing tradeoff spaces with quantitative guarantees for families of software systems}},
volume = {152},
year = {2019}
}
@article{Gulwani2017,
abstract = {Program synthesis is the task of automatically finding a program in the underlying
programming language that satisfies the user intent expressed in the form of some specification.
Since the inception of AI in the 1950s, this problem has been considered the holy grail of Computer
Science. Despite inherent challenges in the problem such as ambiguity of user intent and a typically
enormous search space of programs, the field of program synthesis has developed many different
techniques that enable program synthesis in different real-life application domains. It is now used
successfully in software engineering, biological discovery, computer-aided education, end-user
programming, and data cleaning. In the last decade, several applications of synthesis in the field of
programming by examples have been deployed in mass-market industrial products. This survey is a
general overview of the state-of-the-art approaches to program synthesis, its applications, and
subfields. We discuss the general principles common to all modern synthesis approaches such as
syntactic bias, oracle-guided inductive search, and optimization techniques. We then present a
literature review covering the four most common state-of-the-art techniques in program synthesis:
enumerative search, constraint solving, stochastic search, and deduction-based programming by
examples. We conclude with a brief list of future horizons for the field.},
doi = {10.1561/2500000010},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/gulwani2017.pdf:pdf},
issn = {23251131},
journal = {Foundations and Trends in Programming Languages},
number = {1-2},
pages = {1--119},
volume = {4},
year = {2017}
}
@article{Sivaswamy2015,
abstract = {Optic nerve head (ONH)
segmentation problem is of interest for automated glaucoma assessment. Although various
segmentation methods have been proposed in the recent past, it is difficult to evaluate and compare
the performance of individual methods due to a lack of a benchmark dataset. The assessment
involves segmentation of optic disk and cup region within the ONH. In this paper, we present a
comprehensive dataset of retinal images of both normal and glaucomatous eyes with manual
segmentations from multiple human experts. The dataset also provides expert opinion on an image
representing a normal or glaucomatous eye and on the presence of notching in an image. Several
state of the art methods are assessed against this dataset using cup to disc diameter ratio (CDR),
area and boundary-based evaluation measures. These are presented to aid benchmarking of new
methods. A supervised, notch detection method based on the segmentation results is also proposed
and its assessment results are included for benchmarking.},
author = {Sivaswamy, Jayanthi and Chakravarty, Arunava and {Datt Joshi}, Gopal and {Abbas Syed},
Tabish},
journal = {JSM Biomedical Imaging Data Papers},
number = {1},
pages = {1--7},
title = {{A Comprehensive Retinal Image Dataset for the
Assessment of Glaucoma from the Optic Nerve Head Analysis}},
volume = {2},
year = {2015}
}
@article{Glazier2019a,
author = {Glazier, Thomas and Garlan, David},
doi = {10.1109/fas-w.2019.00038},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/aamcas{\_}2.pdf:pdf},
pages = {110--115},
year = {2019}
}
@article{Chen2005,
abstract = {The semi-Markov decision model is a powerful tool in analyzing sequential decision
processes with random decision epochs. In this paper, we have built the semi-Markov decision
process (SMDP) for the maintenance policy optimization of condition-based preventive maintenance
problems, and have presented the approach for joint optimization of inspection rate and
maintenance policy. Through numerical examples, the improvement of this method is compared
with the scheme, which optimizes only over the inspection rate. We also find that under a special
case when the deterioration rate at each failure stage is the same, the optimal policy obtained by
SMDP algorithm is a dynamic threshold-type scheme with threshold value depending on the
inspection rate. {\textcopyright} 2004 Elsevier Ltd. All rights reserved.},
doi = {10.1016/j.ress.2004.11.001},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Optimization{\_}for{\_}condition-based{\
_}mainten.pdf:pdf},
issn = {09518320},
journal = {Reliability Engineering {\&} System Safety},
number = {1},
pages = {25--29},
volume = {90},
year = {2005}
}
@article{Echaveguren2017,
doi = {10.4067/s0718-50732017000100005},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/en{\_}art05.pdf:pdf},
journal = {Revista ingenier{\'{i}}a de construcci{\'{o}}n},
number = {1},
pages = {47--56},
volume = {32},
year = {2017}
}
@article{PhilipChen2014,
abstract = {It is already true that Big Data has drawn huge attention from researchers in information
sciences, policy and decision makers in governments and enterprises. As the speed of information
growth exceeds Moore's Law at the beginning of this new century, excessive data is making great
troubles to human beings. However, there are so much potential and highly useful values hidden in
the huge volume of data. A new scientific paradigm is born as data-intensive scientific discovery
(DISD), also known as Big Data problems. A large number of fields and sectors, ranging from
economic and business activities to public administration, from national security to scientific
researches in many areas, involve with Big Data problems. On the one hand, Big Data is extremely
valuable to produce productivity in businesses and evolutionary breakthroughs in scientific
disciplines, which give us a lot of opportunities to make great progresses in many fields. There is no
doubt that the future competitions in business productivity and technologies will surely converge
into the Big Data explorations. On the other hand, Big Data also arises with many challenges, such as
difficulties in data capture, data storage, data analysis and data visualization. This paper is aimed to
demonstrate a close-up view about Big Data, including Big Data applications, Big Data opportunities
and challenges, as well as the state-of-the-art techniques and technologies we currently adopt to
deal with the Big Data problems. We also discuss several underlying methodologies to handle the
data deluge, for example, granular computing, cloud computing, bio-inspired computing, and
quantum computing. {\textcopyright} 2014 Elsevier Inc. All rights reserved.},
archivePrefix = {arXiv},
arxivId = {1312.4722},
doi = {10.1016/j.ins.2014.01.015},
eprint = {1312.4722},
isbn = {0020-0255},
issn = {00200255},
journal = {Information Sciences},
pages = {314--347},
pmid = {96027714},
title = {{Data-intensive applications, challenges, techniques and technologies: A survey on Big Data}},
url = {http://dx.doi.org/10.1016/j.ins.2014.01.015},
volume = {275},
year = {2014}
}
@article{He,
archivePrefix = {arXiv},
arxivId = {1804.07995v1},
author = {He, Xingshi and Yang, Xin-she and Karamanoglu, Mehmet and Zhao, Yuxin},
eprint = {1804.07995v1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1804.07995.pdf:pdf},
pages = {1--12},
primaryClass = {math.OC},
title = {{Global Convergence Analysis of the Flower Pollination Algorithm: A Discrete-Time Markov
Chain Approach}},
year = {2018}
}
@article{SamiSalamaHussenHajjaj2016,
abstract = {In a ROS application, robot software is often distributed across multiple networked
components, forming the ROS network, where every component acts as server and/or a client,
publishing and/or receiving robot data simultaneously. For indoor robots, a local ROS network,
through a WiFi hotspot, is sufficient. But for outdoor robots, a remote ROS network is needed, to the
ROS application to the cloud. Although a number cloud-based solutions support this, implementing
them is challenging, as they need to be configured to facilitate ROS's unique multi-directional, and
simultaneous flow of robot data. This paper presents PortForwarding (PF) as an alternative
approach, which offers a private, secured, and a direct ROS-to-ROS remote connection, eliminating
the need for middle-ware and its configuration and setup complexities. But PF has its own
challenges; chiefly, the beforehand knowledge of IP addresses of all networked components, and the
need to update PF settings when these address change, which they often do. This paper addresses
this issue (and others), and presents a detailed procedure for setting PF for ROS applications,
highlighting configuration and troubleshooting steps. Also, the paper compares between PF and
cloud-solutions, in terms of setup, performance, and others. Results show that robot performance
under PF is on par with cloud-based solutions, but it required a fraction of setup time. The authors
developed a set of shell-scripts that monitor the IP addresses of all networked components and
auto-update PF settings when they change, solving this issue. With this, PortForwarding could be
considered a viable option for remote ROS networks, on par with cloud-based solutions.},
archivePrefix = {arXiv},
arxivId = {arXiv:1508.04886v1},
eprint = {arXiv:1508.04886v1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ijrr19.pdf:pdf},
isbn = {0037549716666},
issn = {00219193},
pages = {1--28},
pmid = {6358196},
title = {{Establishing Remote ROS Networks via Port Forwarding: a Detailed Tutorial}},
year = {2016}
}
@article{Swikir2018,
archivePrefix = {arXiv},
arxivId = {2004.00131},
doi = {10.1145/3178126.3187000},
eprint = {2004.00131},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2004.00131.pdf:pdf},
pages = {275--276},
year = {2018}
}
@article{Stoll2012,
author = {Stoll, Enrico and Jaekel, Steffen and Katz, Jacob and {Saenz Otero}, Alvar and Varatharajoo,
Renuganth},
doi = {10.1002/rob},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/parker2015.pdf:pdf},
isbn = {9783902661623},
issn = {14746670},
number = {4},
pages = {554--575},
title = {{Exploiting spatial locality and heterogeneity of agents for search and rescue teamwork}},
volume = {29},
year = {2012},
internal-note = {review: title/file appear to belong to a different work (parker2015.pdf) and the DOI is truncated -- verify against the original record}
}
@article{Smirnov2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S2212827115001651-
main.pdf:pdf},
issn = {22128271},
journal = {Procedia CIRP},
pages = {329--334},
url = {http://dx.doi.org/10.1016/j.procir.2015.02.089},
volume = {30},
year = {2015}
}
@inproceedings{Kavzoglu2017,
booktitle = {2017 8th International Conference on Recent Advances in Space Technologies (RAST)},
doi = {10.1109/RAST.2017.8002984},
isbn = {978-1-5386-1605-5},
pages = {113--117},
title = {{A comparative study of segmentation quality for multi-resolution segmentation and
watershed transform}},
url = {http://ieeexplore.ieee.org/document/8002984/},
year = {2017}
}
@article{Lokesh2015,
pages = {18--28},
}
@article{Pauwels2014,
abstract = {PURPOSE: To develop a fully automated, accurate and robust segmentation technique for
dental implants on cone-beam CT (CBCT) images.$\backslash$n$\backslash$nMETHODS: A head-size
cylindrical polymethyl methacrylate phantom was used, containing titanium rods of 5.15 mm
diameter. The phantom was scanned on 17 CBCT devices, using a total of 39 exposure protocols.
Images were manually thresholded to verify the applicability of adaptive thresholding and to
determine a minimum threshold value (Tmin). A three-step automatic segmentation technique was
developed. Firstly, images were pre-thresholded using Tmin. Next, edge enhancement was
performed by filtering the image with a Sobel operator. The filtered image was thresholded using an
iteratively determined fixed threshold (Tedge) and converted to binary. Finally, a particle counting
method was used to delineate the rods. The segmented area of the titanium rods was compared to
the actual area, which was corrected for phantom tilting.$\backslash$n$\backslash$nRESULTS:
Manual thresholding resulted in large variation in threshold values between CBCTs. After applying
the edge-enhancing filter, a stable Tedge value of 7.5{\%} was found. Particle counting successfully
detected the rods for all but one device. Deviations between the segmented and real area ranged
between -2.7 and +14.4mm(2) with an average absolute error of 2.8mm(2). Considering the
diameter of the segmented area, submillimeter accuracy was seen for all but two data sets.$\
backslash$n$\backslash$nCONCLUSION: A segmentation technique was defined which can be
applied to CBCT data for an accurate and fully automatic delineation of titanium rods. The technique
was validated in vitro and will be further tested and refined on patient data.},
author = {Pauwels, Ruben and Jacobs, Reinhilde and Bosmans, Hilde and Pittayapat, Pisha and
Kosalagood, Pasupen and Silkosessak, Onanong and Panmekiate, Soontra},
doi = {10.1007/s11548-013-0946-z},
isbn = {1861-6410},
issn = {18616429},
journal = {International Journal of Computer Assisted Radiology and Surgery},
number = {4},
pages = {733--743},
pmid = {24078371},
title = {{Automated implant segmentation in cone-beam CT using edge detection and particle
counting}},
volume = {9},
year = {2014}
}
@article{Brandao2016,
abstract = {Purpose. To compare two different spectral-domain optical coherence tomography (OCT)
systems in regard to full macular thickness (MT) and ganglion cell layer-inner plexiform layer (GCIPL)
measures and in regard to structure-function correlation when compared to standard automated
perimetry (SAP). Methods. Seventeen primary open angle glaucoma patients and 16 controls (one
eye per subject) were enrolled. MT and GCIPL thicknesses were measured by Cirrus and Spectralis
OCTs. Octopus Perimeter 101 (G2 protocol) reports sensitivity in mean defect (dB). Differences
between measurements were assessed with Student's $t$-test and Bland Altman.
Diagnostic performance was also compared between each parameter calculating the areas under
the operator receiver (ROC). Linear models were used to investigate structure-function association
between OCT and SAP. Results. Disagreement between OCTs in both MT and GCIPL values was
significant. Spectralis values were thicker than Cirrus. Average difference between OCTs was 21.64
$\mu$m (SD 4.5) for MT and 9.8 $\mu$m (SD 5.4) for GCIPL ($p$ {\textless} 0.001).
Patients differed significantly from controls in both OCTs, in both measurements.
MT and GCIPL were negatively associated with MD ($p$ {\textless} 0.001). Conclusions.
Although OCT values were not interchangeable, both machines differentiated patients from controls
with statistical significance. Structure-function analysis results were comparable, when either OCT
was compared to SAP.},
author = {Brandao, Livia M. and Ledolter, Anna A. and Sch{\"o}tzau, Andreas and Palmowski-Wolfe,
Anja M.},
doi = {10.1155/2016/8307639},
issn = {20900058},
pmid = {26966557},
title = {{Comparison of Two Different OCT Systems: Retina Layer Segmentation and Impact on
Structure-Function Analysis in Glaucoma}},
volume = {2016},
year = {2016}
}
@article{Kobylin2014,
number = {8},
pages = {572--580},
title = {{Comparison of standard image edge detection techniques and of method based on wavelet
transform}},
volume = {2},
year = {2014}
}
@article{Kelley2005,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/US6904423.pdf:pdf},
number = {12},
volume = {1},
year = {2005}
}
@article{Alrahman2017a,
abstract = {Collective adaptive systems are new emerging computational systems consisting of a
large number of interacting components and featuring complex behaviour. These systems are
usually distributed, heterogeneous, decentralised and interdependent, and are operating in dynamic
and possibly unpredictable environments. Finding ways to understand and design these systems
and, most of all, to model the interactions of their components, is a difficult but important
endeavour. In this article we propose a language-based approach for programming the interactions
of collective-adaptive systems by relying on attribute-based communication; a paradigm that
permits a group of partners to communicate by considering their run-time properties and
capabilities. We introduce AbC, a foundational calculus for attribute-based communication and show
how its linguistic primitives can be used to program a complex and sophisticated variant of the well-
known problem of Stable Allocation in Content Delivery Networks. Also other interesting case
studies, from the realm of collective-adaptive systems, are considered. We also illustrate the
expressive power of attribute-based communication by showing the natural encoding of other
existing communication paradigms into AbC.},
archivePrefix = {arXiv},
arxivId = {1711.06092},
author = {Alrahman, Yehia Abd and {De Nicola}, Rocco and Loreti, Michele},
eprint = {1711.06092},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1711.06092.pdf:pdf},
url = {http://arxiv.org/abs/1711.06092},
year = {2017}
}
@article{Nishimura1998,
abstract = {A number of different eye disorders with the presence of early-onset glaucoma as a
component of the phenotype have been mapped to human chromosome 6p25. These disorders
have been postulated to be either allelic to each other or associated with a cluster of tightly linked
genes. We have identified two primary congenital glaucoma (PCG) patients with chromosomal
anomalies involving 6p25. In order to identify a gene involved in PCG, the chromosomal breakpoints
in a patient with a balanced translocation between 6p25 and 13q22 were cloned. Cloning of the
6p25 breakpoint led to the identification of two candidate genes based on proximity to the
breakpoint. One of these, FKHL7, encoding a forkhead transcription factor, is in close proximity to
the breakpoint in the balanced translocation patient and is deleted in a second PCG patient with
partial 6p monosomy. Furthermore, FKHL7 was found to harbour mutations in patients diagnosed
with Rieger anomaly (RA), Axenfeld anomaly (AA) and iris hypoplasia (IH). This study demonstrates
that mutations in FKHL7 cause a spectrum of glaucoma phenotypes.},
author = {Nishimura, D Y and Swiderski, R E and Alward, W L and Searby, C C and Patil, S R and
Bennet, S R and Kanis, A B and Gastier, J M and Stone, E M and Sheffield, V C},
doi = {10.1038/493},
issn = {1061-4036},
number = {2},
pages = {140--147},
pmid = {9620769},
title = {{The forkhead transcription factor gene FKHL7 is responsible for glaucoma phenotypes which
map to 6p25}},
volume = {19},
year = {1998}
}
@article{Choudhary2015,
pages = {8--14},
title = {{ANN Glaucoma Detection using Cup-to-Disk Ratio and Neuroretinal Rim}},
volume = {111},
year = {2015}
}
@article{Sharma2013a,
abstract = {We present a data driven algorithm for equivalence checking of two loops. The algorithm
infers simulation relations using data from test runs. Once a candidate simulation relation has been
obtained, off-the-shelf SMT solvers are used to check whether the simulation relation actually holds.
The algorithm is sound: insufficient data will cause the proof to fail. We demonstrate a prototype
implementation, called DDEC, of our algorithm, which is the first sound equivalence checker for
loops written in x86 assembly.},
author = {Sharma, Rahul and Schkufza, Eric and Churchill, Berkeley and Aiken, Alex},
doi = {10.1145/2544173.2509509},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}12.pdf:pdf},
isbn = {9781450323741},
issn = {03621340},
number = {10},
pages = {391--406},
title = {{Data-Driven Equivalence Checking}},
volume = {48},
year = {2013}
}
@article{Mansour2015,
abstract = {In this paper, a new strategy of observation and analysis of the ophthalmologic images in
order to detect the glaucoma is presented. In fact, we have treated the imagery applied to the
Retina and we were able to develop some algorithm of digital image treatment, and the motif
analysis techniques for the detection of the optic disc. And to do this, we have exploited the
pretreatment of the image techniques such as morphological filters to improve the quality of the
image and the Canny filter for the edge detection. Under the framework of the detection of circular
shapes, we have opted the use of Algorithm which is the most famous detector of shapes and
Hough Transform to detect the Papilla and Excavation. All these images will be applied on the
fundus in order to create software illustrating some techniques of the image treatment, capable
of determining precisely the relationship between the excavation diameter and the Papilla
diameter which is a determining factor in the diagnosis of the Glaucoma. Copyright {\textcopyright}
Research Institute for Intelligent Computer Systems, 2015. All rights reserved},
author = {Mansour, Maroua Ben and Mlouhi, Yosra and Jabri, Imed and Battikh, Tahar and Maalej,
Lotfi and Lakhoua, Mohamed Najeh},
number = {January},
pages = {1--7},
title = {{An image-processing technique for glaucoma detection on the basis of ophthalmic images}},
volume = {14},
year = {2015}
}
@article{Yang2019a,
author = {Yang, Kai and Tian, Cong and Zhang, Nan and Duan, Zhenhua and Du, Hongwei},
doi = {10.1007/s10878-019-00389-y},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {1573-2886},
year = {2019}
}
@article{Feng2017PALOMA,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Feng2017.pdf:pdf},
title = {{The Process Algebra for Located Markovian Agents and Scalable Analysis Techniques for the
Modelling of Collective Adaptive Systems}}
}
@article{David2015a,
author = {David, Alexandre and Jensen, Peter Gj{\o}l and Larsen, Kim Guldstrand and Taankvist,
Jakob Haahr},
doi = {10.1007/978-3-662-46681-0},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/david2015{\_}2.pdf:pdf},
isbn = {9783662466810},
pages = {206--211},
year = {2015}
}
@book{Symposium2013,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2013{\_}Book{\_}.pdf:pdf},
isbn = {9783319024431},
number = {October},
year = {2013}
}
@article{Ma2015,
abstract = {The nervous system is composed of a large number of neurons, and the electrical
activities of neurons can present multiple modes during the signal transmission between neurons by
changing intrinsic bifurcation parameters or under appropriate external forcing. In this review, the
dynamics for neuron, neuronal network is introduced, for example, the mode transition in electrical
activity, functional role of autapse connection, bifurcation verification in biological experiments,
interaction between neuron and astrocyte, noise effect, coherence resonance, pattern formation
and selection in network of neurons. Finally, some open problems in this field such as
electromagnetic radiation on electrical activities of neuron, energy consumption in neurons are
presented.},
doi = {10.1007/s11431-015-5961-6},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/s11431-015-5961-6.pdf:pdf},
issn = {1862281X},
number = {12},
pages = {2038--2045},
volume = {58},
year = {2015}
}
@article{Budinsky2005,
issn = {1044789X},
number = {8},
pages = {28--32},
volume = {30},
year = {2005}
}
@article{Zambonelli2011,
abstract = {Here we present the overall objectives and approach of the SAPERE ("Self-aware
Pervasive Service Ecosystems") project, focussed on the development of a highly-innovative nature-
inspired framework, suited for the decentralized deployment, execution, and management, of self-
aware and adaptive pervasive services in future network scenarios. {\textcopyright} Selection and
peer-review under responsibility of FET11 conference organizers and published by Elsevier B.V.},
author = {Zambonelli, Franco and Castelli, Gabriella and Ferrari, Laura and Mamei, Marco and Rosi,
Alberto and {Di Marzo}, Giovanna and Risoldi, Matteo and Tchao, Akla Esso and Dobson, Simon and
Stevenson, Graeme and Ye, Juan and Nardini, Elena and Omicini, Andrea and Montagna, Sara and
Viroli, Mirko and Ferscha, Alois and Maschek, Sascha and Wally, Bernhard},
doi = {10.1016/j.procs.2011.09.006},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/zambonelli2011.pdf:pdf},
issn = {18770509},
pages = {197--199},
volume = {7},
year = {2011}
}
@book{Salama2004,
author = {{سالمة. م}},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/advanced-formal-verification-
2004.pdf:pdf},
isbn = {1402025300}
}
@article{Chen2013,
abstract = {We present automatic verification techniques for the modelling and analysis of
probabilistic systems that incorporate competitive behaviour. These systems are modelled as turn-
based stochastic multi-player games, in which the players can either collaborate or compete in order
to achieve a particular goal. We define a temporal logic called rPATL for expressing quantitative
properties of stochastic multi-player games. This logic allows us to reason about the collective ability
of a set of players to achieve a goal relating to the probability of an event's occurrence or the
expected amount of cost/reward accumulated. We give an algorithm for verifying properties
expressed in this logic and implement the techniques in a probabilistic model checker, as an
extension of the PRISM tool. We demonstrate the applicability and efficiency of our methods by
deploying them to analyse and detect potential weaknesses in a variety of large case studies,
including algorithms for energy management in Microgrids and collective decision making for
autonomous systems. {\textcopyright} 2013 Springer Science+Business Media New York.},
author = {Chen, Taolue and Forejt, Vojt{\v{e}}ch and Kwiatkowska, Marta and Parker, David and
Simaitis, Aistis},
doi = {10.1007/s10703-013-0183-7},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/aistis-simaitis-dphil-thesis.pdf:pdf},
issn = {09259856},
number = {1},
pages = {61--92},
volume = {43},
year = {2013}
}
@article{Camara2015,
author = {C{\'{a}}mara, Javier and Garlan, David and Schmerl, Bradley and Pandey, Ashutosh},
doi = {10.1145/2695664.2695680},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9781450331968},
pages = {428--435},
title = {{Optimal planning for architecture-based self-adaptation via model checking of stochastic
games}},
volume = {13-17-Apri},
year = {2015}
}
@article{Bejjani2020,
abstract = {Cognitive control refers to the use of internal goals to guide how we process stimuli, and
control can be applied proactively (in anticipation of a stimulus) or reactively (once that stimulus has
been presented). The application of control can be guided by memory; for instance, people typically
learn to adjust their level of attentional selectivity to changing task statistics, such as different
frequencies of hard and easy trials in the Stroop task. This type of control-learning is highly adaptive,
but its boundary conditions are currently not well understood. In the present study, we assessed
how the presence of performance feedback shapes control-learning in the context of item-specific
(reactive control, Experiments 1a and 1b) and list-wide (proactive control, Experiments 2a and 2b)
proportion of congruency manipulations in a Stroop protocol. We found that performance feedback
did not alter the modulation of the Stroop effect by item-specific cueing, but did enhance the
modulation of the Stroop effect by a list-wide context. Performance feedback thus selectively
promoted proactive, but not reactive, adaptation of cognitive control. These results have important
implications for experimental designs, potential psychiatric treatment, and theoretical accounts of
the mechanisms underlying control-learning. (PsycInfo Database Record (c) 2020 APA, all rights
reserved).},
doi = {10.1037/xhp0000720},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/BejjaniTanEgner{\_}FBcontrollearning{\
_}LMC{\_}final.pdf:pdf},
issn = {19391277},
number = {4},
pages = {369--387},
pmid = {32223290},
title = {{Performance feedback promotes proactive but not reactive adaptation of conflict-control}},
volume = {46},
year = {2020}
}
@article{Dunne2007,
doi = {10.1016/j.artint.2007.05.001},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0004370207000793-
main.pdf:pdf},
pages = {619--641},
volume = {171},
year = {2007}
}
@article{Whitley1993,
pages = {45--62},
year = {1993}
}
@article{Mezghani2017,
abstract = {Due to its abilities to capture real-time data concerning the physical world, the Internet
of Things (IoT) phenomenon is fast gaining momentum in different applicative domains. Its benefits
are not limited to connecting things, but lean on how the collected data are transformed into
insights and interact with domain experts for better decisions. Nonetheless, a set of challenges
including the complexity of IoT-based systems and the management of the ensuing big and
heterogeneous data and as well as the system scalability need to be addressed for the development
of flexible smart IoT-based systems that drive the business decision-making. Consequently, inspired
from the human nervous system and cognitive abilities, we have proposed a set of autonomic
cognitive design patterns that alleviate the design complexity of smart IoT-based systems, while
taking into consideration big data and scalability management. The ultimate goal of these patterns is
providing generic and reusable solutions for elaborating flexible smart IoT-based systems able to
perceive the collected data and provide decisions. These patterns are articulated within a model-
driven methodology that we have proposed to incrementally refine the system functional and
nonfunctional requirements. Following the proposed methodology, we have combined and
instantiated a set of patterns for developing a flexible cognitive monitoring system to manage
patients' health based on heterogeneous wearable devices. We have highlighted the gained
flexibility and demonstrated the ability of our system to integrate and process heterogeneous large-
scale data streams. Finally, we have evaluated the system performance in terms of response time
and scalability management.},
doi = {10.1109/tetci.2017.2699218},
number = {3},
pages = {224--234},
title = {{A Model-Driven Methodology for the Design of Autonomic and Cognitive IoT-Based Systems:
Application to Healthcare}},
volume = {1},
year = {2017}
}
@article{Eum2015,
author = {Eum, Hyukmin and Bae, Jaeyun and Yoon, Changyong and Kim, Euntai},
number = {4},
pages = {251--259},
title = {{Ship Detection Using Edge-Based Segmentation and Histogram of Oriented Gradient with
Ship Size Ratio}},
volume = {15},
year = {2015}
}
@article{Zhang2015,
author = {Zhang, Yudong and Wang, Shuihua and Dong, Zhengchao and Phillip, Preetha and Ji, Genlin
and Yang, Jiquan},
doi = {10.2528/PIER15040602},
pages = {41--58},
title = {{Pathological Brain Detection in Magnetic Resonance Imaging Scanning by Wavelet Entropy
and Hybridization of Biogeography-Based Optimization and Particle Swarm Optimization}},
url = {http://www.jpier.org/PIER/pier152/04.15040602.pdf},
volume = {152},
year = {2015}
}
@article{Rata2016,
author = {Rata, Scott and {\~{A}}, Helfrid Hochegger and Nadia, H},
doi = {10.1002/bies.201600057},
pages = {1--17},
title = {. 1) 2)},
year = {2016}
}
@article{Autili2017,
doi = {10.1016/j.scico.2017.10.010},
file = {:C$\backslash$:/Users/Asus/Downloads/autili2017.pdf:pdf},
issn = {0167-6423},
pages = {1--27},
volume = {1},
year = {2017}
}
@article{Matthias,
author = {H{\"o}lzl, Matthias and Koch, Nora and Puviani, Mariachiara and Wirsing, Martin},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/hoelzl-et-al-ascensbook{\
_}ch141.pdf:pdf},
pages = {1--30},
title = {{The Ensemble Development Life Cycle and Best Practises for Collective Autonomic Systems}},
volume = {257414}
}
@article{Systems2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-1-4615-3190-6{\_}3.pdf:pdf},
pages = {25--26},
year = {2015}
}
@article{Saini2014,
volume = {4},
number = {14},
pages = {1445--1452},
year = {2014}
}
@book{Pham2006,
abstract = {The analysis of the reliability and availability of power plants is frequently based on
simple indexes that do not take into account the criticality of some failures used for availability
analysis. This criticality should be evaluated based on concepts of reliability which consider the effect
of a component failure on the performance of the entire plant. System reliability analysis tools
provide a root-cause analysis leading to the improvement of the plant maintenance plan. Taking in
view that the power plant performance can be evaluated not only based on thermodynamic related
indexes, such as heat-rate, Thermal Power Plant Performance Analysis focuses on the presentation
of reliability-based tools used to define performance of complex systems and introduces the basic
concepts of reliability, maintainability and risk analysis aiming at their application as tools for power
plant performance improvement, including:{\textperiodcentered} selection of critical
equipment and components,{\textperiodcentered} definition of maintenance plans, mainly for
auxiliary systems, and {\textperiodcentered} execution of decision analysis based on risk
concepts.The comprehensive presentation of each analysis allows future application of the
methodology making Thermal Power Plant Performance Analysis a key resource for undergraduate
and postgraduate students in mechanical and nuclear engineering.},
doi = {10.1007/978-1-4471-4588-2},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/zio2012.pdf:pdf},
isbn = {9781846283246},
pages = {352},
publisher = {Springer},
year = {2006}
}
@article{K2011a,
author = {K{\"o}ksal, Ali Sinan and Kuncak, Viktor and Suter, Philippe},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-642-22438-6{\_}30.pdf:pdf},
pages = {400--401},
year = {2011}
}
@article{Markey2015,
author = {Markey, K and Mckinnon, Stuart J and {Rylander, III}, H Grady},
doi = {10.1097/IJG.0b013e31829ea2a7},
pages = {1--23},
title = {diagnosis},
volume = {0},
year = {2015}
}
@article{Bures2017,
author = {Bures, Tomas and Gerostathopoulos, Ilias and Hnetynka, Petr and Krijt, Filip and Vinarek,
Jiri and Kofron, Jan},
file =
{:C$\backslash$:/Users/Asus/Downloads/Documents/0025fc9a20881955b3d38019bb765c2c6e96.p
df:pdf},
number = {Ld},
year = {2017}
}
@article{Zhang2019,
author = {Zhang, Ying and Chakrabarty, Krishnendu and Peng, Zebo and Rezine, Ahmed and Li,
Huawei and Eles, Petru and Jiang, Jianhui},
doi = {10.1109/tcad.2018.2890695},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/zhang2019.pdf:pdf},
issn = {0278-0070},
number = {c},
pages = {1--1},
publisher = {IEEE},
title = {{Software-based Self-Testing using Bounded Model Checking for Out-of-Order Superscalar
Processors}},
volume = {PP},
year = {2019}
}
@article{Zhong2020,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/zhong2020.pdf:pdf},
isbn = {9781728181561},
pages = {441--448},
year = {2020}
}
@article{Deshmukh2015,
author = {Deshmukh, Anup V. and Patil, Tejas G. and Patankar, Sanika S. and Kulkarni, Jayant V.},
doi = {10.1109/ICACCI.2015.7275850},
isbn = {9781479987917},
pages = {1652--1655},
year = {2015}
}
@article{Pezze1995,
abstract = {The problem of analyzing concurrent systems has been investigated by many researchers,
and several solutions have been proposed. Among the proposed techniques, reachability analysis—
systematic enumeration of reachable states in a finite-state model—is attractive because it is
conceptually simple and relatively straightforward to automate and can be used in conjunction with
model-checking procedures to check for application-specific as well as general properties. This
article shows that the nature of the translation from source code to a modeling formalism is of
greater practical importance than the underlying formalism. Features identified as pragmatically
important are the representation of internal choice, selection of a dynamic or static matching rule,
and the ease of applying reductions. Since combinatorial explosion is the primary impediment to
application of reachability analysis, a particular concern in choosing a model is facilitating divide-
and-conquer analysis of large programs. Recently, much interest in finite-state verification systems
has centered on algebraic theories of concurrency. Algebraic structure can be used to decompose
reachability analysis based on a flowgraph model. The semantic equivalence of graph and Petri net-
based models suggests that one ought to be able to apply a similar strategy for decomposing Petri
nets. We describe how category-theoretic treatments of Petri nets provide a basis for decomposition
of Petri net reachability analysis. {\textcopyright} 1995, ACM. All rights reserved.},
doi = {10.1145/210134.210180},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {1049331X},
number = {2},
pages = {171--213},
volume = {4},
year = {1995}
}
@article{Asarin2000,
abstract = {In this paper we describe an experimental system called d/dt for approximating
reachable states for hybrid systems whose continuous dynamics is defined by linear differential
equations. We use an approximation algorithm whose accumulation of errors during the continuous
evolution is much smaller than in previously-used methods. The d/dt system can, so far, treat non-
trivial continuous systems, hybrid systems, convex differential inclusions and controller synthesis
problems.},
author = {Asarin, Eugene and Bournez, Olivier and Dang, Thao and Maler, Oded},
doi = {10.1007/3-540-46430-1_6},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}15.pdf:pdf},
pages = {20--31},
volume = {5},
year = {2000}
}
@article{Neilsen,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/FEC4065.pdf:pdf},
isbn = {1601324774}
}
@article{Ma2013,
abstract = {Principal component analysis (PCA) is a classical dimension reduction method which
projects data onto the principal subspace spanned by the leading eigenvectors of the covariance
matrix. However, it behaves poorly when the number of features {\$}p{\$} is comparable to, or even
much larger than, the sample size {\$}n{\$}. In this paper, we propose a new iterative thresholding
approach for estimating principal subspaces in the setting where the leading eigenvectors are
sparse. Under a spiked covariance model, we find that the new approach recovers the principal
subspace and leading eigenvectors consistently, and even optimally, in a range of high-dimensional
sparse settings. Simulated examples also demonstrate its competitive performance.},
archivePrefix = {arXiv},
arxivId = {arXiv:1112.2432v2},
doi = {10.1214/13-AOS1097},
eprint = {1112.2432v2},
issn = {00905364},
number = {2},
pages = {772--801},
volume = {41},
year = {2013}
}
@article{Choi2001,
abstract = {In much of the current literature on supply chain management, supply networks are
recognized as a system. In this paper, we take this observation to the next level by arguing the need
to recognize supply networks as a complex adaptive system (CAS). We propose that many supply
networks emerge rather than result from purposeful design by a singular entity. Most supply chain
management literature emphasizes negative feedback for purposes of control; however, the
emergent patterns in a supply network can much better be managed through positive feedback,
which allows for autonomous action. Imposing too much control detracts from innovation and
flexibility; conversely, allowing too much emergence can undermine managerial predictability and
work routines. Therefore, when managing supply networks, managers must appropriately balance
how much to control and how much to let emerge. {\textcopyright} 2001 Elsevier Science B.V.},
doi = {10.1016/S0272-6963(00)00068-1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Supply{\_}networks{\_}and{\_}complex{\
_}adaptive{\_}sys.pdf:pdf},
issn = {02726963},
number = {3},
pages = {351--366},
title = {{Supply networks and complex adaptive systems: Control versus emergence}},
volume = {19},
year = {2001}
}
@article{Ciancia2016a,
abstract = {Spatial aspects of computation are becoming increasingly relevant in Computer Science,
especially in the field of collective adaptive systems and when dealing with systems distributed in
physical space. Traditional formal verification techniques are well suited to analyse the temporal
evolution of programs; however, properties of space are typically not taken into account explicitly.
We present a topology-based approach to formal verification of spatial properties depending upon
physical space. We define an appropriate logic, stemming from the tradition of topological
interpretations of modal logics, dating back to earlier logicians such as Tarski, where modalities
describe neighbourhood. We lift the topological definitions to the more general setting of closure
spaces, also encompassing discrete, graph-based structures. We extend the framework with a spatial
surrounded operator, a propagation operator and with some collective operators. The latter are
interpreted over arbitrary sets of points instead of individual points in space. We define efficient
model checking procedures, both for the individual and the collective spatial fragments of the logic
and provide a proof-of-concept tool.},
author = {Ciancia, Vincenzo and Latella, Diego and Loreti, Michele and Massink, Mieke},
doi = {10.2168/LMCS-12(4:2)2016},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1609.06513.pdf:pdf},
issn = {18605974},
number = {4},
pages = {1--51},
volume = {12},
year = {2016}
}
@article{Data2009,
author = {Data, Twente and Twente, Management and Management, Data and Xml, Workshop and
Workshop, Databases and Databases, X M L and Retrieval, Information},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/4288{\_}ECMDA2009-Tools{\
_}Consultancy.pdf:pdf},
title = {{Twente Data M Workshop on Fifth European Conference on Data Management Data TDM
2004}},
year = {2009}
}
@article{Truong2014,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/BPCAS2014-Truong.pdf:pdf},
title = {{Context-aware Programming for Hybrid and Diversity-aware Collective Adaptive Systems}},
year = {2014}
}
@article{Andre2020,
abstract = {Service composition aims at achieving a business goal by composing existing service-
based applications or components. The response time of a service is crucial, especially in time-critical
business environments, which is often stated as a clause in service-level agreements between
service providers and service users. To meet the guaranteed response time requirement of a
composite service, it is important to select a feasible set of component services such that their
response time will collectively satisfy the response time requirement of the composite service. In
this work, we use the BPEL modeling language that aims at specifying Web services. We extend it
with timing parameters and equip it with a formal semantics. Then, we propose a fully automated
approach to synthesize the response time requirement of component services modeled using BPEL,
in the form of a constraint on the local response times. The synthesized requirement will guarantee
the satisfaction of the global response time requirement, statically or dynamically. We implemented
our work into a tool, Selamat and performed several experiments to evaluate the validity of our
approach.},
author = {Andr{\'{e}}, {\'{E}}tienne and Tan, Tian Huat and Chen, Manman and Liu, Shuang and Sun,
Jun and Liu, Yang and Dong, Jin Song},
doi = {10.1007/s10270-020-00787-5},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {16191374},
year = {2020}
}
@inproceedings{Mohsin2019,
author = {Mohsin, Ahmad and Janjua, Naeem Khalid and Islam, Syed M.S. and {Graciano Neto},
Valdemar Vicente},
booktitle = {2019 14th Annual Conference System of Systems Engineering, SoSE 2019},
doi = {10.1109/SYSOSE.2019.8753877},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/08753877.pdf:pdf},
isbn = {9781728104577},
pages = {49--56},
publisher = {IEEE},
title = {{Modeling approaches for system-of-systems dynamic architecture: Overview, taxonomy and
future prospects}},
year = {2019}
}
@article{Kavitha2015,
doi = {10.3980/j.issn.2222-3959.2015.06.33},
isbn = {2222-3959},
number = {6},
pages = {1255--1257},
pmid = {26682184},
title = {{Assessment of glaucoma using extreme learning machine and fractal feature analysis}},
volume = {8},
year = {2015}
}
@inproceedings{Gulwani2010,
abstract = {Program Synthesis, which is the task of discovering programs that realize user intent, can
be useful in several scenarios: enabling people with no programming background to develop utility
programs, helping regular programmers automatically discover tricky/mundane details, program
understanding, discovery of new algorithms, and even teaching. This paper describes three key
dimensions in program synthesis: expression of user intent, space of programs over which to search,
and the search technique. These concepts are illustrated by brief description of various program
synthesis projects that target synthesis of a wide variety of programs such as standard
undergraduate textbook algorithms (e.g., sorting, dynamic programming), program inverses (e.g.,
decoders, deserializers), bitvector manipulation routines, deobfuscated programs, graph algorithms,
text-manipulating routines, mutual exclusion algorithms, etc. {\textcopyright} 2010 ACM.},
author = {Gulwani, Sumit},
doi = {10.1145/1836089.1836091},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1836089.1836091.pdf:pdf},
isbn = {9781450301329},
booktitle = {PPDP'10 - Proceedings of the 2010 Symposium on Principles and Practice of Declarative
Programming},
keywords = {Belief propagation,Deductive synthesis,Genetic programming,Inductive
synthesis,Machine learning,Probabilistic inference,Programming by demonstration,Programming by
examples,SAT solving,SMT solving},
pages = {13--24},
title = {{Dimensions in Program Synthesis}},
year = {2010}
}
@article{Gyftopoulos2016,
abstract = {{\textcopyright} 2016 ACM. DeGroot learning is a model of opinion diffusion and
formation in a social network of individuals. We examine the behavior of the DeGroot learning
model when external strategic players that aim to bias the final consensus of the social network, are
introduced to the model. More precisely, we consider the case of a single decision maker and the
case of two competing external players, and a fixed number of possible Influence actions on each
individual. When studying the Influence problems, we focus on the stochastic processes underlying
the solution of DeGroot problems. In case of one decision maker, the analysis of the DeGroot model
leads to the formation of a Markov Decision Process (MDP) and in the case of two external
competing players the model is reduced to a Stochastic Game (SG). Since such models are heavily
used in probabilistic model checking we apply tools of the field to solve them. Preliminary
experimental results confirm the viability of our approach, which relies on the common
mathematical foundations of the DeGroot problems and probabilistic model checking.},
doi = {10.1145/3003733.3003780},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}26.pdf:pdf},
isbn = {9781450347891},
title = {{Solving influence problems on the DeGroot model with a probabilistic model checking tool}},
year = {2016}
}
@article{Ardeshir-Larijani2018,
doi = {10.1145/3231597},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ardeshir-larijani2018.pdf:pdf},
issn = {15293785},
pages = {1--32},
volume = {19},
year = {2018}
}
@article{Srivastava2010,
abstract = {This paper describes a novel technique for the synthesis of imperative programs.
Automated program synthesis has the potential to make programming and the design of systems
easier by allowing programs to be specified at a higher-level than executable code. In our approach,
which we call proof-theoretic synthesis, the user provides an input-output functional specification, a
description of the atomic operations in the programming language, and a specification of the
synthesized program's looping structure, allowed stack space, and bound on usage of certain
operations. Our technique synthesizes a program, if there exists one, that meets the input-output
specification and uses only the given resources. The insight behind our approach is to interpret
program synthesis as generalized program verification, which allows us to bring verification tools
and techniques to program synthesis. Our synthesis algorithm works by creating a program with
unknown statements, guards, inductive invariants, and ranking functions. It then generates
constraints that relate the unknowns and enforces three kinds of requirements: partial correctness,
loop termination, and well-formedness conditions on program guards. We formalize the
requirements that program verification tools must meet to solve these constraint and use tools from
prior work as our synthesizers. We demonstrate the feasibility of the proposed approach by
synthesizing programs in three different domains: arithmetic, sorting, and dynamic programming.
Using verification tools that we previously built in the VS3 project we are able to synthesize
programs for complicated arithmetic algorithms including Strassen's matrix multiplication and
Bresenham's line drawing; several sorting algorithms; and several dynamic programming algorithms.
For these programs, the median time for synthesis is 14 seconds, and the ratio of synthesis to
verification time ranges between 1x to 92x (with an median of 7x), illustrating the potential of the
approach. Copyright {\textcopyright} 2010 ACM.},
author = {Srivastava, Saurabh and Gulwani, Sumit and Foster, Jeffrey S.},
doi = {10.1145/1706299.1706337},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/popl10-synthesis.pdf:pdf},
isbn = {9781605584799},
issn = {07308566},
pages = {313--326},
title = {{From Program Verification to Program Synthesis}},
year = {2010}
}
@article{Series2018,
doi = {10.1088/1757-899X/288/1/012153},
file = {:C$\backslash$:/Users/Asus/Downloads/Gerhana{\_}2018{\_}IOP{\_}Conf.{\_}Ser.{\%}3A{\
_}Mater.{\_}Sci.{\_}Eng.{\_}288{\_}012153.pdf:pdf},
title = {{Implementation of Nearest Neighbor using HSV to Identify Skin Disease Implementation of
Nearest Neighbor using HSV to Identify Skin Disease}},
year = {2018}
}
@article{Bucchiarone2019,
abstract = {Modern software systems are becoming more and more socio-technical systems
composed of distributed and heterogeneous agents from a mixture of people, their environment,
and software components. These systems operate under continuous perturbations due to the
unpredicted behaviors of people and the occurrence of exogenous changes in the environment. In
this article, we introduce a notion of ensembles for which, systems with collective adaptability can
be built as an emergent aggregation of autonomous and self-adaptive agents. Building upon this
notion of ensemble, we present a distributed adaptation approach for systems composed by
ensembles: collections of agents with their respective roles and goals. In these systems, adaptation
is triggered by the run-time occurrence of an extraordinary circumstance, called issue. It is handled
by an issue resolution process that involves agents affected by the issue to collaboratively adapt with
minimal impact on their own preferences. Central to our approach is the implementation of a
collective adaptation engine (CAE) able to solve issues in a collective fashion. The approach is
instantiated in the context of a smart mobility scenario through which its main features are
illustrated. To demonstrate the approach in action and evaluate it, we exploit the DeMOCAS
framework, simulating the operation of an urban mobility scenario. We have executed a set of
experiments with the goal to show how the CAE performs in terms of feasibility and scalability. With
this approach, we are able to demonstrate how collective adaptation opens up new possibilities for
tackling urban mobility challenges making it more sustainable respect to selfish and competitive
behaviours.},
doi = {10.1145/3355562},
file = {:C$\backslash$:/Users/Asus/Downloads/bucchiarone2019.pdf:pdf},
issn = {15564703},
number = {2},
title = {{Collective adaptation through multi-agents ensembles: The case of smart urban mobility}},
volume = {14},
year = {2019}
}
@article{B2015a,
abstract = {This Festschrift volume contains 28 refereed papers including personal memories, essays,
and regular research papers by close collaborators and friends of Jos{\'{e}} Meseguer to honor him
on the occasion of his 65th birthday.},
doi = {10.1007/978-3-319-23165-5},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/logic-rewriting-and-concurrency-2015-
342-371.pdf:pdf},
isbn = {978-3-319-23164-8},
pages = {475--492},
url = {http://link.springer.com/10.1007/978-3-319-23165-5},
volume = {9200},
year = {2015}
}
@article{Wang2013,
abstract = {Particle Swarm Optimization (PSO) has shown an effective performance for solving
variant benchmark and real-world optimization problems. However, it suffers from premature
convergence because of quick losing of diversity. In order to enhance its performance, this paper
proposes a hybrid PSO algorithm, called DNSPSO, which employs a diversity enhancing mechanism
and neighborhood search strategies to achieve a trade-off between exploration and exploitation
abilities. A comprehensive experimental study is conducted on a set of benchmark functions,
including rotated multimodal and shifted high-dimensional problems. Comparison results show that
DNSPSO obtains a promising performance on the majority of the test problems. {\textcopyright}
2012 Elsevier Inc. All rights reserved.},
author = {Wang, Hui and Sun, Hui and Li, Changhe and Rahnamayan, Shahryar and Pan, Jeng
Shyang},
doi = {10.1016/j.ins.2012.10.012},
isbn = {0020-0255},
issn = {00200255},
pages = {119--135},
url = {http://dx.doi.org/10.1016/j.ins.2012.10.012},
volume = {223},
year = {2013}
}
@article{Qasim2015,
abstract = {In this study we have formally specified and verified the actions of communicating real-
time software agents (RTAgents). Software agents are expected to work autonomously and deal with
unfamiliar situations astutely. Achieving cent percent test cases coverage for these agents has
always been a problem due to limited resources. Also a high degree of dependability and
predictability is expected from real-time software agents. In this research we have used Timed-Arc
Petri Net's for formal specification and verification. Formal specification of e-agents has been done
in the past using Linear Temporal Logic (LTL) but we believe that Timed-Arc Petri Net's being more
visually expressive provides a richer framework for such formalism. A case study of Stock Market
System (SMS) based on Real Time Multi Agent System framework (RTMAS) using Timed-Arc Petri
Net's is taken to illustrate the proposed modeling approach. The model was verified used AF, AG, EG,
and EF fragments of Timed Computational Tree Logic (TCTL) via translations to timed automata.},
author = {Qasim, Awais and Kazmi, Syed Asad Raza and Fakhir, Ilyas},
doi = {10.4316/AECE.2015.03010},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/aece{\_}2015{\_}3{\_}10.pdf:pdf},
issn = {18447600},
number = {3},
pages = {73--78},
title = {{Formal specification and verification of real-time multi-agent systems using timed-arc petri
nets}},
volume = {15},
year = {2015}
}
@article{Kwiatkowska2004,
abstract = {This paper gives a brief overview of version 2.0 of PRISM, a tool for the automatic formal
verification of probabilistic systems, and some of the case studies to which it has already been
applied. {\textcopyright} 2004 IEEE.},
doi = {10.1109/QEST.2004.1348048},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/PRISM{\_}20{\_}A{\_}tool{\_}for{\
_}probabilistic{\_}model{\_}checking.pdf:pdf},
isbn = {0769521851},
pages = {322--323},
title = {{PRISM 2.0: A Tool for Probabilistic Model Checking}},
year = {2004}
}
@article{Holz2007,
doi = {10.1007/978-3-642-33932-5_7},
isbn = {978-3-642-33932-5},
journal = {Advances in Intelligent Systems and Computing},
pages = {61--73},
title = {{Fast Range Image Segmentation and Smoothing using Approximate Surface Reconstruction
and Region Growing}},
year = {2007}
}
@inproceedings{Zhang2016,
abstract = {We propose an assume-guarantee reasoning (AGR) framework for verification problem of
a system with two components modeled by Markov Decision Process (MDP) and Partially Observable
MDP (POMDP), respectively. MDP-POMDP model describes system's sensing, actuation and
environment uncertainties, which can be used in the modeling of systems containing different
subsystems, e.g., human-robot collaboration process. While the verification problem of MDP-
POMDP asks whether or not a specification can be satisfied by the regulated behavior under certain
control policies, our main contribution in this paper is to present and prove a sound and complete
AGR rule based on POMDP strong simulation relation to reduce the verification complexity.},
doi = {10.1109/CDC.2016.7798365},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/zhang2016.pdf:pdf},
isbn = {9781509018376},
booktitle = {2016 IEEE 55th Conference on Decision and Control, CDC 2016},
pages = {795--800},
year = {2016}
}
@inproceedings{Wang2018,
abstract = {In the multi-robot systems, a great challenge is the development of algorithms to
dynamically assign positions to robots and to form a desired formation, not relying on a priori
assignment information. In this paper, to address this problem, we propose a dynamic role
assignment formation control algorithm based on an improved Hungarian method that combines
role assignment and formation synthesis together. This algorithm can synchronously finish role
assignment and decide where to form the predefined formation with the lowest cost. And the
improved Hungarian method guarantees that no matter the number of robots and the number of
positions are equal or not the algorithm can always achieve the desired formation. We evaluate the
performance of our algorithm through a series of simulation experiments and comparison with other
algorithms. The results prove that the presented algorithm is flexible, accurate and efficient.},
doi = {10.1109/SmartWorld.2018.00135},
file = {:C$\backslash$:/Users/Asus/Downloads/wang2018.pdf:pdf},
isbn = {9781538693803},
booktitle = {Proceedings - 2018 IEEE SmartWorld, Ubiquitous Intelligence and Computing, Advanced
and Trusted Computing, Scalable Computing and Communications, Cloud and Big Data Computing,
Internet of People and Smart City Innovations, SmartWorld/UIC/ATC/ScalCom/CBDCo},
pages = {687--696},
publisher = {IEEE},
title = {{A dynamic role assignment formation control algorithm based on hungarian method}},
year = {2018}
}
@article{Ferscha2011,
abstract = {In 2011, now 20 years after M. Weiser's "The Computer for the 21st Century" (1991), the
vision impacting the evolution of Pervasive Computing is still the claim for an intuitive, unobtrusive
and distraction free interaction with omnipresent, technology-rich environments. In an attempt of
bringing interaction "back to the real world" after an era of keyboard and screen interaction
(Personal computing), computers are being understood as secondary artefacts, embedded and
operating in the background, whereas the set of all physical objects present in the environment are
understood as the primary artefacts, the "interface". Over it's more than two decades of evolution,
the field has been undergoing three generations of research challenges fertilizing Pervasive
Computing: The first generation aiming towards autonomic systems and their adaptation was driven
by the availability of technology to connect literally everything to everything (Connectedness, 1991-
2005). The second generation inherited from the upcoming context recognition and knowledge
processing technologies (Awareness, 2000-2007), e.g. context-awareness, self-awareness, resource-
awareness, etc. Finally, a third generation, building upon connectedness and awareness, attempts to
exploit the (ontological) semantics of Pervasive Computing systems, services and interactions (i.e.
giving meaning to situations and actions, and "intelligence" to systems) (Smartness, 2004-). While
Pervasive Computing research has its success in the first, partly also in the second generation, the
third generation is evolving as we speak. The FP7 FET proactive project PANORAMA (FET
proactive/Goal 8.3: Pervasive Adaptation) picked up on the challenge of identifying the new trails of
Pervasive Computing research, involving some 240 of the most distinguished researchers in the field
in a solicitation process that lasted for about three years. The result of this process is manifested in
the Pervasive Adaptation Research Agenda Book (www.perada.eu/research-agenda), which is
presented in this article and the respective fett11 session. {\textcopyright} Selection and peer-review
under responsibility of FET11 conference organizers and published by Elsevier B.V.},
doi = {10.1016/j.procs.2011.12.027},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S1877050911006922-
main.pdf:pdf},
issn = {18770509},
pages = {88--91},
url = {http://dx.doi.org/10.1016/j.procs.2011.12.027},
volume = {7},
year = {2011}
}
@article{SanfordBernhardt2008,
doi = {10.1061/(asce)1076-0342(2008)14:3(253)},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/sanfordbernhardt2008.pdf:pdf},
issn = {1076-0342},
number = {3},
pages = {253--261},
volume = {14},
year = {2008}
}
@article{Varshosaz2013,
abstract = {Software product line engineering (SPLE) enables systematic reuse in development of a
family of related software systems by explicitly defining commonalities and variabilities among the
individual products in the family. Nowadays, SPLE is used in a variety of complex domains such as
avionics and automotive. As such domains include safety critical systems which exhibit probabilistic
behavior, there is a major need for modeling and verification approaches dealing with probabilistic
aspects of systems in the presence of variabilities. In this paper, we introduce a mathematical model,
Discrete Time Markov Chain Family (DTMCF), which compactly represents the probabilistic behavior
of all the products in the product line. We also provide a probabilistic model checking method to
verify DTMCFs against Probabilistic Computation Tree Logic (PCTL) properties. This way, instead of
verifying each product individually, the whole family is model checked at once, resulting in the set of
products satisfying the desired property. This reduces the required cost for model checking by
eliminating redundant processing caused by the commonalities among the products. Copyright 2013
ACM.},
doi = {10.1145/2499777.2500725},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/varshosaz2013.pdf:pdf},
isbn = {9781450323253},
pages = {34--41},
title = {{Discrete time Markov chain families: Modeling and verification of probabilistic software
product lines}},
year = {2013}
}
@inproceedings{Abeywickrama2012,
abstract = {To deal with the increasing complexity and uncertainty of software systems, novel
software engineering models and tools are required to make such systems self-adaptive. As part of
ongoing research, we investigate various models, schemes and mechanisms to model and engineer
self-adaptation in complex software systems. To this end, we have defined SOTA (State of the
Affairs) as a general goal-oriented modeling framework for the analysis and design of self-adaptive
systems. In this paper, by transforming the conceptual SOTA model into an operational one, we
show how SOTA can be an effective tool to perform an early, goal-level, model checking analysis for
adaptive systems. This allows the developers of complex self-adaptive systems to validate the actual
correctness of the self-adaptive requirements at an early stage in the software life-cycle. The
approach is explored and validated using a case study in the area of e-mobility. {\textcopyright} 2012
IEEE.},
doi = {10.1109/ECBS.2012.30},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/abeywickrama2012.pdf:pdf},
isbn = {9780769546643},
booktitle = {Proceedings - 2012 IEEE 19th International Conference and Workshops on Engineering of
Computer-Based Systems, ECBS 2012},
keywords = {goal-oriented requirements engineering,model checking,self-adaptive systems,software
architecture},
pages = {33--42},
year = {2012}
}
@article{Lauriere1978,
doi = {10.1016/0004-3702(78)90029-2},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/0004-3702-2878-2990029-2.pdf:pdf},
issn = {00043702},
number = {1},
pages = {29--127},
title = {{A language and a program for stating and solving combinatorial problems}},
volume = {10},
year = {1978}
}
@article{Kounev2017c,
abstract = {This book provides formal and informal definitions and taxonomies for self-aware
computing systems, and explains how self-aware computing relates to many existing subfields of
computer science, especially software engineering. It describes architectures and algorithms for self-
aware systems as well as the benefits and pitfalls of self-awareness, and reviews much of the latest
relevant research across a wide array of disciplines, including open research challenges. The
chapters of this book are organized into five parts: Introduction, System Architectures, Methods and
Algorithms, Applications and Case Studies, and Outlook. Part I offers an introduction that defines
self-aware computing systems from multiple perspectives, and establishes a formal definition, a
taxonomy and a set of reference scenarios that help to unify the remaining chapters. Next, Part II
explores architectures for self-aware computing systems, such as generic concepts and notations
that allow a wide range of self-aware system architectures to be described and compared with both
isolated and interacting systems. It also reviews the current state of reference architectures,
architectural frameworks, and languages for self-aware systems. Part III focuses on methods and
algorithms for self-aware computing systems by addressing issues pertaining to system design, like
modeling, synthesis and verification. It also examines topics such as adaptation, benchmarks and
metrics. Part IV then presents applications and case studies in various domains including cloud
computing, data centers, cyber-physical systems, and the degree to which self-aware computing
approaches have been adopted within those domains. Lastly, Part V surveys open challenges and
future research directions for self-aware computing systems. It can be used as a handbook for
professionals and researchers working in areas related to self-aware computing, and can also serve
as an advanced textbook for lecturers and postgraduate students studying subjects like advanced
software engineering, autonomic computing, self-adaptive systems, and data-center resource
management. Each chapter is largely self-contained, and offers plenty of references for anyone
wishing to pursue the topic more deeply.},
author = {Kounev, Samuel and Kephart, Jeffrey O. and Milenkoski, Aleksandar and Zhu, Xiaoyun},
doi = {10.1007/978-3-319-47474-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-319-47474-8{\_}7.pdf:pdf},
isbn = {9783319474748},
pages = {1--722},
year = {2017}
}
@article{Wirsing2013,
author = {Wirsing, Martin and H{\"{o}}lzl, Matthias and Tribastone, Mirco and Zambonelli, Franco},
doi = {10.1007/978-3-642-35887-6_1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/WHTZ{\_}FMCO11{\_}121011.pdf:pdf},
isbn = {9783642358869},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {1--24},
year = {2013}
}
@article{Azzopardi2018,
abstract = {Smart contracts present new challenges for runtime verification techniques, due to
features such as immutability of the code and the notion of gas that must be paid for the execution
of code. In this paper we present the runtime verification tool ContractLarva and outline its use in
instrumenting monitors in smart contracts written in Solidity, for the Ethereum blockchain-based
distributed computing platform. We discuss the challenges faced in doing so, and how some of these
can be addressed, using the ERC-20 token standard to illustrate the techniques. We conclude by
proposing a list of open challenges in smart contract and blockchain monitoring.},
author = {Azzopardi, Shaun and Ellul, Joshua and Pace, Gordon J},
doi = {10.1007/978-3-030-03769-7},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/falcone2018.pdf:pdf},
isbn = {978-3-030-03769-7},
pages = {113--137},
url = {http://dx.doi.org/10.1007/978-3-030-03769-7{\_}8},
volume = {1},
year = {2018}
}
@article{Kamali2017,
abstract = {The coordination of multiple autonomous vehicles into convoys or platoons is expected
on our highways in the near future. However, before such platoons can be deployed, the behaviours
of the vehicles in these platoons must be certified. This is non-trivial and goes beyond current
certification requirements, for human-controlled vehicles, in that these vehicles can act
autonomously. In this paper, we show how formal verification can contribute to the analysis of these
new, and increasingly autonomous, systems. An appropriate overall representation for vehicle
platooning is as a multi-agent system in which each agent captures the “autonomous decisions”
carried out by each vehicle. In order to ensure that these autonomous decision-making agents in
vehicle platoons never violate safety requirements, we use formal verification. However, as the
formal verification technique used to verify the individual agent's code does not scale to the full
system, and as the global system verification technique does not capture the essential verification of
autonomous behaviour, we use a combination of the two approaches. This mixed strategy allows us
to verify safety requirements not only of a model of the system, but of the actual agent code used to
program the autonomous vehicles.},
author = {Kamali, Maryam and Dennis, Louise A. and McAree, Owen and Fisher, Michael and Veres,
Sandor M.},
doi = {10.1016/j.scico.2017.05.006},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0167642317301168-
main.pdf:pdf},
issn = {01676423},
pages = {88--106},
url = {http://dx.doi.org/10.1016/j.scico.2017.05.006},
volume = {148},
year = {2017}
}
@article{Pnueli1977,
author = {Pnueli, Amir},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/pnueli{\_}temporal{\_}1977.pdf:pdf},
title = {{The Temporal Logic of Programs}},
year = {1977}
}
@article{Mundhenk2015,
abstract = {This paper proposes a novel approach to security analysis of automotive architectures at
the system-level. With an increasing amount of software and connectedness of cars, security
challenges are emerging in the automotive domain. Our proposed approach enables assessment of
the security of architecture variants and can be used by decision makers in the design process. First,
the automotive Electronic Control Units (ECUs) and networks are modelled at the system-level using
parameters per component, including an exploitability score and patching rates that are derived
from an automated or manual assessment. For any specific architecture variant, a Continuous-Time
Markov Chain (CTMC) model is determined and analyzed in terms of confidentiality, integrity and
availability, using probabilistic model checking. The introduced case study demonstrates the
applicability of our approach, enabling, for instance, the exploration of parameters like patch rate
targets for ECU manufacturers.},
author = {Mundhenk, Philipp and Steinhorst, Sebastian and Lukasiewycz, Martin and Fahmy, Suhaib
A. and Chakraborty, Samarjit},
doi = {10.1145/2744769.2744906},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/mundhenk2015.pdf:pdf},
isbn = {9781450335201},
issn = {0738100X},
title = {{Security Analysis of Automotive Architectures using Probabilistic Model Checking}},
volume = {2015-July},
year = {2015}
}
@inproceedings{Krijt2016,
author = {Krijt, Filip and Jiracek, Zbynek and Bures, Tomas and Hnetynka, Petr and Plasil, Frantisek},
year = {2016}
}
@article{Marc2016,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/12488-241-9979-1-10-
20201029.pdf:pdf},
year = {2016}
}
@article{Kasner2013a,
archivePrefix = {arXiv},
arxivId = {NIHMS150003},
author = {Kasner, E and Hunter, Christopher A and Ph, D and Kariko, Katalin and Ph, D},
doi = {10.1002/ana.22528},
eprint = {NIHMS150003},
isbn = {3300000106},
issn = {09652140},
number = {4},
pages = {646--656},
pmid = {20402989},
volume = {70},
year = {2013}
}
@article{Wilson2016,
abstract = {A novel methodology for intelligent music production has been developed using
evolutionary computation. Mixes are generated by exploration of a " mix-space " , which consists of
a series of inter-channel volume ratios, allowing efficient generation of random mixes. An interactive
genetic algo-rithm was used, allowing the user to rate mixes and guide the system towards their
ideal mix. Currently, fitness eval-uation is subjective but can be aided by specific domain knowledge
obtained from a large-scale study of real mixes.},
number = {September},
pages = {4--5},
year = {2016}
}
@article{Chun2005,
abstract = {The colonists had adopted their architectural styles in building modified to the climate
context (warm and humid climate). These colonial buildings in Malaysia also have combinations of
the styles from other cultures such as Indian and Chinese due to migrations and from the local Malay
traditions. This paper will discuss about the development of architectural styles that have been
experienced in Malaysia since the colonial era. These unique architectural styles and motifs have
heritage values, many of which are being conserved until today. Apart from that, we will also look
into their influences in a smaller scale as well as in a larger scale – as this has formed an identity for
Malaysia's architecture. We have been arguing for some time about the true identity that represents
Malaysia's architecture, and this matter has recently become a main factor to consider when
deciding the style of architectural decorations to be integrated into building design. On the contrary,
there is also an urgent need to develop new appropriate design linked with the past and present. A
synchronised awareness by both the architects and the planners of the historical and cultural
perspective of a place and its architectural precedence provides a firmer basis in the pursuit to relate
new designs with the local technology and sosio-economic development.},
author = {Chun, Ho Kah and {Sanusi Hasan}, Ahmad and Noordin, Norizal M.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/colonialarch.pdf:pdf},
pages = {1--14}
}
@article{Calinescu2017,
author = {Calinescu, Radu and Autili, Marco and C{\'{a}}mara, Javier and Marco, Antinisca Di and
Inverardi, Paola and Perucci, Alexander and Jansen, Nils and Kwiatkowska, Marta and Mengshoel,
Ole J and Spalazzese, Romina},
doi = {10.1007/978-3-319-47474-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-319-47474-8{\_}11.pdf:pdf},
isbn = {9783319474748},
pages = {337--338},
year = {2017}
}
@article{Alrahman2017,
archivePrefix = {arXiv},
arxivId = {1711.09762},
author = {Alrahman, Yehia Abd and {De Nicola}, Rocco and Loreti, Michele},
eprint = {1711.09762},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1711.09762.pdf:pdf},
url = {https://arxiv.org/abs/1711.09762},
year = {2017}
}
@article{Levin2013,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Levins1998.pdf:pdf},
number = {5},
pages = {431--436},
volume = {1},
year = {2013}
}
@article{AliJanGhasab2015,
abstract = {In the present paper, an expert system for automatic recognition of different plant
species through their leaf images is investigated by employing the ant colony optimization (ACO) as a
feature decision-making algorithm. The ACO algorithm is employed to investigate inside the feature
search space in order to obtain the best discriminant features for the recognition of individual
species. In order to establish a feature search space, a set of feasible characteristics such as shape,
morphology, texture and color are extracted from the leaf images. The selected features are used by
support vector machine (SVM) to classify the species. The efficiency of the system was tested on
around 2050 leaf images collected from two different plant databases, FCA and Flavia. The results of
the study achieved an average accuracy of 95.53{\%} from the ACO-based approach, confirming the
potentials of using the proposed system for an automatic classification of various plant species.},
author = {{Ali Jan Ghasab}, Mohammad and Khamis, Shamsul and Mohammad, Faruq and {Jahani
Fariman}, Hessam},
doi = {10.1016/j.eswa.2014.11.011},
isbn = {0957-4174},
issn = {09574174},
journal = {Expert Systems with Applications},
number = {5},
pages = {2361--2370},
title = {{Feature decision-making ant colony optimization system for an automated recognition of
plant species}},
volume = {42},
year = {2015}
}
@article{Gomez2010,
abstract = {Many epidemic processes in networks spread by stochastic contacts among their
connected vertices. There are two limiting cases widely analyzed in the physics literature, the so-
called contact process (CP) where the contagion is expanded at a certain rate from an infected
vertex to one neighbor at a time, and the reactive process (RP) in which an infected individual
effectively contacts all its neighbors to expand the epidemics. However, a more realistic scenario is
obtained from the interpolation between these two cases, considering a certain number of
stochastic contacts per unit time. Here we propose a discrete-time formulation of the problem of
contact-based epidemic spreading. We resolve a family of models, parameterized by the number of
stochastic contact trials per unit time, that range from the CP to the RP. In contrast to the common
heterogeneous mean-field approach, we focus on the probability of infection of individual nodes.
Using this formulation, we can construct the whole phase diagram of the different infection models
and determine their critical properties. {\textcopyright} 2010 Europhysics Letters Association.},
archivePrefix = {arXiv},
arxivId = {0907.1313},
author = {G{\'{o}}mez, S. and Arenas, A. and Borge-Holthoefer, J. and Meloni, S. and Moreno, Y.},
doi = {10.1209/0295-5075/89/38009},
eprint = {0907.1313},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1209@0295-
5075@[email protected]:pdf},
issn = {02955075},
journal = {EPL (Europhysics Letters)},
number = {3},
title = {{Discrete-time {Markov} chain approach to contact-based disease spreading in complex
networks}},
volume = {89},
year = {2010}
}
@article{Jena2015,
number = {1},
pages = {9--14},
volume = {1},
year = {2015}
}
@manual{Prudhomme2014,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/user{\_}guide-3.3.0.pdf:pdf},
title = {{Choco3 Documentation}},
url = {http://choco-solver.org/},
year = {2014}
}
@article{Ruan2018,
abstract = {Verifying correctness for deep neural networks (DNNs) is challenging. We study a generic
reachability problem for feed-forward DNNs which, for a given set of inputs to the network and a
Lipschitz-continuous function over its outputs computes the lower and upper bound on the function
values. Because the network and the function are Lipschitz continuous, all values in the interval
between the lower and upper bound are reachable. We show how to obtain the safety verification
problem, the output range analysis problem and a robustness measure by instantiating the
reachability problem. We present a novel algorithm based on adaptive nested optimisation to solve
the reachability problem. The technique has been implemented and evaluated on a range of DNNs,
demonstrating its efficiency, scalability and ability to handle a broader class of networks than state-
of-the-art verification approaches.},
archivePrefix = {arXiv},
arxivId = {1805.02242},
author = {Ruan, Wenjie and Huang, Xiaowei and Kwiatkowska, Marta},
eprint = {1805.02242},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1805.02242.pdf:pdf},
isbn = {9780999241127},
issn = {10450823},
pages = {2651--2659},
title = {{Reachability Analysis of Deep Neural Networks with Provable Guarantees}},
volume = {2018-July},
year = {2018}
}
@article{Jamroga2020,
abstract = {The design and implementation of an e-voting system is a challenging task. Formal
analysis can be of great help here. In particular, it can lead to a better understanding of how the
voting system works, and what requirements on the system are relevant. In this paper, we propose
that the state-of-art model checker Uppaal provides a good environment for modelling and
preliminary verification of voting protocols. To illustrate this, we present an Uppaal model of
Pr{\^{e}}t {\`{a}} Voter, together with some natural extensions. We also show how
to verify a variant of receipt-freeness, despite the severe limitations of the property specification
language in the model checker.},
archivePrefix = {arXiv},
arxivId = {2007.12412},
author = {Jamroga, Wojciech and Kim, Yan and Kurpiewski, Damian and Ryan, Peter Y. A.},
eprint = {2007.12412},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2007.12412.pdf:pdf},
pages = {1--17},
title = {{Model Checkers Are Cool: How to Model Check Voting Protocols in Uppaal}},
url = {http://arxiv.org/abs/2007.12412},
year = {2020}
}
@article{McDonnell2020,
  author  = {{Mc Donnell}, Nicola and Howley, Enda and Duggan, Jim},
  journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
  year    = {2020},
  pages   = {637--649},
  doi     = {10.1007/978-3-030-58112-1_44},
  isbn    = {9783030581114},
  issn    = {16113349},
  file    = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-030-58112-1{\_}2.pdf:pdf}
}
@article{Wang2019a,
abstract = {This paper considers the adaptive time-varying formation tracking control of unmanned
aerial vehicles (UAVs) with quantized input. Uncertainties and nonholonomic constraint are involved
in the UAV model. With a novel transformation of the final control signal, a very coarse quantization
can be achieved. Adaptive quantized controllers are proposed by employing backstepping technique.
It is proved that, with our proposed strategy, all signals of the closed-loop system are globally
uniformly bounded, and the formation tracking error converges to an arbitrarily small residual set.
Simulation results are given to illustrate the effectiveness of the proposed strategy.},
doi = {10.1016/j.isatra.2018.09.013},
file = {:C$\backslash$:/Users/Asus/Downloads/1-s2.0-S0019057818303550-main.pdf:pdf},
issn = {00190578},
journal = {ISA Transactions},
pages = {76--83},
title = {{Adaptive time-varying formation tracking control of unmanned aerial vehicles with quantized
input}},
url = {https://doi.org/10.1016/j.isatra.2018.09.013},
volume = {85},
year = {2019}
}
@article{Gelaye2016a,
archivePrefix = {arXiv},
arxivId = {15334406},
author = {Gelaye, Bizu and Rondon, Marta and Araya, Ricardo and Williams, Michelle A.},
doi = {10.1016/S2215-0366(16)30284-X},
eprint = {15334406},
isbn = {0002-9297},
issn = {2045-2322},
journal = {The Lancet Psychiatry},
number = {10},
pages = {973--982},
pmid = {28642624},
volume = {3},
year = {2016}
}
@article{Hoch2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/em.pdf:pdf},
pages = {513--533},
volume = {257414},
year = {2015}
}
@article{Pavic2020,
abstract = {The existing models designed to reap the benefits of electric vehicles' flexibility in the
literature almost exclusively identify charging stations as active players exploiting this flexibility. Such
stations are seen as static loads able to provide flexibility only when electric vehicles are connected
to them. This standpoint, however, suffers from two major issues. First, the charging stations need
to anticipate important parameters of the incoming vehicles, e.g. time of arrival/departure, state-of-
energy at arrival/departure. Second, it interacts with vehicles only when connected to a specific
charging station, thus overlooking the arbitrage opportunities when they are connected to other
stations. This conventional way of addressing the electric vehicles is referred to as charging station-
based e-mobility system. A new viewpoint is presented in this paper, where electric vehicles are
observed as dynamic movable storage that can provide flexibility at any charging station. The paper
defines both the existing system, where the flexibility is viewed from the standpoint of charging
stations, and the proposed one, where the flexibility is viewed from the vehicles' standpoint. The
both concepts are mathematically formulated as linear optimization programs and run over a simple
case study to numerically evaluate the differences. Each of the four issues identified are individually
examined and omission of corresponding constraints is analysed and quantified. The main result is
that the proposed system yields better results for the vehicle owners.},
doi = {10.1016/j.apenergy.2020.115153},
file =
{:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {03062619},
journal = {Applied Energy},
number = {February},
pages = {115153},
publisher = {Elsevier},
title = {{Electric vehicle based smart e-mobility system – Definition and comparison to the existing
concept}},
url = {https://doi.org/10.1016/j.apenergy.2020.115153},
volume = {272},
year = {2020}
}
@article{Molnar2018,
abstract = {The Gamma Statechart Composition Framework is an integrated tool to support the
design, verification and validation as well as code generation for component-based reactive systems.
The behavior of each component is captured by a statechart, while assembling the system from
components is driven by a domain-specific composition language. Gamma automatically synthesizes
executable Java code extending the output of existing statechart-based code generators with
composition related parts, and it supports formal verification by mapping composite statecharts to a
back-end model checker. Execution traces obtained as witnesses during verification are back-
Annotated as test cases to replay an error trace or to validate external code generators. Tool
demonstration video: https://youtu.be/ng7lKd1wlDo.},
author = {Moln{\'{a}}r, Vince and Graics, Bence and V{\"{o}}r{\"{o}}s, Andr{\'{a}}s and Majzik,
Istv{\'{a}}n and Varr{\'{o}}, D{\'{a}}niel},
doi = {10.1145/3183440.3183489},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/icse18.pdf:pdf},
isbn = {9781450356633},
issn = {02705257},
pages = {113--116},
title = {{The Gamma statechart composition framework: design, verification and code generation for
component-based reactive systems}},
year = {2018}
}
@article{Hendriks2002,
abstract = {Different time scales do often occur in real-time systems, e.g., a polling real-time system
samples the environment many times per second, whereas the environment may only change a few
times per second. When these systems are modeled as (networks of) timed automata, the validation
using symbolic model checking techniques can significantly be slowed down by unnecessary
fragmentation of the symbolic state space. This paper introduces a syntactical adjustment to a
subset of timed automata that addresses this fragmentation problem and that can speed-up forward
symbolic reachability analysis in a significant way. We prove that this syntactical adjustment does
not alter reachability properties and that it indeed is effective. We illustrate our exact acceleration
technique with run-time data obtained with the model checkers UPPAAL and KRONOS. Moreover,
we demonstrate that automated application of our exact acceleration technique can significantly
speed-up the verification of the run-time behavior of LEGO Mindstorms programs. {\textcopyright}
2002 Published by Elsevier Science B.V.},
author = {Hendriks, Martijn and Larsen, Kim G.},
doi = {10.1016/S1571-0661(04)80473-0},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S1571066104804730-
main.pdf:pdf},
issn = {15710661},
journal = {Electronic Notes in Theoretical Computer Science},
number = {6},
pages = {120--139},
title = {{Exact Acceleration of Real-Time Model Checking}},
volume = {65},
year = {2002}
}
@article{Walker2009a,
archivePrefix = {arXiv},
arxivId = {NIHMS150003},
author = {Walker, Melanie and Kublin, James G and Zunt, Joseph R},
doi = {10.1086/498510},
eprint = {NIHMS150003},
isbn = {9780123850447},
issn = {9780123850447},
number = {1},
pages = {115--125},
pmid = {1000000221},
volume = {42},
year = {2009}
}
@article{B2017,
author = {Abdelkhalek, Raoua and Boukhris, Imen and Elouedi, Zied},
doi = {10.1007/978-3-319-60042-0},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/erdeniz2017.pdf:pdf},
isbn = {9783319600420},
number = {June},
pages = {315--324},
volume = {1},
year = {2017}
}
@article{Bucchiarone2017,
abstract = {Modern service-based systems are progressively becoming more heterogeneous. They
form a socio-technical system, composed of distributed entities, software and human participants,
interacting with and within the environment. These systems operate under constant perturbations
that are due to unexpected changes in the environment and to the unpredictable behavior of the
participants. We argue that for a service-based system to be resilient, adaptation must be collective.
Multiple participants must adapt their behavior in concert to respond to critical runtime
impediments. In this work, we present a framework for the modeling and execution of large-scale
service-based Collective Adaptive Systems, where adaptation needs are solved in a decentralized
and collective manner.},
author = {Bucchiarone, Antonio and {De Sanctis}, Martina and Marconi, Annapaola},
doi = {10.1007/978-3-319-68136-8_1},
file = {:C$\backslash$:/Users/Asus/Downloads/bucchiarone2017.pdf:pdf},
isbn = {9783319681351},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {5--20},
year = {2017}
}
@article{Prajapati2015,
doi = {10.17148/IJARCCE.2015.43144},
issn = {22781021},
journal = {IJARCCE},
number = {3},
pages = {599--603},
title = {{Brain Tumor Detection By Various Image Segmentation Techniques With Introducation To
Non Negative Matrix Factorization}},
volume = {4},
year = {2015}
}
@article{Weyns2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/seams.2015.27.pdf:pdf},
isbn = {9781479919345},
journal = {Proceedings - 10th International Symposium on Software Engineering for Adaptive and
Self-Managing Systems, SEAMS 2015},
pages = {88--92},
year = {2015}
}
@article{AlAli2020,
abstract = {Dynamicity and context dependence are some of the key properties of autonomic
component systems that include a large spectrum of today's modern smart systems. In these
systems, components dynamically re-group themselves, interact and collaborate in an ad hoc
fashion to collectively cope with situations in their environment. Though security and access control
become the key concerns of these systems, the high degree of dynamicity and the potential open-
endedness is incompatible with the traditional approaches to access control, which typically rely on
static hierarchies of roles and a static assignment of roles. To address this problem, we formulate
access control rules which allow for dynamic ad hoc collaboration at runtime and which follow the
dynamicity and context dependence of the autonomic components. Based on our previous work
with autonomic component ensembles, we show how the concepts of ensembles can be extended
and exploited to define the access control rule to govern interactions in a system of autonomic
components.},
author = {{Al Ali}, Rima and Bures, Tomas and Hnetynka, Petr and Matejek, Jan and Plasil, Frantisek
and Vinarek, Jiri},
doi = {10.1007/s10009-020-00556-1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/alali2020.pdf:pdf},
issn = {14332787},
journal = {International Journal on Software Tools for Technology Transfer},
url = {https://doi.org/10.1007/s10009-020-00556-1},
year = {2020}
}
@article{Akin2015,
abstract = {We consider a single-input single-output communications link in which a transmitter and
a receiver, both having data buffers with their own unique quality-of-service (QoS) constraints,
perform data transmission over a time-selective flat fading wireless channel. Regarding the
stochastic nature of the wireless channel in the physical layer, we first provide the effective capacity
at the transmitter buffer. Then, we assume a data arrival process with a constant rate to the
transmitter buffer and identify the departure process from the transmitter to the receiver. Noting
that the departure process is the arrival process at the receiver buffer, we find the effective
bandwidth at the receiver buffer. Establishing the maximum transmission link utilization as the ratio
between the effective capacity at the transmitter and the effective bandwidth at the receiver, we
investigate the relationship between the transmission power in the channel and the maximum
transmission link utilization under QoS requirements by employing different symbol modulation
techniques in a Rayleigh fading environment.},
doi = {10.1109/LCOMM.2015.2473158},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/zhao2020.pdf:pdf},
issn = {10897798},
journal = {IEEE Communications Letters},
number = {11},
pages = {1953--1956},
publisher = {IEEE},
title = {{The Interplay between Data Transmission Power and Transmission Link Utilization}},
volume = {19},
year = {2015}
}
@article{Naeem2018,
author = {Naeem, Aamir and Azam, Farooque and Amjad, Anam and Anwar, Muhammad Waseem},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/naeem2018{\_}2.pdf:pdf},
isbn = {9781538674376},
keywords = {-model},
pages = {248--253},
publisher = {IEEE},
title = {{Comparison of Model Checking Tools Using Timed Automata - PRISM and UPPAAL}},
year = {2018}
}
@article{Cecilia2013,
abstract = {Graphics Processing Units (GPUs) have evolved into highly parallel and fully
programmable architecture over the past five years, and the advent of CUDA has facilitated their
application to many real-world applications. In this paper, we deal with a GPU implementation of
Ant Colony Optimization (ACO), a population-based optimization method which comprises two
major stages: tour construction and pheromone update. Because of its inherently parallel nature,
ACO is well-suited to GPU implementation, but it also poses significant challenges due to irregular
memory access patterns. Our contribution within this context is threefold: (1) a data parallelism
scheme for tour construction tailored to GPUs, (2) novel GPU programming strategies for the
pheromone update stage, and (3) a new mechanism called I-Roulette to replicate the classic roulette
wheel while improving GPU parallelism. Our implementation leads to factor gains exceeding 20x for
any of the two stages of the ACO algorithm as applied to the TSP when compared to its sequential
counterpart version running on a similar single-threaded high-end CPU. Moreover, an extensive
discussion focused on different implementation paths on GPUs shows the way to deal with parallel
graph connected components. This, in turn, suggests a broader area of inquiry, where algorithm
designers may learn to adapt similar optimization methods to GPU architecture. {\textcopyright}
2012 Elsevier Inc. All rights reserved.},
author = {Cecilia, Jos{\'{e}} M. and Garc{\'{i}}a, Jos{\'{e}} M. and Nisbet, Andy and Amos, Martyn and
Ujald{\'{o}}n, Manuel},
doi = {10.1016/j.jpdc.2012.01.002},
isbn = {0743-7315},
issn = {07437315},
journal = {Journal of Parallel and Distributed Computing},
number = {1},
pages = {42--51},
title = {{Enhancing data parallelism for ant colony optimization on {GPUs}}},
url = {http://dx.doi.org/10.1016/j.jpdc.2012.01.002},
volume = {73},
year = {2013}
}
@article{Berkane2015,
abstract = {Adaptability is an increasingly important requirement for many systems, in particular for
those that are deployed in dynamically changing environments. The purpose is to let the systems
react and adapt autonomously to changing executing conditions without human intervention. Due to
the large number of variability decisions (e.g., user needs, environment characteristics) and the
current lack of reusable adaptation expertise, it becomes increasingly difficult to build a system that
satisfies all the requirements and constraints that might arise during its lifetime. In this paper, we
propose an approach for developing policies for self-adaptive systems at multiple levels of
abstraction. This approach is the first that allows the combination of variability with feature model
and reusability with design pattern into a single solution for product derivation that gives strong
support to develop self-adaptive systems in a modular way. We demonstrate the feasibility of the
proposed approach with a use case based on a smart home scenario.},
author = {Berkane, Mohamed Lamine and Seinturier, Lionel and Boufaida, Mahmoud},
doi = {10.1504/IJWET.2015.069359},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/berkane2015.pdf:pdf},
issn = {17419212},
journal = {International Journal of Web Engineering and Technology},
number = {1},
pages = {65--93},
title = {{Using variability modelling and design patterns for self-adaptive system engineering:
Application to smart-home}},
volume = {10},
year = {2015}
}
@article{Ferscha2015,
abstract = {The pore size and pore structure in pure silica zeolite MFI in-situ and spin-on low
dielectric constant (low-k) zeolite films were characterized by positronium annihilation lifetime
spectroscopy (PALS). For the micropores in the in-situ and spin-on films, the pore size obtained from
the on-wafer PALS method is 0.55 ( 0.03 nm, and this is in excellent agreement with the known
crystallographically determined zeolitic pore size (0.55 nm). To our knowledge this is the first
comparison of a PALS thin film pore size measurement with a crystallographically defined zeolite
pore size. For mesopores in the spin-on film, PALS results show that they are open/interconnected
and give a pore size of 2.3-2.6 nm. Introduction},
doi = {10.1145/2800835.2809508},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}24.pdf:pdf},
isbn = {9781450335751},
journal = {UbiComp and ISWC 2015 - Proceedings of the 2015 ACM International Joint Conference on
Pervasive and Ubiquitous Computing and the Proceedings of the 2015 ACM International
Symposium on Wearable Computers},
pages = {893--896},
year = {2015}
}
@article{Juang2014,
abstract = {This paper proposes a cooperative continuous ant colony optimization (CCACO) algorithm
and applies it to address the accuracy-oriented fuzzy systems (FSs) design problems. All of the free
parameters in a zero- or first-order Takagi-Sugeno-Kang (TSK) FS are optimized through CCACO. The
CCACO algorithm performs optimization through multiple ant colonies, where each ant colony is
only responsible for optimizing the free parameters in a single fuzzy rule. The ant colonies cooperate
to design a complete FS, with a complete parameter solution vector (encoding a complete FS) that is
formed by selecting a subsolution component (encoding a single fuzzy rule) from each colony.
Subsolutions in each ant colony are evolved independently using a new continuous ant colony
optimization algorithm. In the CCACO, solutions are updated via the techniques of pheromone-based
tournament ant path selection, ant wandering operation, and best-ant-attraction refinement. The
performance of the CCACO is verified through applications to fuzzy controller and predictor design
problems. Comparisons with other population-based optimization algorithms verify the superiority
of the CCACO.},
author = {Juang, Chia Feng and Hung, Chi Wei and Hsu, Chia Hung},
doi = {10.1109/TFUZZ.2013.2272480},
isbn = {1063-6706},
issn = {10636706},
journal = {IEEE Transactions on Fuzzy Systems},
number = {4},
pages = {723--735},
title = {{Rule-based cooperative continuous ant colony optimization to improve the accuracy of fuzzy
system design}},
volume = {22},
year = {2014}
}
@article{Khakpour2019,
abstract = {An approach for modelling adaptive complex systems should be flexible and scalable to
allow a system to grow easily, and should have a formal foundation to guarantee the correctness of
the system behavior. In this paper, we present the architecture, and formal syntax and semantics of
HPobSAM which is a model for specifying behavioral and structural adaptations to model large-scale
systems and address re-usability concerns. Self-adaptive modules are used as the building blocks to
structure a system, and policies are used as the mechanism to perform both behavioral and
structural adaptations. While a self-adaptive module is autonomous to achieve its local goals by
collaborating with other self-adaptive modules, it is controlled by a higher-level entity to prevent
undesirable behavior. HPobSAM is formalized using a combination of algebraic, graph
transformation-based and actor-based formalisms.},
doi = {10.1007/978-3-030-31517-7_1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1007978-3-030-31517-7.pdf:pdf},
isbn = {9783030315160},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {3--19},
title = {{A Formal Model to Integrate Behavioral and Structural Adaptations in Self-adaptive
Systems}},
year = {2019}
}
@article{Hansson1994,
author = {Hansson, Hans and Jonsson, Bengt},
doi = {10.1007/BF01211866},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/BF01211866.pdf:pdf},
journal = {Formal Aspects of Computing},
title = {{A Logic for Reasoning about Time and Reliability}},
year = {1994}
}
@article{Chakrabarty2016,
abstract = {Objective: To describe and evaluate the performance of an auto-mated CAD system for
detection of glaucoma from color fundus photographs. Design and Setting: Color fundus
photographs of 2252 eyes from 1126 subjects were collected from 2 centers: Aravind Eye Hospital,
Madurai and Coimbatore, India. The images of 1926 eyes (963 sub-jects) were used to train an
automated image analysis-based system, which was developed to provide a decision on a given
fundus image. A total of 163 subjects were clinically examined by 2 ophthalmolo-gists independently
and their diagnostic decisions were recorded. The consensus decision was defined to be the clinical
reference (gold standard). Fundus images of eyes with disagreement in diagnosis were excluded
from the study. The fundus images of the remaining 314 eyes (157 subjects) were presented to 4
graders and their diagnostic decisions on the same were collected. The performance of the system
was evaluated on the 314 images, using the reference standard. The sensitivity and specificity of the
system and 4 independent graders were determined against the clinical reference standard. Results:
The system achieved an area under receiver operating characteristic curve of 0.792 with a sensitivity
of 0.716 and spe-cificity of 0.717 at a selected threshold for the detection of glau-coma. The
agreement with the clinical reference standard as determined by Cohen k is 0.45 for the proposed
system. This is comparable to that of the image-based decisions of 4 ophthalmologists. Conclusions
and Relevance: An automated system was presented for glaucoma detection from color fundus
photographs. The overall evaluation results indicated that the presented system was com-parable in
performance to glaucoma classification by a manual grader solely based on fundus image
examination.},
author = {Chakrabarty, Lipi and Joshi, Gopal Datt and Chakravarty, Arunava and Raman, Ganesh V
and Krishnadas, S R and Sivaswamy, Jayanthi},
doi = {10.1097/IJG.0000000000000354},
isbn = {0000000000000},
issn = {1057-0829},
journal = {Journal of Glaucoma},
number = {7},
pages = {590--597},
title = {{Automated Detection of Glaucoma From Topographic Features of the Optic Nerve Head in
Color Fundus Photographs}},
volume = {25},
year = {2016}
}
@article{Holland2006,
abstract = {Complex adaptive systems (cas) - systems that involve many components that adapt or
learn as they interact - are at the heart of important contemporary problems. The study of cas poses
unique challenges: Some of our most powerful mathematical tools, particularly methods involving
fixed points, attractors, and the like, are of limited help in understanding the development of cas.
This paper suggests ways to modify research methods and tools, with an emphasis on the role of
computer-based models, to increase our understanding of cas. {\textcopyright} Springer Science +
Business Media, Inc. 2006.},
author = {Holland, John H.},
doi = {10.1007/s11424-006-0001-z},
issn = {10096124},
journal = {Journal of Systems Science and Complexity},
number = {1},
pages = {1--8},
title = {{Studying Complex Adaptive Systems}},
volume = {19},
year = {2006}
}
@article{Marques2013,
abstract = {The last years have seen the development of many credit scoring models for assessing
the credit-worthiness of loan applicants. Traditional credit scoring methodology has involved the use
of statistical and mathematical programming techniques such as discriminant analysis, linear and
logistic regression, linear and quadratic programming, or decision trees. However, the importance of
credit grant decisions for financial institutions has caused growing interest in using a variety of
computational intelligence techniques. This paper concentrates on evolutionary computing, which is
viewed as one of the most promising paradigms of computational intelligence. Taking into account
the synergistic relationship between the communities of Economics and Computer Science, the aim
of this paper is to summarize the most recent developments in the application of evolutionary
algorithms to credit scoring by means of a thorough review of scientific articles published during the
period 2000-2012.},
author = {Marqu{\'{e}}s, A. I. and Garc{\'{i}}a, V. and S{\'{a}}nchez, J. S.},
doi = {10.1057/jors.2012.145},
isbn = {0160-5682},
issn = {0160-5682},
journal = {Journal of the Operational Research Society},
number = {9},
pages = {1384--1399},
title = {{A literature review on the application of evolutionary computing to credit scoring}},
volume = {64},
year = {2013}
}
@article{Kurzhanski2000,
author = {Kurzhanski, A. B. and Varaiya, Pravin},
doi = {10.1016/S0167-6911(00)00059-1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}14.pdf:pdf},
issn = {01676911},
journal = {Systems {\&} Control Letters},
number = {3},
pages = {202--214},
title = {{Ellipsoidal techniques for reachability analysis}},
url = {http://link.springer.com/chapter/10.1007/3-540-46430-1{\_}19{\%}5Cnhttp://
www.sciencedirect.com/science/article/pii/S0167691100000591},
volume = {41},
year = {2000}
}
@article{K2011,
author = {K{\"{o}}ksal, Ali Sinan and Kuncak, Viktor and Suter, Philippe},
pages = {400--406},
year = {2011}
}
@book{Graf2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783319191942},
year = {2015}
}
@article{Yesserie2015,
  author = {Yesserie},
  year   = {2015},
  volume = {151},
  pages  = {10--17},
  doi    = {10.1145/3132847.3132886},
  isbn   = {9781450349185},
  file   = {:C$\backslash$:/Users/Asus/Downloads/Documents/rfc7575.txt.pdf:pdf}
}
@article{Tallapragada2015,
author = {Tallapragada, V V Satyanarayana and Reddy, D Manoj and Kiran, P Shashi and Reddy, D
Venkat},
pages = {2319--2322},
title = {{A Novel Medical Image Segmentation and Classification Using Combined Feature Set and
Decision Tree Classifier}},
year = {2015}
}
@article{Rawlings2018,
abstract = {We present an algorithm to compute the unique maximally permissive state-based
supervisor for any deterministic finite labeled transition system subject to a specification with
combined invariance and reachability requirements. The specifications that we consider are
expressed in computation tree logic and include specifications with multiple reachability
requirements, each of which should always be satisfied. The form of the controller (a state-based
supervisor) is purely memoryless, so the control decisions can be made by directly sampling the
state of the system that is being controlled, without recording any past event or transition history.
The algorithm has been implemented in SynthSMV, an extension of the well-known model-checking
solver NuSMV, which uses NuSMV's efficient implementation of symbolic model checking (based on
binary decision diagrams). A case study that involves coordinating the operation of a set of reactors
in a chemical plant shows how the methods that we develop apply in practice.},
doi = {10.1109/TCST.2018.2877621},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/rawlings2018.pdf:pdf},
issn = {1558-0865},
journal = {IEEE Transactions on Control Systems Technology},
pages = {1--9},
publisher = {IEEE},
volume = {PP},
year = {2018}
}
@article{Guessi2019,
abstract = {As smart systems leverage capabilities of heterogeneous systems for accomplishing
complex combined behaviors, they pose new challenges to traditional software engineering
practices that considered software architectures to be mostly static and stable. The software
architecture of a smart system is inherently dynamic due to uncertainty surrounding its operational
environment. While the abstract architecture offers a way to implicitly describe different forms
taken by the software architecture at run time, it is still not sufficient to guarantee that all concrete
architectures will automatically adhere to it. To address this issue, this work presents a formal
method named Ark supporting the architectural synthesis of smart systems. This is achieved by
expressing abstract architectures as a set of constraints that must be valid for any concrete
architecture of the smart system. This way, we can benefit from existing model-checking techniques
to guarantee that all concrete architectures realized from such an abstract model will comply with
well-formed rules. We also describe how this method can be incorporated to a model-driven
approach for bridging the gap between abstract and concrete architectural models. We demonstrate
our method in an illustrative case study, showing how Ark can be used to support the synthesis of
concrete architectures as well check the correctness and completeness of abstract architecture
descriptions. Finally, we elaborate on future directions to consolidating a process for the synthesis of
run-rime architectures that are correct-by-construction.},
author = {Guessi, Milena and Oquendo, Flavio and Nakagawa, Elisa Yumi},
doi = {10.1007/s10270-019-00764-7},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {16191374},
url = {https://doi.org/10.1007/s10270-019-00764-7},
year = {2019}
}
@inproceedings{Krijt2017,
author = {Krijt, Filip and Jiracek, Zbynek and Bures, Tomas and Hnetynka, Petr and Gerostathopoulos,
Ilias},
booktitle = {2017 IEEE/ACM 12th International Symposium on Software Engineering for Adaptive and
Self-Managing Systems (SEAMS)},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Krijt-SEAMS17-
IntelligentEnsembles.pdf:pdf},
title = {{Intelligent Ensembles – a Declarative Group Description Language and Java Framework}},
year = {2017}
}
@article{Effatparvar2016,
number = {7},
pages = {340--346},
title = {{Evaluation of Fault Tolerance in Cloud Computing using Colored Petri Nets}},
volume = {7},
year = {2016}
}
@article{Ramani2017,
number = {8},
pages = {38--43},
title = {{Automatic Detection of Glaucoma in Retinal Fundus Images through Image Processing and
Data Mining Techniques}},
volume = {166},
year = {2017}
}
@article{Klauck2020,
title = {{Bridging the Gap Between Probabilistic Model Checking and Probabilistic Planning: Survey, Compilations, and Empirical Comparison}},
volume = {68},
year = {2020}
}
@article{Grimm2018,
doi = {10.3390/electronics7060081},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/grimm2018.pdf:pdf},
isbn = {4923432217},
journal = {Electronics},
number = {6},
pages = {81},
volume = {7},
year = {2018}
}
@article{Pinzone2020,
abstract = {In a near future where manufacturing companies are faced with the rapid technological
developments of Cyber-Physical Systems (CPS) and Industry 4.0, a need arises to consider how this
will affect human operators remaining as a vital and important resource in modern production
systems. What will the implications of these orchestrated and ubiquitous technologies in production
– a concept we call Cyber-Physical Production Systems (CPPS) – be on the health, learning and
operative performance of human workers? This paper makes three main contributions to address
the question. First, it synthesizes the diverse literature regarding CPS and social sustainability in
production systems. Second, it conceptualizes a holistic framework, the CyFL Matrix, and outlines a
guideline to analyze how the functionalities of a CPPS relate to operational and social sustainability-
related performance impacts at different levels of analysis. Finally, it presents an industrial use case,
which the CyFL Matrix and the related guidelines are applied to. In doing so, the study offers first
support to researchers and managers of manufacturing companies willing to define suitable
operational and social sustainability-related performances for Human-centric Cyber-Physical
Production Systems of the future.},
author = {Pinzone, Marta and Alb{\`{e}}, Federico and Orlandelli, Davide and Barletta, Ilaria and
Berlin, Cecilia and Johansson, Bj{\"{o}}rn and Taisch, Marco},
doi = {10.1016/j.cie.2018.03.028},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/j.cie.2018.03.028.pdf:pdf},
issn = {03608352},
title = {{A framework for operative and social sustainability functionalities in Human-Centric Cyber-
Physical Production Systems}},
url = {https://doi.org/10.1016/j.cie.2018.03.028},
volume = {139},
year = {2020}
}
@article{Liu2014,
doi = {10.1371/journal.pone.0097822},
number = {5},
pages = {1--8},
volume = {9},
year = {2014}
}
@article{Horkoff2019,
abstract = {Over the last two decades, much attention has been paid to the area of goal-oriented
requirements engineering (GORE), where goals are used as a useful conceptualization to elicit,
model, and analyze requirements, capturing alternatives and conflicts. Goal modeling has been
adapted and applied to many sub-topics within requirements engineering (RE) and beyond, such as
agent orientation, aspect orientation, business intelligence, model-driven development, and
security. Despite extensive efforts in this field, the RE community lacks a recent, general systematic
literature review of the area. In this work, we present a systematic mapping study, covering the 246
top-cited GORE-related conference and journal papers, according to Scopus. Our literature map
addresses several research questions: we classify the types of papers (e.g., proposals, formalizations,
meta-studies), look at the presence of evaluation, the topics covered (e.g., security, agents,
scenarios), frameworks used, venues, citations, author networks, and overall publication numbers.
For most questions, we evaluate trends over time. Our findings show a proliferation of papers with
new ideas and few citations, with a small number of authors and papers dominating citations;
however, there is a slight rise in papers which build upon past work (implementations, integrations,
and extensions). We see a rise in papers concerning adaptation/variability/evolution and a slight rise
in case studies. Overall, interest in GORE has increased. We use our analysis results to make
recommendations concerning future GORE research and make our data publicly available.},
author = {Horkoff, Jennifer and Aydemir, Fatma Başak and Cardoso, Evellin and Li, Tong and
Mat{\'{e}}, Alejandro and Paja, Elda and Salnitri, Mattia and Piras, Luca and Mylopoulos, John and
Giorgini, Paolo},
doi = {10.1007/s00766-017-0280-z},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/horkoff2017.pdf:pdf},
issn = {1432010X},
number = {2},
pages = {133--160},
volume = {24},
year = {2019}
}
@article{Nolfi2006,
doi = {10.1159/000093690},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/93690.pdf:pdf},
pages = {195--203},
year = {2006}
}
@article{Hartong,
pages = {205--245},
}
@article{Garcia-Martin2014,
doi = {10.1167/iovs.14-14991},
issn = {1552-5783},
number = {8},
pages = {4729},
url = {http://iovs.arvojournals.org/article.aspx?doi=10.1167/iovs.14-14991},
volume = {55},
year = {2014}
}
@article{Bae2019,
abstract = {Signal temporal logic (STL) is a temporal logic formalism for specifying properties of
continuous signals. STL is widely used for analyzing programs in cyber-physical systems (CPS) that
interact with physical entities. However, existing methods for analyzing STL properties are
incomplete even for bounded signals, and thus cannot guarantee the correctness of CPS programs.
This paper presents a new symbolic model checking algorithm for CPS programs that is refutationally
complete for general STL properties of bounded signals. To address the difficulties of dealing with an
infinite state space over a continuous time domain, we first propose a syntactic separation of STL,
which decomposes an STL formula into an equivalent formula so that each subformula depends only
on one of the disjoint segments of a signal. Using the syntactic separation, an STL model checking
problem can be reduced to the satisfiability of a first-order logic formula, which is decidable for CPS
programs with polynomial dynamics using satisfiability modulo theories (SMT). Unlike the previous
methods, our method can verify the correctness of CPS programs for STL properties up to given
bounds.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/popl19main-p278-p.pdf:pdf},
issn = {2475-1421},
number = {POPL},
pages = {1--30},
title = {{Bounded model checking of signal temporal logic properties using syntactic separation}},
volume = {3},
year = {2019}
}
@article{Liao2013,
abstract = {In this article, we propose UACOR, a unified ant colony optimization (ACO) algorithm for
continuous optimization. UACOR includes algorithmic components from ACOR,DACOR and IACOR-LS,
three ACO algorithms for continuous optimization that have been proposed previously. Thus, it can
be used to instantiate each of these three earlier algorithms; in addition, from UACOR we can also
generate new continuous ACO algorithms that have not been considered before in the literature. In
fact, UACOR allows the usage of automatic algorithm configuration techniques to automatically
derive new ACO algorithms. To show the benefits of UACOR's flexibility, we automatically configure
two new ACO algorithms, UACOR-s and UACOR-c, and evaluate them on two sets of benchmark
functions from a recent special issue of the Soft Computing (SOCO) journal and the IEEE 2005
Congress on Evolutionary Computation (CEC'05), respectively. We show that UACOR-s is competitive
with the best of the 19 algorithms benchmarked on the SOCO benchmark set and that UACOR-c
performs superior to IPOP-CMA-ES and statistically significantly better than five other algorithms
benchmarked on the CEC'05 set. These results show the high potential ACO algorithms have for
continuous optimization and suggest that automatic algorithm configuration is a viable approach for
designing state-of-the-art continuous optimizers.},
author = {Liao, Tianjun and St{\"{u}}tzle, Thomas and {Montes de Oca}, Marco a. and Dorigo, Marco},
doi = {10.1016/j.ejor.2013.10.024},
issn = {03772217},
number = {February},
title = {{A Unified Ant Colony Optimization Algorithm for Continuous Optimization}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0377221713008473},
year = {2013}
}
@article{David2015,
abstract = {This tutorial paper surveys the main features of Uppaal SMC, amodel checking approach
in Uppaal family that allows us to reason on networks of complex real-timed systemswith a
stochastic semantic.We demonstrate the modeling features of the tool, new verification algorithms
and ways of applying them to potentially complex case studies.},
author = {David, Alexandre and Larsen, Kim G. and Legay, Axel and Mikŭcionis, Marius and Poulsen,
Danny B{\o}gsted},
doi = {10.1007/s10009-014-0361-y},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/david2015.pdf:pdf},
issn = {14332787},
number = {4},
pages = {397--415},
volume = {17},
year = {2015}
}
@article{Shen2018,
number = {3},
pages = {27--68},
title = {{Exploring Induced Pedagogical Strategies Through a Markov Decision Process Framework: Lessons Learned}},
volume = {10},
year = {2018}
}
@book{Fiadeiro2015,
doi = {10.1016/j.scico.2015.11.001},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/formal-aspects-of-component-software-
2015.pdf:pdf},
isbn = {9783319153162},
issn = {01676423},
pages = {221--222},
volume = {113},
year = {2015}
}
@article{Ciancia2014a,
abstract = {In this paper we present the use of a novel spatial model-checker to detect problems in
the data which an adaptive system gathers in order to inform future action. We categorise received
data as being plausible, implausible, possible or problematic. Data correctness is essential to ensure
correct functionality in systems which adapt in response to data and our categorisation influences
the degree of caution which should be used in acting in response to this received data. We illustrate
the theory with a concrete example of detecting errors in vehicle location data for buses in the city
of Edinburgh. Vehicle location data is visualised symbolically on a street map, and categories of
problems identified by the spatial model-checker are rendered by repainting the symbols for
vehicles in different colours.},
author = {Ciancia, Vincenzo and Gilmore, Stephen and Latella, Diego and Loreti, Michele and
Massink, Mieke},
doi = {10.1109/SASOW.2014.16},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ciancia2014.pdf:pdf},
isbn = {9781479963782},
journal = {Proceedings - 2014 IEEE 8th International Conference on Self-Adaptive and Self-Organizing
Systems Workshops, SASOW 2014},
pages = {32--37},
title = {{Data verification for collective adaptive systems: Spatial model-checking of vehicle location
Data}},
year = {2014}
}
@article{Estublier2000,
abstract = {This paper, in the first chapter summarizes the state of the art in SCM, showing the
evolution along the last 25 years. Chapter 2 shows the current issues and current research work
under way in the area. In chapter 3, the challenges SCM has to take up, as well as SCM future
research are discussed.},
doi = {10.1145/336512.336576},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Software{\_}configuration{\
_}management.pdf:pdf},
isbn = {1581132530},
journal = {Proceedings of the Conference on the Future of Software Engineering, ICSE 2000},
pages = {279--289},
year = {2000}
}
@article{Werder2018,
abstract = {Users who downloaded this article also downloaded: (2018),"Tapping into the wearable
device revolution in the work environment: a systematic review", Information Technology {\&}
People, Vol. 31 Iss 3 pp. 791-818 {\textless}a href="https://doi. (2018),"Perceptions of control
legitimacy in information systems development", Information Technology {\&} People, Vol. 31 Iss 3
pp. 712-740 {\textless}a href="https://doi.},
doi = {10.1108/ITP-04-2017-0125},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ITP{\_}Manuscript{\
_}EmergenceofTeamAgility{\_}RG.pdf:pdf},
pages = {1--29},
title = {{Explaining the emergence of team agility: a complex adaptive systems perspective}},
url = {https://doi.org/10.1108/ITP-04-2017-0125},
year = {2018}
}
@article{Kwiatkowska2010,
author = {Kwiatkowska, Marta and Norman, Gethin and Parker, David},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/acmper{\_}prismperf.pdf:pdf},
number = {4},
pages = {40--45},
title = {{PRISM : probabilistic model checking for performance and reliability analysis}},
volume = {36},
year = {2010}
}
@article{Clarke2018,
abstract = {Model checking is a computer-assisted method for the analysis of dynamical systems that
can be modeled by state-transition systems. Drawing from research traditions in mathematical logic,
programming languages, hardware design, and theoretical computer science, model checking is now
widely used for the verification of hardware and software in industry. The editors and authors of this
handbook are among the world's leading researchers in this domain, and the 32 contributed
chapters present a thorough view of the origin, theory, and application of model checking. In
particular, the editors classify the advances in this domain and the chapters of the handbook in
terms of two recurrent themes that have driven much of the research agenda: the algorithmic
challenge, that is, designing model-checking algorithms that scale to real-life problems; and the
modeling challenge, that is, extending the formalism beyond Kripke structures and temporal logic.
The book will be valuable for researchers and graduate students engaged with the development of
formal methods and verification tools.},
author = {Clarke, Edmund M. and Henzinger, Thomas A. and Veith, Helmut and Bloem, Roderick},
doi = {10.1007/978-3-319-10575-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-319-10575-8{\_}8.pdf:pdf},
isbn = {9783319105758},
pages = {1--1210},
year = {2018}
}
@article{Song2000,
abstract = {The bidding decision making problem is studied from a supplier's viewpoint in a spot
market environment. The decision-making problem is formulated as a Markov Decision Process - a
discrete stochastic optimization method. All other suppliers are modeled by their bidding
parameters with corresponding probabilities. A systematic method is developed to calculate
transition probabilities and rewards. A simplified market clearing system is also included in the
implementation. A risk-neutral decision-maker is assumed, the optimal strategy is calculated to
maximize the expected reward over a planning horizon. Simulation cases are used to illustrate the
proposed method.},
author = {Song, Haili and Liu, Chen Ching and Lawarr{\'{e}}e, Jacques and Dahlgren, Robert W.},
doi = {10.1109/59.867150},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ElectricitySupplyBiddingMDP.pdf:pdf},
issn = {08858950},
number = {2},
pages = {618--624},
volume = {15},
year = {2000}
}
@article{Kwiatkowska2017a,
doi = {10.1007/s10009-017-0476-z},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1007{\_}2Fs10009{\_}017{\_}0476{\
_}z.pdf:pdf},
issn = {1433-2787},
url = {https://doi.org/10.1007/s10009-017-0476-z},
year = {2017}
}
@article{RichardBuchi1966,
doi = {10.1016/S0049-237X(09)70564-6},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/S0049-237X-2809-2970564-6.pdf:pdf},
issn = {0049237X},
number = {C},
pages = {1--11},
volume = {44},
year = {1966}
}
@article{Ciancia2015,
abstract = {In this work we present a spatial extension of the global model checking algorithm of the
temporal logic CTL. This classical verification framework is augmented with ideas coming from the
tradition of topological spatial logics. More precisely, we add to CTL the operators of the Spatial
Logic of Closure Spaces, including the surrounded operator, with its intended meaning of a point
being surrounded by entities satisfying a specific property. The interplay of space and time permits
one to define complex spatio-temporal properties. The model checking algorithm that we propose
features no particular efficiency optimisations, as it is meant to be a reference specification of a
family of more efficient algorithms that are planned for future work. Its complexity depends on the
product of temporal states and points of the space. Nevertheless, a prototype model checker has
been implemented, made available, and used for experimentation of the application of spatio-
temporal verification in the field of collective adaptive systems.},
author = {Ciancia, Vincenzo and Grilletti, Gianluca and Latella, Diego and Loreti, Michele and
Massink, Mieke},
doi = {10.1007/978-3-662-49224-6_24},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/an{\_}experimental{\_}spatio-temporal{\
_}model{\_}checker.pdf:pdf},
isbn = {9783662492239},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
number = {600708},
pages = {297--311},
volume = {9509},
year = {2015}
}
@article{Sanderson2019a,
abstract = {{\textcopyright} 2019, IFIP International Federation for Information Processing. The
design and reconfiguration of adaptive production systems is a key driver in modern advanced
manufacturing. We summarise the use of an approach from the field of functional modelling to
capture the function, behaviour, and structure of a system. This model is an integral part of the
Evolvable Assembly Systems architecture, allowing the system to adapt its behaviour in response to
changing product requirements. The integrated approach is illustrated with an example taken from a
real EAS instantiation.},
doi = {10.1007/978-3-030-05931-6_4},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Sanderson{\_}FMinEAS.pdf:pdf},
isbn = {9783030059309},
issn = {18684238},
pages = {40--48},
volume = {530},
year = {2019}
}
@article{Chaib2018,
doi = {10.1007/s12652-017-0510-8},
file = {:C$\backslash$:/Users/Asus/Downloads/chaib2017.pdf:pdf},
isbn = {0123456789},
issn = {18685145},
number = {2},
pages = {367--380},
volume = {9},
year = {2018}
}
@article{Fakhir2018,
doi = {10.1109/ACCESS.2018.2849821},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/08392667.pdf:pdf},
pages = {34790--34803},
publisher = {IEEE},
volume = {6},
year = {2018}
}
@article{Lehrig2014,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/paper9.pdf:pdf},
number = {317704},
pages = {141--151},
title = {{Approaching the Cloud: Using Palladio for Scalability, Elasticity, and Efficiency Analyses}},
volume = {i},
year = {2014}
}
@article{Yuan2019,
abstract = {This paper addresses the formation control problem for a group of mechanical systems
with nonlinear uncertain dynamics under the virtual leader-following framework. New cooperative
deterministic learning-based adaptive formation control algorithms are proposed. Specifically, the
virtual leader dynamics is constructed as a linear system subject to bounded inputs, so as to produce
more diverse reference signals for formation tracking control. A cooperative discontinuous nonlinear
estimation protocol is first proposed to estimate the leader's state information. Based on this, a
cooperative deterministic learning formation control protocol is developed using artificial neural
networks, such that formation tracking control and locally-accurate nonlinear identification with
learning knowledge consensus can be achieved simultaneously. Finally, by utilizing the learned
knowledge represented by constant neural networks, an experience-based distributed control
protocol is further proposed to enable position-swappable formation control. Numerical simulations
using a group of autonomous underwater vehicles have been conducted to demonstrate the
effectiveness and usefulness of the proposed results. IEEE},
doi = {10.1109/TII.2018.2792455},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/08255681.pdf:pdf},
issn = {15513203},
number = {1},
pages = {319--333},
volume = {15},
year = {2019}
}
@article{Muhammad2019,
abstract = {Wireless sensor networks (WSNs) have a wide variety of applications in environment
monitoring (such as air pollution and fire detection), industrial operations (such as machine
surveillance), and precision agriculture. It is an arduous task to manage a large WSN as constant
monitoring is required to keep it operational. Mobile robots are used to deploy, manage, and
perform various application specific tasks in WSNs. However, a fully autonomous robot lacks the
ability of proper decision-making in complex situations such as network coverage in disastrous areas.
A remote human operator can assist the robot in improved decision-making, specially in odd
situations that arise due to either inherent application needs or changes in the environment. In
addition to the complexity of WSN managed by a robot, analyzing the effect of human operator in
managing WSN poses further challenge. This is due to the fact that the performance of a human
operator is also influenced by internal (such as fatigue) as well as external (such as workload
conditions) factors. In this paper, we use probabilistic model checking to analyze the performance of
robot assisted WSN. This study enables WSN administrators to analyze and plan WSN management
before the actual deployment of robot and sensors in the field. Given specific application
requirements, we are able to examine key parameters such as size of the network, number of
sensors needed to keep the network operational, and time to service the farthest location. With the
help of remote human operator, we introduce several degrees of autonomy to the mobile robot
managing WSN. Markov decision process is used to capture uncertainties and imperfections in the
human−robot interactions. We demonstrate the benefits obtained due to intelligent decision-
making by a realistic human operator whose performance is affected by both external and internal
factors. We demonstrate the applicability of our approach via detailed case studies in planning and
managing WSNs.},
author = {Muhammad, Shahabuddin and Mohammad, Nazeeruddin and Bashar, Abul and Khan,
Majid Ali},
doi = {10.1007/s10846-018-0901-x},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/muhammad2018.pdf:pdf},
issn = {15730409},
number = {3-4},
pages = {687--709},
title = {{Designing Human Assisted Wireless Sensor and Robot Networks Using Probabilistic Model
Checking}},
volume = {94},
year = {2019}
}
@article{Subudhi2015,
author = {Subudhi, Badri Narayan and Patwa, Ishan and Ghosh, Ashish},
doi = {10.1007/978-81-322-2009-1},
isbn = {978-81-322-2008-4},
url = {http://link.springer.com/10.1007/978-81-322-2009-1},
volume = {309},
year = {2015}
}
@article{Skandylas2021,
abstract = {As threats to computer security become more common, complex and frequent, systems
that can automatically protect themselves from attacks are imminently needed. In this paper, we
propose a formal approach to achieve self-protection by performing security analysis on self-
adaptive systems, taking the adaptation process into account. We use probabilistic model checking
to quantitatively analyze adaptation security, rank the strategies available and select the most
secure one to apply in the system. We have incorporated our approach in Rainbow which is a
framework to develop architecture-based self-adaptive systems. To evaluate our approach's
effectiveness, we applied it on two case studies: a simple document storage system and ZNN, a well
known self-adaptive exemplar. The results show that applying our approach can guarantee a
reasonable degree of security, both during and after adaptation.},
doi = {10.1016/j.future.2020.09.005},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/j.future.2020.09.005.pdf:pdf},
issn = {0167739X},
pages = {421--437},
url = {https://doi.org/10.1016/j.future.2020.09.005},
volume = {115},
year = {2021}
}
@article{Fraser2020a,
author = {Fraser, Douglas and Giaquinta, Ruben and Hoffmann, Ruth and Ireland, Murray and Miller,
Alice and Norman, Gethin},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/fac20.pdf:pdf},
year = {2020}
}
@article{Meier2007,
abstract = {Early detection of glaucoma is essential for preventing one of the most common causes
of blindness. Our research is focused on a novel automated classification system based on image
features from fundus photographs which does not depend on structure segmentation or prior expert
knowledge. Our new data driven approach that needs no manual assistance achieves an accuracy of
detecting glaucomatous retina fundus images compareable to human experts. In this paper, we
study image preprocessing methods to provide better input for more reliable automated glaucoma
detection. We reduce disease independent variations without removing information that
discriminates between images of healthy and glaucomatous eyes. In particular, nonuniform
illumination is corrected, blood vessels are inpainted and the region of interest is normalized before
feature extraction and subsequent classification. The effect of these steps was evaluated using
principal component analysis for dimension reduction and support vector machine as classifier. ?
Springer-Verlag Berlin Heidelberg 2007.},
author = {Meier, Joerg and Bock, Rudiger and Michelson, Georg and Nyul, Laszlo G and Hornegger,
Joachim},
doi = {10.1007/978-3-540-74272-2},
isbn = {978-3-540-74271-5},
issn = {03029743},
pages = {165--172},
title = {{Effects of preprocessing eye fundus images on appearance based glaucoma classification}},
volume = {4673},
year = {2007}
}
@article{Fu1981,
doi = {http://dx.doi.org/10.1016/0031-3203(81)90028-5},
issn = {0031-3203},
number = {1},
pages = {3--16},
volume = {13},
year = {1981}
}
@article{Taneja2015,
author = {Taneja, Ankita},
number = {7},
pages = {26--32},
volume = {124},
year = {2015}
}
@article{Lin2014,
abstract = {The paper concentrates on the fundamental coordination problem that requires a
network of agents to achieve a specific but arbitrary formation shape. A new technique based on
complex Laplacian is introduced to address the problems of which formation shapes specified by
inter-agent relative positions can be formed and how they can be achieved with distributed control
ensuring global stability. Concerning the first question, we show that all similar formations subject to
only shape constraints are those that lie in the null space of a complex Laplacian satisfying certain
rank condition and that a formation shape can be realized almost surely if and only if the graph
modeling the inter-agent specification of the formation shape is 2-rooted. Concerning the second
question, a distributed and linear control law is developed based on the complex Laplacian
specifying the target formation shape, and provable existence conditions of stabilizing gains to
assign the eigenvalues of the closed-loop system at desired locations are given. Moreover, we show
how the formation shape control law is extended to achieve a rigid formation if a subset of
knowledgable agents knowing the desired formation size scales the formation while the rest agents
do not need to re-design and change their control laws. {\textcopyright} 1963-2012 IEEE.},
author = {Lin, Zhiyun and Wang, Lili and Han, Zhimin and Fu, Minyue},
doi = {10.1109/TAC.2014.2309031},
file = {:C$\backslash$:/Users/Asus/Downloads/TAC2014.pdf:pdf},
issn = {00189286},
number = {7},
pages = {1765--1777},
volume = {59},
year = {2014}
}
@article{Kwiatkowska2018,
abstract = {We present automatic verification techniques for concurrent stochastic multi-player
games (CSGs) with rewards. To express properties of such models, we adapt the temporal logic
rPATL (probabilistic alternating-time temporal logic with rewards), originally introduced for the
simpler model of turn-based games, which enables quantitative reasoning about the ability of
coalitions of players to achieve goals related to the probability of an event or reward measures. We
propose and implement a modelling approach and model checking algorithms for property
verification and strategy synthesis of CSGs, as an extension of PRISM-games. We evaluate the
performance, scalability and applicability of our techniques on case studies from domains such as
security, networks and finance, showing that we can analyse systems with probabilistic, cooperative
and competitive behaviour between concurrent components, including many scenarios that cannot
be analysed with turn-based models.},
author = {Kwiatkowska, Marta and Norman, Gethin and Parker, David and Santos, Gabriel},
doi = {10.1007/978-3-319-99154-2_14},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/KNP+18.pdf:pdf},
isbn = {9783319991535},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {223--239},
year = {2018}
}
@article{Maier2014,
abstract = {The development and application of evolutionary algorithms (EAs) and other
metaheuristics for the optimisation of water resources systems has been an active research field for
over two decades. Research to date has emphasized algorithmic improvements and individual
applications in specific areas (e.g. model calibration, water distribution systems, groundwater
management, river-basin planning and management, etc.). However, there has been limited
synthesis between shared problem traits, common EA challenges, and needed advances across
major applications. This paper clarifies the current status and future research directions for better
solving key water resources problems using EAs. Advances in understanding fitness landscape
properties and their effects on algorithm performance are critical. Future EA-based applications to
real-world problems require a fundamental shift of focus towards improving problem formulations,
understanding general theoretic frameworks for problem decompositions, major advances in EA
computational efficiency, and most importantly aiding real decision-making in complex, uncertain
application contexts.},
author = {Maier, H.R. and Kapelan, Z. and Kasprzyk, Joseph R. and Kollat, Joshua B. and Matott, L.S.
and Cunha, M.C. and Dandy, G.C. and Gibbs, M.S. and Keedwell, E. and Marchi, A. and Ostfeld, A.
and Savic, D. and Solomatine, D.P. and Vrugt, J.A. and Zecchin, A.C. and Minsker, B.S. and Barbour,
E.J. and Kuczera, G. and Pasha, F. and Castelletti, A. and Giuliani, M. and Reed, P.M.},
doi = {10.1016/j.envsoft.2014.09.013},
isbn = {1364-8152},
issn = {13648152},
pages = {271--299},
title = {{Evolutionary algorithms and other metaheuristics in water resources: Current status,
research challenges and future directions}},
url = {http://www.sciencedirect.com/science/article/pii/S1364815214002679},
volume = {62},
year = {2014}
}
@article{Lisboa2013,
doi = {10.1167/iovs.13-11676},
issn = {01460404},
keywords = {Diagnosis,Glaucoma,Preperimetric},
number = {5},
pages = {3417--3425},
pmid = {23532529},
title = {{Comparison of different spectral domain OCT scanning protocols for diagnosing
preperimetric glaucoma}},
volume = {54},
year = {2013}
}
@article{Keznikl2012,
abstract = {In the domain of dynamically evolving distributed systems composed of autonomous and
(self-) adaptive components, the task of systematically managing the design complexity of their
communication and composition is a pressing issue. This stems from the dynamic nature of such
systems, where components and their bindings may appear and disappear without anticipation. To
address this challenge, we propose employing separation of concerns via a mechanism of dynamic
implicit bindings with implicit communication. This way, we strive for dynamically formed, implicitly
interacting groups - ensembles - of autonomous components. In this context, we introduce the
DEECo component model, where such bindings, as well as the associated communication, are
managed in an automated way, enabling transparent handling of the dynamic changes in the
system. {\textcopyright} 2012 IEEE.},
author = {Keznikl, Jaroslav and Bure{\v{s}}, Tom{\'{a}}{\v{s}} and Pl{\'{a}}{\v{s}}il, Franti{\v{s}}ek and
Kit, Michal},
doi = {10.1109/WICSA-ECSA.212.39},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/keznikl2012.pdf:pdf},
isbn = {9780769548272},
journal = {Proceedings of the 2012 Joint Working Conference on Software Architecture and 6th
European Conference on Software Architecture, WICSA/ECSA 2012},
title = {{Towards dependable emergent ensembles of components: The DEECo component model}},
year = {2012}
}
@article{Soesanti2016,
number = {2},
pages = {272--278},
title = {{Batik Production Process Optimization Using Particle Swarm Optimization Method}},
volume = {86},
year = {2016}
}
@article{Zave2014,
abstract = {Because potential users have to choose a formal method before they can start using one,
research on assessing the applicability of specific formal methods might be as effective in
encouraging their use as work on the methods themselves. This comparison of Alloy and Spin is
based on a demanding project that exploited the full capabilities of both languages and tools. The
study exposed issues not apparent from more superficial studies, and resulted in some unexpected
conclusions. The paper provides tentative recommendations for two different classes of network
protocol, a research agenda for solidifying the recommendations, and a few general lessons learned
about research on selection of formal methods.},
doi = {10.1007/s00165-014-0302-2},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/zave2014.pdf:pdf},
isbn = {0016501403},
issn = {1433299X},
number = {2},
pages = {239--253},
year = {2014}
}
@article{Ismail2020,
abstract = {The ability to ensure an optimal decision is significant for self-adaptive systems especially
when dealing with uncertainty. For this reason, a synthesis-driven approach can be used to capture
and synthesize a decision that aims to satisfy the multi-objective properties. Assessing the quality of
the synthesis-driven approach is challenging, since it involves a set of activities from modeling,
simulating, and analyzing the outcomes. This paper presents the design and implementation of a
graphical user interface (GUI)-based prototype for assessing synthesis outcome and performance of
an adaptation decision. The prototype is designed and developed based on the component-based
development approach that is able to integrate the existing and related libraries from PRISM-games
model checker for the synthesis engine, JFreeChart libraries for the chart presentation, and Java
Universal Network/Graph Framework libraries for the graph visualization. This paper also presents
the implementation of the proposed prototype based on the cloud application deployment scenario
to illustrate its applicability. This work contributes to provide a fundamental work towards
automated synthesis for self-adaptive systems.},
doi = {10.11591/eei.v9i2.1716},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1716-4745-1-PB.pdf:pdf},
issn = {23029285},
number = {2},
pages = {792--800},
volume = {9},
year = {2020}
}
@article{Arias2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/main{\_}4.pdf:pdf},
number = {November},
pages = {108},
title = {{Formal Semantics and Automatic Verification of Hierarchical Multimedia Scenarios with
Interactive Choices}},
url = {https://tel.archives-ouvertes.fr/tel-01282677},
year = {2015}
}
@book{Carreira2020,
doi = {10.1007/978-3-030-43946-0},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-030-43946-0.pdf:pdf},
isbn = {9783030439453},
year = {2020}
}
@article{Wirsing2013a,
abstract = {Today's developers often face the demanding task of developing software for ensembles:
systems with massive numbers of nodes, operating in open and non-deterministic environments
with complex interactions, and the need to dynamically adapt to new requirements, technologies or
environmental conditions without redeployment and without interruption of the system's
functionality. Conventional development approaches and languages do not provide adequate
support for the problems posed by this challenge. The goal of the ASCENS project is to develop a
coherent, integrated set of methods and tools to build software for ensembles. To this end we
research foundational issues that arise during the development of these kinds of systems, and we
build mathematical models that address them. Based on these theories we design a family of
languages for engineering ensembles, formal methods that can handle the size, complexity and
adaptivity required by ensembles, and software-development methods that provide guidance for
developers. In this paper we provide an overview of several research areas of ASCENS: the SOTA
approach to ensemble engineering and the underlying formal model called GEM, formal notions of
adaptation and awareness, the SCEL language, quantitative analysis of ensembles, and finally
software-engineering methods for ensembles. {\textcopyright} 2013 Springer-Verlag Berlin
Heidelberg.},
author = {Wirsing, Martin and H{\"{o}}lzl, Matthias and Tribastone, Mirco and Zambonelli, Franco},
doi = {10.1007/978-3-642-35887-6_1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-642-35887-6{\_}1.pdf:pdf},
isbn = {9783642358869},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {1--24},
year = {2013}
}
@article{Weyns2012,
abstract = {One major challenge in self-adaptive systems is to assure the required quality properties.
Formal methods provide the means to rigorously specify and reason about the behaviors of self-
adaptive systems, both at design time and runtime. To the best of our knowledge, no systematic
study has been performed on the use of formal methods in self-adaptive systems. As a result, there
is no clear view on what methods have been used to verify self-adaptive systems, and what support
these methods offer to software developers. As such insight is important for researchers and
engineers, we performed a systematic literature review covering 12 main software engineering
venues and 4 journals, resulting in 75 papers used for data collection. The study shows that the
attention for self-adaptive software systems is gradually increasing, but the number of studies that
employ formal methods remains low. The main focus of formalization is on modeling and reasoning.
Model checking and theorem proving have gained limited attention. The main concerns of interest in
formalization of self-adaptation are efficiency/performance and reliability. Important adaptation
concerns, such as security and scalability, are hardly considered. To verify the concerns of interest, a
set of new properties are defined, such as interference freedom, responsiveness, mismatch, and
loss-tolerance. A relevant part of the studies use formal methods at runtime, but the use is limited to
modeling and analysis. Formal methods can be applied to other runtime activities of self-adaptation,
and there is a need for light-weight tools to support runtime verification. {\textcopyright} 2012
ACM.},
author = {Weyns, Danny and Iftikhar, M. Usman and {De La Iglesia}, Didac Gil and Ahmad, Tanvir},
doi = {10.1145/2347583.2347592},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2347583.2347592.pdf:pdf},
isbn = {9781450310840},
pages = {67--79},
year = {2012}
}
@article{Yaqoob2019,
abstract = {The explosive growth of smart objects and their dependency on wireless technologies for
communication increases the vulnerability of Internet of Things (IoT) to cyberattacks. Cyberattacks
faced by IoT present daunting challenges to digital forensic experts. Researchers adopt various
forensic techniques to investigate such attacks. These techniques aim to track internal and external
attacks by emphasizing on communication mechanisms and IoT's architectural vulnerabilities. In this
study, we explore IoT's novel factors affecting traditional computer forensics. We investigate recent
studies on IoT forensics by analyzing their strengths and weaknesses. We categorize and classify the
literature by devising a taxonomy based on forensics phases, enablers, networks, sources of
evidence, investigation modes, forensics models, forensics layers, forensics tools, and forensics data
processing. We also enumerate a few prominent use cases of IoT forensics and present the key
requirements for enabling IoT forensics. Finally, we identify and discuss several indispensable open
research challenges as future research directions.},
author = {Yaqoob, Ibrar and Hashem, Ibrahim Abaker Targio and Ahmed, Arif and Kazmi, S. M.Ahsan
and Hong, Choong Seon},
doi = {10.1016/j.future.2018.09.058},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/yaqoob2019.pdf:pdf},
issn = {0167739X},
pages = {265--275},
title = {{Internet of things forensics: Recent advances, taxonomy, requirements, and open
challenges}},
url = {https://doi.org/10.1016/j.future.2018.09.058},
volume = {92},
year = {2019}
}
@article{Taduran2018,
author = {Taduran, R. J.O. and Ranjitkar, S. and Hughes, T. and Townsend, G. and Brook, A. H.},
doi = {10.2495/DNE-V13-N1-93-100},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/DNE130109f.pdf:pdf},
issn = {17557445},
number = {1},
pages = {93--100},
title = {{Two complex adaptive systems in human development: Further studies of dental and
fingerprint parameters}},
volume = {13},
year = {2018}
}
@article{Su2008,
doi = {10.1145/1329125.1329364},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0004370299000363-main{\
_}2.pdf:pdf},
pages = {1},
title = {{Coalition structure generation with worst case guarantees based on cardinality structure}},
volume = {111},
year = {2008}
}
@article{Bozhinoski2016,
abstract = {UAV-based systems are systems that are composed of a team of drones, various devices
(like movable cameras, sensors), and human agents, which collaborate each other to accomplish
defined missions. Since humans are constituent part of these systems, UAV-based systems are both
mission-critical and safety-critical. Moreover, these systems are requested to operate in potentially
unpredictable and unknown environments. A model of the environment describing, e.g. obstacles,
no-fly zones, wind and weather conditions might be available, however, the assumption that such
model is both correct and complete is often wrong. In this paper, we describe a novel approach for
managing the run-time adaptation of UAV-based systems. Our approach is based on a generic
collective adaptation engine that addresses collective adaptation problems in a decentralized
fashion, operates at run-time, and enables the addition of new entities at any time. Moreover, our
approach dynamically understands which parts of the system should be selected to solve an
adaptation issue. The feasibility and scalability of the approach have been empirically evaluated in
the context of a private company surveillance scenario.},
author = {Bozhinoski, Darko and Bucchiarone, Antonio and Malavolta, Ivano and Marconi, Annapaola
and Pelliccione, Patrizio},
doi = {10.1109/SEAA.2016.41},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/bozhinoski2016.pdf:pdf},
isbn = {9781509028191},
pages = {214--221},
year = {2016}
}
@article{Savaglio2015,
abstract = {Internet of Things promises to be an innovative scenario in which the user experience will
be enriched by new cyber-physical services and content, shared by multiple actors (things, places,
people) with an higher frequency and quality of the current ones. The countless challenges and
opportunities that the development of such an ecosystem entails require a marked intervention on
the current Internet architectural frameworks and models, primarily as regards the management
function. Pointing in this direction, the most relevant autonomic and cognitive architectures for the
Internet of Things have been surveyed and compared. {\textcopyright} Springer International
Publishing Switzerland 2015.},
doi = {10.1007/978-3-319-23237-9_5},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/IDCS{\_}2015{\_}v2.pdf:pdf},
isbn = {9783319232362},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {39--47},
volume = {9258},
year = {2015}
}
@article{DeNicola2020,
abstract = {An adaptive system is able to adapt at runtime to dynamically changing environments
and to new requirements. Adaptive systems can be single adaptive entities or collective ones that
consist of several collaborating entities. Rigorous engineering requires appropriate methods and
tools that help guaranteeing that an adaptive system lives up to its intended purpose. This paper
introduces the special section on “Rigorous Engineering of Collective Adaptive Systems.” It presents
the seven contributions of the section and gives a short overview of the field of rigorously
engineering collective adaptive systems by structuring it according to three topics: systematic
development, methods and theories for modelling and analysis, and techniques for programming
and operating collective adaptive systems.},
author = {{De Nicola}, Rocco and J{\"{a}}hnichen, Stefan and Wirsing, Martin},
doi = {10.1007/s10009-020-00565-0},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Nicola2020{\_}Article{\
_}RigorousEngineeringOfCollectiv.pdf:pdf},
isbn = {1000902000565},
issn = {14332787},
url = {https://doi.org/10.1007/s10009-020-00565-0},
volume = {22},
year = {2020}
}
@article{Liu2019,
author = {Liu, Wei and Guo, Jingzhi and Xu, Longlong and Chen, Deng},
doi = {10.1007/978-3-030-29551-6_21},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/weiliu2019.pdf:pdf},
isbn = {9783030295516},
pages = {234--246},
title = {{Context Aware Community Formation for MAS-Oriented Collective Adaptive System}},
volume = {2},
year = {2019}
}
@article{Verma2016,
doi = {10.17485/ijst/2016/v9i14/84976},
isbn = {9789380544212},
issn = {09745645},
number = {April},
pages = {1--15},
volume = {9},
year = {2016}
}
@article{Ciocchetta2009,
abstract = {Model-checking can provide valuable insight into the behaviour of biochemical systems,
answering quantitative queries which are more difficult to answer using stochastic simulation alone.
However, model-checking is a computationally intensive technique which can become infeasible if
the system under consideration is too large. Moreover, the finite nature of the state representation
used means that a priori bounds must be set for the numbers of molecules of each species to be
observed in the system. In this paper we present an approach which addresses these problems by
using stochastic simulation and the PRISM model checker in tandem. The stochastic simulation
identifies reasonable bounds for molecular populations in the context of the considered experiment.
These bounds are used to parameterise the PRISM model and limit its state space. A simulation pre-
run identifies interesting time intervals on which model-checking should focus, if this information is
not available from experimental data. {\textcopyright} 2009 Elsevier B.V. All rights reserved.},
author = {Ciocchetta, Federica and Gilmore, Stephen and Guerriero, Maria Luisa and Hillston, Jane},
doi = {10.1016/j.entcs.2009.02.048},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S157106610900053X-
main.pdf:pdf},
issn = {15710661},
journal = {Electronic Notes in Theoretical Computer Science},
number = {C},
pages = {17--38},
title = {{Integrated Simulation and Model-Checking for the Analysis of Biochemical Systems}},
url = {http://dx.doi.org/10.1016/j.entcs.2009.02.048},
volume = {232},
year = {2009}
}
@article{Jha2017,
abstract = {Formal synthesis is the process of generating a program satisfying a high-level formal
specification. In recent times, effective formal synthesis methods have been proposed based on the
use of inductive learning. We refer to this class of methods that learn programs from examples as
formal inductive synthesis. In this paper, we present a theoretical framework for formal inductive
synthesis. We discuss how formal inductive synthesis differs from traditional machine learning. We
then describe oracle-guided inductive synthesis (OGIS), a framework that captures a family of
synthesizers that operate by iteratively querying an oracle. An instance of OGIS that has had much
practical impact is counterexample-guided inductive synthesis (CEGIS). We present a theoretical
characterization of CEGIS for learning any program that computes a recursive language. In particular,
we analyze the relative power of CEGIS variants where the types of counterexamples generated by
the oracle varies. We also consider the impact of bounded versus unbounded memory available to
the learning algorithm. In the special case where the universe of candidate programs is finite, we
relate the speed of convergence to the notion of teaching dimension studied in machine learning
theory. Altogether, the results of the paper take a first step towards a theoretical foundation for the
emerging field of formal inductive synthesis.},
archivePrefix = {arXiv},
arxivId = {1505.03953},
doi = {10.1007/s00236-017-0294-5},
eprint = {1505.03953},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/jha2017.pdf:pdf},
issn = {14320525},
number = {7},
pages = {693--726},
publisher = {Springer Berlin Heidelberg},
volume = {54},
year = {2017}
}
@article{Graics2020,
abstract = {The increasing complexity of reactive systems can be mitigated with the use of
components and composition languages in model-driven engineering. Designing composition
languages is a challenge itself as both practical applicability (support for different composition
approaches in various application domains), and precise formal semantics (support for verification
and code generation) have to be taken into account. In our Gamma Statechart Composition
Framework, we designed and implemented a composition language for the synchronous, cascade
synchronous and asynchronous composition of statechart-based reactive components. We
formalized the semantics of this composition language that provides the basis for generating
composition-related Java source code as well as mapping the composite system to a back-end model
checker for formal verification and model-based test case generation. In this paper, we present the
composition language with its formal semantics, putting special emphasis on design decisions
related to the language and their effects on verifiability and applicability. Furthermore, we
demonstrate the design and verification functionality of the composition framework by presenting
case studies from the cyber-physical system domain.},
author = {Graics, Bence and Moln{\'{a}}r, Vince and V{\"{o}}r{\"{o}}s, Andr{\'{a}}s and Majzik,
Istv{\'{a}}n and Varr{\'{o}}, D{\'{a}}niel},
doi = {10.1007/s10270-020-00806-5},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/graics2020.pdf:pdf},
isbn = {1027002000806},
issn = {16191374},
number = {6},
pages = {1483--1517},
url = {https://doi.org/10.1007/s10270-020-00806-5},
volume = {19},
year = {2020}
}
@article{Esfahani2011,
isbn = {9781450304436},
journal = {ESEC/FSE '11: Proceedings of the 19th ACM SIGSOFT symposium and the 13th European
conference on Foundations of software engineering},
year = {2011}
}
@article{Reynolds2017,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/pres-vtsa2017.pdf:pdf},
year = {2017}
}
@article{Kounev2017d,
abstract = {This book provides formal and informal definitions and taxonomies for self-aware
computing systems, and explains how self-aware computing relates to many existing subfields of
computer science, especially software engineering. It describes architectures and algorithms for self-
aware systems as well as the benefits and pitfalls of self-awareness, and reviews much of the latest
relevant research across a wide array of disciplines, including open research challenges. The
chapters of this book are organized into five parts: Introduction, System Architectures, Methods and
Algorithms, Applications and Case Studies, and Outlook. Part I offers an introduction that defines
self-aware computing systems from multiple perspectives, and establishes a formal definition, a
taxonomy and a set of reference scenarios that help to unify the remaining chapters. Next, Part II
explores architectures for self-aware computing systems, such as generic concepts and notations
that allow a wide range of self-aware system architectures to be described and compared with both
isolated and interacting systems. It also reviews the current state of reference architectures,
architectural frameworks, and languages for self-aware systems. Part III focuses on methods and
algorithms for self-aware computing systems by addressing issues pertaining to system design, like
modeling, synthesis and verification. It also examines topics such as adaptation, benchmarks and
metrics. Part IV then presents applications and case studies in various domains including cloud
computing, data centers, cyber-physical systems, and the degree to which self-aware computing
approaches have been adopted within those domains. Lastly, Part V surveys open challenges and
future research directions for self-aware computing systems. It can be used as a handbook for
professionals and researchers working in areas related to self-aware computing, and can also serve
as an advanced textbook for lecturers and postgraduate students studying subjects like advanced
software engineering, autonomic computing, self-adaptive systems, and data-center resource
management. Each chapter is largely self-contained, and offers plenty of references for anyone
wishing to pursue the topic more deeply.},
author = {Kounev, Samuel and Kephart, Jeffrey O. and Milenkoski, Aleksandar and Zhu, Xiaoyun},
doi = {10.1007/978-3-319-47474-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-319-47474-8{\_}2.pdf:pdf},
isbn = {9783319474748},
pages = {1--722},
year = {2017}
}
@article{Michalak2010,
abstract = {A major research challenge in multi-agent systems is the problem of partitioning a set of
agents into mutually disjoint coalitions, such that the overall performance of the system is
optimized. This problem is difficult because the search space is very large: the number of possible
coalition structures increases exponentially with the number of agents. Although several algorithms
have been proposed to tackle this Coalition Structure Generation (CSG) problem, all of them suffer
from being inherently centralized, which leads to the existence of a performance bottleneck and a
single point of failure. In this paper, we develop the first decentralized algorithm for solving the CSG
problem optimally. In our algorithm, the necessary calculations are distributed among the agents,
instead of being carried out centrally by a single agent (as is the case in all the available algorithms in
the literature). In this way, the search can be carried out in a much faster and more robust way, and
the agents can share the burden of the calculations. The algorithm combines, and improves upon,
techniques from two existing algorithms in the literature, namely DCVC [5] and IP [9], and applies
novel techniques for filtering the input and reducing the inter-agent communication load. Copyright
{\textcopyright} 2010, International Foundation for Autonomous Agents and Multiagent Systems
(www.ifaamas.org). All rights reserved.},
author = {Michalak, Tomasz and Sroka, Jacek and Rahwan, Talal and Wooldridge, Michael and
Mcburney, Peter and Jennings, Nicholas},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/aamas2010b.pdf:pdf},
isbn = {978-0-9826571-1-9},
issn = {15582914},
number = {Aamas},
pages = {1007--1014},
title = {{A Distributed Algorithm for Anytime Coalition Structure Generation}},
url = {http://eprints.ecs.soton.ac.uk/18491/},
year = {2010}
}
@inproceedings{Conzon2019,
abstract = {Modern applications in the Smart Building and Industry 4.0 scenarios will be complex
software ecosystems with strict requirements of geographic distribution, heterogeneity, dynamic
evolution, security and privacy protection, highly more challenging than the ones required by the
current environments. Two of the main challenges arising in the current Internet Of Things
scenarios, i.e., the Smart Building one, are, on one side, the requirement of interconnecting several
heterogeneous platforms and smart Things in the same environment and, on the other side, the
need to be able to evolve the complex software ecosystem deployed, reacting automatically and at
runtime to environmental changes, without the human intervention. To address these challenges,
BRAIN-IoT establishes a framework and methodology supporting smart cooperative behaviour in
fully de-centralized, composable and dynamic federations of heterogeneous Internet of Things
platforms. In this way, BRAIN-IoT enables smart autonomous behaviour in Internet of Things
scenarios, involving heterogeneous sensors and actuators autonomously cooperating to execute
complex, dynamic tasks. Furthermore, BRAIN-IoT enables dynamically deploying and orchestrating
distributed applications, allowing the automatic installation and replacement of smart behaviours
reacting to environmental changes and User events. Finally, BRAIN-IoT provides a set of components
that guarantee the security and privacy protection of the data exchanged using the solution. BRAIN-
IoT is a general purpose solution that aims at being adaptable for heterogeneous scenarios, from
Service Robotics to Critical Infrastructure Management. This paper introduces a Smart Building use
case of the solution, which allows highlighting the advantages given by BRAIN-IoT in such scenario.},
author = {Conzon, Davide and Rashid, Mohammad Rifat Ahmmad and Tao, Xu and Soriano, Angel
and Nicholson, Richard and Ferrera, Enrico},
booktitle = {2019 4th International Conference on Computing, Communications and Security, ICCCS
2019},
doi = {10.1109/CCCS.2019.8888136},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/cccs.2019.8888136.pdf:pdf},
isbn = {9781728108759},
pages = {1--8},
publisher = {IEEE},
title = {{BRAIN-IoT: Model-Based Framework for Dependable Sensing and Actuation in Intelligent
Decentralized IoT Systems}},
year = {2019}
}
@book{Deshmukh2019,
abstract = {Modern cyber-physical systems (CPS) are often developed in a model-based development
(MBD) paradigm. The MBD paradigm involves the construction of different kinds of models: (1) a
plant model that encapsulates the physical components of the system (e.g., mechanical, electrical,
chemical components) using representations based on differential and algebraic equations, (2) a
controller model that encapsulates the embedded software components of the system, and (3) an
environment model that encapsulates physical assumptions on the external environment of the CPS
application. In order to reason about the correctness of CPS applications, we typically pose the
following question: For all possible environment scenarios, does the closed-loop system consisting of
the plant and the controller exhibit the desired behavior? Typically, the desired behavior is
expressed in terms of properties that specify unsafe behaviors of the closed-loop system. Often,
such behaviors are expressed using variants of real-time temporal logics. In this chapter, we will
examine formal methods based on bounded-time reachability analysis, simulation-guided
reachability analysis, deductive techniques based on safety invariants, and formal, requirement-
driven testing techniques. We will review key results in the literature, and discuss the scalability and
applicability of such systems to various academic and industrial contexts. We conclude this chapter
by discussing the challenge to formal verification and testing techniques posed by newer CPS
applications that use AI-based software components.},
doi = {10.1007/978-3-030-13050-3_4},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030130503},
pages = {69--105},
year = {2019}
}
@article{Ciancia2014,
abstract = {In this paper we present the use of a novel spatial model-checker to detect problems in
the data which an adaptive system gathers in order to inform future action. We categorise received
data as being plausible, implausible, possible or problematic. Data correctness is essential to ensure
correct functionality in systems which adapt in response to data and our categorisation influences
the degree of caution which should be used in acting in response to this received data. We illustrate
the theory with a concrete example of detecting errors in vehicle location data for buses in the city
of Edinburgh. Vehicle location data is visualised symbolically on a street map, and categories of
problems identified by the spatial model-checker are rendered by repainting the symbols for
vehicles in different colours.},
author = {Ciancia, Vincenzo and Gilmore, Stephen and Latella, Diego and Loreti, Michele and
Massink, Mieke},
doi = {10.1109/SASOW.2014.16},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/FoCAS-at-SASO-MC.pdf:pdf},
isbn = {9781479963782},
journal = {Proceedings - 2014 IEEE 8th International Conference on Self-Adaptive and Self-Organizing
Systems Workshops, SASOW 2014},
pages = {32--37},
title = {{Data verification for collective adaptive systems: Spatial model-checking of vehicle location
Data}},
year = {2014}
}
@article{Vala2013,
issn = {2278-1323},
number = {2},
pages = {387--389},
volume = {2},
year = {2013}
}
@article{Bures2019,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/bures{\_}formal{\_}report{\
_}2019.pdf:pdf},
pages = {1--3},
year = {2019}
}
@article{Thakkar2017,
number = {7},
pages = {487--493},
title = {{Detection of Glaucoma from Retinal Fundus Images by analysing ISNT Measurement and
features of Optic Cup and Blood Vessels}},
volume = {4},
year = {2017}
}
@article{Alrawashdeh2014,
abstract = {This study is concerned with the developing criteria for grading the semantics of UML
models. This is achieved by going through literature and studying the current approaches for grading
the semantics of UML diagrams. This paper concerns wit the ability of grading the semantics of UML
models in the logic of formal methods, where model checker or theorem provers can be run on
these graphical diagrams. For this reason we integrate a transformation tool called Hugo/RT into our
tool MUML that can help to map the model specifications and properties into Promela the C-like
input programming language for Spin model checker. The last area is concerns the integration of the
graphic editors selection and the grading criteria designs with the functions of a CBA system. The
proposed solution for this problem is to make provisions in the design and implementation of the
CourseMaster CBA system as it has been used successfully in academia computer programming
course work assessment. The result of this work is an enhanced environment for teaching and
grading UML behaviour using the means of formal techniques. Evaluation results on diagram-based
assessment do its expectations compared to some human evaluator of the students' assignments,
whereas circuit design and software design, show that the automation of diagrams assessment can
be as useful and valuable as that for programs.},
author = {Alrawashdeh, Hazim and Idris, Sufian and Zin, Abdullah Mohd},
doi = {10.15242/iie.e0114567},
file =
{:C$\backslash$:/Users/Asus/Downloads/Documents/1c10c0b83b4956e52c8a0c64685860d7df72.pd
f:pdf},
title = {{Using Model Checking Approach for Grading the Semantics of UML Models}},
year = {2014}
}
@article{Specht2007,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/EJ840794.pdf:pdf},
pages = {1--13},
title = {{Modeling Adaptive Educational Methods with IMS Learning Design 1 Introduction}},
year = {2007}
}
@article{Dooley1997a,
abstract = {The study of complex adaptive systems has yielded great insight into how complex,
organic-like structures can evolve order and purpose over time. Business organizations, typified by
semi-autonomous organizational members interacting at many levels of cognition and action, can be
portrayed by the generic constructs and driving mechanisms of complex adaptive systems theory.
The purpose of this paper is to forge a unified description of complex adaptive systems from several
sources, and then investigate the issue of change in a business organization via the framework of
complex adaptive systems. The theory of complex adaptive systems uses components from three
paradigms of management thought: systems theory, population ecology, and information
processing. Specific propositions regarding the nature of dynamical change will be developed, driven
by the complex adaptive systems model. Supporting evidence for these propositions is then sought
within the existing management theory literature. In doing so, the complex adaptive systems
approach to understanding organization change will be better grounded in domain-specific theory,
and new insights and research areas will come to light.},
doi = {10.1023/A:1022375910940},
issn = {1090-0578},
number = {1},
pages = {69--97},
volume = {1},
year = {1997}
}
@article{Anagnostopoulos2018,
abstract = {{\textcopyright} The Author(s) 2018. The population of the Earth is moving towards urban
areas forming smart cities (SCs). Waste management is a component of SCs. We consider a SC which
contains a distribution of waste bins and a distribution of waste trucks located in the SC sectors. Bins
and trucks are enabled with Internet of Things (IoT) sensors and actuators. Prior approaches focus
mainly on the dynamic scheduling and routing issues emerging from IoT-enabled waste
management. However, less research has been done in the area of the stochastic reassignment
process during the four seasons of the year over a period of two years. In this paper we aim to
stochastically reassign trucks to collect waste from bins through time. We treat this problem with a
multi-agent system for stochastic analyses.},
author = {Anagnostopoulos, Theodoros and Zaslavsky, Arkady and Sosunova, Inna and Fedchenkov,
Petr and Medvedev, Alexey and Ntalianis, Klimis and Skourlas, Christos and Rybin, Andrei and
Khoruznikov, Sergei},
doi = {10.1177/0734242X18783843},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}2.pdf:pdf},
issn = {10963669},
number = {11},
pages = {1113--1121},
title = {{A stochastic multi-agent system for Internet of Things-enabled waste management in smart
cities}},
volume = {36},
year = {2018}
}
@article{Wilson2017,
number = {September},
pages = {3--6},
title = {{on the Suitability of Evolutionary Computing To Developing Tools for Intelligent Music
Production}},
year = {2017}
}
@article{Esfahani2013,
year = {2013}
}
@article{Anderson2014,
doi = {10.1109/SASOW.2014.39},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/anderson2014.pdf:pdf},
isbn = {9781479963782},
journal = {2014 IEEE Eighth International Conference on Self-Adaptive and Self-Organizing Systems
Workshops},
pages = {102--107},
title = {{Reflection , collectives and adaptation : the role of models in the design of Collective
Adaptive Systems}},
year = {2014}
}
@article{Zhang2020,
abstract = {This article proposes an evolutionary multiagent framework of the co-operative co-
evolutionary multiobjective model (CCMO-EMAS), specifically for equipment layout optimization in
engineering. In this framework, each agent is set in a multiobjective cooperative co-evolutionary
mode along with the algorithms and corresponding settings. In each iteration, agents are executed in
turn, and each agent optimizes a subpopulation from system decomposition. Additionally, the
collaboration mechanism is addressed to build complete solutions and evaluate individuals in the co-
operative co-evolutionary algorithm. Each subpopulation is optimized once, and the corresponding
agent is evaluated based on the improvement of the system memory. Moreover, the agent team is
also evolved through an elite genetic algorithm. Finally, the proposed CCMO-EMAS framework is
verified in a multimodule satellite equipment layout problem.},
author = {Zhang, Zihui and Han, Qiaomei and Li, Yanqiang and Wang, Yong and Shi, Yanjun},
doi = {10.1155/2020/9147649},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2020-2F9147649.pdf:pdf},
issn = {15635147},
volume = {2020},
year = {2020}
}
@article{Kwiatkowska2013,
pages = {5--22},
year = {2013}
}
@article{Reed,
abstract = {We demonstrate the use of Ant Colony System (ACS) to solve the capacitated vehicle
routing problem associated with collection of recycling waste from households, treated as nodes in a
spatial network. For networks where the nodes are concentrated in separate clusters, the use of k-
means clustering can greatly improve the efficiency of the solution. The ACS algorithm is extended to
model the use of multi-compartment vehicles with kerbside sorting of waste into separate
compartments for glass, paper, etc. The algorithm produces high-quality solutions for two-
compartment test problems.},
title = {{An ant colony algorithm for the multi-compartment vehicle routing problem}},
url = {http://opus.bath.ac.uk/37663/1/reed{\_}asoc{\_}a.pdf}
}
@article{Suna2020,
abstract = {Competitive Self-Play (CSP) based Multi-Agent Reinforcement Learning (MARL) has
shown phenomenal breakthroughs recently. Strong AIs are achieved for several benchmarks,
including Dota 2, Glory of Kings, Quake III, StarCraft II, to name a few. Despite the success, the MARL
training is extremely data thirsty, requiring typically billions of (if not trillions of) frames be seen
from the environment during training in order for learning a high performance agent. This poses
non-trivial difficulties for researchers or engineers and prevents the application of MARL to a
broader range of real-world problems. To address this issue, in this manuscript we describe a
framework, referred to as TLeague, that aims at large-scale training and implements several main-
stream CSP-MARL algorithms. The training can be deployed in either a single machine or a cluster of
hybrid machines (CPUs and GPUs), where the standard Kubernetes is supported in a cloud native
manner. TLeague achieves a high throughput and a reasonable scale-up when performing
distributed training. Thanks to the modular design, it is also easy to extend for solving other multi-
agent problems or implementing and verifying MARL algorithms. We present experiments over
StarCraft II, ViZDoom and Pommerman to show the efficiency and effectiveness of TLeague. The
code is open-sourced and available at https://github.com/tencent-ailab/tleague{\_}projpage},
archivePrefix = {arXiv},
arxivId = {2011.12895},
author = {Sun, Peng and Xiong, Jiechao and Han, Lei and Sun, Xinghai and Li, Shuxing and Xu, Jiawei
and Fang, Meng and Zhang, Zhengyou},
eprint = {2011.12895},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2011.12895.pdf:pdf},
issn = {23318422},
journal = {arXiv},
pages = {1--21},
title = {{TLeague: A framework for competitive self-play based distributed multi-agent reinforcement
learning}},
year = {2020}
}
@article{Lewis2015,
abstract = {Work on human self-awareness is the basis for a framework to develop computational
systems that can adaptively manage complex dynamic tradeoffs at runtime. An architectural case
study in cloud computing illustrates the framework's potential benefits.},
author = {Lewis, Peter R. and Chandra, Arjun and Faniyi, Funmilade and Glette, Kyrre and Chen, Tao
and Bahsoon, Rami and Torresen, Jim and Yao, Xin},
doi = {10.1109/MC.2015.235},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10227{\_}Chen.pdf:pdf},
issn = {00189162},
journal = {Computer},
number = {8},
pages = {62--70},
title = {{Architectural aspects of self-Aware and self-expressive computing systems: From psychology
to engineering}},
volume = {48},
year = {2015}
}
@article{Ghamisi2015,
doi = {10.1109/LGRS.2014.2337320},
isbn = {1545-598X},
number = {2},
pages = {309--313},
title = {{Feature Selection Based on Hybridization of Genetic Algorithm and Particle Swarm
Optimization}},
url = {http://ieeexplore.ieee.org/ielx7/8859/6881788/06866865.pdf?tp={\&}arnumber=6866865{\
&}isnumber=6881788},
volume = {12},
year = {2015}
}
@article{Autili2019,
author = {Autili, Marco and Inverardi, Paola and Spalazzese, Romina and Tivoli, Massimo and
Mignosi, Filippo},
doi = {10.1016/j.jcss.2019.03.001},
file = {:C$\backslash$:/Users/Asus/Downloads/autili2019.pdf:pdf},
issn = {0022-0000},
pages = {17--40},
url = {https://doi.org/10.1016/j.jcss.2019.03.001},
volume = {104},
year = {2019}
}
@article{Arabali2013,
abstract = {This paper proposes a new strategy to meet the controllable heating, ventilation, and air
conditioning (HVAC) load with a hybrid-renewable generation and energy storage system. Historical
hourly wind speed, solar irradiance, and load data are used to stochastically model the wind
generation, photovoltaic generation, and load. Using fuzzy C-Means (FCM) clustering, these data are
grouped into 10 clusters of days with similar data points to account for seasonal variations. In order
to minimize cost and increase efficiency, we use a GA-based optimization approach together with a
two-point estimate method. Minimizing the cost function guarantees minimum PV and wind
generation installation as well as storage capacity selection to supply the HVAC load. Different
scenarios are examined to evaluate the efficiency of the system with different percentages of load
shifting. The maximum capacity of the storage system and excess energy are calculated as the most
important indices for energy efficiency assessment. The cumulative distribution functions of these
indices are plotted and compared. A smart-grid strategy is developed for matching renewable energy
generation (solar and wind) with the HVAC load.},
author = {Arabali, A. and Ghofrani, M. and Etezadi-Amoli, M. and Fadali, M. S. and Baghzouz, Y.},
doi = {10.1109/TPWRD.2012.2219598},
isbn = {0885-8977},
issn = {08858977},
number = {1},
pages = {162--170},
pmid = {84489329},
volume = {28},
year = {2013}
}
@article{Monostori2016,
author = {Monostori, L and K{\'{a}}d{\'{a}}r, B and Bauernhansl, T and Kondoh, S and Kumara, S and
Reinhart, G and Sauer, O and Schuh, G and Sihn, W},
doi = {10.1016/j.cirp.2016.06.005},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Cyber-physicalsystemsinmanufacturing{\
_}Monostori{\_}et{\_}al{\_}2016{\_}TEXT.pdf:pdf},
number = {August},
pages = {621--641},
volume = {65},
year = {2016}
}
@article{Gerasimou2018,
abstract = {An increasingly used method for the engineering of software systems with strict quality-
of-service (QoS) requirements involves the synthesis and verification of probabilistic models for
many alternative architectures and instantiations of system parameters. Using manual trial-and-
error or simple heuristics for this task often produces suboptimal models, while the exhaustive
synthesis of all possible models is typically intractable. The EvoChecker search-based software
engineering approach presented in our paper addresses these limitations by employing evolutionary
algorithms to automate the model synthesis process and to significantly improve its outcome.
EvoChecker can be used to synthesise the Pareto-optimal set of probabilistic models associated with
the QoS requirements of a system under design, and to support the selection of a suitable system
architecture and configuration. EvoChecker can also be used at runtime, to drive the efficient
reconfiguration of a self-adaptive software system. We evaluate EvoChecker on several variants of
three systems from different application domains, and show its effectiveness and applicability.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/gerasimou2018.pdf:pdf},
issn = {15737535},
number = {4},
pages = {785--831},
url = {https://doi.org/10.1007/s10515-018-0235-8},
volume = {25},
year = {2018}
}
@article{FernandezMaimo2018,
abstract = {The upcoming fifth-generation (5G) mobile technology, which includes advanced
communication features, is posing new challenges on cybersecurity defense systems. Although
innovative approaches have evolved in the last few years, 5G will make existing intrusion detection
and defense procedures become obsolete, in case they are not adapted accordingly. In this sense,
this paper proposes a novel 5G-oriented cyberdefense architecture to identify cyberthreats in 5G
mobile networks efficient and quickly enough. For this, our architecture uses deep learning
techniques to analyze network traffic by extracting features from network flows. Moreover, our
proposal allows adapting, automatically, the configuration of the cyberdefense architecture in order
to manage traffic fluctuation, aiming both to optimize the computing resources needed in each
particular moment and to fine tune the behavior and the performance of analysis and detection
processes. Experiments using a well-known botnet data set depict how a neural network model
reaches a sufficient classification accuracy in our anomaly detection system. Extended experiments
using diverse deep learning solutions analyze and determine their suitability and performance for
different network traffic loads. The experimental results show how our architecture can self-adapt
the anomaly detection system based on the volume of network flows gathered from 5G subscribers'
user equipments in real-time and optimizing the resource consumption.},
author = {{Fernandez Maimo}, Lorenzo and {Perales Gomez}, Angel Luis and {Garcia Clemente}, Felix
J. and {Gil Perez}, Manuel and {Martinez Perez}, Gregorio},
doi = {10.1109/ACCESS.2018.2803446},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/fernandezmaimo2018.pdf:pdf},
issn = {21693536},
number = {c},
pages = {7700--7712},
title = {{A Self-Adaptive Deep Learning-Based System for Anomaly Detection in 5G Networks}},
volume = {6},
year = {2018}
}
@inproceedings{Sanderson2016,
author = {Sanderson, David and Antzoulatos, Nikolas and Chaplin, Jack C and Pitt, Jeremy and
German, Carl and Norbury, Alan and Kelly, Emma and Ratchev, Svetan and Sanderson, Email David
and Antzoulatos, Nikolas and Chaplin, Jack and Kelly, Emma and Ratchev, Svetan},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Advanced{\_}Manufacturing{\_}an{\
_}Industrial{\_}App.pdf:pdf},
year = {2016}
}
@article{Ferry2014,
abstract = {Scalability and elasticity are key capabilities to tackle the variable workload of a service.
Cloud elasticity offers opportunities to manage dynamically the underlying resources of a service and
improve its scalability. However, managing scalability of cloud-based systems may lead to a
management overhead. Self-adaptive systems are a well-known approach to tame this complexity.
In this position paper, we propose an approach for the continuous design and management of
scalability in multi-cloud systems. Our approach is based on a three-layer architecture and relies on
two existing frameworks, namely SCALEDL and CLOUDMF. Copyright {\textcopyright} 2014
SCITEPRESS - Science and Technology Publications.},
author = {Ferry, Nicolas and Brataas, Gunnar and Rossini, Alessandro and Chauvel, Franck and
Solberg, Arnor},
doi = {10.5220/0004975307460751},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/49753.pdf:pdf},
isbn = {9789897580192},
journal = {CLOSER 2014 - Proceedings of the 4th International Conference on Cloud Computing and
Services Science},
pages = {746--751},
title = {{Towards bridging the gap between scalability and elasticity}},
year = {2014}
}
@article{Bodik2013,
doi = {10.1007/s10009-013-0287-9},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/bodik2013.pdf:pdf},
issn = {14332779},
number = {5-6},
pages = {397--411},
volume = {15},
year = {2013}
}
@article{Chapman2019,
abstract = {Effective stormwater management requires sys- tems that operate safely and deliver
improved environmental outcomes in a cost-effective manner. However, current design practices
typically evaluate performance assuming that a given system starts empty and operates
independently from nearby stormwater infrastructure. There is a conspicuous need for more
realistic design-phase indicators of performance that consider a larger set of initial conditions and
the effects of coupled dynamics. To this end, we apply a control-theoretic method, called
reachability analysis, to produce a more objective measure of system robustness. We seek two
primary contribu- tions in this work. First, we demonstrate how the application of reachability
analysis to a dynamic model of a stormwater network can characterize the set of initial conditions
from which every element in the network can avoid overflowing under a given surface runoff signal
of finite duration. This is, to the authors' knowledge, the first published application of reachability
analysis to stormwater systems. Our second contribution is to offer an interpretation of the
outcomes of the proposed reachability analysis as a measure of system robustness that can provide
useful information when making critical design decisions. We illustrate the effectiveness of this
method in revealing the trade-offs of particular design choices relative to a desired level of
robustness.},
author = {Chapman, Margaret P. and Smith, Kevin M. and Cheng, Victoria and Freyberg, David L. and
Tomlin, Claire J.},
doi = {10.1109/SusTech.2018.8671362},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/chapman2018.pdf:pdf},
isbn = {9781538677919},
number = {1},
pages = {1--8},
year = {2019}
}
@article{B2019,
author = {Hong, Chih-duo and Lin, Anthony W and Majumdar, Rupak},
doi = {10.1007/978-3-030-25540-4},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030255404},
pages = {455--474},
volume = {1},
year = {2019}
}
@article{Gong2015,
abstract = {The increasing complexity of real-world optimization problems raises new challenges to
evolutionary computation. Responding to these challenges, distributed evolutionary computation
has received considerable attention over the past decade. This article provides a comprehensive
survey of the state-of-the-art distributed evolutionary algorithms and models, which have been
classified into two groups according to their task division mechanism. Population-distributed models
are presented with master-slave, island, cellular, hierarchical, and pool architectures, which
parallelize an evolution task at population, individual, or operation levels. Dimension-distributed
models include coevolution and multi-agent models, which focus on dimension reduction. Insights
into the models, such as synchronization, homogeneity, communication, topology, speedup,
advantages and disadvantages are also presented and discussed. The study of these models helps
guide future development of different and/or improved algorithms. Also highlighted are recent
hotspots in this area, including the cloud and MapReduce-based implementations, GPU and CUDA-
based implementations, distributed evolutionary multiobjective optimization, and real-world
applications. Further, a number of future research directions have been discussed, with a conclusion
that the development of distributed evolutionary computation will continue to flourish.},
archivePrefix = {arXiv},
arxivId = {1312.0086},
author = {Gong, Yue Jiao and Chen, Wei Neng and Zhan, Zhi Hui and Zhang, Jun and Li, Yun and
Zhang, Qingfu and Li, Jing Jing},
doi = {10.1016/j.asoc.2015.04.061},
eprint = {1312.0086},
isbn = {1099-0526},
issn = {15684946},
pages = {286--300},
title = {{Distributed evolutionary algorithms and their models: A survey of the state-of-the-art}},
url = {http://dx.doi.org/10.1016/j.asoc.2015.04.061},
volume = {34},
year = {2015}
}
@article{Lacroix2014,
number = {6},
pages = {1135--1159},
volume = {13},
year = {2014}
}
@article{Belardinelli2017,
abstract = {We analyse the verification problem for synchronous, perfect recall multi-Agent systems
with imperfect information against a specification language that includes strategic and epistemic
operators. While the verification problem is unde- cidable, we show that if the agents' actions are
public, then verification is 2EXPTiME-complete. To illustrate the formal framework we consider two
epistemic and strategic puzzles with imperfect information and public actions: The muddy children
puzzle and the classic game of battleships.},
author = {Belardinelli, Francesco and Lomuscio, Alessio and Murano, Aniello and Rubin, Sasha},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/main-ATL-broad.pdf:pdf},
isbn = {9781510855076},
issn = {15582914},
journal = {Proceedings of the International Joint Conference on Autonomous Agents and Multiagent
Systems, AAMAS},
pages = {1268--1276},
title = {{Verification of multi-Agent systems with imperfect information and public actions}},
volume = {2},
year = {2017}
}
@article{Bucchiarone2018,
abstract = {In this paper we address the challenges that impede collective adaptation in smart
mobility systems by proposing a notion of ensembles. Ensembles enable systems with collective
adaptability to be built as emergent aggregations of autonomous and self-adaptive agents.
Adaptation in these systems is triggered by a run-time occurrence, which is known as an issue. The
novel aspect of our approach is, it allows agents affected by an issue in the context of a smart
mobility scenario to adapt collaboratively with minimal impact on their own preferences through an
issue resolution process based on concurrent planning algorithms.},
author = {Bucchiarone, Antonio and Furelos-Blanco, Daniel and Jonsson, Anders and Khandokar,
Fahmida and Mourshed, Monjur},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/p1880.pdf:pdf},
isbn = {9781510868083},
issn = {15582914},
journal = {Proceedings of the International Joint Conference on Autonomous Agents and Multiagent
Systems, AAMAS},
pages = {1880--1882},
title = {{Collective adaptation through concurrent planning: The case of sustainable urban mobility}},
volume = {3},
year = {2018}
}
@inproceedings{Hnetynka2020,
booktitle = {SEAMS '20: Proceedings of the IEEE/ACM 15th International Symposium on Software
Engineering for Adaptive and Self-Managing Systems},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/SEAMS-2020-SmartFarming.pdf:pdf},
isbn = {9781450379625},
pages = {156--162},
title = {{Using Component Ensembles for Modeling Autonomic Component Collaboration in Smart
Farming}},
year = {2020}
}
@article{Mallu2015,
abstract = {Virtualization is a useful tool for administrators of data centers and clusters. Migration
process of VMs mitigates the overloaded condition of data centers and provides the uninterrupted
services. The main objective of migration process is to achieve less down time and migration time.
Various migration techniques like pre-copy, post-copy and suspend-copy are available to perform
live migration of VMs. Pre-copy migration method has optimum performance compare to the other
techniques. A great deal of research and study on pre-copy methods has been proposed by
researchers. This paper gives a comprehensive survey investigation of different pre-copy live
migration methods based on their working principles along with pros and cons.},
author = {Mallu, Lalithabhinaya and Ezhilarasie, R.},
doi = {10.17485/ijst/2015/v8i},
isbn = {1130830004},
issn = {09745645},
number = {September},
pages = {326--332},
volume = {8},
year = {2015}
}
@article{Jarrar2020,
abstract = {Complex adaptive systems provide a significant number of concepts such as reaction,
interaction, adaptation, and evolution. In general, these concepts are modelled employing different
techniques which give an inexplicit vision on the system. Therefore, all concepts must be carefully
modelled using the same approach to avoid contradiction and guarantee system homogeneity and
correctness. However, developing a computing system that includes all these concepts using the
same approach is not an easy task and requires a perfect understanding of the system's behaviour.
In this paper, we contribute as stepwise towards proposing an approach to model the most
important concepts of complex adaptive systems while ensuring homogeneity and the correctness of
models. For this aim, we present five standard agent-based models formalizing agent properties,
reaction, interaction, adaptation, and evolution. These models are adapted to all cases of complex
adaptive systems since they include an abstract description of these concepts. To implement our
approach formally, we choose the Event-B method due to the strong assurance of bugs' absence
that it guarantees. Besides, it supports horizontal and vertical refinement which facilitates the
specification process. Furthermore, the approach of this paper addresses the very abstract level of
modelling which expand the use of this approach to other formal methods and tools.[Figure not
available: see fulltext.]},
author = {Jarrar, Abdessamad and {Ait Wakrime}, Abderrahim and Balouki, Youssef},
doi = {10.1186/s40294-020-0069-7},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/jarrar2020.pdf:pdf},
issn = {21943206},
number = {1},
url = {https://doi.org/10.1186/s40294-020-0069-7},
volume = {8},
year = {2020}
}
@article{Allen2012,
abstract = {AS BIG DATA emerges as a force in science,2,3 so, too, do new, onerous tasks for
researchers. Data from specialized instrumentation, numerical simulations, and downstream
manipulations must be collected, indexed, archived, shared, replicated, and analyzed. These tasks
are not new, but the complexities involved},
doi = {10.1145/2076450},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2076450.2076472.pdf:pdf},
number = {2},
pages = {81--88},
url = {http://dl.acm.org/citation.cfm?id=2076468},
volume = {55},
year = {2012}
}
@article{Aschoff2012,
abstract = {Adaptation of service compositions is considered a major research challenge for service-
based systems. In this paper we describe a proactive approach to support adaptation of service
compositions triggered by different types of problems. The approach allows for changes in the
composition workflow by replacing a service operation, or a group of operations, by another service
operation or a group of dynamically composed operations. The adaptation process is supported by
the use of QoS prediction techniques, analysis of dependencies between service operations, and use
of groups of service operations in a composition flow instead of isolated operations. A prototype
tool has been implemented to illustrate and evaluate the framework. We also present results of
experiments that we have conducted to evaluate the work. {\textcopyright} 2012 IEEE.},
author = {Aschoff, Rafael R. and Zisman, Andrea},
doi = {10.1109/SEAMS.2012.6224385},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/aschoff2012.pdf:pdf},
isbn = {9781467317870},
issn = {21572305},
journal = {ICSE Workshop on Software Engineering for Adaptive and Self-Managing Systems},
pages = {1--10},
year = {2012}
}
@article{WaseemKhan2014,
abstract = {This paper presents a literature review of basic image segmentation techniques from last
five years. Recent research in each of image segmentation technique is presented in this paper.
Index},
doi = {10.7763/IJFCC.2014.V3.274},
issn = {20103751},
number = {2},
pages = {89--93},
url = {http://www.ijfcc.org/index.php?m=content{\&}c=index{\&}a=show{\&}catid=46{\&}id=557},
volume = {3},
year = {2014}
}
@article{Baier2008a,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Principles{\_}of{\_}Model{\
_}Checking.pdf:pdf},
number = {January},
pages = {38},
year = {2008}
}
@inproceedings{Lon2012,
booktitle = {2012 IEEE Sixth International Conference on Self-Adaptive and Self-Organizing Systems},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/34539062.pdf:pdf},
pages = {1--2},
title = {{RinSim : A Simulator for Collective Adaptive Systems in Transportation and Logistics}},
year = {2012}
}
@article{Jain2014,
abstract = {The use of M-estimators in generalized linear regression models in high dimensional
settings requires risk minimization with hard $L_0$ constraints. Of the known methods, the class of
projected gradient descent (also known as iterative hard thresholding (IHT)) methods is known to offer
the fastest and most scalable solutions. However, the current state-of-the-art is only able to analyze
these methods in extremely restrictive settings which do not hold in high dimensional statistical
models. In this work we bridge this gap by providing the first analysis for IHT-style methods in the
high dimensional statistical setting. Our bounds are tight and match known minimax lower bounds.
Our results rely on a general analysis framework that enables us to analyze several popular hard
thresholding style algorithms (such as HTP, CoSaMP, SP) in the high dimensional regression setting.
We also extend our analysis to a large family of ``fully corrective methods'' that includes two-stage
and partial hard-thresholding algorithms. We show that our results hold for the problem of sparse
regression, as well as low-rank matrix recovery.},
archivePrefix = {arXiv},
arxivId = {1410.5137v2},
author = {Jain, Prateek and Tewari, Ambuj and Kar, Purushottam},
eprint = {1410.5137v2},
issn = {10495258},
journal = {Advances in Neural Information Processing Systems},
pages = {1--20},
title = {{On Iterative Hard Thresholding Methods for High-dimensional M-Estimation}},
url = {https://arxiv.org/pdf/1410.5137.pdf},
year = {2014}
}
@article{Jose2015,
isbn = {9781479970759},
title = {{A Novel Method for Glaucoma Detection Using Fundus Images}},
year = {2015}
}
@book{Badger2019,
doi = {10.1007/978-3-030-20652-9},
editor = {Badger, Julia M. and Rozier, Kristin Yvonne},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030206512},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {{NASA Formal Methods: 11th International Symposium, NFM 2019}},
volume = {11460},
year = {2019}
}
@article{Oughton2018,
abstract = {National infrastructure systems spanning energy, transport, digital, waste, and water are
well recognised as complex and interdependent. While some policy makers have been keen to adopt
the narrative of complexity, the application of complexity-based methods in public policy decision-
making has been restricted by the lack of innovation in associated methodologies and tools. In this
paper we firstly evaluate the application of complex adaptive systems theory to infrastructure
systems, comparing and contrasting this approach with traditional systems theory. We secondly
identify five key theoretical properties of complex adaptive systems including adaptive agents,
diverse agents, dynamics, irreversibility, and emergence, which are exhibited across three
hierarchical levels ranging from agents, to networks, to systems. With these properties in mind, we
then present a case study on the development of a system-of-systems modelling approach based on
complex adaptive systems theory capable of modelling an emergent national infrastructure system,
driven by agent-level decisions with explicitly modelled interdependencies between energy,
transport, digital, waste, and water. Indeed, the novel contribution of the paper is the articulation of
the case study describing a decade of research which applies complex adaptive systems properties
to the development of a national infrastructure system-of-systems model. This approach has been
used by the UK National Infrastructure Commission to produce a National Infrastructure Assessment
which is capable of coordinating infrastructure policy across a historically fragmented governance
landscape spanning eight government departments. The application will continue to be pertinent
moving forward due to the continuing complexity of interdependent infrastructure systems,
particularly the challenges of increased electrification and the proliferation of the Internet of
Things.},
author = {Oughton, Edward J. and Usher, Will and Tyler, Peter and Hall, Jim W.},
doi = {10.1155/2018/3427826},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/3427826.pdf:pdf},
issn = {1076-2787},
journal = {Complexity},
pages = {1--11},
title = {{Infrastructure as a Complex Adaptive System}},
volume = {2018},
year = {2018}
}
@article{Herrmann2019,
abstract = {Ever-increasing bit flip rates caused by shrinking hardware tiles increase the demand for
resilient systems. In particular, safety- and functionality-critical system parts need to be protected.
Inter-process communication is one such critical part. Applying fault tolerance techniques often
comes with a configuration problem, since real-world systems typically have tunable system
parameters. These need to be configured with respect to certain optimality criterion. The paper
addresses the parameter synthesis problem for inter-process communication protocols that are
affected by bit flips. Tunable parameters are the probability of error detection and the expected
time interval between system refresh. We provide a tool that automatically generates a model of
bit-flip-prone inter-process communication for a given set of processes and their communication
structure. The tool is used to exemplarily generate a model of a space probe. Parametric extensions
of probabilistic model checking are applied to obtain rational functions for the availability of the
space probe and other characteristics. We find a configuration setting that maximizes availability and
investigates side effects for this configuration. The paper also compares exemplarily for the space
probe model the most-standard probabilistic model checking methods (value iteration, interval
iteration, and exact model checking) with respect to their time consumption and accuracy and
reveals complexity concerns arising when evaluating the rational functions.},
author = {Herrmann, Linda and K{\"{u}}ttler, Martin and Stumpf, Tobias and Baier, Christel and
H{\"{a}}rtig, Hermann and Kl{\"{u}}ppelholz, Sascha},
doi = {10.1007/s10009-019-00536-0},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/herrmann2019.pdf:pdf},
issn = {14332787},
journal = {International Journal on Software Tools for Technology Transfer},
number = {6},
pages = {651--666},
url = {https://doi.org/10.1007/s10009-019-00536-0},
volume = {21},
year = {2019}
}
@article{Mahi2015,
abstract = {The Traveling Salesman Problem (TSP) is one of the standard test problems
used in performance analysis of discrete optimization algorithms. The Ant Colony Optimization
(ACO) algorithm appears among heuristic algorithms used for solving discrete optimization
problems. In this study, a new hybrid method is proposed to optimize parameters that affect
performance of the ACO algorithm using Particle Swarm Optimization (PSO). In addition, 3-Opt
heuristic method is added to proposed method in order to improve local solutions. The PSO
algorithm is used for detecting optimum values of parameters $\alpha$ and $\beta$ which are used for city selection
operations in the ACO algorithm and determines significance of inter-city pheromone and distances.
The 3-Opt algorithm is used for the purpose of improving city selection operations, which could not
be improved due to falling in local minimums by the ACO algorithm. The performance of proposed
hybrid method is investigated on ten different benchmark problems taken from literature and it is
compared to the performance of some well-known algorithms. Experimental results show that the
performance of proposed method by using fewer ants than the number of cities for the TSPs is
better than the performance of compared methods in most cases in terms of solution quality and
robustness.},
author = {Mahi, Mostafa and Baykan, {\"{O}}mer Kaan and Kodaz, Halife},
doi = {10.1016/j.asoc.2015.01.068},
issn = {15684946},
journal = {Applied Soft Computing},
pages = {484--490},
title = {{A New Hybrid Method Based on Particle Swarm Optimization, Ant Colony Optimization and 3-Opt Algorithms for Traveling Salesman Problem}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1568494615000940},
volume = {30},
year = {2015}
}
@inproceedings{Kuehlmann2003,
author = {Kuehlmann, Andreas and Krohm, Florian},
booktitle = {Proceedings of the 34th Design Automation Conference (DAC)},
doi = {10.1145/266021.266090},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/kuehlmann1997.pdf:pdf},
isbn = {0897919203},
pages = {263--268},
title = {{Equivalence Checking Using Cuts and Heaps}},
year = {1997}
}
@article{Lomuscio2020,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/aamas20-LP.pdf:pdf},
number = {Aamas},
year = {2020}
}
@article{Schilling2017,
abstract = {Organizations constantly adapt their Information Systems (IS) architecture to reflect
changes in their environment. In general, such adaptations steadily increase the complexity of their
IS architecture, thereby negatively impacting IS efficiency and IS flexibility. Based on a Complex
Adaptive Systems (CAS) perspective, we present a more differentiated analysis of the impact of IS
architecture complexity. We hypothesize the relation between IS architecture complexity on the one
hand, and IS efficiency and IS flexibility on the other hand to be mediated by evolutionary and
revolutionary IS change. Subsequently, we test our hypotheses through a partial least squares (PLS)
approach to structural equation modelling (SEM) based on survey data from 185 respondents. We
find that the direct negative impact of IS architecture complexity on IS efficiency and IS flexibility is
no longer statistically relevant when also considering the mediating effects of revolutionary and
evolutionary IS change.},
author = {Schilling, Raphael David and Beese, Jannis and Haki, Kazem M. and Aier, Stephan and
Winter, Robert},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/RevisitingTheImpactOfInformationSystemsArchitectureComplexity.pdf:pdf},
number = {December},
pages = {1--18},
title = {{Revisiting the Impact of Information Systems Architecture Complexity: A Complex Adaptive
Systems Perspective}},
year = {2017}
}
@article{Behrmann2009,
author = {Behrmann, Gerd and Cougnard, Agn{\`{e}}s and David, Alexandre and Fleury, Emmanuel and
Larsen, Kim Guldstrand and Lime, Didier},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/BCDFLL-nwpt06{\_}2.pdf:pdf},
journal = {Computer Aided Verification, Lecture Notes in Computer Science},
pages = {121--125},
title = {{UPPAAL-Tiga: Time for Playing Games!}},
volume = {4590},
year = {2009}
}
@article{Hsiao2014,
abstract = {Hydrogels composed of assembled colloids is a material class that is currently receiving
much interest and shows great promise for use in biomedical applications. This emerging material
class presents unique properties derived from the combination of nanosized domains in the form of
colloidal particles with a continuous gel network and an interspersed liquid phase. Here we
developed an amphiphilic chitosan-based, thermogelling, shear-reversible colloidal gel system for
improved glaucoma treatment and addressed how preparation procedures and loading with the
anti-glaucoma drug latanoprost and commonly used preservative benzalkonium chloride influenced
the mechanical properties of and drug release from the colloidal gels. The results highlight that
incorporated substances and preparation procedures have effects both on mechanical properties
and drug release, but that the release of drug loaded in the colloidal carriers is mainly limited by
transport out of the carriers, rather than by diffusion within the gel. The developed colloidal chitosan
based gels hold outstanding biomedical potential, as confirmed by the ease of preparation and
administration, low cytotoxicity in MTT assay, excellent biocompatibility and lowering of intraocular
pressure for 40 days in a rabbit glaucoma model. The findings clearly justify further investigations
towards clinical use in the treatment of glaucoma. Furthermore, the use of this shear-reversible
colloidal gel could easily be extended to localized treatment of a number of critical conditions, from
chronic disorders to cancer, potentially resulting in a number of new therapeutics with improved
clinical performance. {\textcopyright} 2014 Published by Elsevier Ltd. on behalf of Acta Materialia Inc.},
author = {Hsiao, Meng Hsuan and Chiou, Shih Hwa and Larsson, Mikael and Hung, Kuo Hsuan and
Wang, Yi Ling and Liu, Catherine Jui Ling and Liu, Dean Mo},
doi = {10.1016/j.actbio.2014.03.016},
issn = {18787568},
journal = {Acta Biomaterialia},
number = {7},
pages = {3188--3196},
pmid = {24681374},
url = {http://dx.doi.org/10.1016/j.actbio.2014.03.016},
volume = {10},
year = {2014}
}
@article{Jamroga2018,
author = {Jamroga, Wojciech},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/LIPIcs-TIME-2018-3.pdf:pdf},
number = {3},
pages = {1--3},
title = {{Model Checking Strategic Ability: Why, What, and Especially: How?}},
year = {2018}
}
@article{Sabatucci2018a,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video.pdf:pdf},
isbn = {978-3-319-59479-8},
url = {http://link.springer.com/10.1007/978-3-319-59480-4},
volume = {76},
year = {2018}
}
@article{Jagtap2020,
abstract = {In this paper, we provide a compositional framework for synthesizing hybrid controllers
for interconnected discrete-time control systems enforcing specifications expressed by co-Buchi
automata. In particular, we first decompose the given specification to simpler reachability tasks
based on automata representing the complements of original co-Buchi automata. Then, we provide
a systematic approach to solve those simpler reachability tasks by computing corresponding control
barrier functions. We show that such control barrier functions can be constructed compositionally by
assuming some small-gain type conditions and composing so-called local control barrier functions
computed for subsystems. We provide two systematic techniques to search for local control barrier
functions for subsystems based on the sum-of-squares optimization program and counter-example
guided inductive synthesis approach. Finally, we illustrate the effectiveness of our results through
two large-scale case studies.},
archivePrefix = {arXiv},
arxivId = {2002.00257},
author = {Jagtap, Pushpak and Swikir, Abdalla and Zamani, Majid},
eprint = {2002.00257},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2002.00257.pdf:pdf},
number = {804639},
title = {{Compositional Construction of Control Barrier Functions for Interconnected Control Systems}},
url = {http://arxiv.org/abs/2002.00257},
year = {2020}
}
@article{Garway-Heath2015,
abstract = {Background Treatments for open-angle glaucoma aim to prevent vision loss through
lowering of intraocular pressure, but to our knowledge no placebo-controlled trials have assessed
visual function preservation, and the observation periods of previous (unmasked) trials have typically
been at least 5 years. We assessed vision preservation in patients given latanoprost compared with
those given placebo. Methods In this randomised, triple-masked, placebo-controlled trial, we
enrolled patients with newly diagnosed open-angle glaucoma at ten UK centres (tertiary referral
centres, teaching hospitals, and district general hospitals). Eligible patients were randomly allocated
(1:1) with a website-generated randomisation schedule, stratified by centre and with a permuted
block design, to receive either latanoprost 0{\textperiodcentered}005{\%} (intervention group) or
placebo (control group) eye drops. Drops were administered from identical bottles, once a day, to
both eyes. The primary outcome was time to visual field deterioration within 24 months. Analyses
were done in all individuals with follow-up data. The Data and Safety Monitoring Committee (DSMC)
recommended stopping the trial on Jan 6, 2011 (last patient visit July, 2011), after an interim
analysis, and suggested a change in primary outcome from the difference in proportions of patients
with incident progression between groups to time to visual field deterioration within 24 months.
This trial is registered, number ISRCTN96423140. Findings We enrolled 516 individuals between Dec
1, 2006, and March 16, 2010. Baseline mean intraocular pressure was 19{\textperiodcentered}6 mm
Hg (SD 4{\textperiodcentered}6) in 258 patients in the latanoprost group and
20{\textperiodcentered}1 mm Hg (4{\textperiodcentered}8) in 258 controls. At 24 months, mean
reduction in intraocular pressure was 3{\textperiodcentered}8 mm Hg (4{\textperiodcentered}0) in
231 patients assessed in the latanoprost group and 0{\textperiodcentered}9 mm Hg
(3{\textperiodcentered}8) in 230 patients assessed in the placebo group. Visual field preservation was
significantly longer in the latanoprost group than in the placebo group: adjusted hazard ratio (HR)
0{\textperiodcentered}44 (95{\%} CI 0{\textperiodcentered}28-0{\textperiodcentered}69;
p=0{\textperiodcentered}0003). We noted 18 serious adverse events, none attributable to the study drug.
Interpretation This is the first randomised placebo-controlled trial to show preservation of the visual
field with an intraocular-pressure-lowering drug in patients with open-angle glaucoma. The study
design enabled significant differences in vision to be assessed in a relatively short observation
period. Funding Pfizer, UK National Institute for Health Research Biomedical Research Centre.},
author = {Garway-Heath, David F. and Crabb, David P. and Bunce, Catey and Lascaratos, Gerassimos
and Amalfitano, Francesca and Anand, Nitin and Azuara-Blanco, Augusto and Bourne, Rupert R. and
Broadway, David C. and Cunliffe, Ian A. and Diamond, Jeremy P. and Fraser, Scott G. and Ho, Tuan A.
and Martin, Keith R. and McNaught, Andrew I. and Negi, Anil and Patel, Krishna and Russell, Richard
A. and Shah, Ameet and Spry, Paul G. and Suzuki, Katsuyoshi and White, Edward T. and Wormald,
Richard P. and Xing, Wen and Zeyen, Thierry G.},
doi = {10.1016/S0140-6736(14)62111-5},
issn = {1474547X},
journal = {The Lancet},
number = {9975},
pages = {1295--1304},
pmid = {25533656},
title = {{Latanoprost for Open-Angle Glaucoma ({UKGTS}): A Randomised, Triple-Masked, Placebo-Controlled Trial}},
volume = {385},
year = {2015}
}
@article{Abdelaziz2020,
abstract = {Automated theorem proving in first-order logic is an active research area which is
successfully supported by machine learning. While there have been various proposals for encoding
logical formulas into numerical vectors -- from simple strings to more involved graph-based
embeddings -- little is known about how these different encodings compare. In this paper, we study
and experimentally compare pattern-based embeddings that are applied in current systems with
popular graph-based encodings, most of which have not been considered in the theorem proving
context before. Our experiments show that the advantages of simpler encoding schemes in terms of
runtime are outdone by more complex graph-based embeddings, which yield more efficient search
strategies and simpler proofs. To support this, we present a detailed analysis across several
dimensions of theorem prover performance beyond just proof completion rate, thus providing
empirical evidence to help guide future research on neural-guided theorem proving towards the
most promising directions.},
archivePrefix = {arXiv},
arxivId = {2002.00423},
author = {Abdelaziz, Ibrahim and Thost, Veronika and Crouse, Maxwell and Fokoue, Achille},
eprint = {2002.00423},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2002.00423.pdf:pdf},
title = {{An Experimental Study of Formula Embeddings for Automated Theorem Proving in First-Order Logic}},
url = {http://arxiv.org/abs/2002.00423},
year = {2020}
}
@article{Komuravelli2014,
abstract = {We present an SMT-based symbolic model checking algorithm for safety verification of
recursive programs. The algorithm is modular and analyzes procedures individually. Unlike other
SMT-based approaches, it maintains both over- and under-approximations of procedure summaries.
Under-approximations are used to analyze procedure calls without inlining. Over-approximations are
used to block infeasible counterexamples and detect convergence to a proof. We show that for
programs and properties over a decidable theory, the algorithm is guaranteed to find a
counterexample, if one exists. However, efficiency depends on an oracle for quantifier elimination
(QE). For Boolean Programs, the algorithm is a polynomial decision procedure, matching the worst-
case bounds of the best BDD-based algorithms. For Linear Arithmetic (integers and rationals), we
give an efficient instantiation of the algorithm by applying QE lazily. We use existing interpolation
techniques to over-approximate QE and introduce Model Based Projection to under-approximate
QE. Empirical evaluation on SV-COMP benchmarks shows that our algorithm improves significantly
on the state-of-the-art.},
author = {Komuravelli, Anvesh and Gurfinkel, Arie and Chaki, Sagar},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Komuravelli2014{\_}Chapter{\_}SMT-
BasedModelCheckingForRecur.pdf:pdf},
journal = {Computer Aided Verification, Lecture Notes in Computer Science},
pages = {17--34},
title = {{SMT-Based Model Checking for Recursive Programs}},
volume = {8559},
year = {2014}
}
@article{Belmonte2016,
author = {Belmonte, Gina and Ciancia, Vincenzo and Latella, Diego and Massink, Mieke},
doi = {10.4204/EPTCS.217.10},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1607.02235.pdf:pdf},
issn = {20752180},
journal = {Electronic Proceedings in Theoretical Computer Science},
pages = {81--92},
title = {{From Collective Adaptive Systems to Human Centric Computation and Back: Spatial Model
Checking for Medical Imaging}},
volume = {217},
year = {2016}
}
@article{Mendonca2019,
abstract = {A self-adaptive system can dynamically monitor and adapt its behavior to preserve or
enhance its quality attributes under uncertain operating conditions. This article identifies key
challenges for the development of microservice applications as self-adaptive systems, using a cloud-
based intelligent video surveillance application as a motivating example. It also suggests potential
new directions for addressing most of the identified challenges by leveraging existing microservice
practices and technologies.},
archivePrefix = {arXiv},
arxivId = {1910.07660},
author = {Mendonca, Nabor C. and Jamshidi, Pooyan and Garlan, David and Pahl, Claus},
doi = {10.1109/MS.2019.2955937},
eprint = {1910.07660},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/mendonca2019{\_}2.pdf:pdf},
issn = {19374194},
journal = {IEEE Software},
pages = {1--8},
title = {{Developing Self-Adaptive Microservice Systems: Challenges and Directions}},
year = {2019}
}
@article{Filieri2011,
author = {Filieri, Antonio and Ghezzi, Carlo and Leva, Alberto and Maggio, Martina},
doi = {10.1109/ASE.2011.6100064},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/filieri2011.pdf:pdf},
isbn = {9781457716393},
pages = {283--292},
title = {{Self-Adaptive Software Meets Control Theory: A Preliminary Approach Supporting Reliability
Requirements}},
url = {http://dx.doi.org/10.1109/ASE.2011.6100064},
year = {2011}
}
@article{DeNicola2015,
abstract = {SCEL (Service Component Ensemble Language) is a new language specifically designed to
rigorously model and program autonomic components and their interaction, while supporting
formal reasoning on their behaviors. SCEL brings together various programming abstractions that
allow one to directly represent aggregations, behaviors and knowledge according to specific policies.
It also naturally supports programming interaction, self-awareness, context-awareness, and
adaptation. The solid semantic grounds of the language is exploited for developing logics, tools and
methodologies for formal reasoning on system behavior to establish qualitative and quantitative
properties of both the individual components and the overall systems.},
author = {{De Nicola}, Rocco and Latella, Diego and Lafuente, Alberto Lluch and Loreti, Michele and
Margheri, Andrea and Massink, Mieke and Morichetta, Andrea and Pugliese, Rosario and Tiezzi,
Francesco and Vandin, Andrea},
doi = {10.1007/978-3-319-16310-9_1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/denicola2015.pdf:pdf},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {3--71},
title = {{The {SCEL} Language: Design, Implementation, Verification}},
volume = {8998},
year = {2015}
}
@article{Winterer2020,
author = {Winterer, Leonore and Junges, Sebastian and Wimmer, Ralf and Jansen, Nils and Topcu,
Ufuk and Katoen, Joost-Pieter and Becker, Bernd},
doi = {10.1109/tac.2020.2990140},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {0018-9286},
journal = {IEEE Transactions on Automatic Control},
title = {{Strategy Synthesis for POMDPs in Robot Planning via Game-Based Abstractions}},
year = {2020}
}
@article{Mirjalili2014,
abstract = {The PSOGSA is a novel hybrid optimization algorithm, combining strengths of both
particle swarm optimization (PSO) and gravitational search algorithm (GSA). It has been proven that
this algorithm outperforms both PSO and GSA in terms of improved exploration and exploitation.
The original version of this algorithm is well suited for problems with continuous search space. Some
problems, however, have binary parameters. This paper proposes a binary version of hybrid PSOGSA
called BPSOGSA to solve these kinds of optimization problems. The paper also considers integration
of adaptive values to further balance exploration and exploitation of BPSOGSA. In order to evaluate
the efficiencies of the proposed binary algorithm, 22 benchmark functions are employed and divided
into three groups: unimodal, multimodal, and composite. The experimental results confirm better
performance of BPSOGSA compared with binary gravitational search algorithm (BGSA), binary
particle swarm optimization (BPSO), and genetic algorithm in terms of avoiding local minima and
convergence rate.},
author = {Mirjalili, Seyedali and Wang, Gai-Ge and Coelho, Leandro dos Santos},
doi = {10.1007/s00521-014-1629-6},
issn = {0941-0643},
journal = {Neural Computing and Applications},
pages = {1423--1435},
title = {{Binary Optimization Using Hybrid Particle Swarm Optimization and Gravitational Search
Algorithm}},
volume = {25},
year = {2014}
}
@book{Hutchison,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/formal-modeling-and-analysis-of-timed-
systems-2009.pdf:pdf},
isbn = {9783642043673},
title = {{Formal Modeling and Analysis of Timed Systems}},
year = {2009}
}
@article{DaSilva2011,
abstract = {The self-adaptation of software systems is a complex process that depends on several
factors that can change during the system operational lifetime. Hence, it is necessary to define
mechanisms for providing a self-adaptive system the capability of generating during run-time the
process that controls its adaptation. This paper presents a framework for the automatic generation
of processes for self-adaptive software systems based on the use of workflows, model-based and
artificial intelligence planning techniques. Our approach can be applied to different application
domains, improves the scalability associated with the generation of adaptation plans, and enables
the usage of different planning techniques. For evaluating the approach, we have developed a
prototype for generating during run-time the workflows that coordinate the architectural
reconfiguration of a web-based application.},
author = {da Silva, Carlos Eduardo and de Lemos, Rog{\'{e}}rio},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/329-396-1-PB.pdf:pdf},
issn = {03505596},
journal = {Informatica},
number = {1},
pages = {3--13},
title = {{A Framework for Automatic Generation of Processes for Self-Adaptive Software Systems}},
volume = {35},
year = {2011}
}
@article{Alihodzic2014,
abstract = {Multilevel image thresholding is a very important image processing technique that is
used as a basis for image segmentation and further higher level processing. However, the required
computational time for exhaustive search grows exponentially with the number of desired
thresholds. Swarm intelligence metaheuristics are well known as successful and efficient
optimization methods for intractable problems. In this paper, we adjusted one of the latest swarm
intelligence algorithms, the bat algorithm, for the multilevel image thresholding problem. The results
of testing on standard benchmark images show that the bat algorithm is comparable with other
state-of-the-art algorithms. We improved standard bat algorithm, where our modifications add
some elements from the differential evolution and from the artificial bee colony algorithm. Our new
proposed improved bat algorithm proved to be better than five other state-of-the-art algorithms,
improving quality of results in all cases and significantly improving convergence speed.},
author = {Alihodzic, Adis and Tuba, Milan},
doi = {10.1155/2014/176718},
issn = {1537744X},
journal = {The Scientific World Journal},
pmid = {25165733},
title = {{Improved Bat Algorithm Applied to Multilevel Image Thresholding}},
volume = {2014},
year = {2014}
}
@article{Ismail2013,
doi = {10.1900/2004.1.18},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/00002707{\_}91920.pdf:pdf},
issn = {16136071},
keywords = {chinese,decorative,mallaca,mosque,motifs},
number = {1},
pages = {1--13},
title = {{Motif Dan Hiasan Cina Dalam Dekorasi Dalaman Masjid: Kajian Terhadap Masjid Tua di
Melaka Pada Awal Abad Ke 18}},
volume = {2},
year = {2013}
}
@article{Fraser2020b,
abstract = {We show how detailed simulation models and abstract Markov models can be developed
collaboratively to generate and implement effective controllers for autonomous agent search and
retrieve missions. We introduce a concrete simulation model of an Unmanned Aerial Vehicle (UAV).
We then show how the probabilistic model checker PRISM is used for optimal strategy synthesis for a
sequence of scenarios relevant to UAVs and potentially other autonomous agent systems. For each
scenario we demonstrate how it can be modelled using PRISM, give model checking statistics and
present the synthesised optimal strategies. We then show how our strategies can be returned to
the controller for the simulation model and provide experimental results to demonstrate the
effectiveness of one such strategy. Finally we explain how our models can be adapted, using
symmetry, for use on larger search areas, and demonstrate the feasibility of this approach.},
author = {Fraser, Douglas and Giaquinta, Ruben and Hoffmann, Ruth and Ireland, Murray and Miller,
Alice and Norman, Gethin},
doi = {10.1007/s00165-020-00508-1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/s00165-020-00508-1.pdf:pdf},
issn = {1433299X},
journal = {Formal Aspects of Computing},
number = {2-3},
pages = {157--186},
title = {{Collaborative Models for Autonomous Systems Controller Synthesis}},
url = {https://doi.org/10.1007/s00165-020-00508-1},
volume = {32},
year = {2020}
}
@article{Sheppard2014,
author = {Sheppard, Adrian and Latham, Shane and Middleton, Jill and Kingston, Andrew and Myers,
Glenn and Varslot, Trond and Fogden, Andrew and Sawkins, Tim and Cruikshank, Ron and Saadatfar,
Mohammad and Francois, Nicolas and Arns, Christoph and Senden, Tim},
doi = {10.1016/j.nimb.2013.08.072},
issn = {0168-583X},
journal = {Nuclear Instruments and Methods in Physics Research Section B: Beam Interactions with
Materials and Atoms},
pages = {49--56},
title = {{Techniques in Helical Scanning, Dynamic Imaging and Image Segmentation for Improved
Quantitative Analysis with X-Ray Micro-CT}},
url = {http://dx.doi.org/10.1016/j.nimb.2013.08.072},
volume = {324},
year = {2014}
}
@article{Cyber-physical2018,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1071389.pdf:pdf}
}
@article{Angelo2019,
author = {D'Angelo, Mirko and Gerasimou, Simos and Ghahremani, Sona and Grohmann, Johannes
and Nunes, Ingrid},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/PID5831287.pdf:pdf},
journal = {14th International Symposium on Software Engineering for Adaptive and Self-Managing
Systems},
title = {{On Learning in Collective Self-Adaptive Systems: State of Practice and a 3D Framework}},
year = {2019}
}
@article{Yogamangalam2013,
abstract = {In day-to-day life, new technologies are emerging in the field of Image processing,
especially in the domain of segmentation. This paper presents a brief outline on some of the most
common segmentation techniques like thresholding, Model based, Edge detection, Clustering etc.,
mentioning its advantages as well as the drawbacks. Some of the techniques are suitable for noisy
images. In that Markov Random Field (MRF) is the strongest method of noise cancellation in images
whereas thresholding is the simplest technique for segmentation.},
author = {Yogamangalam, R. and Karthikeyan, B.},
issn = {09754024},
journal = {International Journal of Engineering and Technology},
number = {1},
pages = {307--313},
title = {{Segmentation Techniques Comparison in Image Processing}},
volume = {5},
year = {2013}
}
@article{Brun2009,
author = {Brun, Yuriy and {Di Marzo Serugendo}, Giovanna and Gacek, Cristina and Giese, Holger
and Shaw, Mary},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/brun2009.pdf:pdf},
pages = {48--70},
title = {{Engineering Self-Adaptive Systems through Feedback Loops}},
year = {2009}
}
@misc{Papers02760079,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/papers{\_}0276{\_}02760079.pdf:pdf},
title = {papers{\_}0276{\_}02760079.pdf}
}
@article{Gardiner2015,
author = {Gardiner, Stuart K. and Swanson, William H. and Goren, Deborah and Mansberger, Steven L. and Demirel, Shaban},
doi = {10.1016/j.ophtha.2014.01.020},
journal = {Ophthalmology},
pages = {1359--1369},
title = {{Assessment of the Reliability of Standard Automated Perimetry in Regions of Glaucomatous Damage}},
volume = {121},
year = {2015}
}
@article{Hillston2016a,
abstract = {For data flow analysis of Java program to be correct and precise, the flows induced by exceptions must be properly analysed.$\backslash$r In our data flow analysis, the implicit control flow for a raised exception is represented explicitly. Exception branches,$\backslash$r exception plateaus, and exception exits for methods and method calls are introduced as additional control flow structures$\backslash$r for analysis of exception handling. These structures are constructed dynamically under control of data flow analysis.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Oct13-2016.pdf:pdf},
isbn = {9783319415789},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
number = {October},
pages = {1--5},
volume = {9609},
year = {2016}
}
@incollection{Vassev2015,
author = {Vassev, Emil and Hinchey, Mike},
booktitle = {Software Engineering for Collective Autonomic Systems: Results of the ASCENS Project},
doi = {10.1007/978-3-319-16310-9},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ToolsEnsDesignRuntime.pdf:pdf},
isbn = {9783319163109},
number = {June},
series = {Lecture Notes in Computer Science},
title = {{Engineering Requirements for Autonomy Features}},
url = {http://link.springer.com/chapter/10.1007{\%}2F978-3-319-16310-9{\_}11},
volume = {8998},
year = {2015}
}
@article{Porta2015,
author = {Porta, Alberto and Baumert, Mathias and Cysarz, Dirk and Wessel, Niels},
doi = {10.1098/rsta.2014.0099},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/rsta.2014.0099.pdf:pdf},
issn = {1364503X},
journal = {Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences},
keywords = {Complexity,Entropy,Multivariate signal processing,Pattern classification,Symbolic dynamics,Transfer entropy},
number = {2034},
pages = {1--6},
volume = {373},
year = {2015}
}
@misc{Autili2018,
author = {Autili, Marco and Inverardi, Paola and Perucci, Alexander and Tivoli, Massimo},
doi = {10.1007/978-3-74183-3-10},
year = {2018}
}
@article{Lahijanian2018,
abstract = {The design of mobile autonomous robots is challenging due to the limited on-board resources such as processing power and energy. A promising approach is to generate intelligent schedules that reduce the resource consumption while maintaining best performance, or more interestingly, to trade off reduced resource consumption for a slightly lower but still acceptable level of performance. In this paper, we provide a framework to aid designers in exploring such resource-performance trade-offs and finding schedules for mobile robots, guided by questions such as "what is the minimum resource budget required to achieve a given level of performance?" The framework is based on a quantitative multi-objective verification technique which, for a collection of possibly conflicting objectives, produces the Pareto front that contains all the optimal trade-offs that are achievable. The designer then selects a specific Pareto point based on the resource constraints and desired performance level, and a correct-by-construction schedule that meets those constraints is automatically generated. We demonstrate the efficacy of this framework on several robotic scenarios in both simulations and experiments with encouraging results.},
author = {Lahijanian, Morteza and Svorenova, Maria and Morye, Akshay A. and Yeomans, Brian and Rao, Dushyant and Posner, Ingmar and Newman, Paul and Kress-Gazit, Hadas and Kwiatkowska, Marta},
doi = {10.1109/LRA.2018.2803814},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/main{\_}RAL{\_}final.pdf:pdf},
issn = {23773766},
journal = {IEEE Robotics and Automation Letters},
number = {3},
pages = {1840--1847},
title = {{Resource-Performance Tradeoff Analysis for Mobile Robots}},
volume = {3},
year = {2018}
}
@article{Takahira2014,
author = {Takahira, Ricardo Y and Laraia, Lilian R and Dias, Frederico A and Yu, Abraham S and Nascimento, Paulo T S and Camargo, Jr., Alceu S},
isbn = {9781890843298},
pages = {2664--2672},
title = {{Scrum and Embedded Software Development for the Automotive Industry}},
year = {2014}
}
@article{Agha2018,
author = {Agha, Gul and Palmskog, Karl},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/A{\_}Survey{\_}of{\_}Statistical{\_}Model{\_}Checking.pdf:pdf},
journal = {ACM Transactions on Modeling and Computer Simulation},
number = {1},
pages = {1--39},
title = {{A Survey of Statistical Model Checking}},
volume = {28},
year = {2018}
}
@article{Svore2016a,
author = {Svore{\v{n}}ov{\'a}, M{\'a}ria and Kwiatkowska, Marta},
doi = {10.1016/j.ejcon.2016.04.009},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
journal = {European Journal of Control},
title = {{Quantitative verification and strategy synthesis for stochastic games}},
year = {2016}
}
@inproceedings{Sigir2014,
doi = {10.1145/2600428.2609482},
internal-note = {incomplete entry: author and title missing; DOI resolves to a SIGIR 2014 paper},
isbn = {9781450322577},
year = {2014}
}
@article{Bruni2012,
author = {Bruni, Roberto and Corradini, Andrea and Gadducci, Fabio and {Lluch Lafuente}, Alberto and Vandin, Andrea},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/bruni2012.pdf:pdf},
pages = {240--254},
title = {{A Conceptual Framework for Adaptation}},
year = {2012}
}
@article{Chatterjee2020,
doi = {10.1109/EIT48999.2020.9208273},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/EIT48999.2020.9208273.pdf:pdf},
isbn = {9781728153179},
issn = {21540373},
pages = {568--576},
volume = {2020-July},
year = {2020}
}
@article{HusandeepKaurandAmandeepKaur2014,
abstract = {Glaucoma is the most common cause of visual impairment and blindness is because of diabetes retinopathy hypertension and glaucoma. Many people suffer from eye diseases in all over the world. Glaucoma is the second leading cause of permanent blindness worldwide. The method proposed for the detection of optic disc and optic cup segmentation using morphological operations. The aim of this paper is to find the cup to disc ratio of glaucoma patient and check the level of disease. If the cup to disc ratio exceeds 0.3 it indicates high glaucoma for the tested patient.},
author = {Kaur, Husandeep and Kaur, Amandeep},
number = {5},
pages = {271--274},
volume = {4},
year = {2014}
}
@article{Mcmillan,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/hmc-bdd18.pdf:pdf},
internal-note = {incomplete entry: author, title and year are missing; recover from the PDF},
}
@article{Konishi2003,
abstract = {We study coalition formation as an ongoing, dynamic process, with payoffs generated as coalitions form, disintegrate, or regroup. A process of coalition formation (PCF) is an equilibrium if a coalitional move to some other state can be "justified" by the expectation of higher future value, compared to inaction. This future value, in turn, is endogenous: it depends on coalitional movements at each node. We study existence of equilibrium PCFs. We connect deterministic equilibrium PCFs with unique absorbing state to the core, and equilibrium PCFs with multiple absorbing states to the largest consistent set. In addition, we study cyclical as well as stochastic equilibrium PCFs. {\textcopyright} 2003 Elsevier Science (USA). All rights reserved.},
author = {Konishi, Hideo and Ray, Debraj},
doi = {10.1016/S0022-0531(03)00004-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/s0022-0531-2803-2900004-8.pdf:pdf},
issn = {00220531},
journal = {Journal of Economic Theory},
number = {1},
pages = {1--41},
title = {{Coalition Formation as a Dynamic Process}},
volume = {110},
year = {2003}
}
@article{Dizdarevic2018,
abstract = {The fast increment in the number of IoT (Internet of Things) devices is accelerating the research on new solutions to make cloud services scalable. In this context, the novel concept of fog computing as well as the combined fog-to-cloud computing paradigm is becoming essential to decentralize the cloud, while bringing the services closer to the end-system. This paper surveys on the application layer communication protocols to fulfil the IoT communication requirements, and their potential for implementation in fog- and cloud-based IoT systems. To this end, the paper first presents a comparative analysis of the main characteristics of IoT communication protocols, including request-reply and publish-subscribe protocols. After that, the paper surveys the protocols that are widely adopted and implemented in each segment of the system (IoT, fog, cloud), and thus opens up the discussion on their interoperability and wider system integration. Finally, the paper reviews the main performance issues, including latency, energy consumption and network throughput. The survey is expected to be useful to system architects and protocol designers when choosing the communication protocols in an integrated IoT-to-fog-to-cloud system architecture.},
archivePrefix = {arXiv},
arxivId = {1804.01747},
author = {Dizdarevic, Jasenka and Carpio, Francisco and Jukan, Admela and Masip-Bruin, Xavi},
doi = {10.1145/3292674},
eprint = {1804.01747},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
journal = {ACM Computing Surveys},
number = {6},
pages = {1--29},
title = {{Survey of Communication Protocols for Internet-of-Things and Related Challenges of Fog and Cloud Computing Integration}},
url = {http://arxiv.org/abs/1804.01747{\%}0Ahttp://dx.doi.org/10.1145/3292674},
volume = {51},
year = {2018}
}
@article{Garlan1993,
abstract = {As the size of software systems increases, the algorithms and data structures of the computation no longer constitute the major design problems. When systems are constructed from many components, the organization of the overall system - the software architecture - presents a new set of design problems. This level of design has been addressed in a number of ways including informal diagrams and descriptive terms, module interconnection languages, templates and frameworks for systems that serve the needs of specific domains, and formal models of component integration mechanisms. In this paper we provide an introduction to the emerging field of software architecture. We begin by considering a number of common architectural styles upon which many systems are currently based and show how different styles can be combined in a single design. Then we present six case studies to illustrate how architectural representations can improve our understanding of complex software systems. Finally, we survey some of the outstanding problems in the field, and consider a few of the promising research directions.},
author = {Garlan, David and Shaw, Mary},
doi = {10.1142/9789812798039_0001},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/garlan1993.pdf:pdf},
journal = {Advances in Software Engineering and Knowledge Engineering},
pages = {1--39},
title = {{An Introduction to Software Architecture}},
year = {1993}
}
@article{Nadel1989,
abstract = {Constraint satisfaction problems are ubiquitous in artificial intelligence and many algorithms have been developed for their solution. This paper provides a unified survey of some of these, in terms of three classes: (i) tree search, (ii) arc consistency (AC), and (iii) hybrid tree search/arc consistency algorithms. It is shown that several important algorithms, when slightly rearranged, are of the latter hybrid form, but with arc consistency components that do not necessarily achieve full arc consistency at the tree nodes. Accordingly, we define several new partial AC procedures, AC1/5, AC1/4, AC1/3, and AC½, analogous to the well‐known full AC algorithms which Mackworth has called AC1, AC2, and AC3. The fractional suffixes on our AC algorithms are roughly proportional to the degree of partial arc consistency they achieve. Unlike traditional versions, our AC algorithms (full and partial) are presented in a parameterized form to allow them to be embedded efficiently at the nodes of a tree search process. Algorithm complexities are compared empirically, using the n‐queens problem and a new version called confused n‐queens. Gaschnig's Backmarking (a tree search algorithm) and Haralick's Forward Checking (a hybrid algorithm) are found to be the most efficient. For the hybrid algorithms, we find that it pays to do little arc consistency processing at the nodes, incurring more nodes, but sufficiently reducing the work per node so as to obtain less work over the whole tree. The unified view taken here suggests several new algorithms. Preliminary results show one of these to be the best algorithm so far. Copyright {\textcopyright} 1989, Wiley Blackwell. All rights reserved},
author = {Nadel, Bernard A.},
doi = {10.1111/j.1467-8640.1989.tb00328.x},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/j.1467-8640.1989.tb00328.x.pdf:pdf},
issn = {14678640},
journal = {Computational Intelligence},
number = {3},
pages = {188--224},
title = {{Constraint Satisfaction Algorithms}},
volume = {5},
year = {1989}
}
@article{OToole2014,
abstract = {Complex Adaptive Systems are systems composed of distributed, decentralized and autonomous agents (software components, systems and people) and exhibit non-deterministic interactions between these agents. These interactions can often lead to the appearance of "emergent" behaviour or properties at the system level. These emergents can be harmful to the system or individual constituents, but are by their nature impossible to predict in advance and must therefore be detected at run-time. The characteristics of these systems mean that detecting emergence at run-time presents a significant challenge, one that cannot be met by existing methods that depend on a centralized controller with a global view of the system state. In this paper we present an important step towards decentralised detection of emergence in Complex Adaptive Systems. Our approach is based on observing the consequence of naturally arising feedback that occurs from the system level (macro) to the component level (micro) when emergent behaviour or properties appear in a system. This feedback results in the appearance of correlations, where none existed before, between the internal variables of individual agents and the properties that an agent detects in its local environment. In a case study of five different multi-agent systems we demonstrate that the number of agents that report these correlations increases as emergence occurs in each system. This provides the constituent agents with sufficient information to collaboratively detect when emergence has occurred at a system level without the need for a centralized, global view of the system.},
author = {O'Toole, Eamonn and Nallur, Vivek and Clarke, Siobh{\'a}n},
doi = {10.1109/SASO.2014.18},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/DetectInCas.pdf:pdf},
issn = {19493681},
number = {December},
pages = {60--69},
title = {{Towards Decentralised Detection of Emergence in Complex Adaptive Systems}},
volume = {2014-Decem},
year = {2014}
}
@article{Jacob2014,
number = {1},
pages = {2536--2543},
title = {{A Method of Segmentation For Glaucoma Screening Using Superpixel Classification}},
volume = {2},
year = {2014}
}
@book{Fernandez1992,
abstract = {Although testing is the most widely used technique to control the quality of software systems, it is a topic that, until relatively recently, has received scant attention from the computer research community. Although some pioneering work was already done a considerable time ago [Cho78,GG83,How78,Mye79], the testing of software systems has never become a mainstream activity of scientific research. The reasons that are given to explain this situation usually include arguments to the effect that testing as a technique is inferior to verification — testing can show only the presence of errors, not their absence — and that we should therefore concentrate on developing theory and tools for the latter. It has also been frequently said that testing is by its very nature a non-formal activity, where formal methods and related tools are at best of little use. The first argument is incorrect in the sense that it gives an incomplete picture of the situation. Testing is inferior to verification if the verification model can be assumed to be correct and if its complexity can be handled correctly by the person and or tool involved in the verification task. If these conditions are not fulfilled, which is frequently the case, then testing is often the only available technique to increase the confidence in the correctness of a system. In this talk we will show that the second argument is flawed as well. It is based on the identification of testing with robustness testing, where it is precisely the objective to find out how the system behaves under unspecified circumstances. This excludes the important activity of conformance testing, which tries to test the extent to which system behaviour conforms to its specification. It is precisely in this area where formal methods and tools can help to derive tests systematically from specifications, which is a great improvement over laborious, error-prone and costly manual test derivation. In our talk we show how the process algebraic testing theory due to De Nicola and Hennessy [DNH84,DeN87], originally conceived out of semantic considerations, may be used to obtain principles for test derivation. We will give an overview of the evolution of these ideas over the past ten years or so, starting with the conformance testing theory of simple synchronously communicating reactive systems [Bri88,Lan90] and leading to realistic systems that involve sophisticated asynchronous message passing mechanisms [Tre96,HT97]. Written accounts can be found in [BHT97,He98]. We discuss how such ideas have been used to obtain modern test derivation tools, such as TVEDA and TGV [Pha94, CGPT96,FJJV96], and the tool set that is currently being developed in the C{\^{o}}te-de-Resyste project [STW96]. The advantage of a test theory that is based on well-established process algebraic theory is that in principle there exists a clear link between testing and verification, which allows the areas to share ideas and algorithms [FJJV96,VT98]. Time allowing, we look at some of the methodological differences and commonalities between model checking techniques and testing, one of the differences being that of state space coverage, and an important commonality that of test property selection. In recent years the research into the use of formal methods and tools for testing reactive systems has seen a considerable growth. An overview of different approaches and school of thought can be found in [BPS98], reporting on the first ever Dagstuhl seminar devoted to testing. The formal treatment of conformance testing based on process algebra and/or concurrency theory is certainly not the only viable approach. An important school of thought is the FSM-testing theory grown out of the seminal work of Chow [Cho78], of which a good overview is given in [LY96]. Another interesting formal approach to testing is based on abstract data type theory [Gau95,BGM91].},
archivePrefix = {arXiv},
arxivId = {1301.4779},
doi = {10.1007/3-540-55179-4_18},
eprint = {1301.4779},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/computer-aided-verification-2019.pdf:pdf},
isbn = {9783540272311},
issn = {0018-9235},
number = {July},
pages = {1--13},
pmid = {4520227},
volume = {575},
year = {1992}
}
@article{Basin2004,
abstract = {Since the early days of programming and automated reasoning, researchers have developed methods for systematically constructing programs from their specifications. Especially the last decade has seen a flurry of activities including the advent of specialized conferences, such as LOPSTR, covering the synthesis of programs in computational logic. In this paper we analyze and compare three state-of-the-art methods for synthesizing recursive programs in computational logic. The three approaches are constructive/deductive synthesis, schema-guided synthesis, and inductive synthesis. Our comparison is carried out in a systematic way where, for each approach, we describe the key ideas and synthesize a common running example. In doing so, we explore the synergies between the approaches, which we believe are necessary in order to achieve progress over the next decade in this field. {\textcopyright} Springer-Verlag 2004.},
author = {Basin, David and Deville, Yves and Flener, Pierre and Hamfelt, Andreas and Nilsson, J{\o}rgen Fischer},
doi = {10.1007/978-3-540-25951-0_2},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/basin2004.pdf:pdf},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {30--65},
title = {{Synthesis of Programs in Computational Logic}},
volume = {3049},
year = {2004}
}
@article{Sekkal2020,
abstract = {The web of things (WoT) uses web technologies to connect embedded objects to each other and to deliver services to stakeholders. The context of these interactions (situation) is a key source of information which can be sometimes uncertain. In this paper, we focus on the development of intelligent web services. The main requirements for intelligent service are to deal with context diversity, semantic context representation and the capacity to reason with uncertain information. From this perspective, we propose a framework for intelligent services to deal with various contexts, to reactively respond to real-time situations and proactively predict future situations. For the semantic representation of context, we use PR-OWL, a probabilistic ontology based on multi-entity Bayesian networks. PR-OWL is flexible enough to represent complex and uncertain contexts. We validate our framework with an intelligent plant watering use case to show its reasoning capabilities.},
author = {Sekkal, Nawel and Benslimane, Sidi Mohamed and Mrissa, Michael and Park, Cheol Young and Boudaa, Boudjemaa},
doi = {10.1504/IJDMMM.2020.105609},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/sekkal2020.pdf:pdf},
issn = {17591171},
journal = {International Journal of Data Mining, Modelling and Management},
number = {1},
pages = {1--27},
title = {{Proactive and reactive context reasoning architecture for smart web services}},
volume = {12},
year = {2020}
}
@article{Unal2013,
abstract = {For the formal verification of security in mobile networks, a requirement is that security policies associated with mobility and location constraints are formally specified and verified. For the formal specification and verification of security policies, formal methods ensure that a given network configuration that includes certain network elements satisfies a given security policy. A process calculus based approach is presented, where ambient calculus is used for formal specification of security policies and ambient logic is used for formal representation of mobility and location constraints. A spatiotemporal model checking algorithm is presented for the model checking of formal specifications in ambient calculus with respect to formulas in ambient logic. The presented algorithm allows spatiotemporal model checking of security policy rules and consists of spatial and temporal model checking algorithms. The spatial model checking algorithm is implemented in the Java language and the temporal model checking algorithm is implemented using the NuSMV model checker. {\textcopyright} T{\"{u}}bi{\textperiodcentered}tak.},
author = {{\"U}nal, Devrim and {\c{C}}a{\u{g}}layan, Mehmet Ufuk},
doi = {10.3906/elk-1105-54},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Devrim-Unal-EECS.pdf:pdf},
issn = {13000632},
journal = {Turkish Journal of Electrical Engineering and Computer Sciences},
number = {1},
pages = {144--173},
title = {{Spatiotemporal model checking of location and mobility related security policy specifications}},
volume = {21},
year = {2013}
}
@article{Furlanetto2014,
abstract = {Purpose To investigate risk factors for disc hemorrhage detection in the Low-Pressure Glaucoma Treatment Study. Design Cohort of a randomized, double-masked, multicenter clinical trial. Methods Low-Pressure Glaucoma Treatment Study patients with at least 16 months of follow-up were included. Exclusion criteria included untreated intraocular pressure (IOP) of more than 21 mm Hg, visual field mean deviation worse than -16 dB, or contraindications to study medications. Patients were randomized to topical treatment with timolol 0.5{\%} or brimonidine 0.2{\%}. Stereophotographs were reviewed independently by 2 masked graders searching for disc hemorrhages. The main outcomes investigated were the detection of disc hemorrhage at any time during follow-up and their recurrence. Ocular and systemic risk factors for disc hemorrhage detection were analyzed using the Cox proportional hazards model and were tested further for independence in a multivariate model. Results Two hundred fifty-three eyes of 127 subjects (mean age, 64.7 ± 10.9 years; women, 58{\%}; European ancestry, 71{\%}) followed up for an average ± standard deviation of 40.6 ± 12 months were included. In the multivariate analysis, history of migraine (hazard ratio [HR], 5.737; P =.012), narrower neuroretinal rim width at baseline (HR, 2.91; P =.048), use of systemic $\beta$-blockers (HR, 5.585; P =.036), low mean systolic blood pressure (HR, 1.06; P =.02), and low mean arterial ocular perfusion pressure during follow-up (HR, 1.172; P =.007) were significant and independent risk factors for disc hemorrhage detection. Treatment randomization was not associated with either the occurrence or recurrence of disc hemorrhages. Conclusions In this cohort of Low-Pressure Glaucoma Treatment Study patients, migraine, baseline narrower neuroretinal rim width, low systolic blood pressure and mean arterial ocular perfusion pressure, and use of systemic $\beta$-blockers were risk factors for disc hemorrhage detection. Randomization assignment did not influence the frequency of disc hemorrhage detection. {\textcopyright} 2014 by elsevier inc. all rights reserved.},
author = {Furlanetto, Rafael L. and {De Moraes}, Carlos Gustavo and Teng, Christopher C. and Liebmann, Jeffrey M. and Greenfield, David S. and Gardiner, Stuart K. and Ritch, Robert and Krupin, Theodore},
doi = {10.1016/j.ajo.2014.02.009},
issn = {18791891},
journal = {American Journal of Ophthalmology},
number = {5},
pages = {945--952.e1},
pmid = {24513094},
title = {{Risk factors for optic disc hemorrhage in the low-pressure glaucoma treatment study}},
url = {http://dx.doi.org/10.1016/j.ajo.2014.02.009},
volume = {157},
year = {2014}
}
@article{MarcoDorigoMauroBirattari2006,
abstract = {Swarm intelligence is a relatively new approach to problem solving that takes inspiration from the social behaviors of insects and of other animals. In particular, ants have inspired a number of methods and techniques among which the most studied and the most successful is the general purpose opti-mization technique known as ant colony optimization. Ant colony optimization (ACO) takes inspiration from the foraging behavior of some ant species. These ants deposit pheromone on the ground in order to mark some favorable path that should be followed by other members of the colony. Ant colony optimization exploits a similar mechanism for solving optimization problems. From the early nineties, when the first ant colony optimiza-tion algorithm was proposed, ACO attracted the attention of more researchers and a relatively large amount of successful applications are now available. Moreover, a substantial corpus of theoretical results is becoming available that provides useful guidelines to researchers and practitioners in further applications of ACO. The goal of this article is to introduce ant colony optimiza-tion and to survey its most notable applications. Section I provides some background information on the foraging be-havior of ants, which is the biological source of inspiration of ant colony optimization. Section II describes ant colony optimization and its main variants. Section III surveys the most notable theoretical results concerning ACO, and Section IV illustrates some of its most successful applications. Section V highlights some current hot research topics, and Section VI provides an overview of some other algorithms that, although not directly related to ACO, are nonetheless inspired by the behavior of ants. Section VII concludes the article.},
author = {Dorigo, Marco and Birattari, Mauro and St{\"u}tzle, Thomas},
doi = {10.1109/CI-M.2006.248054},
isbn = {9782800413266},
issn = {1781-3794},
journal = {IEEE Computational Intelligence Magazine},
number = {4},
pages = {28--39},
title = {{Ant Colony Optimization}},
volume = {1},
year = {2006}
}
@article{Kern1999,
abstract = {In recent years, formal methods have emerged as an alternative approach to ensuring the quality and correctness of hardware designs, overcoming some of the limitations of traditional validation techniques such as simulation and testing. There are two main aspects to the application of formal methods in a design process: the formal framework used to specify desired properties of a design and the verification techniques and tools used to reason about the relationship between a specification and a corresponding implementation. We survey a variety of frameworks and techniques proposed in the literature and applied to actual designs. The specification frameworks we describe include temporal logics, predicate logic, abstraction and refinement, as well as containment between {\&}ohgr;-regular languages. The verification techniques presented include model checking, automata-theoretic techniques, automated theorem proving, and approaches that integrate the above methods. In order to provide insight into the scope and limitations of currently available techniques, we present a selection of case studies where formal methods were applied to industrial-scale designs, such as microprocessors, floating-point hardware, protocols, memory subsystems, and communications hardware.},
author = {Kern, Christoph and Greenstreet, Mark R.},
doi = {10.1145/307988.307989},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/p123-kern.pdf:pdf},
issn = {10844309},
journal = {ACM Transactions on Design Automation of Electronic Systems},
number = {2},
pages = {123--193},
title = {{Formal Verification in Hardware Design: A Survey}},
volume = {4},
year = {1999}
}
@misc{ModelCheckingSoftware2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/model-checking-software-2015.pdf:pdf},
title = {model-checking-software-2015.pdf}
}
@article{VanEijk1998,
author = {van Eijk, C. A. J.},
doi = {10.1109/DATE.1998.655922},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}9.pdf:pdf},
issn = {15301591},
pages = {618--623},
title = {{Sequential Equivalence Checking without State Space Traversal}},
year = {1998}
}
@article{Azadmanesh2014,
author = {Azadmanesh, Mohammad Reza and Eynard, Davide and Hauswirth, Matthias},
isbn = {9781450322959},
keywords = {computer science education,programming for mobile},
pages = {25--28},
year = {2014}
}
@article{Bjørner2007,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/System{\_}Description{\_}Z3{\_}0{\_}1.pdf:pdf},
year = {2007}
}
@inproceedings{Giaquinta2018,
abstract = {We present probabilistic models for autonomous agent search and retrieve missions derived from Simulink models for an Unmanned Aerial Vehicle (UAV) and show how probabilistic model checking and the probabilistic model checker PRISM can be used for optimal controller generation. We introduce a sequence of scenarios relevant to UAVs and other autonomous agents such as underwater and ground vehicles. For each scenario we demonstrate how it can be modelled using the PRISM language, give model checking statistics and present the synthesised optimal controllers. We conclude with a discussion of the limitations when using probabilistic model checking and PRISM in this context and what steps can be taken to overcome them. In addition, we consider how the controllers can be returned to the UAV and adapted for use on larger search areas.},
author = {Giaquinta, Ruben and Hoffmann, Ruth and Ireland, Murray and Miller, Alice and Norman, Gethin},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
doi = {10.1007/978-3-319-77935-5_16},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/giaquinta2018.pdf:pdf},
isbn = {9783319779348},
issn = {16113349},
pages = {220--236},
title = {{Strategy Synthesis for Autonomous Agents Using PRISM}},
url = {http://dx.doi.org/10.1007/978-3-319-77935-5{\_}16},
volume = {10811 LNCS},
year = {2018}
}
@misc{TrianglePseudoCode747,
title = {{747Lec01-TrianglePseudoCodeFromJorgensenText.pdf}}
}
@article{Chen2014,
abstract = {This paper introduces an online and intelligent energy management controller to improve
the fuel economy of a power-split plug-in hybrid electric vehicle (PHEV). Based on analytic analysis
between fuel-rate and battery current at different driveline power and vehicle speed, quadratic
equations are applied to simulate the relationship between battery current and vehicle fuel-rate.
The power threshold at which engine is turned on is optimized by genetic algorithm (GA) based on
vehicle fuel-rate, battery state of charge (SOC) and driveline power demand. The optimal battery
current when the engine is on is calculated using quadratic programming (QP) method. The
proposed algorithm can control the battery current effectively, which makes the engine work more
efficiently and thus reduce the fuel-consumption. Moreover, the controller is still applicable when
the battery is unhealthy. Numerical simulations validated the feasibility of the proposed controller.
{\textcopyright} 2013 Elsevier B.V. All rights reserved.},
author = {Chen, Zheng and Mi, Chris Chunting and Xiong, Rui and Xu, Jun and You, Chenwen},
doi = {10.1016/j.jpowsour.2013.09.085},
isbn = {03787753},
issn = {03787753},
pages = {416--426},
title = {{Energy management of a power-split plug-in hybrid electric vehicle based on genetic
algorithm and quadratic programming}},
url = {http://dx.doi.org/10.1016/j.jpowsour.2013.09.085},
volume = {248},
year = {2014}
}
@article{Patil2013,
abstract = {— Image segmentation is the most critical functions in image analysis and processing.
Fundamentally segmentation results affect all the subsequent processes of image analysis such as
object representation and description, feature measurement, and even the following higher level
tasks such as object classification. Hence, image segmentation is the most essential and crucial
process for facilitating the delineation, characterization, and visualization of regions of interest in
any medical image. Manual segmentation of medical image by the radiologist is not only a tedious
and time consuming process, but also not very accurate especially with the increasing medical
imaging modalities and unmanageable quantity of medical images that need to be examined. It
becomes therefore necessary to review current methodologies of image segmentation using
automated algorithms that are accurate and require as little user interaction as possible especially
for medical images. In the segmentation process, the anatomical structure or the region of interest
needs to be delineated and extracted out so that it can be viewed individually. In this paper we
project the important place of segmentation of images in extracting information for decision making.
Indexed Terms: -Medical image segmentation, image analysis I. INTRODUCTION Medical images play
vital role in assisting health care providers to access patients for diagnosis and treatment. Studying
medical images depends mainly on the visual interpretation of the radiologists. However, this
consumes time and usually subjective, depending on the experience of the radiologist. Consequently
the use of computer-aided systems becomes very necessary to overcome these limitations. Artificial
Intelligence methods such as digital image processing when combined with others like machine
learning, fuzzy logic and pattern recognition are so valuable in Image techniques can be grouped
under a general framework; Image Engineering (IE). This is comprised of three layers: image
processing (lower layer), image analysis (middle layer), and image understanding (high layer), as
shown in Fig 1. Image segmentation is shown to be the first step and also one of the most critical
tasks of image analysis. Its objective is that of extracting information (represented by data) from an
image via image segmentation, object representation, and feature measurement, as shown in Fig 1.
Result of segmentation; obviously have considerable influence over the accuracy of feature
measurement [2]. The computerization of medical image segmentation plays an important role in
medical imaging applications. It has found wide application in different areas such as diagnosis,
localization of pathology, study of anatomical structure, treatment planning, and computer-
integrated surgery. However, the variability and the complexity of the anatomical structures in the
human body have resulted in medical image segmentation remaining a hard problem [3].},
journal = {Ijcsmc},
number = {1},
pages = {22--27},
volume = {2},
year = {2013}
}
@article{Wycisk2008,
abstract = {Purpose - The purpose of this paper is to critically analyze whether supply networks may
be validly treated as complex adaptive systems (CAS). Finding this to be true, the paper turns into
the latest concerns of complexity science like Pareto distributions to explain well-known phenomena
of extreme events in logistics, like the bullwhip effect. It aims to introduce a possible solution to
handle these effects. Design/methodology/approach - The method is a comparative analysis of
current literature in the fields of logistics and complexity science. The discussion of CAS in supply
networks is updated to include recent complexity research on power laws, non-linear dynamics,
extreme events, Pareto distribution, and long tails. Findings - Based on recent findings of complexity
science, the paper concludes that it is valid to call supply networks CAS. It then finds that supply
networks are vulnerable to all the nonlinear and extreme dynamics found in CAS within the business
world. These possible outcomes have to be considered in supply network management. It is found
that the use of a neural network model could work to manage these new challenges. Practical
implications - Since, smart parts are the future of logistics systems, managers need to worry about
the combination of human and smart parts, resulting design challenges, the learning effects of
interacting smart parts, and possible exacerbation of the bullwhip effect. In doing so, the paper
suggests several options concerning the design and management of supply networks.
Originality/value - The novel contribution of this paper lies in its analysis of supply networks from a
new theoretical approach: complexity science, which the paper updates. It enhances and reflects on
existing attempts in this field to describe supply networks as CAS through the comprehensive
theoretical base of complexity science. More specifically, it suggests the likely vulnerability to
extreme outcomes as the "parts" in supply networks become smarter. The paper also suggests
different ways of using a neural network approach for their management - depending on how smart
the logistics parts actually are. {\textcopyright} Emerald Group Publishing Limited.},
doi = {10.1108/09600030810861198},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1.1.164.5100.pdf:pdf},
issn = {09600035},
number = {2},
pages = {108--125},
title = {{"Smart parts" supply networks as complex adaptive systems: Analysis and implications}},
volume = {38},
year = {2008}
}
@article{Merizig2017,
abstract = {Nowadays, service composition is one of the major problems in the Cloud due to the
exceptional growth in the number of services deployed by providers. Recently, atomic services have
been found to be unable to deal with all client requirements. Traditional service composition gives
the clients a composite service without non-functional parameters. To respond to both functional
and non-functional parameters, we need a service composition. Since web services cannot
communicate with each other or participate dynamically to handle changes service parameters in
service composition, this issue has led us to use a dynamic entity represented by an agent based on
dynamic architecture. This work proposes an agent-based architecture with a new cooperation
protocol that can offer an automatic and adaptable service composition by providing a composite
service with the maximum quality of service. The implementation of this model has been provided in
order to evaluate the authors' system. The obtained results demonstrate the effectiveness of their
proposed system.},
doi = {10.4018/ijitwe.2018010104},
file = {:C$\backslash$:/Users/Asus/Downloads/merizig2018.pdf:pdf},
issn = {1554-1045},
number = {1},
pages = {50--68},
title = {{A Dynamic and Adaptable Service Composition Architecture in the Cloud Based on a Multi-
Agent System}},
volume = {13},
year = {2017}
}
@article{Bhadauria2014,
number = {4},
pages = {328--334},
volume = {3},
year = {2014}
}
@article{Katz2017a,
abstract = {Deep neural networks have emerged as a widely used and effective means for tackling
complex, real-world problems. However, a major obstacle in applying them to safety-critical systems
is the great difficulty in providing formal guarantees about their behavior. We present a novel,
scalable, and efficient technique for verifying properties of deep neural networks (or providing
counter-examples). The technique is based on the simplex method, extended to handle the non-
convex Rectified Linear Unit (ReLU) activation function, which is a crucial ingredient in many modern
neural networks. The verification procedure tackles neural networks as a whole, without making any
simplifying assumptions. We evaluated our technique on a prototype deep neural network
implementation of the next-generation airborne collision avoidance system for unmanned aircraft
(ACAS Xu). Results show that our technique can successfully prove properties of networks that are
an order of magnitude larger than the largest networks verified using existing methods.},
archivePrefix = {arXiv},
arxivId = {1702.01135},
author = {Katz, Guy and Barrett, Clark and Dill, David L. and Julian, Kyle and Kochenderfer, Mykel J.},
doi = {10.1007/978-3-319-63387-9_5},
eprint = {1702.01135},
isbn = {9783319633862},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {97--117},
title = {{Reluplex: An efficient smt solver for verifying deep neural networks}},
year = {2017}
}
@book{Tekinerdogan2002,
doi = {10.1007/978-1-4615-0883-0},
file = {:C$\backslash$:/Users/Asus/Downloads/Tekinerdogan-2002.pdf:pdf},
isbn = {9781461508830},
pages = {0--31},
year = {2002}
}
@article{Septiarini2016,
doi = {10.11591/ijece.v6i6.11053},
issn = {20888708},
number = {6},
pages = {2797--2804},
title = {{The contour extraction of cup in fundus images for glaucoma detection}},
volume = {6},
year = {2016}
}
@article{Matena2018,
abstract = {Smart Cyber-Physical Systems (sCPS) are complex systems performing smart coordination
that often require decentralized and network resilient operation. New development in the fields of
the robotic systems, Industry 4.0 and autonomous vehicular system brings challenges that can be
tackled with deployment of ensemble based sCPS, but require further refinement in terms of
network resilience and data propagation. This thesis maps the use cases of the sCPS in the
aforementioned domains, discusses requirements on the ensemble based architecture in terms of
network properties, and proposes recommendations and technical means that help to design
network aware ensemble based sCPS. The proposed solutions are evaluated by the means of target
systems simulation using state of the art realistic network and vehicular simulators.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/140068133.pdf:pdf},
author = {Mat{\v{e}}na, Vladim{\'{i}}r},
title = {{Integration Paradigms for Ensemble-based Smart Cyber-Physical
Systems}},
year = {2018}
}
@article{Calinescu2019,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9781450370448},
pages = {3369375},
title = {{Taming Service Uncertainty through Probabilistic Model Learning , Analysis and Synthesis}},
year = {2019}
}
@article{Iftikhar2017,
abstract = {Internet of Things (IoT) consists of networked tiny embedded computers (motes) that are
capable of monitoring and controlling the physical world. Examples range from building security
monitoring to smart factories. A central problem of IoT is minimising the energy consumption of the
motes, while guaranteeing high packet delivery performance, regardless of uncertainties such as
sudden changes in traffic load and communication interference. Traditionally, to deal with
uncertainties the network settings are either hand-tuned or over-provisioned, resulting in
continuous network maintenance or inefficiencies. Enhancing the IoT network with self-adaptation
can automate these tasks. This paper presents DeltaIoT, an exemplar that enables researchers to
evaluate and compare new methods, techniques and tools for self-adaptation in IoT. DeltaIoT is the
first exemplar for research on self-adaptation that provides both a simulator for offline
experimentation and a physical setup that can be accessed remotely for real-world experimentation.
{\textcopyright} 2017 IEEE.},
author = {Iftikhar, Muhammad Usman and Ramachandran, Gowri Sankar and Bollans{\'{e}}e, Pablo
and Weyns, Danny and Hughes, Danny},
doi = {10.1109/SEAMS.2017.21},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}6.pdf:pdf},
isbn = {9781538615508},
journal = {Proceedings - 2017 IEEE/ACM 12th International Symposium on Software Engineering for
Adaptive and Self-Managing Systems, SEAMS 2017},
pages = {76--82},
year = {2017}
}
@article{Henzinger,
author = {Henzinger, Tom},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ViC-Slides-Tom-Henzinger.pdf:pdf}
}
@article{Walker2009,
archivePrefix = {arXiv},
arxivId = {NIHMS150003},
author = {Walker, Melanie and Kublin, James G and Zunt, Joseph R},
doi = {10.1086/498510},
eprint = {NIHMS150003},
isbn = {9780123850447},
issn = {9780123850447},
number = {1},
pages = {115--125},
pmid = {1000000221},
volume = {42},
year = {2009}
}
@article{Birch2020,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/EMAS2020{\_}paper{\_}3.pdf:pdf},
year = {2020}
}
@article{Rahwan2012,
abstract = {Much of the literature on multi-agent coalition formation has focused on Characteristic
Function Games, where the effectiveness of a coalition is not affected by how the other agents are
arranged in the system. In contrast, very little attention has been given to the more general class of
Partition Function Games, where the emphasis is on how the formation of one coalition could
influence the performance of other co-existing coalitions in the system. However, these inter-
coalitional dependencies, called externalities from coalition formation, play a crucial role in many
real-world multi-agent applications where agents have either conflicting or overlapping goals.
Against this background, this paper is the first computational study of coalitional games with
externalities in the multi-agent system context. We focus on the Coalition Structure Generation
(CSG) problem which involves finding an exhaustive and disjoint division of the agents into coalitions
such that the performance of the entire system is optimized. While this problem is already very
challenging in the absence of externalities, due to the exponential size of the search space, taking
externalities into consideration makes it even more challenging as the size of the input, given n
agents, grows from O(2n) to O(nn). Our main contribution is the development of the first CSG
algorithm for coalitional games with either positive or negative externalities. Specifically, we prove
that it is possible to compute upper and lower bounds on the values of any set of disjoint coalitions.
Building upon this, we prove that in order to establish a worst-case guarantee on solution quality it is
necessary to search a certain set of coalition structures (which we define). We also show how to
progressively improve this guarantee with further search. Since there are no previous CSG
algorithms for games with externalities, we benchmark our algorithm against other state-of-the-art
approaches in games where no externalities are present. Surprisingly, we find that, as far as worst-
case guarantees are concerned, our algorithm outperforms the others by orders of magnitude. For
instance, to reach a bound of 3 given 24 agents, the number of coalition structures that need to be
searched by our algorithm is only 0.0007{\%} of that needed by Sandholm et al. (1999) [1], and 0.5{\
%} of that needed by Dang and Jennings (2004) [2]. This is despite the fact that the other algorithms
take advantage of the special properties of games with no externalities, while ours does not. {\
textcopyright} 2012 Elsevier B.V.},
author = {Rahwan, Talal and Michalak, Tomasz and Wooldridge, Michael and Jennings, Nicholas R.},
doi = {10.1016/j.artint.2012.03.007},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0004370212000288-main{\
_}2.pdf:pdf},
issn = {00043702},
pages = {95--122},
title = {{Anytime coalition structure generation in multi-agent systems with positive or negative
externalities}},
url = {http://dx.doi.org/10.1016/j.artint.2012.03.007},
volume = {186},
year = {2012}
}
@article{Biere2003,
author = {Biere, Armin and Cimatti, Alessandro and Clarke, Edmund M},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/file.pdf:pdf},
number = {99},
volume = {58},
year = {2003}
}
@article{Zulkifley2014,
abstract = {{\textcopyright} 2014 IEEE. Optic disc segmentation is a crucial step in automated glaucoma detection
system through Cup-to-Disc ratio measurement. Recent approaches focus on deterministic algorithm
of RGB or grey model only. In this paper, we proposed a statistically integrated approach by
combining various colour models. The driving motivation is the ability of each colour model to work
accurately in certain environments or cases. Histogram of ach colour model of HSV, RGB and grey
will be tabulated to approximate the colour distribution. The ratio between the highest and the
second highest will be the contribution weightage of the fused output. The performance is simulated
by using RIM-One database. The average sensitivity and specificity of the detection are 0.912 and
0.832 respectively.},
author = {Zulkifley, Mohd Asyraf and Hussain, Aini and Mustafa, Mohd Marzuki and Mustapha,
Aouache},
doi = {10.1109/ELINFOCOM.2014.6914379},
isbn = {9781479939428},
title = {{On analyzing optic disc extraction through weighted colour models}},
year = {2014}
}
@article{Godefroid2018,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/testing.pdf:pdf},
year = {2018}
}
@article{Abeywickrama2020,
author = {Abeywickrama, Dhaminda B. and Bicocchi, Nicola and Mamei, Marco and Zambonelli,
Franco},
doi = {10.1007/s10009-020-00554-3},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/abeywickrama2020.pdf:pdf},
issn = {1433-2779},
url = {https://doi.org/10.1007/s10009-020-00554-3},
year = {2020}
}
@article{Michie1968,
abstract = {It would be useful if computers could learn from experience and thus automatically
improve the efficiency of their own programs during execution. A simple but effective rote-learning
facility can be provided within the framework of a suitable programming language. {\textcopyright}
1968 Nature Publishing Group.},
doi = {10.1038/218019a0},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/jt687kv7146.pdf:pdf},
issn = {00280836},
journal = {Nature},
number = {5136},
pages = {19--22},
volume = {218},
year = {1968}
}
@article{Yaqoob2019a,
abstract = {The explosive growth of smart objects and their dependency on wireless technologies for
communication increases the vulnerability of Internet of Things (IoT) to cyberattacks. Cyberattacks
faced by IoT present daunting challenges to digital forensic experts. Researchers adopt various
forensic techniques to investigate such attacks. These techniques aim to track internal and external
attacks by emphasizing on communication mechanisms and IoT's architectural vulnerabilities. In this
study, we explore IoT's novel factors affecting traditional computer forensics. We investigate recent
studies on IoT forensics by analyzing their strengths and weaknesses. We categorize and classify the
literature by devising a taxonomy based on forensics phases, enablers, networks, sources of
evidence, investigation modes, forensics models, forensics layers, forensics tools, and forensics data
processing. We also enumerate a few prominent use cases of IoT forensics and present the key
requirements for enabling IoT forensics. Finally, we identify and discuss several indispensable open
research challenges as future research directions.},
author = {Yaqoob, Ibrar and Hashem, Ibrahim Abaker Targio and Ahmed, Arif and Kazmi, S. M.Ahsan
and Hong, Choong Seon},
doi = {10.1016/j.future.2018.09.058},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/yaqoob2019.pdf:pdf},
issn = {0167739X},
pages = {265--275},
title = {{Internet of things forensics: Recent advances, taxonomy, requirements, and open
challenges}},
url = {https://doi.org/10.1016/j.future.2018.09.058},
volume = {92},
year = {2019}
}
@article{Grimm2005,
abstract = {Agent-based complex systems are dynamic networks of many interacting agents;
examples include ecosystems, financial markets, and cities. The search for general principles
underlying the internal organization of such systems often uses bottom-up simulation models such
as cellular automata and agent-based models. No general framework for designing, testing, and
analyzing bottom-up models has yet been established, but recent advances in ecological modeling
have come together in a general strategy we call pattern-oriented modeling. This strategy provides a
unifying framework for decoding the internal organization of agent-based complex systems and may
lead toward unifying algorithmic theories of the relation between adaptive behavior and system
complexity.},
author = {Grimm, Volker and Revilla, Eloy and Berger, Uta and Jeltsch, Florian and Mooij, Wolf M.
and Railsback, Steven F. and Thulke, Hans Hermann and Weiner, Jacob and Wiegand, Thorsten and
DeAngelis, Donald L.},
doi = {10.1126/science.1116681},
issn = {00368075},
journal = {Science},
number = {5750},
pages = {987--991},
volume = {310},
year = {2005}
}
@article{Chan2003,
author = {Chan, Chiu-shui and Tong, Ziyu and Dang, Anrong and Qian, Jingping},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Final{\_}CSC{\_}VSMM2003.pdf:pdf},
pages = {13--22},
year = {2003}
}
@article{Jackson2019,
abstract = {ALLOY IS A language and a toolkit for exploring the kinds of structures that arise in many
software designs. This article aims to give readers a flavor of Alloy in action, and some examples of
its applications to date, thus giving a sense of how it can be used in software design work. Software
involves structures of many sorts: architectures, database schemas, network topologies, ontologies,
and so on. When designing a software system, you need to be able to express the structures
essential to the design and to check that they have the properties you expect. You can express a
structure by sketching it on a napkin. That's a good start, but it's limited. Informal representations
give inconsistent interpretations, and they cannot be analyzed mechanically. So people have turned
to formal notations that define structure and behavior precisely and objectively, and that can exploit
the power of computation.},
doi = {10.1145/3338843},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/3338843.pdf:pdf},
issn = {15577317},
number = {9},
pages = {66--76},
volume = {62},
year = {2019}
}
@article{Kwiatkowskaa,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/kwiatkowska2016.pdf:pdf},
title = {{Advances and Challenges of Quantitative Verification and Synthesis for Cyber-Physical
Systems}}
}
@book{Steffen2013,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}23.pdf:pdf},
isbn = {9783319068589},
year = {2013}
}
@article{Xie2005,
abstract = {The primary purpose of a network is to provide reachability between applications running
on end hosts. In this paper, we describe how to compute the reachability a network provides from a
snapshot of the configuration state from each of the routers. Our primary contribution is the precise
definition of the potential reachability of a network and a substantial simplification of the problem
through a unified modeling of packet filters and routing protocols. In the end, we reduce a complex,
important practical problem to computing the transitive closure to set union and intersection
operations on reachability set representations. We then extend our algorithm to model the
influence of packet transformations (e.g., by NATs or ToS remapping) along the path. Our technique
for static analysis of network reachability is valuable for verifying the intent of the network designer,
troubleshooting reachability problems, and performing "what-if" analysis of failure scenarios. {\
textcopyright} 2005 IEEE.},
author = {Xie, Geoffrey G. and Zhan, Jibin and Maltz, David A. and Zhang, Hui and Greenberg, Albert
and Hjalmtysson, Gisli and Rexford, Jennifer},
doi = {10.1109/INFCOM.2005.1498492},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}17.pdf:pdf},
isbn = {0780389689},
issn = {0743166X},
number = {C},
pages = {2170--2183},
volume = {3},
year = {2005}
}
@article{Khan2014,
abstract = {—Image segmentation is a mechanism used to divide an image into multiple segments. It
will make image smooth and easy to evaluate. Segmentation process also helps to find region of
interest in a particular image. The main goal is to make image more simple and meaningful. Existing
segmentation techniques can't satisfy all type of images. This survey addressed various image
segmentation techniques, evaluates them and presents the issues related to those techniques.
Index Terms—segmentation, image processing, clustering, partial differential equations},
doi = {10.12720/joig.1.4.166-170},
issn = {23013699},
number = {4},
pages = {166--170},
url = {http://www.joig.org/index.php?m=content{\&}c=index{\&}a=show{\&}catid=34{\&}id=57},
volume = {1},
year = {2014}
}
@article{Ali2014,
author = {Ali, Rima Al and Bures, Tomas and Gerostathopoulos, Ilias and Hnetynka, Petr and Keznikl,
Jaroslav and Kit, Michal and Plasil, Frantisek},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/alali2014.pdf:pdf},
isbn = {9781450327688},
pages = {610--611},
year = {2014}
}
@article{Camara2020a,
abstract = {Architecting IoT systems able to guarantee Quality of Service (QoS) levels can be a
challenging task due to the inherent uncertainties (induced by changes in e.g., energy availability,
network traffic) that they are subject to. Existing work has shown that machine learning (ML)
techniques can be effectively used at run time for selecting self-adaptation patterns that can help
maintain adequate QoS levels. However, this class of approach suffers from learning bias, which
induces accuracy problems that might lead to sub-optimal (or even unfeasible) adaptations in some
situations. To overcome this limitation, we propose an approach for proactive self-adaptation which
combines ML and formal quantitative verification (probabilistic model checking). In our approach,
ML is tasked with selecting the best adaptation pattern for a given scenario, and quantitative
verification checks the feasibility of the adaptation decision, preventing the execution of unfeasible
adaptations and providing feedback to the ML engine which helps to achieve faster convergence
towards optimal decisions. The results of our evaluation show that our approach is able to produce
better decisions than ML and quantitative verification used in isolation.},
doi = {10.1109/ICSA47634.2020.00010},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/camara2020.pdf:pdf},
isbn = {9781728146591},
journal = {Proceedings - IEEE 17th International Conference on Software Architecture, ICSA 2020},
pages = {11--22},
title = {{Quantitative verification-aided machine learning: A tandem approach for architecting self-
adaptive IoT systems}},
year = {2020}
}
@article{Jiang2013,
abstract = {In order to achieve maximum efficiency a photovoltaic (PV) arrays should operate at their
maximum power point (MPP). Therefore, an MPP tracking (MPPT) scheme is implemented between
the PV system and the load to obtain maximum power. When the irradiance distribution on the PV
arrays is uniform, many traditional MPPT techniques can track the MPP effectively. However, when
the PV arrays are partially shaded, multiple MPPs show up, which usually results in the failure of
finding the global MPP. Some researchers have reported this problem and tried to solve it, but most
of the MPP control schemes are relatively complicated or fail to guarantee the MPP under all
shading circumstances. In order to overcome this difficulty, this paper presents a novel ant colony
optimization (ACO)-based MPPT scheme for PV systems. A new control scheme is also introduced
based on the proposed MPPT method. This heuristic algorithm based technique not only ensures the
ability to find the global MPP, but also gives a simpler control scheme and lower system cost. The
feasibility of this proposed method is verified with the irradiance of various shading patterns by
simulation. In addition, the performance comparison with other traditional MPPT techniques, such
as: constant voltage tracking (CVT), perturb and observe (P{\&}O), particle swarm optimization
(PSO), is also presented. The results show that the proposed algorithm can track the global MPP
effectively, and is robust to various shading patterns. {\textcopyright} 2012 Elsevier B.V.},
author = {Jiang, Lian Lian and Maskell, Douglas L. and Patra, Jagdish C.},
doi = {10.1016/j.enbuild.2012.12.001},
isbn = {0378-7788},
issn = {03787788},
pages = {227--236},
title = {{A novel ant colony optimization-based maximum power point tracking for photovoltaic
systems under partially shaded conditions}},
volume = {58},
year = {2013}
}
@article{Lomuscio2020a,
abstract = {We present a framework for verifying strategic behaviour in an unbounded multi-agent
system. We introduce a novel probabilistic semantics for parameterised multi-agent systems and
define the corresponding verification problem against two probabilistic variants of alternating-time
temporal logic. We define a verification procedure using an abstract model construction. We show
that the procedure is complete for one variant of our specification language, and partial for the
other. We present an implementation and report experimental results.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/p762.pdf:pdf},
journal = {Ifaamas},
number = {Aamas},
pages = {762--770},
url = {www.ifaamas.org},
volume = {9},
year = {2020}
}
@article{Tewani2017,
number = {2},
pages = {69--70},
volume = {21},
year = {2017}
}
@article{Eberhardinger2018,
author = {Eberhardinger, Benedikt and Ponsar, Hella and Klumpp, Dominik and Reif, Wolfgang},
doi = {10.1007/978-3-030-03424-5_14},
file = {:C$\backslash$:/Users/Asus/Downloads/2018-Measuring{\_}and{\_}Evaluating{\_}the{\
_}Performance{\_}of{\_}Self-Organization{\_}Mechanisms{\_}Within{\_}Collective{\_}Adaptive{\
_}Systems.pdf:pdf},
number = {September},
pages = {202--220},
title = {{Measuring and Evaluating the Performance of Self-Organization Mechanisms Within
Collective Adaptive Systems}},
year = {2018}
}
@article{Perry1992,
abstract = {The purpose of this paper is to build the foundation for software architecture. We first
develop an intuition for software architecture by appealing to several well-established architectural
disciplines. On the basis of this intuition, we present a model of software architecture that consists
of three components: elements, form, and rationale. Elements are either processing, data, or
connecting elements. Form is defined in terms of the properties of, and the relationships among, the
elements --- that is, the constraints on the elements. The rationale provides the underlying basis for
the architecture in terms of the system constraints, which most often derive from the system
requirements. We discuss the components of the model in the context of both architectures and
architectural styles and present an extended example to illustrate some important architecture and
style considerations. We conclude by presenting some of the benefits of our approach to software
architecture, summarizing our contributions, and relating our approach to other current work.},
doi = {10.1145/141874.141884},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/swa-sen.pdf:pdf},
issn = {0163-5948},
number = {4},
pages = {40--52},
volume = {17},
year = {1992}
}
@article{Asarin1995,
abstract = {In this paper we consider a class of hybrid systems, namely dynamical systems with
piecewise-constant derivatives (PCD systems). Such systems consist of a partition of the Euclidean
space into a finite set of polyhedral sets (regions). Within each region the dynamics is defined by a
constant vector field, hence discrete transitions occur only on the boundaries between regions
where the trajectories change their direction. With respect to such systems we investigate the
reachability question: Given an effective description of the systems and of two polyhedral subsets P
and Q of the state-space, is there a trajectory starting at some $x \in P$ and
reaching some point in Q? Our main results are a decision procedure for two-dimensional systems,
and an undecidability result for three or more dimensions. {\textcopyright} 1995.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-030439759400228B-
main.pdf:pdf},
issn = {03043975},
number = {1},
pages = {35--65},
volume = {138},
year = {1995}
}
@article{EuropePMCFundersGroup2014,
abstract = {Elevated intraocular pressure (IOP) is an important risk factor in developing glaucoma
and IOP variability may herald glaucomatous development or progression. We report the results of a
genome-wide association study meta-analysis of 18 population cohorts from the International
Glaucoma Genetics Consortium (IGGC), comprising 35,296 multiethnic participants for IOP. We
confirm genetic association of known loci for IOP and primary open angle glaucoma (POAG) and
identify four new IOP loci located on chromosome 3q25.31 within the FNDC3B gene ($p=4.19\times10^{-08}$
for rs6445055), two on chromosome 9 ($p=2.80\times10^{-11}$ for rs2472493 near ABCA1 and
$p=6.39\times10^{-11}$ for rs8176693 within ABO) and one on chromosome 11p11.2 (best $p=1.04\times10^{-11}$ for rs747782).
Separate meta-analyses of four independent POAG cohorts, totaling 4,284 cases and 95,560
controls, show that three of these IOP loci are also associated with POAG. Primary open angle
glaucoma (POAG) is the leading cause of irreversible blindness in the world 1 . The only modifiable
risk factor for the development and progression of glaucoma is high intraocular pressure (IOP) 2 ,
and lowering IOP is currently the only therapy that can reduce glaucomatous progression, even in
forms of glaucoma that have IOP close to the statistical norm for the population (normal tension
glaucoma or NTG) 34 . POAG and IOP are highly heritable; the lifetime risk of developing POAG is
22{\%} among first degree relatives of patients 5 , which is approximately 10 times higher than the
rest of the population 1 . The IOP heritability is estimated to be approximately 55{\%} 6 . Genetic
studies have shown that the genetic risk of POAG and IOP are partly shared; polymorphisms within
the TMCO1 gene are associated with both POAG risk 7 and IOP 8 . Studying genetic determinants of
IOP is therefore likely to provide critical insights into the genetic architecture of POAG and open new
avenues for therapeutic intervention.},
doi = {10.1038/ng.3087},
issn = {1061-4036},
pages = {1126--1130},
title = {{Genome-wide analysis of multiethnic cohorts identifies new loci influencing intraocular
pressure and susceptibility to glaucoma}},
volume = {46},
year = {2014}
}
@article{Xu2013,
author = {Xu, Yanwu and Duan, Lixin and Lin, Stephen and Chen, Xiangyu and Wong, Damon Wing Kee},
pages = {1--8},
title = {{Optic Cup Segmentation for Glaucoma Detection Using Low-Rank Superpixel
Representation}},
year = {2013}
}
@article{Kuehlmann2002,
abstract = {Many tasks in computer-aided design (CAD), such as equivalence checking, property
checking, logic synthesis, and false paths analysis, require efficient Boolean reasoning for problems
derived from circuits. Traditionally, canonical representations, e.g., binary decision diagrams (BDDs),
or structural satisfiability (SAT) methods, are used to solve different problem instances. Each of
these techniques offer specific strengths that make them efficient for particular problem structures.
However, neither structural techniques based on SAT, nor functional methods using BDDs offer an
overall robust reasoning mechanism that works reliably for a broad set of applications. The authors
present a combination of techniques for Boolean reasoning based on BDDs, structural
transformations, an SAT procedure, and random simulation natively working on a shared graph
representation of the problem. The described intertwined integration of the four techniques results
in a powerful summation of their orthogonal strengths. The presented reasoning technique was
mainly developed for formal equivalence checking and property verification but can equally be used
in other CAD applications. The authors' experiments demonstrate the effectiveness of the approach
for a broad set of applications.},
author = {Kuehlmann, Andreas and Paruthi, Viresh and Krohm, Florian and Ganai, Malay K.},
doi = {10.1109/TCAD.2002.804386},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}10.pdf:pdf},
issn = {02780070},
number = {12},
pages = {1377--1394},
title = {{Robust boolean reasoning for equivalence checking and functional property verification}},
volume = {21},
year = {2002}
}
@article{Nawaldgi2017,
doi = {10.17485/ijst/2017/v10i13/111722},
issn = {0974-5645},
number = {13},
pages = {1--6},
title = {{A Novel Combined Color Channel and ISNT Rule Based Automatic Glaucoma Detection from
Color Fundus Images}},
url = {http://www.indjst.org/index.php/indjst/article/view/111722},
volume = {10},
year = {2017}
}
@article{Leroy2009,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/compcert-CACM.pdf:pdf},
number = {7},
pages = {107--115},
title = {{Formal verification of a realistic compiler}},
volume = {52},
year = {2009}
}
@article{Zon2016,
doi = {10.1007/978-3-319-47166-2},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783319471662},
pages = {674--688},
year = {2016}
}
@book{Mahfoudh2018,
author = {Ben Mahfoudh, Houssem and Di Marzo Serugendo, Giovanna},
doi = {10.1007/978-3-030-03424-5},
file = {:C$\backslash$:/Users/Asus/Downloads/mahfoudh2018.pdf:pdf},
isbn = {978-3-030-03423-8},
pages = {257--273},
url = {http://link.springer.com/10.1007/978-3-030-03424-5},
volume = {11246},
year = {2018}
}
@article{Han2020,
abstract = {The design synthesis is the key issue in the mechanical conceptual design to generate the
design candidates that meet the design requirements. This paper devotes to propose a novel and
computable synthesis approach of mechanisms based on graph theory and polynomial operation.
The graph framework of the synthesis approach is built firstly, and it involves: (1) the kinematic
function units extracted from mechanisms; (2) the kinematic link graph that transforms the synthesis
problem from mechanical domain into graph domain; (3) two graph representations, i.e., walk
representation and path representation, of design candidates; (4) a weighted matrix theorem that
transforms the synthesis process into polynomial operation. Then, the formulas and algorithm to the
polynomial operation are presented. Based on them, the computational flowchart to the synthesis
approach is summarized. A design example is used to validate and illustrate the synthesis approach
in detail. The proposed synthesis approach is not only supportive to enumerate the design
candidates to the conceptual design of a mechanical system exhaustively and automatically, but also
helpful to make that enumeration process computable.},
author = {Han, Lin and Liu, Geng and Yang, Xiaohui and Han, Bing},
doi = {10.1186/s10033-019-0424-9},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Han2020{\_}Article{\
_}AComputationalSynthesisApproac.pdf:pdf},
issn = {21928258},
number = {1},
title = {{A Computational Synthesis Approach of Mechanical Conceptual Design Based on Graph
Theory and Polynomial Operation}},
url = {https://doi.org/10.1186/s10033-019-0424-9},
volume = {33},
year = {2020}
}
@book{EmersonE.Allen1990,
abstract = {We give a comprehensive and unifying survey of the theoretical aspects of Temporal and
Modal Logic},
doi = {10.1016/b978-0-444-88074-1.50021-4},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/emerson1990.pdf:pdf},
pages = {995--1072},
url = {http://dx.doi.org/10.1016/B978-0-444-88074-1.50021-4},
year = {1990}
}
@article{Eckert2018,
abstract = {Heap metadata attacks have become one of the primary ways in which attackers exploit
memory corruption vulnerabilities. While heap implementation developers have introduced miti-
gations to prevent and detect corruption, it is still possible for attackers to work around them. In
part, this is because these mitigations are created and evaluated without a principled foundation,
resulting, in many cases, in complex, inefficient, and ineffective attempts at heap metadata
defenses. In this paper, we present HEAPHOPPER, an automated approach, based on model
checking and symbolic execution , to analyze the exploitability of heap implementations in the
presence of memory corruption. Using HEAPHOPPER, we were able to perform a systematic analysis
of different, widely used heap implementations, finding surprising weaknesses in them. Our results
show, for instance, how a newly introduced caching mechanism in ptmalloc (the heap allo-cator
implementation used by most of the Linux distributions) significantly weakens its security.
Moreover, HEAPHOPPER guided us in implementing and evaluating improvements to the security of
ptmalloc, replacing an ineffective recent attempt at the mitigation of a specific form of heap
metadata corruption with an effective defense.},
author = {Eckert, Moritz and Bianchi, Antonio and Wang, Ruoyu and Shoshitaishvili, Yan and Kruegel,
Christopher and Vigna, Giovanni},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/sec18-eckert.pdf:pdf},
isbn = {978-1-931971-46-1},
journal = {Proceedings of the 27th USENIX Security Symposium - USENIX Security '18},
pages = {99--116},
url = {https://seclab.cs.ucsb.edu/media/uploads/papers/sec2018-heap-hopper.pdf},
year = {2018}
}
@article{Wang2017,
abstract = {As an extension of the classical job shop scheduling problem, the flexible job shop
scheduling problem (FJSP) plays an important role in real production systems. In FJSP, an operation
is allowed to be processed on more than one alternative machine. It has been proven to be a
strongly NP-hard problem. Ant colony optimization (ACO) has been proven to be an efficient
approach for dealing with FJSP. However, the basic ACO has two main disadvantages including low
computational efficiency and local optimum. In order to overcome these two disadvantages, an
improved ant colony optimization (IACO) is proposed to optimize the makespan for FJSP. The
following aspects are done on our improved ant colony optimization algorithm: select machine rule
problems, initialize uniform distributed mechanism for ants, change pheromone's guiding
mechanism, select node method, and update pheromone's mechanism. An actual production
instance and two sets of well-known benchmark instances are tested and comparisons with some
other approaches verify the effectiveness of the proposed IACO. The results reveal that our
proposed IACO can provide better solution in a reasonable computational time.},
author = {Wang, Lei and Cai, Jingcao and Li, Ming and Liu, Zhihu},
doi = {10.1155/2017/9016303},
issn = {10589244},
title = {{Flexible Job Shop Scheduling Problem Using an Improved Ant Colony Optimization}},
volume = {2017},
year = {2017}
}
@article{Lamport2005,
abstract = {It is easy to write and verify real-time specifications with existing languages and methods;
one just represents time as an ordinary variable and expresses timing requirements with special
timer variables. The resulting specifications can be verified with an ordinary model checker. This
basic idea and some less obvious details are explained, and results are presented for two examples.
{\textcopyright} IFIP International Federation for Information Processing 2005.},
doi = {10.1007/11560548_14},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Lamport2005{\_}Chapter{\_}Real-
TimeModelCheckingIsReally.pdf:pdf},
isbn = {3540291059},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {162--175},
year = {2005}
}
@article{Bankes2002,
abstract = {Agent-based models (ABM) are examples of complex adaptive systems, which can be
characterized as those systems for which no model less complex than the system itself can
accurately predict in detail how the system will behave at future times. Consequently, the standard
tools of policy analysis, based as they are on devising policies that perform well on some best
estimate model of the system, cannot be reliably used for ABM. This paper argues that policy
analysis by using ABM requires an alternative approach to decision theory. The general
characteristics of such an approach are described, and examples are provided of its application to
policy analysis.},
doi = {10.1073/pnas.092081399},
issn = {00278424},
journal = {Proceedings of the National Academy of Sciences of the United States of America},
pages = {7263--7266},
title = {{Tools and techniques for developing policies for complex and uncertain systems}},
volume = {99},
year = {2002}
}
@article{Viroli2018,
abstract = {Collective adaptive systems are an emerging class of networked computational systems
particularly suited for application domains such as smart cities, complex sensor networks, and the
Internet of Things. These systems tend to feature large-scale, heterogeneity of communication
model (including opportunistic peer-to-peer wireless interaction) and require inherent self-
adaptiveness properties to address unforeseen changes in operating conditions. In this context, it is
extremely difficult (if not seemingly intractable) to engineer reusable pieces of distributed behaviour
to make them provably correct and smoothly composable. Building on the field calculus, a
computational model (and associated toolchain) capturing the notion of aggregate network-level
computation, we address this problem with an engineering methodology coupling formal theory and
computer simulation. On the one hand, functional properties are addressed by identifying the
largest-to-date field calculus fragment generating self-stabilising behaviour, guaranteed to
eventually attain a correct and stable final state despite any transient perturbation in state or
topology and including highly reusable building blocks for information spreading, aggregation, and
time evolution. On the other hand, dynamical properties are addressed by simulation, empirically
evaluating the different performances that can be obtained by switching between implementations
of building blocks with provably equivalent functional properties. Overall, our methodology sheds
light on how to identify core building blocks of collective behaviour and how to select
implementations that improve system performance while leaving overall system function and
resiliency properties unchanged.},
archivePrefix = {arXiv},
arxivId = {1711.08297},
author = {Viroli, Mirko and Audrito, Giorgio and Beal, Jacob and Damiani, Ferruccio and Pianini,
Danilo},
doi = {10.1145/3177774},
eprint = {1711.08297},
file = {:C$\backslash$:/Users/Asus/Downloads/2018-Engineering{\_}resilient{\_}collective{\
_}adaptive{\_}systems{\_}by{\_}self-stabilisation.pdf:pdf},
issn = {15581195},
number = {2},
volume = {28},
year = {2018}
}
@article{Bjørner2012,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/automated-reasoning-2012{\
_}2.pdf:pdf},
pages = {1--8},
volume = {3},
year = {2012}
}
@article{Salem2014,
number = {02},
title = {{Tuning PID Controllers Using Artificial Intelligence Techniques Applied To DC-Motor and AVR
System}},
volume = {2},
year = {2014}
}
@article{Chatterjee2015,
abstract = {Consider the problem of estimating the entries of a large matrix, when the observed
entries are noisy versions of a small random fraction of the original entries. This problem has
received widespread attention in recent times, especially after the pioneering works of Emmanuel
Candes and collaborators. Typically, it is assumed that the underlying matrix has low rank. This paper
introduces a simple estimation procedure, called Universal Singular Value Thresholding (USVT), that
works for any matrix that has `a little bit of structure'. In particular, the matrix need not be of low
rank. The procedure is very simple and fast, works under minimal assumptions, and is applicable for
very large matrices. Surprisingly, this simple estimator achieves the minimax error rate up to a
constant factor. The method is applied to give simple solutions to difficult questions in low rank
matrix estimation, blockmodels, distance matrix completion, latent space models, positive definite
matrix completion, problems related to graph limits, and generalized Bradley-Terry models for
pairwise comparison.},
archivePrefix = {arXiv},
arxivId = {1212.1247},
doi = {10.1214/14-AOS1272},
eprint = {1212.1247},
issn = {00905364},
number = {1},
pages = {177--214},
volume = {43},
year = {2015}
}
@article{Levin2002,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/S0273-0979-02-00965-5.pdf:pdf},
pages = {3--19},
volume = {40},
year = {2002}
}
@article{Teknik2020,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/FULLTEXT01{\_}2.pdf:pdf},
year = {2020}
}
@article{Bucchiarone2017a,
abstract = {In this paper we propose CAStlE, a MDE approach to enhance Collective Adaptive System
(CAS) specification. In particular, we introduce a domain-specific language (DSL) made-up of three
main views: one devoted to adaptive systems design; one addressing ensembles definition; and one
tackling the collective adaptation. These three separate aspects are woven seamlessly by the DSL to
constitute a complete CAS design. Moreover, each of the defined views conveys the creation of a
corresponding model editor, which allows for the three aspects of a CAS to be independently
designed by CAStlE.},
author = {Bucchiarone, Antonio and Cicchetti, Antonio and De Sanctis, Martina},
doi = {10.1109/FAS-W.2017.183},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/bucchiarone2017.pdf:pdf},
isbn = {9781509065585},
journal = {Proceedings - 2017 IEEE 2nd International Workshops on Foundations and Applications of
Self* Systems, FAS*W 2017},
pages = {385--386},
year = {2017}
}
@article{Hock2012,
author = {Hock, Howard S},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Hock-Dynamic{\_}grouping{\_}motion{\
_}A{\_}method{\_}for{\_}determining{\_}perceptual{\_}organization{\_}for{\_}objects{\_}with{\
_}connected{\_}surfaces.pdf:pdf},
number = {1923},
pages = {1--17},
year = {2012}
}
@article{Thum2012,
abstract = {Software product line engineering aims at the efficient development of program variants
that share a common set of features and that differ in other features. Product lines can be efficiently
developed using feature-oriented programming. Given a feature selection and the code artifacts for
each feature, program variants can be generated automatically. The quality of the program variants
can be rigorously ensured by formal verification. However, verification of all program variants can be
expensive and include redundant verification tasks. We introduce a classification of existing software
product line verification approaches and propose proof composition as a novel approach. Proof
composition generates correctness proofs of each program variant based on partial proofs of each
feature. We present a case study to evaluate proof composition and demonstrate that it reduces the
effort for verification.},
author = {Th{\"{u}}m, Thomas and Schaefer, Ina and Hentschel, Martin and Apel, Sven},
doi = {10.1145/2371401.2371404},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9781450311298},
pages = {11},
year = {2012}
}
@article{Tanganelli2020,
abstract = {The pervasiveness and the growing processing capabilities of mobile and embedded
computing systems are leading to a shift from the Internet of Things (IoT) paradigm to the Fog
computing scenario where the environment is instrumented with high-performance computing in
the proximity to cyber–physical systems. The design of such systems requires an accurate planning,
on the one hand, to ensure that specific application requirements will be properly met at run-time,
and, on the other hand, to minimize the system's monetary costs. In this paper we present a
methodology for an automated design and deployment of distributed cyber–physical systems into
smart environments. We propose an engine based on a Mixed Integer Linear Programming (MILP)
formulation which takes in input a planimetry of the environment and a description of the
applications and, based on a repository of available processing boards, identifies the cost-optimized
instantiation of the processing architecture and the corresponding distribution of the application
functionalities. By comparing our proposal with the existing methodologies that address similar
problems we can highlight the following novelties: (i) we address a system architecture composed of
heterogeneous devices, (ii) we adopt a realistic model of the environment, and (iii) we perform a
joint co-exploration of architecture instantiation and applications mapping. An experimental
evaluation, considering a smart office case study, demonstrates the potential of the proposed
approach in minimizing the overall system monetary cost around 42{\%} w.r.t. a baseline approach
not exploiting planimetry information. Such results have been also confirmed by an extensive
experimental campaign using synthetic problems, which also highlighted how the execution times of
the optimization process are affordable for the design-time process.},
author = {Tanganelli, Giacomo and Cassano, Luca and Miele, Antonio and Vallati, Carlo},
doi = {10.1016/j.future.2020.02.047},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/j.future.2020.02.047.pdf:pdf},
issn = {0167739X},
pages = {420--430},
title = {{A methodology for the design and deployment of distributed cyber–physical systems for
smart environments}},
url = {https://doi.org/10.1016/j.future.2020.02.047},
volume = {109},
year = {2020}
}
@article{Basset2018,
doi = {10.1016/j.ic.2017.09.010},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0890540117301670-
main.pdf:pdf},
issn = {10902651},
pages = {536--587},
title = {{Compositional strategy synthesis for stochastic games with multiple objectives}},
url = {http://dx.doi.org/10.1016/j.ic.2017.09.010},
volume = {261},
year = {2018}
}
@article{Gupta2014,
abstract = {In this paper we study the problem of object detection for RGB-D images using
semantically rich image and depth features. We propose a new geocentric embedding for depth
images that encodes height above ground and angle with gravity for each pixel in addition to the
horizontal disparity. We demonstrate that this geocentric embedding works better than using raw
depth images for learning feature representations with convolutional neural networks. Our final
object detection system achieves an average precision of 37.3{\%}, which is a 56{\%} relative
improvement over existing methods. We then focus on the task of instance segmentation where we
label pixels belonging to object instances found by our detector. For this task, we propose a decision
forest approach that classifies pixels in the detection window as foreground or background using a
family of unary and binary tests that query shape and geocentric pose features. Finally, we use the
output from our object detectors in an existing superpixel classification framework for semantic
scene segmentation and achieve a 24{\%} relative improvement over current state-of-the-art for the
object categories that we study. We believe advances such as those represented in this paper will
facilitate the use of perception in fields like robotics.},
archivePrefix = {arXiv},
arxivId = {1407.5736},
author = {Gupta, Saurabh and Girshick, Ross and Arbel{\'{a}}ez, Pablo and Malik, Jitendra},
doi = {10.1007/978-3-319-10584-0_23},
eprint = {1407.5736},
isbn = {9783319105833},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {345--360},
pmid = {604804},
title = {{Learning rich features from RGB-D images for object detection and segmentation}},
year = {2014}
}
@article{Sikder2018,
abstract = {The concept of Internet of Things (IoT) has become more popular in the modern era of
technology than ever before. From small household devices to large industrial machines, the vision
of IoT has made it possible to connect the devices with the physical world around them. This
increasing popularity has also made the IoT devices and applications in the center of attention
among attackers. Already, several types of malicious activities exist that attempt to compromise the
security and privacy of the IoT devices. One interesting emerging threat vector is the attacks that
abuse the use of sensors on IoT devices. IoT devices are vulnerable to sensor-based threats due to
the lack of proper security measurements available to control use of sensors by apps. By exploiting
the sensors (e.g., accelerometer, gyroscope, microphone, light sensor, etc.) on an IoT device,
attackers can extract information from the device, transfer malware to a device, or trigger a
malicious activity to compromise the device. In this survey, we explore various threats targeting IoT
devices and discuss how their sensors can be abused for malicious purposes. Specifically, we present
a detailed survey about existing sensor-based threats to IoT devices and countermeasures that are
developed specifically to secure the sensors of IoT devices. Furthermore, we discuss security and
privacy issues of IoT devices in the context of sensor-based threats and conclude with future
research directions.},
archivePrefix = {arXiv},
arxivId = {1802.02041},
author = {Sikder, Amit Kumar and Petracca, Giuseppe and Aksu, Hidayet and Jaeger, Trent and
Uluagac, A. Selcuk},
eprint = {1802.02041},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/A{\_}Survey{\_}on{\_}Sensor-based{\
_}Threats{\_}to{\_}Internet-of-Th.pdf:pdf},
number = {February},
title = {{A Survey on Sensor-based Threats to Internet-of-Things (IoT) Devices and Applications}},
url = {http://arxiv.org/abs/1802.02041},
year = {2018}
}
@article{Wilsey2016,
abstract = {PURPOSE OF REVIEW Electrophysiological measures of vision function have for decades
generated interest among glaucoma researchers and clinicians alike because of their potential to
help elucidate pathophysiological processes and sequence of glaucomatous damage, as well as to
offer a potential complementary metric of function that might be more sensitive than standard
automated perimetry. The purpose of this article is to review the recent literature to provide an
update on the role of the electroretinogram (ERG) in glaucoma diagnosis. RECENT FINDINGS The
pattern reversal ERG (PERG) and the photopic negative response (PhNR) of the cone-driven full-field,
focal or multifocal ERG provide objective measures of retinal ganglion cell function and are all
sensitive to glaucomatous damage. Recent studies demonstrate that a reduced PERG amplitude is
predictive of subsequent visual field conversion (from normal to glaucomatous) and an increased
rate of progressive retinal nerve fiber layer thinning in suspect eyes, indicating a potential role for
PERG in risk stratification. Converging evidence indicates that some portion of PERG and PhNR
abnormality represents a reversible aspect of dysfunction in glaucoma. SUMMARY PERG and PhNR
responses obtained from the central macula are capable of detecting early-stage, reversible
glaucomatous dysfunction.},
doi = {10.1097/ICU.0000000000000241},
isbn = {0000000000000},
issn = {1040-8738},
number = {2},
pages = {118--124},
pmid = {26720775},
volume = {27},
year = {2016}
}
@article{Kohler2014,
title = {{Computer Aided Diagnostics and Pattern Recognition : Automated Glaucoma Detection}},
year = {2014}
}
@article{Kasner2013,
archivePrefix = {arXiv},
arxivId = {NIHMS150003},
author = {Kasner, E and Hunter, Christopher A and Ph, D and Kariko, Katalin and Ph, D},
doi = {10.1002/ana.22528},
eprint = {NIHMS150003},
isbn = {3300000106},
issn = {09652140},
number = {4},
pages = {646--656},
pmid = {20402989},
volume = {70},
year = {2013}
}
@article{Dehnert,
archivePrefix = {arXiv},
arxivId = {arXiv:1702.04311v1},
author = {Dehnert, Christian and Junges, Sebastian and Katoen, Joost-Pieter and Volk, Matthias},
eprint = {arXiv:1702.04311v1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1702.04311.pdf:pdf},
title = {{A Storm is Coming: A Modern Probabilistic Model Checker}},
year = {2017}
}
@book{AlDuhaiby2018,
abstract = {Maintaining legacy software is one of the most common struggles of the software
industry, being costly yet essential. We tackle that problem by providing better understanding of
software by extracting behavioural models using the model learning technique. The used technique
interacts with a running component and extracts abstract models that would help developers make
better informed decisions. As promising in theory, as slippery in application it is, however. This
report describes our experience in applying model learning to legacy software, and aims to prepare
the newcomer for what shady pitfalls lie therein as well as provide the seasoned researcher with
concrete cases and open problems. We narrate our experience in analysing certain legacy
components at Philips Healthcare describing challenges faced, solutions implemented, and lessons
learned.},
author = {al Duhaiby, Omar and Mooij, Arjan and van Wezep, Hans and Groote, Jan Friso},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
doi = {10.1007/978-3-030-03427-6_13},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030034269},
issn = {16113349},
pages = {121--138},
year = {2018}
}
@article{Delevacq2013,
abstract = {The purpose of this paper is to propose effective parallelization strategies for the Ant
Colony Optimization (ACO) metaheuristic on Graphics Processing Units (GPUs). The Max-Min Ant
System (MMAS) algorithm augmented with 3-opt local search is used as a framework for the
implementation of the parallel ants and multiple ant colonies general parallelization approaches. The
four resulting GPU algorithms are extensively evaluated and compared on both speedup and
solution quality on a state-of-the-art Fermi GPU architecture. A rigorous effort is made to keep
parallel algorithms true to the original MMAS applied to the Traveling Salesman Problem. We report
speedups of up to 23.60 with solution quality similar to the original sequential implementation. With
the intent of providing a parallelization framework for ACO on GPUs, a comparative experimental
study highlights the performance impact of ACO parameters, GPU technical configuration, memory
structures and parallelization granularity. {\textcopyright} 2012 Elsevier Inc. All rights reserved.},
author = {Del{\'{e}}vacq, Audrey and Delisle, Pierre and Gravel, Marc and Krajecki, Micha{\"{e}}l},
doi = {10.1016/j.jpdc.2012.01.003},
issn = {07437315},
journal = {Journal of Parallel and Distributed Computing},
number = {1},
pages = {52--61},
title = {Parallel Ant Colony Optimization on Graphics Processing Units},
url = {http://dx.doi.org/10.1016/j.jpdc.2012.01.003},
volume = {73},
year = {2013}
}
@inproceedings{Levin,
author = {Levin, Esther and Pieraccini, Roberto and Eckert, Wieland},
booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/using-markov-decision-process-for-learning-dialogue-strategies.pdf:pdf},
internal-note = {author, title and year reconstructed from the PDF file name and ISBN -- verify},
isbn = {0780344286},
pages = {201--204},
title = {Using {Markov} Decision Process for Learning Dialogue Strategies},
year = {1998}
}
@article{Holzl2015a,
abstract = {Reasoning and learning for awareness and adaptation are challenging endeavors since
cogitation has to be tightly integrated with action execution and reaction to unforeseen
contingencies. After discussing the notion of awareness and presenting a classification scheme for
awareness mechanisms, we introduce Extended Behavior Trees (XBTs), a novel modeling method for
hierarchical, concurrent behaviors that allows the interleaving of reasoning, learning and actions.
The semantics of XBTs are defined by a transformation to SCEL so that sophisticated synchronization
strategies are straightforward to realize and different kinds of distributed, hierarchical learning and
reasoning—from centrally coordinated to fully autonomic—can easily be expressed. We propose
novel hierarchical reinforcement-learning strategies called Hierarchical (Lenient) Frequency-
Adjusted Q-learning, that can be implemented using XBTs. Finally we discuss how XBTs can be used
to define a multi-layer approach to learning, called teacher-student learning, that combines
centralized and distributed learning in a seamless way.},
author = {H{\"o}lzl, Matthias and Wirsing, Martin},
doi = {10.1007/978-3-319-16310-9_7},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
internal-note = {volume value looks spurious for an LNCS chapter -- verify against the published source},
pages = {249--290},
volume = {257414},
year = {2015}
}
@article{Ishaque2012,
abstract = {This paper proposes a deterministic particle swarm optimization to improve the
maximum power point tracking (MPPT) capability for photovoltaic system under partial shading
condition. The main idea is to remove the random number in the accelerations factor of the
conventional PSO velocity equation. Additionally, the maximum change in velocity is restricted to a
particular value, which is determined based on the critical study of P-V characteristics during partial
shading. Advantages of the method include: 1) consistent solution is achieved despite a small
number of particles, 2) only one parameter, i.e., the inertia weight, needs to be tuned, and 3) the
MPPT structure is much simpler compared to the conventional PSO. To evaluate the idea, the
algorithm is implemented on a buck-boost converter and compared to the conventional hill climbing
(HC) MPPT method. Simulation results indicate that the proposed method outperforms the HC
method in terms of global peak tracking speed and accuracy under various partial shading
conditions. Furthermore, it is tested using the measured data of a tropical cloudy day, which
includes rapid movement of the passing clouds and partial shading. Despite the wide fluctuations in
array power, the average efficiency for the 10-h test profile reaches 99.5{\%}.},
doi = {10.1109/TIE.2012.2200223},
issn = {0278-0046},
journal = {IEEE Transactions on Industrial Electronics},
number = {8},
pages = {1--1},
title = {{A Deterministic Particle Swarm Optimization Maximum Power Point Tracker for Photovoltaic
System under Partial Shading Condition}},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6202691},
volume = {60},
year = {2012}
}
@inproceedings{Biere1999,
abstract = {Symbolic Model Checking [3], [14] has proven to be a powerful technique for the
verification of reactive systems. BDDs [2] have traditionally been used as a symbolic representation
of the system. In this paper we show how boolean decision procedures, like St{\aa}lmarck's Method
[16] or the Davis {\&} Putnam Procedure [7], can replace BDDs. This new technique avoids the space
blow up of BDDs, generates counterexamples much faster, and sometimes speeds up the
verification. In addition, it produces counterexamples of minimal length. We introduce a bounded
model checking procedure for LTL which reduces model checking to propositional satisfiability.We
show that bounded LTL model checking can be done without a tableau construction. We have
implemented a model checker BMC, based on bounded model checking, and preliminary results are
presented.},
author = {Biere, Armin and Cimatti, Alessandro and Clarke, Edmund and Zhu, Yunshan},
booktitle = {Tools and Algorithms for the Construction and Analysis of Systems (TACAS)},
doi = {10.1007/3-540-49059-0_14},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Biere1999{\_}Chapter{\_}SymbolicModelCheckingWithoutBD.pdf:pdf},
isbn = {3540657037},
issn = {16113349},
pages = {193--207},
series = {Lecture Notes in Computer Science},
title = {Symbolic Model Checking without {BDDs}},
volume = {1579},
year = {1999}
}
@article{BanciBuonamici2019,
abstract = {Recent research on spatial and spatio-temporal model checking provides novel image
analysis methodologies, rooted in logical methods for topological spaces. Medical imaging (MI) is a
field where such methods show potential for ground-breaking innovation. Our starting point is SLCS,
the Spatial Logic for Closure Spaces—closure spaces being a generalisation of topological spaces,
covering also discrete space structures—and topochecker, a model checker for SLCS (and extensions
thereof). We introduce the logical language ImgQL (“Image Query Language”). ImgQL extends SLCS
with logical operators describing distance and region similarity. The spatio-temporal model checker
topochecker is correspondingly enhanced with state-of-the-art algorithms, borrowed from
computational image processing, for efficient implementation of distance-based operators, namely
distance transforms. Similarity between regions is defined by means of a statistical similarity
operator, based on notions from statistical texture analysis. We illustrate our approach by means of
an example of analysis of Magnetic Resonance images: segmentation of glioblastoma and its
oedema.},
author = {{Banci Buonamici}, Fabrizio and Belmonte, Gina and Ciancia, Vincenzo and Latella, Diego
and Massink, Mieke},
doi = {10.1007/s10009-019-00511-9},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/bancibuonamici2019.pdf:pdf},
issn = {14332787},
journal = {International Journal on Software Tools for Technology Transfer},
title = {Spatial Logics and Model Checking for Medical Imaging},
url = {https://doi.org/10.1007/s10009-019-00511-9},
year = {2019}
}
@inproceedings{Calinescu2020,
booktitle = {1st IEEE International Conference on Autonomic Computing and Self-Organizing Systems (ACSOS)},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ACSOS{\_}2020{\_}camera{\_}ready.pdf:pdf},
internal-note = {auto-imported entry; author and title are missing -- recover from the PDF},
year = {2020}
}
@article{Shehory1998,
abstract = {Task execution in multi-agent environments may require cooperation among agents.
Given a set of agents and a set of tasks which they have to satisfy, we consider situations where each
task should be attached to a group of agents that will perform the task. Task allocation to groups of
agents is necessary when tasks cannot be performed by a single agent. However it may also be
beneficial when groups perform more efficiently with respect to the single agents' performance. In
this paper we present several solutions to the problem of task allocation among autonomous agents,
and suggest that the agents form coalitions in order to perform tasks or improve the efficiency of
their performance. We present efficient distributed algorithms with low ratio bounds and with low
computational complexities. These properties are proven theoretically and supported by simulations
and an implementation in an agent system. Our methods are based on both the algorithmic aspects
of combinatorics and approximation algorithms for NP-hard problems. We first present an approach
to agent coalition formation where each agent must be a member of only one coalition. Next, we
present the domain of overlapping coalitions. We proceed with a discussion of the domain where
tasks may have a precedence order. Finally, we discuss the case of implementation in an open,
dynamic agent system. For each case we provide an algorithm that will lead agents to the formation
of coalitions, where each coalition is assigned a task. Our algorithms are any-time algorithms, they
are simple, efficient and easy to implement.},
author = {Shehory, Onn and Kraus, Sarit},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0004370298000459-main.pdf:pdf},
journal = {Artificial Intelligence},
number = {1-2},
pages = {165--200},
title = {{Artificial Intelligence Methods for Task Allocation via Agent Coalition Formation}},
volume = {101},
year = {1998}
}
@article{Ibraheem2013,
doi = {10.5120/ijais13-450985},
internal-note = {auto-imported entry; author and title are missing -- recover from the DOI},
journal = {International Journal of Applied Information Systems},
number = {10},
pages = {24--38},
year = {2013}
}
@phdthesis{Zur2020,
author = {Reichstaller, Andr{\'e}},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/reichstaller{\_}diss.pdf:pdf},
internal-note = {original author field was a garbled German dissertation title page ("Dissertation zur Erlangung ... der Informatik"); author reconstructed from the PDF file name -- verify author, title and school},
school = {Universit{\"a}t Augsburg},
year = {2020}
}
@article{Tsiropoulou2018,
doi = {10.4108/eai.12-1-2018.154176},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/eai.12-1-2018.1541762.pdf:pdf},
internal-note = {auto-imported entry; author, title and journal are missing -- recover from the DOI},
number = {January},
year = {2018}
}
@article{Sun2009,
abstract = {Recent development on distributed systems has shown that a variety of fairness
constraints (some of which are only recently defined) play vital roles in designing self-stabilizing
population protocols. Current practice of system analysis is, however, deficient under fairness. In
this work, we present PAT, a toolkit for flexible and efficient system analysis under fairness. A unified
algorithm is proposed to model check systems with a variety of fairness effectively in two different
settings. Empirical evaluation shows that PAT complements existing model checkers in terms of
fairness. We report that previously unknown bugs have been revealed using PAT against systems
functioning under strong global fairness. {\textcopyright} 2009 Springer Berlin Heidelberg.},
author = {Sun, Jun and Liu, Yang and Dong, Jin Song and Pang, Jun},
doi = {10.1007/978-3-642-02658-4_59},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/computer-aided-verification-2009{\_}2.pdf:pdf},
isbn = {3642026575},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {709--714},
title = {{PAT}: Towards Flexible Verification under Fairness},
year = {2009}
}
@book{White2001,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/White2001{\_}ReferenceWorkEntry{\_}MarkovDecisionProcessesMarkovD.pdf:pdf},
internal-note = {citation key, title and year reconstructed from the PDF file name; the original entry had an empty key -- verify remaining fields},
isbn = {079237827X},
title = {{Markov} Decision Processes},
year = {2001}
}
@article{Khamespanah2018,
abstract = {Programmers often use informal worst-case analysis and debugging to ensure that
schedulers satisfy real-time requirements. Not only can this process be tedious and error-prone, it is
inherently conservative and thus likely to lead to an inefficient use of resources. We propose to use
model checking to find a schedule which optimizes the use of resources while satisfying real-time
requirements. Specifically, we represent a Wireless sensor and actuator network (WSAN) as a
collection of actors whose behaviors are specified using a Java-based actor language extended with
operators for real-time scheduling and delay representation. We show how the abstraction
mechanism and the compositionality of actors in the actor model may be used to incrementally build
a model of a WSAN's behavior from node-level and network models. We demonstrate the approach
with a case study of a distributed real-time data acquisition system for high-frequency sensing using
Timed Rebeca modeling language and the Afra model checking tool.},
author = {Khamespanah, Ehsan and Sirjani, Marjan and Mechitov, Kirill and Agha, Gul},
doi = {10.1007/s10009-017-0480-3},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/khamespanah2017.pdf:pdf},
issn = {14332787},
journal = {International Journal on Software Tools for Technology Transfer},
number = {5},
pages = {547--561},
publisher = {Springer Berlin Heidelberg},
title = {{Modeling and analyzing real-time wireless sensor and actuator networks using actors and
model checking}},
volume = {20},
year = {2018}
}
@article{Mark2012,
internal-note = {auto-imported entry; author, title and journal are missing -- recover from the source},
pages = {2--4},
volume = {153},
year = {2012}
}
@article{Wang2016,
isbn = {9781450341578},
title = {{Towards Applying a Safety Analysis and Verification Method based on STPA to Agile Software
Development}},
year = {2016}
}
@article{Autili2015,
doi = {10.1109/MS.2014.131},
journal = {IEEE Software},
number = {February},
year = {2015}
}
@article{Bortolussi2015,
abstract = {In this paper we present CARMA, a language recently defined to support specification
and analysis of collective adaptive systems. CARMA is a stochastic process algebra equipped with
linguistic constructs specifically developed for modelling and programming systems that can operate
in open-ended and unpredictable environments. This class of systems is typically composed of a
huge number of interacting agents that dynamically adjust and combine their behaviour to achieve
specific goals. A CARMA model, termed a collective, consists of a set of components, each of which
exhibits a set of attributes. To model dynamic aggregations, which are sometimes referred to as
ensembles, CARMA provides communication primitives that are based on predicates over the
exhibited attributes. These predicates are used to select the participants in a communication. Two
communication mechanisms are provided in the CARMA language: multicast-based and unicast-
based. In this paper, we first introduce the basic principles of CARMA and then we show how our
language can be used to support specification with a simple but illustrative example of a socio-
technical collective adaptive system.},
author = {Bortolussi, Luca and {De Nicola}, Rocco and Galpin, Vashti and Gilmore, Stephen and
Hillston, Jane and Latella, Diego and Loreti, Michele and Massink, Mieke},
doi = {10.4204/eptcs.194.2},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1509.08560.pdf:pdf},
journal = {Electronic Proceedings in Theoretical Computer Science},
pages = {16--31},
title = {{CARMA}: Collective Adaptive Resource-sharing {Markovian} Agents},
volume = {194},
year = {2015}
}
@article{Gao2019,
abstract = {In recent years, increasing numbers of researchers have concentrated on service
workflow to support cross-domain software development. However, the uncertain characteristics of
the Internet impose high risks on service workflow reliability. The risk of failure caused by
unavailable services may increase costs when using service workflow-based applications. Thus, it is
necessary to consider the non-functional factors, such as service cost and reliability. In this paper,
we propose a cost-driven services composition approach for enterprise workflows that employs
formal verification to recommend appropriate services for abstract workflows. The services
composition is measured quantitatively to ensure that the configuration to service the workflow
solution has the best performance, high reliability and low cost. First, this solution introduces a
service search approach based on an inverted index, and the service recommendation method is
based on an improved Pearson formula. Next, the solution returns a minimum set of candidate
services for constructing a workflow instance. Second, the service and workflow models are defined
to formalize the behaviour of service composition; this is considered to be a verification model.
Third, transformation rules are provided to change BPEL4WS into a verification model, and PCTL
(Probabilistic Computation Tree Logic) formulae are used to specify the reliability and cost-related
properties. The quantitative verification method checks each possible plan for service composition
using probabilistic model checking. Finally, the results of a series of experiments show that our
approach is effective in generating an optimal service workflow.},
author = {Gao, Honghao and Huang, Wanqiu and Duan, Yucong and Yang, Xiaoxian and Zou, Qiming},
doi = {10.3966/160792642019052003009},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2054-2416-1-SM.pdf:pdf},
issn = {20794029},
journal = {Journal of Internet Technology},
number = {3},
pages = {755--769},
volume = {20},
year = {2019}
}
@article{Iglesia2015,
abstract = {Designing software systems that have to deal with dynamic operating conditions, such as
changing availability of resources and faults that are difficult to predict, is complex. A promising
approach to handle such dynamics is self-adaptation that can be realized by a MAPE-K feedback loop
(Monitor-Analyze-Plan-Execute plus Knowledge). To provide evidence that the system goals are
satisfied, given the changing conditions, the state of the art advocates the use of formal methods.
However, little research has been done on consolidating design knowledge of self-adaptive systems.
To support designers, this paper contributes with a set of formally specified MAPE-K templates that
encode design expertise for a family of self-adaptive systems. The templates comprise: (1) behavior
specification templates for modeling the different components of a MAPE-K feedback loop (based on
networks of timed automata), and (2) property specification templates that support verification of
the correctness of the adaptation behaviors (based on timed computation tree logic). To
demonstrate the reusability of the formal templates, we performed four case studies in which final-
year Masters students used the templates to design different self-adaptive systems. {\textcopyright}
2015 ACM.},
author = {{Gil de la Iglesia}, Didac and Weyns, Danny},
doi = {10.1145/2724719},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/MFT.pdf:pdf},
issn = {15564665},
journal = {ACM Transactions on Autonomous and Adaptive Systems},
number = {3},
pages = {1--31},
title = {{MAPE-K Formal Templates to Rigorously Design Behaviors for Self-Adaptive Systems}},
volume = {10},
year = {2015}
}
@article{Jaenen2007,
abstract = {PURPOSE To compare the prevalence of side effects between eyedrops with or without
preservatives, in terms of subjective symptoms and objective signs in patients with open-angle
glaucoma. METHODS In a multicenter cross-sectional epidemiologic survey in four European
countries, ophthalmologists in private practice enrolled 9658 nonconsecutive patients using
preservative (P) or preservative-free (PF) beta-blocking eyedrops between June 1997 and December
2003. Subjective symptoms, conjunctival and palpebral signs, and superficial punctate keratitis were
explored before and after a change in therapy. For statistical analysis, a Chi-square test was used to
calculate the differences in the prevalence of symptoms and signs with or without preservatives.
RESULTS A total of 74{\%} of the patients used P, 12{\%} PF, 10{\%} a P-PF combination, and in 4{\%}
the type of medication was unknown. Each recorded symptom and all the palpebral, conjunctival,
and corneal signs were significantly more frequent (p{\textless}0.0001) in the P-group than in the PF-
group, such as pain or discomfort during instillation (48 vs 19{\%}), foreign body sensation (42 vs
15{\%}), stinging or burning (48 vs 20{\%}), and dry eye sensation (35 vs 16{\%}). A total of 68{\%} of
the patients had a sec-ond visit performed, of whom 63{\%} (6083) had been evaluated on
treatment difference. A significant decrease (p{\textless}0.0001) of all ocular symptoms and signs
was observed in patients in whom the preserved eyedrops were diminished in number or altered
into preservative free drops. CONCLUSIONS Compared to preserved eyedrops, preservative free
eyedrops are significantly less associated with ocular symptoms and signs of irritation.},
author = {Jaenen, N. and Baudouin, C. and Pouliquen, P. and Manni, G. and Figueiredo, A. and Zeyen,
Thierry},
isbn = {1120-6721},
issn = {11206721},
journal = {European Journal of Ophthalmology},
number = {3},
pages = {341--349},
pmid = {17534814},
title = {{Ocular symptoms and signs with preserved and preservative-free glaucoma medications}},
volume = {17},
year = {2007}
}
@article{Chakravarty2016,
doi = {10.1109/ISBI.2016.7493360},
isbn = {9781479923502},
issn = {19458452},
keywords = {Co-training,Fusion,Glaucoma,Retina},
number = {i},
pages = {689--692},
volume = {2016-June},
year = {2016}
}
@article{Clarke1996,
abstract = {We propose HyDICE, Hybrid DIscrete Continuous Exploration, a multi-layered approach
for hybrid-system testing that integrates continuous sampling-based$\backslash$n robot motion
planning with discrete searching. The discrete search uses the discrete transitions of the hybrid
system and$\backslash$n coarse-grained decompositions of the continuous state spaces or related
projections to guide the motion planner during the$\backslash$n search for witness trajectories.
Experiments presented in this paper, using a hybrid system inspired by robot motion planning$\
backslash$n and with nonlinear dynamics associated with each of several thousand modes, provide
an initial validation of HyDICE and demonstrate its promise as a hybrid-system testing method.
Comparisons to related work show computational speedups of$\backslash$n up to two orders of
magnitude.},
doi = {10.1109/6.499951},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}7.pdf:pdf},
internal-note = {abstract appears to belong to a different paper (HyDICE hybrid-system testing) than the cited 1996 DOI -- verify and replace},
isbn = {9783319961422},
issn = {00189235},
journal = {IEEE Spectrum},
number = {6},
pages = {61--67},
volume = {33},
year = {1996}
}
@article{Bures2020a,
abstract = {Smart system applications (SSAs) built on top of cyber-physical and socio-technical
systems are increasingly composed of components that can work both autonomously and by
cooperating with each other. Cooperating robots, fleets of cars and fleets of drones, emergency
coordination systems are examples of SSAs. One approach to enable cooperation of SSAs is to form
dynamic cooperation groups—ensembles—between components at runtime. Ensembles can be
formed based on predefined rules that determine which components should be part of an ensemble
based on their current state and the state of the environment (e.g., “group together 3 robots that
are closer to the obstacle, their battery is sufficient and they would not be better used in another
ensemble”). This is a computationally hard problem since all components are potential members of
all possible ensembles at runtime. In our experience working with ensembles in several case studies
the past years, using constraint programming to decide which ensembles should be formed does not
scale for more than a limited number of components and ensembles. Also, the strict formulation in
terms of hard/soft constraints does not easily permit for runtime self-adaptation via learning. This
poses a serious limitation to the use of ensembles in large-scale and partially uncertain SSAs. To
tackle this problem, in this paper we propose to recast the ensemble formation problem as a
classification problem and use machine learning to efficiently form ensembles at scale.},
author = {Bure{\v{s}}, Tom{\'{a}}{\v{s}} and Gerostathopoulos, Ilias and Hn{\v{e}}tynka, Petr and
Pacovsk{\'{y}}, Jan},
doi = {10.1007/978-3-030-61470-6_26},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1007978-3-030-61470-6.pdf:pdf},
isbn = {9783030614690},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {440--456},
year = {2020}
}
@article{Prieto2018,
abstract = {The series “Advances in Intelligent Systems and Computing” contains publications on
theory, applications, and design methods of Intelligent Systems and Intelligent Computing. Virtually
all disciplines such as engineering, natural sciences, computer and information science, ICT,
economics, business, e-commerce, environment, healthcare, life science are covered. The list of
topics spans all the areas of modern intelligent systems and computing. The publications within
“Advances in Intelligent Systems and Computing” are primarily textbooks and proceedings of
important conferences, symposia and congresses. They cover significant recent developments in the
field, both of a foundational and applicable character. An important characteristic feature of the
series is the short publication time and world-wide distribution.},
doi = {10.1007/978-3-319-70581-1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/telenyk2016.pdf:pdf},
internal-note = {abstract is the AISC series blurb and the PDF file name suggests a different first author -- verify author, title and abstract},
isbn = {978-3-319-70580-4},
pages = {41--62},
url = {http://link.springer.com/10.1007/978-3-319-70581-1},
volume = {689},
year = {2018}
}
@article{Zhukova2019,
abstract = {The paper considers the problem of dynamic modeling of complex natural and technical
objects. The objects that have hierarchical structure are in focus. It is proposed to use new multilevel
relatively finite automata models as formal models of such objects. A new algorithm based on
deductive synthesis that allows automatically build automata models is presented. Automata models
and the algorithm are implemented in program system. A number of examples of building models in
the domain of Internet of Things are given.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/3338290.3338406.pdf:pdf},
journal = {FRUCT'24: Proceedings of the 24th Conference of Open Innovations Association FRUCT},
year = {2019}
}
@article{Wu2014,
author = {Wu, G and Qiu, D and Yu, Y and Pedrycz, W and Ma, M and Li, H},
journal = {Expert Systems with Applications},
number = {16},
pages = {7536--7548},
title = {{Superior solution guided particle swarm optimization combined with local search
techniques}},
volume = {41},
year = {2014}
}
@article{Bussel2014,
abstract = {Optical coherence tomography (OCT) is a commonly used imaging modality in the
evaluation of glaucomatous damage. The commercially available spectral domain (SD)-OCT offers
benefits in glaucoma assessment over the earlier generation of time domain-OCT due to increased
axial resolution, faster scanning speeds and has been reported to have improved reproducibility but
similar diagnostic accuracy. The capabilities of SD-OCT are rapidly advancing with 3D imaging,
reproducible registration, and advanced segmentation algorithms of macular and optic nerve head
regions. A review of the evidence to date suggests that retinal nerve fibre layer remains the
dominant parameter for glaucoma diagnosis and detection of progression while initial studies of
macular and optic nerve head parameters have shown promising results. SD-OCT still currently lacks
the diagnostic performance for glaucoma screening.},
author = {Bussel, Igor I and Wollstein, Gadi and Schuman, Joel S},
doi = {10.1136/bjophthalmol-2013-304326},
issn = {0007-1161},
journal = {British Journal of Ophthalmology},
pages = {ii15--ii19},
pmid = {24357497},
title = {{OCT for glaucoma diagnosis, screening and detection of glaucoma progression}},
url = {http://bjo.bmj.com/lookup/doi/10.1136/bjophthalmol-2013-304326},
volume = {98},
year = {2014}
}
@article{Bu2011,
abstract = {Many Cyber-Physical Systems (CPS) are highly nondeterministic. This often makes it
impractical to model and predict the complete system behavior. To address this problem, we
propose that instead of offline modeling and verification, many CPS systems should be modeled and
verified online, and we shall focus on the system's
time-bounded behavior in short-run future, which is more describable and predictable. Meanwhile,
as the system model is generated/ updated online, the verification has to be fast. It is meaningless to
tell an online model is unsafe when it is already outdated.
To demonstrate the feasibility of our proposal, we study two cases of our ongoing projects, one on
the modeling and verification of a train control system, and the other on a Medical Device Plug-and-
Play (MDPnP) application. Both cases are about safetycritical CPS systems. Through these two cases,
we exemplify how to build online models that describe the time-bounded short-run behavior of CPS
systems; and we show that fast online modeling and verification is possible.},
author = {Bu, Lei and Wang, Qixin and Chen, Xin and Wang, Linzhang and Zhang, Tian and Zhao,
Jianhua and Li, Xuandong},
doi = {10.1145/2000367.2000368},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/bu2011.pdf:pdf},
journal = {ACM SIGBED Review},
number = {2},
pages = {7--10},
title = {{Toward online hybrid systems model checking of cyber-physical systems' time-bounded
short-run behavior}},
volume = {8},
year = {2011}
}
@inproceedings{Ronneberger2015,
abstract = {There is large consent that successful training of deep networks requires many thousand
annotated training samples. In this paper, we present a network and training strategy that relies on
the strong use of data augmentation to use the available annotated samples more efficiently. The
architecture consists of a contracting path to capture context and a symmetric expanding path that
enables precise localization. We show that such a network can be trained end-to-end from very few
images and outperforms the prior best method (a sliding-window convolutional network) on the ISBI
challenge for segmentation of neuronal structures in electron microscopic stacks. Using the same
network trained on transmitted light microscopy images (phase contrast and DIC) we won the ISBI
cell tracking challenge 2015 in these categories by a large margin. Moreover, the network is fast.
Segmentation of a 512x512 image takes less than a second on a recent GPU. The full implementation
(based on Caffe) and the trained networks are available at
http://lmb.informatik.uni-freiburg.de/people/ronneber/u-net .},
archivePrefix = {arXiv},
arxivId = {1505.04597},
author = {Ronneberger, Olaf and Fischer, Philipp and Brox, Thomas},
booktitle = {Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
doi = {10.1007/978-3-319-24574-4_28},
eprint = {1505.04597},
isbn = {9783319245737},
issn = {16113349},
pages = {1--8},
pmid = {23285570},
title = {{U-Net}: Convolutional Networks for Biomedical Image Segmentation},
url = {http://arxiv.org/abs/1505.04597},
year = {2015}
}
@book{Westphall2015,
author = {Westphall, Carlos Becker and Borcoci, Eugen and Pozniak-Koszalka, Iwona},
isbn = {9781612083988},
year = {2015}
}
@article{Autili2015a,
doi = {10.4204/EPTCS.201.3},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1512.07682.pdf:pdf},
journal = {Electronic Proceedings in Theoretical Computer Science},
pages = {33--47},
title = {{On the Automated Synthesis of Enterprise Integration Patterns to Adapt Choreography-
based Distributed Systems}},
volume = {201},
year = {2015}
}
@article{Sen,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/qest05.pdf:pdf},
internal-note = {auto-imported entry; author, title, venue and year are missing -- recover from the PDF (QEST 2005)},
pages = {4--5}
}
@article{Wang2015,
abstract = {Background: The lateral geniculate nucleus (LGN) is a key relay center of the visual
system. Because the LGN morphology is affected by different diseases, it is of interest to analyze its
morphology by segmentation. However, existing LGN segmentation methods are non-automatic,
inefficient and prone to experimenters' bias. New method: To address these problems, we proposed
an automatic LGN segmentation algorithm based on T1-weighted imaging. First, the prior
information of LGN was used to create a prior mask. Then region growing was applied to delineate
LGN. We evaluated this automatic LGN segmentation method by (1) comparison with manually
segmented LGN, (2) anatomically locating LGN in the visual system via LGN-based tractography, (3)
application to control and glaucoma patients. Results: The similarity coefficients of automatic
segmented LGN and manually segmented one are 0.72 (0.06) for the left LGN and 0.77 (0.07) for the
right LGN. LGN-based tractography shows the subcortical pathway seeding from LGN passes the
optic tract and also reaches V1 through the optic radiation, which is consistent with the LGN location
in the visual system. In addition, LGN asymmetry as well as LGN atrophy along with age is observed
in normal controls. The investigation of glaucoma effects on LGN volumes demonstrates that the
bilateral LGN volumes shrink in patients. Comparison with existing methods: The automatic LGN
segmentation is objective, efficient, valid and applicable. Conclusions: Experiment results proved the
validity and applicability of the algorithm. Our method will speed up the research on visual system
and greatly enhance studies of different vision-related diseases.},
author = {Wang, Jieqiong and Miao, Wen and Li, Jing and Li, Meng and Zhen, Zonglei and Sabel,
Bernhard and Xian, Junfang and He, Huiguang},
doi = {10.1016/j.jneumeth.2015.08.006},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Article For FYP/1000013243102.pdf:pdf},
issn = {1872678X},
pages = {104--114},
title = {{Automatic segmentation of the lateral geniculate nucleus: Application to control and
glaucoma patients}},
url = {http://dx.doi.org/10.1016/j.jneumeth.2015.08.006},
volume = {255},
year = {2015}
}
@article{Gilmore2016,
doi = {10.1007/978-3-319-47166-2},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/article.pdf:pdf},
isbn = {9783319471662},
year = {2016}
}
@article{Zheng2015,
abstract = {{\textcopyright} 2015-IOS Press and the authors.Fuzzy c-means (FCM) has been
considered as an effective algorithm for image segmentation. However, it still suffers from two
problems: one is insufficient robustness to image noise, and the other is the Euclidean distance in
FCM, which is sensitive to outliers. In this paper, we propose two new algorithms, generalized FCM
(GFCM) and hierarchical FCM (HFCM), to solve these two problems. Traditional FCM can be
considered as a linear combination of membership and distance from the expression of its
mathematical formula. GFCM is generated by applying generalized mean on these two items. We
impose generalized mean on membership to incorporate local spatial information and cluster
information, and on distance function to incorporate local spatial information and image intensity
value. Thus, our GFCM is more robust to image noise with the spatial constraints: the generalized
mean. To solve the second problem caused by Euclidean distance (l2 norm), we introduce a more
flexibility function which considers the distance function itself as a sub-FCM. Furthermore, the sub-
FCM distance function in HFCM is general and flexible enough to deal with non-Euclidean data.
Finally, we combine these two algorithms to introduce a new generalized hierarchical FCM (GHFCM).
Experimental results demonstrate the improved robustness and effectiveness of the proposed
algorithm.},
author = {Zheng, Yuhui and Jeon, Byeungwoo and Xu, Danhua and Wu, Q.M. Jonathan and Zhang,
Hui},
doi = {10.3233/IFS-141378},
issn = {1064-1246},
number = {2},
pages = {961--973},
volume = {28},
year = {2015}
}
@article{Wang2019,
author = {Wang, Xinjun and Ji, Ji and Liu, Tingting and Liu, Yujie and Qiao, Liang and Liu, Baohong},
doi = {10.1021/acs.analchem.8b04850},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/wang2019.pdf:pdf},
title = {{Plasmonic Colloidosome-Based Single Cell Detector: A Strategy for Individual Cell Secretion
Sensing}},
year = {2019}
}
@article{VanHentenryck1992,
abstract = {Constraint logic programming (CLP) is a new class of declarative programming languages
whose primitive operations are based on constraints (e.g. constraint solving and constraint
entailment). CLP languages naturally combine constraint propagation with nondeterministic choices.
As a consequence, they are particularly appropriate for solving a variety of combinatorial search
problems, using the global search paradigm, with short development time and efficiency comparable
to procedural tools based on the same approach. In this paper, we describe how the CLP language
cc(FD), a successor of CHIP using consistency techniques over finite domains, can be used to solve
two practical applications: test-pattern generation and car sequencing. For both applications, we
present the cc(FD) program, describe how constraint solving is performed, report experimental
results, and compare the approach with existing tools. {\textcopyright} 1992.},
author = {{Van Hentenryck}, Pascal and Simonis, Helmut and Dincbas, Mehmet},
doi = {10.1016/0004-3702(92)90006-J},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/0004-3702-2892-2990006-j.pdf:pdf},
issn = {00043702},
number = {1-3},
pages = {113--159},
volume = {58},
year = {1992}
}
@article{Larsen1997,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/larsen1997.pdf:pdf},
pages = {134--152},
year = {1997}
}
@article{Liao2014,
author = {Liao, Tianjun and Socha, Krzysztof and Oca, Marco A Montes De and St, Thomas},
number = {4},
pages = {503--518},
volume = {18},
year = {2014}
}
@article{Glazier2019,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2020{\_}Meta{\_}Google.pdf:pdf}
}
@article{Kim2015,
author = {Kim, Ki Wan and Lee, Won Oh and Kim, Yeong Gon and Hong, Hyung Gil and Lee, Eui Chul
and Park, Kang Ryoung},
doi = {10.1117/1.OE.54.3.033103},
issn = {0091-3286},
number = {3},
pages = {033103},
title = {{Segmentation method of eye region based on fuzzy logic system for classifying open and
closed eyes}},
url = {http://opticalengineering.spiedigitallibrary.org/article.aspx?doi=10.1117/1.OE.54.3.033103},
volume = {54},
year = {2015}
}
@article{DeLemos2013,
abstract = {The goal of this roadmap paper is to summarize the state-of-the-art and identify research
challenges when developing, deploying and managing self-adaptive software systems. Instead of
dealing with a wide range of topics associated with the field, we focus on four essential topics of
self-adaptation: design space for self-adaptive solutions, software engineering processes for self-
adaptive systems, from centralized to decentralized control, and practical run-time verification {\&}
validation for self-adaptive systems. For each topic, we present an overview, suggest future
directions, and focus on selected challenges. This paper complements and extends a previous
roadmap on software engineering for self-adaptive systems published in 2009 covering a different
set of topics, and reflecting in part on the previous paper. This roadmap is one of the many results of
the Dagstuhl Seminar 10431 on Software Engineering for Self-Adaptive Systems, which took place in
October 2010. {\textcopyright} 2013 Springer-Verlag.},
author = {{De Lemos}, Rog{\'{e}}rio and Giese, Holger and M{\"{u}}ller, Hausi A. and Shaw, Mary and
Andersson, Jesper and Litoiu, Marin and Schmerl, Bradley and Tamura, Gabriel and Villegas, Norha
M. and Vogel, Thomas and Weyns, Danny and Baresi, Luciano and Becker, Basil and Bencomo, Nelly
and Brun, Yuriy and Cukic, Bojan and Desmarais, Ron and Dustdar, Schahram and Engels, Gregor and
Geihs, Kurt and G{\"{o}}schka, Karl M. and Gorla, Alessandra and Grassi, Vincenzo and Inverardi,
Paola and Karsai, Gabor and Kramer, Jeff and Lopes, Ant{\'{o}}nia and Magee, Jeff and Malek, Sam
and Mankovskii, Serge and Mirandola, Raffaela and Mylopoulos, John and Nierstrasz, Oscar and
Pezz{\`{e}}, Mauro and Prehofer, Christian and Sch{\"{a}}fer, Wilhelm and Schlichting, Rick and
Smith, Dennis B. and Sousa, Jo{\~{a}}o Pedro and Tahvildari, Ladan and Wong, Kenny and Wuttke,
Jochen},
doi = {10.1007/978-3-642-35813-5_1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/delemos2013.pdf:pdf},
isbn = {9783642358128},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {1--32},
year = {2013}
}
@article{Babkin2017,
abstract = {the language and tools of the MIT Alloy Analyzer system were chosen;
using Archi -the visual editor for ArchiMate models. We have developed
into the language of the MIT Alloy Analyzer system and uses the
specific domain models. The proposed method and software solutions have
been tested using the ArciSurance case and their enterprise architecture
model.},
doi = {10.17323/1998-0663.2017.3.30.40},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Analysis{\_}of{\_}the{\_}consistency{\
_}of{\_}enterprise{\_}architec.pdf:pdf},
issn = {19980663},
number = {3},
pages = {30--40},
title = {{Analysis of the consistency of enterprise architecture models using formal verification
methods}},
volume = {2017},
year = {2017}
}
@article{IndexBackMatter,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-540-26869-7{\
_}BookBackMatter.pdf:pdf},
title = {{Index}}
}
@article{Andrikopoulos2014a,
abstract = {Collective Adaptive Systems comprise large numbers of heterogeneous entities that can
join and leave the system at any time depending on their own objectives. In the scope of pervasive
computing, both physical and virtual entities may exist, e.g., buses and their passengers using mobile
devices, as well as city-wide traffic coordination systems. In this paper we introduce a novel
conceptual framework that enables Collective Adaptive Systems based on well-founded and widely
accepted paradigms and technologies like service orientation, distributed systems, context-aware
computing and adaptation of composite systems. Toward achieving this goal, we also present an
architecture that underpins the envisioned framework, discuss the current state of our
implementation effort, and we outline the open issues and challenges in the field. {\textcopyright}
Springer International Publishing Switzerland 2014.},
author = {Andrikopoulos, Vasilios and Bucchiarone, Antonio and {G{\'{o}}mez S{\'{a}}ez}, Santiago
and Karastoyanova, Dimka and Mezzina, Claudio Antares},
doi = {10.1007/978-3-319-06859-6_7},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/INPROC-2013-54-Towards-Modeling-
and-Execution-of-Collective-Adaptive-Systems.pdf:pdf},
isbn = {9783319068589},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {69--81},
year = {2014}
}
@article{Xu2013a,
author = {Xu, Yuming and Li, Kenli and Hu, Jingtong and Li, Keqin},
title = {{A genetic algorithm for task scheduling on heterogeneous computing systems
using multiple priority queues}},
year = {2013}
}
@misc{Untitled1589504,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1.1.158.9504.pdf:pdf},
title = {10.1.1.158.9504.pdf}
}
@article{Greenstreet1999,
doi = {10.1007/3-540-48983-5_12},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}20.pdf:pdf},
pages = {103--116},
year = {1999}
}
@article{David2012,
abstract = {TAPAAL 2.0 is a platform-independent modelling, simulation and verification tool for
extended timed-arc Petri nets. The tool supports component-based modelling and offers an
automated verification of the EF, AG, EG and AF fragments of TCTL via translations to Uppaal timed
automata and via its own dedicated verification engine. After more than three years of active
development with a main focus on usability aspects and on the efficiency of the verification
algorithms, we present the new version of TAPAAL 2.0 that has by now reached its maturity and
offers the first publicly available tool supporting the analysis and verification of timed-arc Petri nets.
{\textcopyright} 2012 Springer-Verlag Berlin Heidelberg.},
author = {David, Alexandre and Jacobsen, Lasse and Jacobsen, Morten and J{\o}rgensen, Kenneth
Yrke and M{\o}ller, Mikael H. and Srba, Jiř{\'{i}}},
doi = {10.1007/978-3-642-28756-5_36},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/David2012{\_}Chapter{\
_}TAPAAL20IntegratedDevelopmentE.pdf:pdf},
isbn = {9783642287558},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {492--497},
title = {{TAPAAL 2.0: Integrated development environment for timed-arc petri nets}},
year = {2012}
}
@book{Behrmann2006,
abstract = {This is a tutorial paper on the tool Uppaal . Its goal is to be a short introduction on the
flavour of timed automata impleme nted in the tool, to present its interface, and to explain how to
use t he tool. The contribution of the paper is to provide reference examples a nd modelling
patterns.},
author = {Behrmann, Gerd and David, Alexandre and Larsen, Kim G},
booktitle = {Computer},
isbn = {1111111111},
pages = {1--48},
year = {2006}
}
@article{Kumar1992,
abstract = {... The backtracking method essential- ly performs a depth-first search ( Kumar 1987) of
the space of potential CSP solutions. Although backtracking is strictly better than the generate-and-
test method, its run-time complexity for most nontrivial problems is still exponential. ... $\
backslash$n},
number = {1},
pages = {32--44},
volume = {13},
year = {1992}
}
@article{Yang2017,
abstract = {Self-adaptive systems (SASs) are capable of adjusting its behavior in response to
meaningful changes in the operational con-text and itself. The adaptation needs to be performed
automatically through self-managed reactions and decision-making processes at runtime. To support
this kind of automatic behavior, SASs must be endowed by a rich runtime support that can detect
requirements violations and reason about adaptation decisions. Requirements Engineering for SASs
primarily aims to model adaptation logic and mechanisms. Requirements models will guide the
design decisions and runtime behaviors of sys-tem-to-be. This paper proposes a model-driven
approach for achieving adaptation against non-functional requirements (NFRs), i.e. reliability and
performances. The approach begins with the models in RE stage and provides runtime support for
self-adaptation. We capture adaptation mechanisms as graphical elements in the goal model. By
assigning reliability and performance attributes to related system tasks, we derive the tagged
sequential diagram for specifying the reliability and performances of system behaviors. To formalize
system behavior, we transform the requirements model to the corresponding behavior model,
expressed by Label Transition Systems (LTS). To analyze the reliability requirements and
performance requirements, we merged the sequential diagram and LTS to a variable Discrete-Time
Markov Chains (DTMC) and a variable Continuous-Time Markov Chains (CTMC) respectively.
Adaptation candidates are characterized by the variable states. The optimal decision is derived by
verifying the concerned NFRs and reducing the decision space. Our approach is implemented
through the demonstration of a mobile information system.},
archivePrefix = {arXiv},
arxivId = {1704.00869},
eprint = {1704.00869},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1704.00869.pdf:pdf},
title = {{Achieving Adaptation for Adaptive Systems via Runtime Verification: A Model-Driven
Approach}},
url = {http://arxiv.org/abs/1704.00869},
year = {2017}
}
@article{Zon2016a,
abstract = {{\textcopyright} N. Zo{\'{n}}, V. Galpin and S. Gilmore. Space and movement through
space play an important role in many collective adaptive systems (CAS). CAS consist of multiple
components interacting to achieve some goal in a system or environment that can change over time.
When these components operate in space, then their behaviour can be affected by where they are
located in that space. Examples include the possibility of communication between two components
located at different points, and rates of movement of a component that may be affected by location.
The CARMA language and its associated software tools can be used to model such systems. In
particular, a graphical editor for CARMA allows for the specification of spatial structure and
generation of templates that can be used in a CARMA model with space. We demonstrate the use of
this tool to experiment with a model of pedestrian movement over a network of paths.},
doi = {10.4204/eptcs.217.6},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1607.02963.pdf:pdf},
volume = {217},
year = {2016}
}
@article{Ridder2019,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/gbu-ad-ridder.pdf:pdf},
pages = {1--6},
title = {{The Good , the Bad , and the Ugly in Markov Chains 2 Monte Carlo Simulation}},
year = {2019}
}
@article{Mishchenko2006,
abstract = {The paper explores several ways to improve the speed and capacity of combinational
equivalence checking based on Boolean satisfiability (SAT). State-of-the-art methods use simulation
and BDD/SAT sweeping on the input side (i.e. proving equivalence of some internal nodes in a
topological order), interleaved with attempts to run SAT on the output (i.e. proving equivalence of
the output to constant 0). This paper improves on this method by (a) using more intelligent
simulation, (b) using CNF-based SAT with circuit-based decision heuristics, and (c) interleaving SAT
with low-effort logic synthesis. Experimental results on public and industrial benchmarks
demonstrate substantial reductions in runtime, compared to the current methods. In several cases,
the new solver succeeded in solving previously unsolved problems},
author = {Mishchenko, Alan and Chatterjee, Satrajit and Brayton, Robert and Een, Niklas},
doi = {10.1109/ICCAD.2006.320087},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/mishchenko2006.pdf:pdf},
isbn = {1595933891},
issn = {10923152},
pages = {836--843},
year = {2006}
}
@article{Pitt,
author = {Pitt, Jeremy and London, Imperial College},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/cas-for-collective-action-
solutions.pdf:pdf}
}
@article{Gutierrez2020,
abstract = {In the context of multi-agent systems, the rational verification problem is concerned with
checking which temporal logic properties will hold in a system when its constituent agents are
assumed to behave rationally and strategically in pursuit of individual objectives. Typically, those
objectives are expressed as temporal logic formulae which the relevant agent desires to see
satisfied. Unfortunately, rational verification is computationally complex, and requires specialised
techniques in order to obtain practically useable implementations. In this paper, we present such a
technique. This technique relies on a reduction of the rational verification problem to the solution of
a collection of parity games. Our approach has been implemented in the Equilibrium Verification
Environment (EVE) system. The EVE system takes as input a model of a concurrent/multi-agent
system represented using the Simple Reactive Modules Language (SRML), where agent goals are
represented as Linear Temporal Logic ([Formula presented]) formulae, together with a claim about
the equilibrium behaviour of the system, also expressed as an [Formula presented] formula. EVE can
then check whether the [Formula presented] claim holds on some (or every) computation of the
system that could arise through agents choosing Nash equilibrium strategies; it can also check
whether a system has a Nash equilibrium, and synthesise individual strategies for players in the
multi-player game. After presenting our basic framework, we describe our new technique and prove
its correctness. We then describe our implementation in the EVE system, and present experimental
results which show that EVE performs favourably in comparison to other existing tools that support
rational verification.},
author = {Gutierrez, Julian and Najib, Muhammad and Perelli, Giuseppe and Wooldridge, Michael},
doi = {10.1016/j.artint.2020.103353},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/j.artint.2020.103353.pdf:pdf},
issn = {00043702},
pages = {103353},
title = {{Automated temporal equilibrium analysis: Verification and synthesis of multi-player games}},
url = {https://doi.org/10.1016/j.artint.2020.103353},
volume = {287},
year = {2020}
}
@article{Salehie2009,
doi = {10.1145/1516533.1516538},
number = {2},
volume = {4},
year = {2009}
}
@article{Baudry2007,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Automatic{\_}generation{\_}of{\_}test{\
_}cases{\_}using{\_}2.pdf:pdf},
issn = {15359484},
journal = {October},
number = {5},
pages = {M110.002089--111.009969},
pmid = {21879489},
url = {http://www.ncbi.nlm.nih.gov/pubmed/21879489},
volume = {10},
year = {2007}
}
@article{Xue2016,
abstract = {$\backslash$r$\backslash$nEmail$\backslash$r$\backslash$nPrint$\backslash$r$\
backslash$nRequest Permissions$\backslash$r$\backslash$nFeature selection is an important task
in data mining and machine learning to reduce the dimensionality of the data and increase the
performance of an algorithm, such as a classification algorithm. However, feature selection is a
challenging task due mainly to the large search space. A variety of methods have been applied to
solve feature selection problems, where evolutionary computation (EC) techniques have recently
gained much attention and shown some success. However, there are no comprehensive guidelines
on the strengths and weaknesses of alternative approaches. This leads to a disjointed and
fragmented field with ultimately lost opportunities for improving performance and successful
applications. This paper presents a comprehensive survey of the state-of-the-art work on EC for
feature selection, which identifies the contributions of these different algorithms. In addition,
current issues and challenges are also discussed to identify promising areas for future research.},
author = {Xue, Bing and Zhang, Mengjie and Browne, Will N. and Yao, Xin},
doi = {10.1109/TEVC.2015.2504420},
issn = {1089778X},
number = {4},
pages = {606--626},
volume = {20},
year = {2016}
}
@article{Xie2017,
abstract = {We develop a new edge detection algorithm that tackles two important issues in this
long-standing vision problem: (1) holistic image training and prediction; and (2) multi-scale and
multi-level feature learning. Our proposed method, holistically-nested edge detection (HED),
performs image-to-image prediction by means of a deep learning model that leverages fully
convolutional neural networks and deeply-supervised nets. HED automatically learns rich
hierarchical representations (guided by deep supervision on side responses) that are important in
order to approach the human ability resolve the challenging ambiguity in edge and object boundary
detection. We significantly advance the state-of-the-art on the BSD500 dataset (ODS F-score of .782)
and the NYU Depth dataset (ODS F-score of .746), and do so with an improved speed (0.4 second per
image) that is orders of magnitude faster than some recent CNN-based edge detection algorithms.},
archivePrefix = {arXiv},
arxivId = {1504.06375},
doi = {10.1007/s11263-017-1004-z},
eprint = {1504.06375},
issn = {15731405},
pages = {1--16},
year = {2017}
}
@article{Padda2014,
author = {Padda, Er Sheilly and Arora, Er Apoorva and Gupta, Er Sonali and Sharma, Er Priya},
keywords = {agile,iterative,prototype,scrum,v-},
number = {5},
volume = {3},
year = {2014}
}
@article{Fornari2019,
abstract = {Reconfigurable systems have evolved as a more comprehensive and better known area in
the last years. Reconfigurability is strictly related to the ability to change: the more flexible a system
is, the greater is its reconfigurability. Reconfiguration can provide the systems characteristics as self-
adaptation, allowing their resources to be used according to the environment in which they are
found and, consequently, extracting a better use of these resources. Unmanned Aerial Vehicles
(UAVs), mine hoists, mobile robots, and balloon systems are some applications where self-
adaptation and reconfiguration are important. Some reconfigurable systems are able to plan their
reconfiguration at runtime, i.e., the system sets its new configuration while running. These systems
are called Dynamically Reconfigurable Systems (DRSs). This paper aims to investigate DRSs seeking to
answer four specific questions: (i) how the different kinds of DRSs are classified in the literature and
what is their definitions; (ii) what are the hardware and software platforms, methodologies and
techniques engaged in DRSs; (iii) what are the domains of application of DRSs; and, (iv) which
countries lead the number of publications in DRSs. To do that, a systematic literature review was
conducted, where, at the end, 85 articles between 1995 and 2017 were completely read.},
doi = {10.1007/s10846-018-0921-6},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/fornari2018.pdf:pdf},
isbn = {1084601809216},
issn = {15730409},
number = {3-4},
pages = {829--849},
volume = {95},
year = {2019}
}
@inproceedings{H.C.Cheng2009,
author = {{H.C. Cheng}, Betty and de Lemos, Rogerio and Giese, Holger and Inverardi, Paola},
isbn = {9783642021602},
number = {5525},
pages = {1--26},
year = {2009}
}
@book{Katoen2014,
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
doi = {10.1007/978-3-319-10431-7},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/software-engineering-and-formal-
methods-2015.pdf:pdf},
isbn = {9783319104300},
issn = {16113349},
year = {2014}
}
@article{Banister2016,
abstract = {Purpose To compare the diagnostic performance of automated imaging for glaucoma.
Design Prospective, direct comparison study. Participants Adults with suspected glaucoma or ocular
hypertension referred to hospital eye services in the United Kingdom. Methods We evaluated 4
automated imaging test algorithms: the Heidelberg Retinal Tomography (HRT; Heidelberg
Engineering, Heidelberg, Germany) glaucoma probability score (GPS), the HRT Moorfields regression
analysis (MRA), scanning laser polarimetry (GDx enhanced corneal compensation; Glaucoma
Diagnostics (GDx), Carl Zeiss Meditec, Dublin, CA) nerve fiber indicator (NFI), and Spectralis optical
coherence tomography (OCT; Heidelberg Engineering) retinal nerve fiber layer (RNFL) classification.
We defined abnormal tests as an automated classification of outside normal limits for HRT and OCT
or NFI ≥ 56 (GDx). We conducted a sensitivity analysis, using borderline abnormal image
classifications. The reference standard was clinical diagnosis by a masked glaucoma expert including
standardized clinical assessment and automated perimetry. We analyzed 1 eye per patient (the one
with more advanced disease). We also evaluated the performance according to severity and using a
combination of 2 technologies. Main Outcome Measures Sensitivity and specificity, likelihood ratios,
diagnostic, odds ratio, and proportion of indeterminate tests. Results We recruited 955 participants,
and 943 were included in the analysis. The average age was 60.5 years (standard deviation, 13.8
years); 51.1{\%} were women. Glaucoma was diagnosed in at least 1 eye in 16.8{\%}; 32{\%} of
participants had no glaucoma-related findings. The HRT MRA had the highest sensitivity (87.0{\%};
95{\%} confidence interval [CI], 80.2{\%}-92.1{\%}), but lowest specificity (63.9{\%}; 95{\%} CI, 60.2{\
%}-67.4{\%}); GDx had the lowest sensitivity (35.1{\%}; 95{\%} CI, 27.0{\%}-43.8{\%}), but the highest
specificity (97.2{\%}; 95{\%} CI, 95.6{\%}-98.3{\%}). The HRT GPS sensitivity was 81.5{\%} (95{\%} CI,
73.9{\%}-87.6{\%}), and specificity was 67.7{\%} (95{\%} CI, 64.2{\%}-71.2{\%}); OCT sensitivity was
76.9{\%} (95{\%} CI, 69.2{\%}-83.4{\%}), and specificity was 78.5{\%} (95{\%} CI, 75.4{\%}-81.4{\%}).
Including only eyes with severe glaucoma, sensitivity increased: HRT MRA, HRT GPS, and OCT would
miss 5{\%} of eyes, and GDx would miss 21{\%} of eyes. A combination of 2 different tests did not
improve the accuracy substantially. Conclusions Automated imaging technologies can aid clinicians
in diagnosing glaucoma, but may not replace current strategies because they can miss some cases of
severe glaucoma.},
author = {Banister, Katie and Boachie, Charles and Bourne, Rupert and Cook, Jonathan and Burr,
Jennifer M. and Ramsay, Craig and Garway-Heath, David and Gray, Joanne and McMeekin, Peter and
Hern{\'{a}}ndez, Rodolfo and Azuara-Blanco, Augusto},
doi = {10.1016/j.ophtha.2016.01.041},
issn = {15494713},
journal = {Ophthalmology},
number = {5},
pages = {930--938},
pmid = {27016459},
title = {{Can Automated Imaging for Optic Disc and Retinal Nerve Fiber Layer Analysis Aid Glaucoma
Detection?}},
url = {http://dx.doi.org/10.1016/j.ophtha.2016.01.041},
volume = {123},
year = {2016}
}
@article{DeLemos2017,
abstract = {The goal of this roadmap paper is to summarize the state-of-the-art and identify research
challenges when developing, deploying and managing self-adaptive software systems. Instead of
dealing with a wide range of topics associated with the field, we focus on four essential topics of
self-adaptation: design space for self-adaptive solutions, software engineering processes for self-
adaptive systems, from centralized to decentralized control, and practical run-time verification {\&}
validation for self-adaptive systems. For each topic, we present an overview, suggest future
directions, and focus on selected challenges. This paper complements and extends a previous
roadmap on software engineering for self-adaptive systems published in 2009 covering a different
set of topics, and reflecting in part on the previous paper. This roadmap is one of the many results of
the Dagstuhl Seminar 10431 on Software Engineering for Self-Adaptive Systems, which took place in
October 2010.},
author = {de Lemos, Rog{\'{e}}rio and Garlan, David and Ghezzi, Carlo and Holger, Giese},
doi = {10.1007/978-3-319-74183-3},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/decomp-challenges{\_}2.pdf:pdf},
isbn = {978-3-319-74182-6},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
number = {September},
pages = {282--306},
url = {http://link.springer.com/10.1007/978-3-319-74183-3},
volume = {9640},
year = {2017}
}
@article{Zon2019,
author = {Zon, Natalia},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Zon2019.pdf:pdf},
year = {2019}
}
@inproceedings{Lau2010,
doi = {10.1109/SEAA.2010.36},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/lau2010.pdf:pdf},
isbn = {9780769541709},
year = {2010}
}
@article{Hovsepyan,
author = {Hovsepyan, Aram and Landuyt, Dimitri Van and De, Steven Op and Joosen, Wouter and
Rangel, Gustavo and Briones, Javier Fernandez}
}
@article{Taormina2015a,
abstract = {In this work, we suggest that the poorer results obtained with particle swarm
optimization (PSO) in some previous studies should be attributed to the cross-validation scheme
commonly employed to improve generalization of PSO-trained neural network river forecasting
(NNRF) models. Cross-validation entails splitting the training dataset into two, and accepting particle
position updates only if fitness improvements are concurrently measured on both subsets. The NNRF
calibration process thus becomes a multi-objective (MO) optimization problem which is still
addressed as a single-objective one. In our opinion, PSO cross-validated training should be carried
out under an MO optimization framework instead. Therefore, in this work, we introduce a novel MO
variant of the swarm optimization algorithm to train NNRF models for the prediction of future
streamflow discharges in the Shenandoah River watershed, Virginia (USA). The case study comprises
over 9,000 observations of both streamflow and rainfall observations, spanning a period of almost
25 years. The newly introduced MO fully informed particle swarm (MOFIPS) optimization algorithm
is found to provide better performing models with respect to those developed using the standard
PSO, as well as advanced gradient-based optimization techniques. These findings encourage the use
of an MO approach to NNRF cross-validated training with swarm optimization.},
author = {Taormina, Riccardo and Chau, Kwok-wing},
doi = {10.2166/hydro.2014.116},
issn = {1464-7141},
journal = {Journal of Hydroinformatics},
number = {1},
title = {{Neural network river forecasting with multi-objective fully informed particle swarm
optimization}},
url = {http://www.iwaponline.com/jh/017/jh0170099.htm},
volume = {17},
year = {2015},
internal-note = {NOTE(review): author and journal were missing; filled from the DOI/ISSN/URL (IWA Journal of Hydroinformatics 17(1)) -- verify against the published record}
}
@article{Zheng2016,
abstract = {Two formal models for parallel computation are presented: an abstract conceptual model
and a parallel-program model. The former model does not distinguish between control and data
states. The latter model includes the capability for the representation of an infinite set of control
states by allowing there to be arbitrarily many instruction pointers (or processes) executing the
program. An induction principle is presented which treats the control and data state sets on the
same ground. Through the use of “place variables,” it is observed that certain correctness conditions
can be expressed without enumeration of the set of all possible control states. Examples are
presented in which the induction principle is used to demonstrate proofs of mutual exclusion. It is
shown that assertions-oriented proof methods are special cases of the induction principle. A special
case of the assertions method, which is called parallel place assertions, is shown to be incomplete. A
formalization of “deadlock” is then presented. The concept of a “norm” is introduced, which yields
an extension, to the deadlock problem, of Floyd's technique for proving termination. Also discussed
is an extension of the program model which allows each process to have its own local variables and
permits shared global variables. Correctness of certain forms of implementation is also discussed. An
Appendix is included which relates this work to previous work on the satisfiability of certain logical
formulas.},
author = {Zheng, Manchun and Rogers, Michael S. and Luo, Ziqing and Dwyer, Matthew B. and
Siegel, Stephen F.},
doi = {10.1109/ASE.2015.99},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Keller-Formal-Verification.pdf:pdf},
isbn = {9781509000241},
pages = {830--835},
year = {2016},
internal-note = {NOTE(review): the abstract and attached PDF (Keller, formal verification of parallel programs) do not match the author list/DOI (Zheng et al., ASE 2015) -- this entry appears to merge two works; untangle before citing}
}
@article{Ishibuchi2015,
abstract = {We examine the behavior of three classes of evolutionary multiobjective optimization
(EMO) algorithms on many-objective knapsack problems. They are Pareto dominance-based,
scalarizing function-based, and hypervolume-based algorithms. NSGA-II, MOEA/D, SMS-EMOA, and
HypE are examined using knapsack problems with 2-10 objectives. Our test problems are generated
by randomly specifying coefficients (i.e., profits) in objectives. We also generate other test problems
by combining two objectives to create a dependent or correlated objective. Experimental results on
randomly generated many-objective knapsack problems are consistent with well-known
performance deterioration of Pareto dominance-based algorithms. That is, NSGA-II is outperformed
by the other algorithms. However, it is also shown that NSGA-II outperforms the other algorithms
when objectives are highly correlated. MOEA/D shows totally different search behavior depending
on the choice of a scalarizing function and its parameter value. Some MOEA/D variants work very
well only on two-objective problems while others work well on many-objective problems with 4-10
objectives. We also obtain other interesting observations such as the performance improvement by
similar parent recombination and the necessity of diversity improvement for many-objective
knapsack problems.},
author = {Ishibuchi, Hisao and Akedo, Naoya and Nojima, Yusuke},
doi = {10.1109/TEVC.2014.2315442},
issn = {1089778X},
journal = {IEEE Transactions on Evolutionary Computation},
number = {2},
pages = {264--283},
title = {{Behavior of Multiobjective Evolutionary Algorithms on Many-Objective Knapsack Problems}},
volume = {19},
year = {2015},
internal-note = {NOTE(review): author, title, journal and volume were missing; filled from the TEVC DOI/ISSN and matching issue/pages -- verify}
}
@article{Whittle2009,
author = {Whittle, Jon and Sawyer, Pete and Bencomo, Nelly and Cheng, Betty H C and Bruel, Jean-Michel},
year = {2009},
internal-note = {NOTE(review): missing title and venue (likely the RELAX requirements language paper, RE 2009 -- confirm); fixed line-wrap artifact in "Jean-Michel"}
}
@article{1976,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/paper01.pdf:pdf},
pages = {11--36},
year = {1976},
internal-note = {NOTE(review): no author/title/journal and a year-only citation key -- identify the work from paper01.pdf and rekey}
}
@article{Sweetman2019,
isbn = {9781733632508},
pages = {0--12},
year = {2019},
internal-note = {NOTE(review): missing author/title/venue; page range starting at 0 looks suspect -- verify}
}
@article{Holz2014,
doi = {10.1016/j.robot.2014.03.013},
isbn = {9783642339318},
issn = {09218890},
journal = {Robotics and Autonomous Systems},
number = {9},
pages = {1282--1293},
title = {{Approximate triangulation and region growing for efficient segmentation and smoothing of
range images}},
volume = {62},
year = {2014},
internal-note = {NOTE(review): journal added from the Elsevier j.robot DOI and ISSN 0921-8890; the isbn field does not belong to a journal article (it is an LNCS ISBN) -- verify and remove}
}
@article{Alipour2011,
archivePrefix = {arXiv},
arxivId = {arXiv:1610.08020v1},
eprint = {1610.08020v1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1610.08020.pdf:pdf},
title = {{Bounded Model Checking and Feature Omission Diversity}},
year = {2011},
internal-note = {NOTE(review): trimmed the abstract sentence that had been glued onto the title; eprint made a bare arXiv id; authors missing (presumably Alipour and Groce -- confirm); year 2011 vs arXiv 1610.* mismatch -- verify}
}
@article{Choudhary2017,
author = {Choudhary, Pradeepkumar and Khandekar, Rahul and Borkar, Aakash and Chotaliya,
Punit},
journal = {International Research Journal of Engineering and Technology},
number = {3},
pages = {2741--2743},
title = {{Image Processing Algorithm for Fruit Identification Detection of Mango Fruit on Tree}},
url = {https://irjet.net/archives/V4/i3/IRJET-V4I3691.pdf},
volume = {4},
year = {2017},
internal-note = {NOTE(review): removed section headings that had been glued onto the all-caps title and recased it; journal inferred from the IRJET URL -- verify both against the PDF}
}
@article{Martinelli2007,
abstract = {In this paper we describe an approach based on open system analysis for the
specification, verification and synthesis of secure systems. In particular, by using our framework, we
are able to model a system with a possible intruder and verify whether the whole system is secure,
i.e. whether the system satisfies a given temporal logic formula that describes its secure behavior. If
necessary, we are also able to automatically synthesize a process that, by controlling the behavior of
the possible intruder, enforces the desired secure behavior of the whole system. {\textcopyright}
2006.},
author = {Martinelli, Fabio and Matteucci, Ilaria},
doi = {10.1016/j.entcs.2006.12.003},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S1571066107000266-
main.pdf:pdf},
issn = {15710661},
journal = {Electronic Notes in Theoretical Computer Science},
pages = {29--43},
title = {{An Approach for the Specification, Verification and Synthesis of Secure Systems}},
url = {http://dx.doi.org/10.1016/j.entcs.2006.12.003},
volume = {168},
year = {2007},
internal-note = {NOTE(review): journal added from ISSN 1571-0661 / j.entcs DOI; authors added from the published ENTCS 168 record -- verify}
}
@article{Henzinger1994,
author = {Alur, Rajeev and Henzinger, Thomas A.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}21.pdf:pdf},
journal = {Journal of the ACM},
number = {1},
pages = {181--204},
title = {{A Really Temporal Logic}},
volume = {41},
year = {1994},
internal-note = {NOTE(review): author/title/journal were missing; JACM 41(1):181-204 (1994) matches Alur and Henzinger, "A Really Temporal Logic" -- verify}
}
@article{Unknown2016,
pages = {4--5},
volume = {1},
year = {2016},
internal-note = {NOTE(review): entry had an empty citation key (uncitable) and no identifying fields; provisional key assigned -- locate the source and complete or delete}
}
@article{DeNicola2020a,
abstract = {Modern systems evolve in unpredictable environments and have to continuously adapt
their behaviour to changing conditions. The “DReAM” (Dynamic Reconfigurable Architecture
Modelling) framework has been designed for modelling reconfigurable dynamic systems. It provides
a rule-based language, inspired from Interaction Logic, which is expressive and easy to use
encompassing all aspects of dynamicity including parametric multi-modal coordination with
creation/deletion of components as well as mobility. Additionally, it allows the description of both
endogenous/modular and exogenous/centralized coordination styles and sound transformations
from one style to the other. The DReAM framework is implemented in the form of a Java API
bundled with an execution engine. It allows us to develop runnable systems combining the
expressiveness of the rule-based notation together with the flexibility of this widespread
programming language.},
author = {{De Nicola}, Rocco and Maggi, Alessandro and Sifakis, Joseph},
doi = {10.1007/s10009-020-00555-2},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {14332787},
journal = {International Journal on Software Tools for Technology Transfer},
pages = {437--455},
title = {{The DReAM framework for dynamic reconfigurable architecture modelling: theory and
applications}},
url = {https://doi.org/10.1007/s10009-020-00555-2},
volume = {22},
year = {2020},
internal-note = {NOTE(review): journal added from the s10009 DOI and ISSN 1433-2787 (STTT) -- verify}
}
@article{DeFlorio2015,
abstract = {The present article introduces a reference framework for discussing resilience of
computational systems. Rather than a property that may or may not be exhibited by a system,
resilience is interpreted here as the emerging result of a dynamic process. Said process represents
the dynamic interplay between the behaviors exercised by a system and those of the environment it
is set to operate in. As a result of this interpretation, coherent definitions of several aspects of
resilience can be derived and proposed, including elasticity, change tolerance, and antifragility.
Definitions are also provided for measures of the risk of unresilience as well as for the optimal match
of a given resilient design with respect to the current environmental conditions. Finally, a resilience
strategy based on our model is exemplified through a simple scenario.},
archivePrefix = {arXiv},
arxivId = {1503.08421},
doi = {10.1007/s40860-015-0002-6},
eprint = {1503.08421},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/DeFlorio2015{\_}Article{\
_}OnResilientBehaviorsInComputat.pdf:pdf},
isbn = {4086001500},
issn = {2199-4668},
journal = {Journal of Reliable Intelligent Environments},
pages = {33--46},
publisher = {Springer International Publishing},
title = {{On Resilient Behaviors in Computational Systems and Environments}},
url = {http://arxiv.org/abs/1503.08421{\%}0Ahttp://dx.doi.org/10.1007/s40860-015-0002-6},
year = {2015},
internal-note = {NOTE(review): title reconstructed from the PDF filename and journal from the s40860 DOI -- verify; author missing (presumably De Florio -- confirm); isbn value looks bogus}
}
@article{Cornish2009,
abstract = {Language is a product of both biological and cultural evolution. Clues to the origins of key
structural properties of language can be found in the process of cultural transmission between
learners. Recent experiments have shown that iterated learning by human participants in the
laboratory transforms an initially unstructured artificial language into one containing regularities
that make the system more learnable and stable over time. Here, we explore the process of iterated
learning in more detail by demonstrating exactly how one type of structure - compositionality -
emerges over the course of these experiments. We introduce a method to precisely quantify the
increasing ability of a language to systematically encode associations between individual
components of meanings and signals over time and we examine how the system as a whole evolves
to avoid ambiguity in these associations and generate adaptive structure.},
doi = {10.1111/j.1467-9922.2009.00540.x},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/complex{\_}adaptive{\
_}systems.pdf:pdf},
issn = {00238333},
journal = {Language Learning},
pages = {187--205},
title = {{Complex adaptive systems and the origins of adaptive structure: What experiments can tell
us}},
volume = {59},
year = {2009},
internal-note = {NOTE(review): journal added from the 1467-9922 DOI and ISSN 0023-8333 (Language Learning); authors missing -- complete from the published record}
}
@article{Hossein2013,
author = {Gandomi, Amir Hossein and Yun, Gun Jin and Yang, Xin-She and Talatahari, Siamak},
doi = {10.1016/j.cnsns.2012.07.017},
issn = {1007-5704},
journal = {Communications in Nonlinear Science and Numerical Simulation},
number = {2},
pages = {327--340},
title = {{Chaos-enhanced accelerated particle swarm optimization}},
url = {http://dx.doi.org/10.1016/j.cnsns.2012.07.017},
volume = {18},
year = {2013},
internal-note = {NOTE(review): removed the journal abbreviation that had been glued onto the title (full journal name is already in the journal field); author list was mangled ("Hossein, Amir" / "Jin, Gun") and has been reconstructed from the published CNSNS 18(2) record -- verify}
}
@article{Kiaei2020,
abstract = {In typical embedded applications, the precise execution time of the program does not
matter, and it is sufficient to meet a real-time deadline. However, modern applications in
information security have become much more time-sensitive, due to the risk of timing side-channel
leakage. The timing of such programs needs to be data-independent and precise. We describe a
parallel synchronous software model, which executes as N parallel threads on a processor with
word-length N. Each thread is a single-bit synchronous machine with precise, contention-free timing,
while each of the N threads still executes as an independent machine. The resulting software
supports fine-grained parallel execution. In contrast to earlier work to obtain precise and repeatable
timing in software, our solution does not require modifications to the processor architecture nor
specialized instruction scheduling techniques. In addition, all threads run in parallel and without
contention, which eliminates the problem of thread scheduling. We use hardware (HDL) semantics
to describe a thread as a single-bit synchronous machine. Using logic synthesis and code generation,
we derive a parallel synchronous implementation of this design. We illustrate the synchronous
parallel programming model with practical examples from cryptography and other applications with
precise timing requirements.},
archivePrefix = {arXiv},
arxivId = {2005.02562},
doi = {10.1109/LES.2020.2992051},
eprint = {2005.02562},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2005.02562.pdf:pdf},
issn = {19430671},
journal = {IEEE Embedded Systems Letters},
pages = {1--4},
title = {{Synthesis of Parallel Synchronous Software}},
year = {2020},
internal-note = {NOTE(review): journal added from the IEEE LES DOI and ISSN 1943-0671; authors missing -- complete from the published record}
}
@inproceedings{Glazier2015,
abstract = {{\textcopyright} 2015 IEEE. Modern software systems are often compositions of entities
that increasingly use self-adaptive capabilities to improve their behavior to achieve systemic quality
goals. Self adaptive managers for each component system attempt to provide locally optimal results,
but if they cooperated and potentially coordinated their efforts it might be possible to obtain more
globally optimal results. The emergent properties that result from such composition and cooperation
of self-adaptive systems are not well understood, difficult to reason about, and present a key
challenge in the evolution of modern software systems. For example, the effects of coordination
patterns and protocols on emergent properties, such as the resiliency of the collectives, need to be
understood when designing these systems. In this paper we propose that probabilistic model
checking of stochastic multiplayer games (SMG) provides a promising approach to analyze,
understand, and reason about emergent properties in collectives of adaptive systems (CAS).
Probabilistic Model Checking of SMGs is a technique particularly suited to analyzing emergent
properties in CAS since SMG models capture: (i) the uncertainty and variability intrinsic to a CAS and
its execution environment in the form of probabilistic and nondeterministic choices, and (ii) the
competitive/cooperative aspects of the interplay among the constituent systems of the CAS. Analysis
of SMGs allows us to reason about things like the worst case scenarios, which constitutes a new
contribution to understanding emergent properties in CAS. We investigate the use of SMGs to show
how they can be useful in analyzing the impact of communication topology for collections of fully
cooperative systems defending against an external attack.},
author = {Glazier, Thomas J. and C{\'{a}}mara, Javier and Schmerl, Bradley and Garlan, David},
booktitle = {Proceedings - 2015 IEEE 9th International Conference on Self-Adaptive and Self-Organizing
Systems Workshops, SASOW 2015},
doi = {10.1109/SASOW.2015.14},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/glazier2015.pdf:pdf},
isbn = {9781467384391},
pages = {55--60},
year = {2015},
internal-note = {NOTE(review): converted from @article -- the former journal field held a proceedings title, now in booktitle; paper title missing -- complete from the PDF}
}
@article{Pumpuni-lenss2017,
doi = {10.1002/sys},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/pumpuni-lenss2017.pdf:pdf},
year = {2017},
internal-note = {NOTE(review): DOI is truncated -- 10.1002/sys is only the Systems Engineering journal prefix, not an article DOI; title/author also missing -- complete from the PDF}
}
@article{B2015,
author = {Klarl, Annabelle and Cichella, Lucia and Hennicker, Rolf},
doi = {10.1007/978-3-319-15317-9},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/formal-aspects-of-component-software-
2015{\_}2.pdf:pdf},
isbn = {9783319153179},
pages = {183--190},
year = {2015},
internal-note = {NOTE(review): repaired mangled first author "B, Annabelle Klarl" -> "Klarl, Annabelle"; citation key "B2015" stems from the mangling but is kept to avoid breaking citations; title missing and DOI is book-level (FACS 2014 LNCS volume) -- complete from the PDF}
}
@article{Jansen2019,
abstract = {We introduce the concept of structured synthesis for Markov decision processes. A
structure is induced from finitely many pre-specified options for a system configuration. We define
the structured synthesis problem as a nonlinear programming problem (NLP) with integer variables.
As solving NLPs is not feasible in general, we present an alternative approach. A transformation of
models specified in the PRISM probabilistic programming language creates models that account for
all possible system configurations by nondeterministic choices. Together with a control module that
ensures consistent configurations throughout a run of the system, this transformation enables the
use of optimized tools for model checking in a black-box fashion. While this transformation increases
the size of a model, experiments with standard benchmarks show that the method provides a
feasible approach for structured synthesis. We motivate and demonstrate the usefulness of the
approach along a realistic case study involving surveillance by unmanned aerial vehicles in a shipping
facility.},
archivePrefix = {arXiv},
arxivId = {1807.06106},
author = {Jansen, Nils and Humphrey, Laura and Tumova, Jana and Topcu, Ufuk},
doi = {10.1007/978-3-030-20652-9_16},
eprint = {1807.06106},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1007978-3-030-20652-9{\
_}2.pdf:pdf},
isbn = {9783030206512},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {237--254},
year = {2019},
internal-note = {NOTE(review): title missing; chapter DOI suggests an LNCS proceedings paper -- likely should be @inproceedings with a booktitle; complete from the source}
}
@inproceedings{Beek2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/SPLC15.pdf:pdf},
isbn = {9781450336130},
pages = {321--326},
title = {{Applying the Product Lines Paradigm to the Quantitative Analysis of Collective Adaptive
Systems}},
year = {2015},
internal-note = {NOTE(review): converted from @article -- the ACM ISBN and SPLC15.pdf indicate a conference paper (SPLC 2015); author and booktitle missing -- complete from the PDF}
}
@article{Mason2018,
abstract = {Unmanned aerial vehicles (UAVs), a.k.a. drones, are becoming increasingly popular due
to great advancements in their control mechanisms and price reduction. UAVs are being used in
applications such as package delivery, plantation and railroad track monitoring, where UAVs carry
out tasks in an automated fashion. Devising how UAVs achieve a task is challenging as the
environment where UAVs are deployed is normally unpredictable, for example, due to winds. Formal
methods can help engineers to specify flight strategies and to evaluate how well UAVs are going to
perform to achieve a task. This paper proposes a formal framework where engineers can raise the
confidence in their UAV specification by using symbolic, simulation and statistical and model
checking methods. Our framework is constructed over three main components: the behavior of
UAVs and the environment are specified in a formal executable language; the UAV's physical model
is specified by a simulator; and statistical model checking algorithms are used for the analysis of
system behaviors. We demonstrate the effectiveness of our framework by means of several
scenarios involving multiple drones.},
author = {Mason, Ian A. and Nigam, Vivek and Talcott, Carolyn and Brito, Alisson},
doi = {10.1007/978-3-319-74781-1_28},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/cosim17.pdf:pdf},
isbn = {9783319747804},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {406--422},
year = {2018},
internal-note = {NOTE(review): title missing -- complete from the PDF (cosim17.pdf)}
}
@article{Reisig2013,
abstract = {Purpose - This study aims to investigate whether low self-control and routine activity
theories explain fraud outcomes among the elderly. Specifically, the effects of low self-control and
remote purchasing behaviors on shopping fraud targeting and victimization are empirically assessed.
Design/methodology/approach - Cross-sectional survey data from telephone interviews conducted
in Arizona and Florida are used. A total of 2,000 adults aged 60 and over were surveyed. Because
selection bias was observed, a two-stage probit regression model was estimated to assess
theoretical hypotheses in a multivariate context. Findings - The results demonstrate that two forms
of remote purchasing - telemarketing purchase and mail-order purchase - increase the probability of
shopping fraud targeting. Infomercial purchase and mail-order purchase are significant correlates of
shopping fraud victimization. The probability of becoming a target and victim is affected positively by
reduced levels of self-control. The effects of demographic characteristics on fraud outcomes are null.
Research limitations/implications - This research lends support to the argument that low self-control
and routine activity theories shed light on fraud victimization among elderly consumers. Future
research should examine the influence of low self-control, individual routines and lifestyles on other
forms of victimization that the elderly experience. Practical implications - The findings underscore
the need for fraud prevention and increasing public awareness among elderly consumers.
Originality/value - This is the first study to examine shopping fraud targeting and victimization of the
elderly in a broad theoretical context. [ABSTRACT FROM AUTHOR] Copyright of Journal of Financial
Crime is the property of Emerald Group Publishing Limited and its content may not be copied or
emailed to multiple sites or posted to a listserv without the copyright holder's express written
permission. However, users may print, download, or email articles for individual use. This abstract
may be abridged. No warranty is given about the accuracy of the copy. Users should refer to the
original published version of the material for the full abstract. (Copyright applies to all Abstracts.)},
doi = {10.1108/JFC-03-2013-0014},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2013{\_}.{\_}Shopping{\_}Fraud{\
_}Victimization{\_}amo.pdf:pdf},
issn = {1359-0790},
journal = {Journal of Financial Crime},
number = {3},
pages = {324--337},
pmid = {88860233},
url = {http://www.emeraldinsight.com/doi/10.1108/JFC-03-2013-0014},
volume = {20},
year = {2013},
internal-note = {NOTE(review): journal added (named in the abstract's copyright notice and the JFC DOI); removed the isbn field that merely repeated the ISSN digits; author and title missing -- complete from the source}
}
@inproceedings{Moreno2016,
author = {Moreno, Gabriel A. and C{\'{a}}mara, Javier and Garlan, David and Schmerl, Bradley},
booktitle = {Proceedings - 2016 IEEE International Conference on Autonomic Computing, ICAC 2016},
doi = {10.1109/ICAC.2016.59},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/moreno2016.pdf:pdf},
isbn = {9781509016532},
keywords = {latency-aware,proactive,self-adaptation},
pages = {147--156},
year = {2016},
internal-note = {NOTE(review): converted from @article (journal field held a proceedings title); accented "C{\'{a}}mara" for consistency with the Glazier2015 entry; title missing -- complete from the PDF}
}
@article{Walker2009b,
archivePrefix = {arXiv},
arxivId = {NIHMS150003},
author = {Walker, Melanie and Kublin, James G and Zunt, Joseph R},
doi = {10.1086/498510},
eprint = {NIHMS150003},
isbn = {9780123850447},
journal = {Clinical Infectious Diseases},
number = {1},
pages = {115--125},
pmid = {1000000221},
volume = {42},
year = {2009},
internal-note = {NOTE(review): stripped the garbage suffix ".Parasitic" from the DOI; removed the issn field that repeated the ISBN digits; journal inferred from the 10.1086 DOI and vol 42(1):115-125 -- verify; pmid value also looks suspect}
}
@article{Kwiatkowska2018a,
author = {Kwiatkowska, Marta and Parker, David and Wiltsche, Clemens},
doi = {10.1007/s10009-017-0476-z},
issn = {1433-2787},
journal = {International Journal on Software Tools for Technology Transfer},
number = {2},
pages = {195--210},
publisher = {Springer Berlin Heidelberg},
title = {{PRISM-games: verification and strategy synthesis for stochastic multi-player games with
multiple objectives}},
url = {https://doi.org/10.1007/s10009-017-0476-z},
volume = {20},
year = {2018},
internal-note = {NOTE(review): fixed spurious space before the colon in the title; journal added from the s10009 DOI/ISSN (STTT) and authors from the published STTT 20(2) record -- verify}
}
@article{Lydman2011,
year = {2011},
internal-note = {NOTE(review): entry contains only a year -- locate the source and complete, or delete}
}
@article{Armani2014,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/5346-13-12149-1-10-20170814.pdf:pdf},
month = dec,
pages = {1--16},
volume = {14},
year = {2014},
internal-note = {NOTE(review): the former number field held a month name ("December"), moved to the month macro; author/title/journal missing -- complete from the PDF}
}
@article{Cordy2019,
doi = {10.1007/s10009-019-00537-z},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {1433-2779},
journal = {International Journal on Software Tools for Technology Transfer},
url = {https://doi.org/10.1007/s10009-019-00537-z},
year = {2019},
internal-note = {NOTE(review): journal added from the s10009 DOI/ISSN (STTT); author, title, volume and pages missing -- complete from the published record}
}
@article{Tahar2002,
author = {Tahar, Professors S and Cerny, E and Song, X and Tahar, Updated S},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-intro-02.06.pdf:pdf},
month = may,
volume = {1},
year = {2002},
internal-note = {NOTE(review): author list is clearly garbled ("Professors S", "Updated S" are not names -- likely lecture-slide metadata; confirm against the PDF); the former number field held a month name ("May"), moved to the month macro; this may not be an @article at all}
}
@book{Rico2008,
abstract = {Abstract: The purpose of this chapter is to present a literature review relevant to a study
of using agile methods to manage the development of Internet websites and their subsequent
quality. This chapter places website quality within the context of the {\$}2.4 trillion U.S. electronic
commerce industry. Thus, this chapter provides a history of electronic computers, electronic
commerce, software methods, software quality metrics, agile methods and studies on agile
methods. None of these histories are without controversy. For instance, some scholars begin the
study of the electronic computer by mentioning the emergence of the Sumerian text, Hammurabi
code or the abacus. We, however, will align our history with the emergence of the modern
electronic computer at the beginning of World War II. The history of electronic commerce also has
poorly defined beginnings. Some studies of electronic commerce begin with the widespread use of
the Internet in the early 1990s. However, electronic commerce cannot be appreciated without
establishing a deeper context. Few scholarly studies, if any, have been performed on agile methods,
which is the basic purpose of this literature review. That is, to establish the context to conduct
scholarly research within the fields of agile methods and electronic commerce. {\textcopyright} 2008
Elsevier Inc. All rights reserved.},
author = {Rico, David F. and Sayani, Hasan H. and Field, Ralph F.},
doi = {10.1016/S0065-2458(08)00401-4},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/rico2008.pdf:pdf},
isbn = {9780123744258},
issn = {00652458},
number = {08},
pages = {1--55},
url = {http://dx.doi.org/10.1016/S0065-2458(08)00401-4},
volume = {73},
year = {2008},
internal-note = {NOTE(review): the abstract says "this chapter" and the DOI is in the Advances in Computers series (S0065-2458, vol 73) -- likely should be @incollection with booktitle/title; title missing -- complete from the source}
}
@article{Ismail2018,
abstract = {The ability to automatically generate and guarantee the optimal decision for self-
adaptation is important especially when there are multiple quality objectives that need to be
satisfied, the uncertainties in the adaptation outcome, and the time-varying resource demands,
especially in the autonomic cloud systems. To address this issue, in this paper, we propose an
approach to automatically encode the adaptation decision behavior and the multiple quality
objectives, as well as synthesizing the behaviour to fulfill the specified objectives. In the approach,
we emphasize the relation between quality objectives expressed as a variant of temporal logic
specification and the domain-specific Service Level Agreements (SLA) (i.e. cloud environment). The
approach also covers the abstraction method for representing the adaptation behavior as stochastic
games, and the re-synthesis method to adjust the threshold values, if failing to satisfy the predefined
thresholds. We apply the stochastic games model checking with strategy synthesis to realize the
approach. The Pareto-set computation is utilized to support the adjustment of threshold values. We
present a set of validation results to show the effectiveness and performance of the proposed
approach.},
doi = {10.1109/APSEC.2017.50},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/IK17.pdf:pdf},
isbn = {9781538636817},
issn = {15301362},
pages = {436--445},
title = {{Synthesizing Pareto Optimal Decision for Autonomic Clouds Using Stochastic Games Model
Checking}},
volume = {2017-Decem},
year = {2018},
internal-note = {NOTE(review): volume value "2017-Decem" is truncated (IEEE proceedings volume, presumably "2017-December" -- confirm); author missing; the APSEC DOI indicates a conference paper -- consider @inproceedings}
}
@article{Teslya2020,
doi = {10.3390/info11010028},
file = {:C$\backslash$:/Users/Asus/Downloads/information-11-00028-v2.pdf:pdf},
journal = {Information},
number = {1},
title = {{Execution Plan Control in Dynamic Coalition of Robots with Smart Contracts and
Blockchain}},
volume = {11},
year = {2020},
internal-note = {NOTE(review): journal/volume/number added from the MDPI DOI pattern info11(1):28 -- verify; author missing -- complete from the published record}
}
@article{Bures2013,
author = {Bures, Tomas and Gerostathopoulos, Ilias and Hnetynka, Petr and Keznikl, Jaroslav and Kit,
Michal and Plasil, Frantisek},
doi = {10.1145/2465449.2465462},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}8.pdf:pdf},
isbn = {9781450321228},
month = jun,
pages = {81},
year = {2013},
internal-note = {NOTE(review): the former volume field held a month name ("June"), moved to the month macro; title missing and the ACM DOI/ISBN indicate a conference paper -- consider @inproceedings; complete from the source}
}
@article{SanfordBernhardt2007,
abstract = {Agent-based modeling (ABM) is a relatively new paradigm compared to some of the
other advanced computing paradigms discussed in this document. Researchers and practitioners in
many disciplines, from biology to business, have developed agent-based models, and the number of
applications continues to rise. Bonabeau (2002b) provides an accessible overview of the variety of
applications of ABM. The purpose of this chapter is to explain briefly what ABM is and what it can be
used for, as well as to review some of the many applications developed so far in the transportation
domain. Because interest in ABM continues to grow, the number of applications continues to grow
as well. The applications described present examples of the type of work that can be done rather
than a comprehensive review. This chapter provides a primer of sorts for those wondering whether
ABM could be a useful tool for a particular transportation problem. The following sections define
ABM, describe the types of problems to which ABM can be applied, discuss its strengths and
weaknesses, provide some examples of transportation applications, and suggest some guidelines for
those interested in developing an ABM for a transportation application.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ec113.pdf:pdf},
month = jan,
pages = {72--79},
volume = {E-C113},
year = {2007},
internal-note = {NOTE(review): the former number field held a month name ("January"), moved to the month macro; author/title missing; volume E-C113 suggests a Transportation Research Circular chapter -- confirm and complete}
}
@article{Lim2015,
abstract = {{\textcopyright} 2015 IEEE. Collective adaptive systems (CAS) consist of multiple agents
that adapt to changing system and environmental conditions in order to satisfy system goals and
quality requirements. As more applications involve using CAS in a critical context, ensuring the
correct and safe adaptive behaviors of quality-driven CAS has become more important. In this paper,
we propose Collective Adaptive System Testing (CAST), a scalable and efficient approach to testing
self-adaptive behaviors of CAS. We propose a selective method to instantiate and execute test cases
relevant to the current adaptation context. This enables testers to focus testing on key self-adaptive
behaviors while dealing with the scale and dynamicity of the system. An experimental evaluation
using a traffic monitoring system is performed to validate its scalability, efficiency, and fault-
detection effectiveness. The experimental results provide insights into how CAST can serve as a
feasible and effective assurance technique for CAS.},
author = {Lim, Yoo Jin and Jee, Eunkyoung and Shin, Donghwan and Bae, Doo Hwan},
doi = {10.1109/COMPSAC.2015.131},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/COMPSAC2015-yjlim.pdf:pdf},
isbn = {9781467365635},
issn = {07303157},
pages = {216--221},
title = {{Efficient Testing of Self-Adaptive Behaviors in Collective Adaptive Systems}},
volume = {2},
year = {2015},
internal-note = {NOTE(review): the COMPSAC DOI/ISBN indicate a conference paper -- consider @inproceedings with a booktitle}
}
@article{Gracitelli2015,
abstract = {Identification of structural damage to the optic nerve and retinal nerve fiber layer (RNFL)
is an essential component of diagnosis and management of glaucoma. The introduction of spectral-
domain OCT (SD-OCT) has allowed objective quantification of damage to these structures with
unprecedented resolution. In addition, recent attention has been directed towards imaging the
macular area for quantifying loss of neural tissue caused by the disease. Many studies have
evaluated and compared the diagnostic accuracies of a variety of parameters that can be obtained
from imaging these areas of the ocular fundus. In this article, we critically review the existing
literature evaluating the diagnostic accuracy of SD-OCT in glaucoma and we discuss issues related to
how SD-OCT results should be incorporated into clinical practice.},
author = {Gracitelli, Carolina P. B. and Abe, Ricardo Y. and Medeiros, Felipe A.},
doi = {10.2174/1874364101509010068},
isbn = {1874-3641},
issn = {1874-3641},
pages = {68--77},
pmid = {26069519},
url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=4460228{\&}tool=pmcentrez{\&}rendertype=abstract},
volume = {9},
year = {2015}
}
@article{Bortolussi2014,
author = {Bortolussi, Luca and Nicola, Rocco De and Gast, Nicolas and Gilmore, Stephen and Hillston,
Jane and Massink, Mieke and Tribastone, Mirco},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2c7b0ff03d771633a7e07a9baaf5ea8198be.pdf:pdf},
journal = {ERCIM News},
pages = {1--2},
title = {{A Quantitative Approach to the Design and Analysis of Collective Adaptive Systems for Smart Cities}},
url = {http://ercim-news.ercim.eu/en98/special/a-quantitative-approach-to-the-design-and-analysis-
of-collective-adaptive-systems-for-smart-cities},
year = {2014}
}
@article{Chandra2015,
abstract = {From the beginning of software development, it always tried to formulate some process
or process sequences or steps to develop or making software. Development of software should be in
systematic manner. Today there are variety of software projects some require only good GUI, some
require security, some software are made for mission critical tasks, for all these different types of
projects require different type of model for development of software. Few well know software
development models are waterfall, v-shaped, agile, and object-oriented. All these have their own
advantages and disadvantages. This paper try to solve the problem of choosing right methodology
for particular software by comparing all software development methodologies. It also figure out the
advantages and disadvantages of different methodologies in useful manner not to criticize, so that a
particular model will be chosen by an organization or company. This paper divided into two parts
first is dedicated to advantage and disadvantages and second part id for comparison between all
most popular software development life cycle models. This paper also used for choosing best model
for particular project for developing particular software. This paper broadly categorize all software
development models into four categories flow based model, iteration based model, object oriented
model, structured based model. Keywords Comparison, software development models, advantages,
disadvantages, model selection, flow based, iteration based model, object oriented model,
structured based model.},
number = {9},
pages = {7--10},
url = {http://www.ijcaonline.org/research/volume131/number9/chandra-2015-ijca-907294.pdf},
volume = {131},
year = {2015}
}
@article{Li2014,
abstract = {This paper reviewed major remote sensing image classification techniques, including
pixel-wise, sub-pixel-wise, and object-based image classification methods, and highlighted the
importance of incorporating spatio-contextual information in remote sensing image classification.
Further, this paper grouped spatio-contextual analysis techniques into three major categories,
including 1) texture extraction, 2) Markov random fields (MRFs) modeling, and 3) image
segmentation and object-based image analysis. Finally, this paper argued the necessity of developing
geographic information analysis models for spatial-contextual classifications using two case studies.},
author = {Li, Miao and Zang, Shuying and Zhang, Bing and Li, Shanshan and Wu, Changshan},
doi = {10.5721/EuJRS20144723},
isbn = {22797254},
issn = {22797254},
number = {1},
pages = {389--411},
title = {{A review of remote sensing image classification techniques: The role of Spatio-contextual
information}},
volume = {47},
year = {2014}
}
@article{Mcmillan1993,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/mcmillan1993.pdf:pdf},
year = {1993}
}
@article{Yep2015,
author = {Yep, T. and Patel, V. and Slejko, J.F. and Devine, B.},
doi = {10.1016/j.jval.2015.03.1047},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Article For
FYP/PIIS1098301515011043.pdf:pdf},
issn = {10983015},
number = {3},
pages = {A181},
title = {{Comparing total and disease specific Healthcare costs for Glaucoma Patients before and
after their index Diagnosis: a retrospective claims database analysis}},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1098301515011043},
volume = {18},
year = {2015}
}
@article{Muntasa2014,
abstract = {Diabetic Retinopathy is one of the diseases that have the effect of a high mortality rate
after heart disease and cancer. However, the disease can be early detected through blood vessels
and the optic nerve head in Fundus images. Blood vessels separation of the optic nerve head
required high effort when it is conducted manually, therefore it is necessary that the appropriate
method to perform segmentation of the object. Level Set method is well-known as object
segmentation method based on object deformable. However, the methods have the disadvantage it
requires initialization before the segmentation process. In this research, segmentation method
without initialization process is proposed. The segmentation is conducted by using the maximum
value selection results of convolution 8 directions. Experimental results show that, proposed
method has obtained 89.48{\%} accuracy. Segmentation errors are caused by small branches, where
they are not connected, so that the objects are supposed as noises.},
author = {Muntasa, Arif and Sirajudin, Indah Agustien and Sophan, Moch Kautsar},
doi = {10.12928/telkomnika.v12i3.97},
issn = {2302-9293},
number = {3},
pages = {631},
title = {{Matrix Mask Overlapping and Convolution Eight Directions for Blood Vessel Segmentation on
Fundus Retinal Image}},
url = {http://journal.uad.ac.id/index.php/TELKOMNIKA/article/view/97},
volume = {12},
year = {2014}
}
@article{Marzband2016,
number = {February},
pages = {265--274},
title = {{Real Time Experimental Implementation of Optimum Energy Management System in Standalone Microgrid by Using Multi-Layer Ant Colony Optimization}},
volume = {75},
year = {2016}
}
@article{Moonsamy2014,
year = {2014}
}
@article{Cordeiro,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ieee{\_}tse.pdf:pdf},
pages = {1--19},
}
@article{Dollar2013,
abstract = {Edge detection is a critical component of many vision systems, including object detectors
and image segmentation algorithms. Patches of edges exhibit well-known forms of local structure,
such as straight lines or T-junctions. In this paper we take advantage of the structure present in local
image patches to learn both an accurate and computationally efficient edge detector. We formulate
the problem of predicting local edge masks in a structured learning framework applied to random
decision forests. Our novel approach to learning decision trees robustly maps the structured labels
to a discrete space on which standard information gain measures may be evaluated. The result is an
approach that obtains realtime performance that is orders of magnitude faster than many
competing state-of-the-art approaches, while also achieving state-of-the-art edge detection results
on the BSDS500 Segmentation dataset and NYU Depth dataset. Finally, we show the potential of our
approach as a general purpose edge detector by showing our learned edge models generalize well
across datasets.},
archivePrefix = {arXiv},
arxivId = {arXiv:1406.5549v1},
doi = {10.1109/ICCV.2013.231},
eprint = {arXiv:1406.5549v1},
isbn = {9781479928392},
issn = {1550-5499},
pages = {1841--1848},
pmid = {26352995},
year = {2013}
}
@article{Kuila2014,
abstract = {Energy efficient clustering and routing are two well known optimization problems which
have been studied widely to extend lifetime of wireless sensor networks (WSNs). This paper presents
Linear/Nonlinear Programming (LP/NLP) formulations of these problems followed by two proposed
algorithms for the same based on particle swarm optimization (PSO). The routing algorithm is
developed with an efficient particle encoding scheme and multi-objective fitness function. The
clustering algorithm is presented by considering energy conservation of the nodes through load
balancing. The proposed algorithms are experimented extensively and the results are compared with
the existing algorithms to demonstrate their superiority in terms of network life, energy
consumption, dead sensor nodes and delivery of total data packets to the base station. {\textcopyright} 2014 Elsevier Ltd.},
doi = {10.1016/j.engappai.2014.04.009},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Article For FYP/Energy{\_}efficient{\_}clustering{\_}and{\_}routing.pdf:pdf},
isbn = {9781479930791},
issn = {09521976},
pages = {127--140},
publisher = {Elsevier},
title = {{Energy efficient clustering and routing algorithms for wireless sensor networks: Particle
swarm optimization approach}},
url = {http://dx.doi.org/10.1016/j.engappai.2014.04.009},
volume = {33},
year = {2014}
}
@article{Khazeev2019,
abstract = {Formal methods yet advantageous, face challenges towards wide acceptance and
adoption in software development practices. The major reason being presumed complexity. The
issue can be addressed by academia with a thoughtful plan of teaching and practise. The user study
detailed in this paper is examining AutoProof tool with the motivation to identify complexities
attributed to formal methods. Participants' (students of Masters program in Computer Science)
performance and feedback on the experience with formal methods assisted us in extracting specific
problem areas that effect tool usability. The study results infer, along with improvements in
verification tool functionalities, teaching program must be modified to include pre-requisite courses
to make formal methods easily adapted by students and promoting their usage in software
development process.},
archivePrefix = {arXiv},
arxivId = {1906.01430},
author = {Khazeev, Mansur and Mazzara, Manuel and {De Carvalho}, Daniel and Aslam, Hamna},
eprint = {1906.01430},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1906.01430.pdf:pdf},
title = {{Towards A Broader Acceptance Of Formal Verification Tools: The Role Of Education}},
url = {http://arxiv.org/abs/1906.01430},
year = {2019}
}
@book{Margaria2018,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030034269},
year = {2018}
}
@article{Bianco,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/5d704ca3216c5761ed11e6af177e6a67.pdf:pdf},
}
@article{Frei2011,
abstract = {This paper addresses a vision of future manufacturing systems, which are
account and allows the systems to evolve together with the requirements.
indicated.},
author = {Frei, Regina and Serugendo, Giovanna Di Marzo},
doi = {10.1109/TSMCC.2010.2098027},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/SMCC2011.pdf:pdf},
issn = {1094-6977},
journal = {{IEEE} Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews)},
number = {6},
pages = {885--897},
volume = {41},
year = {2011}
}
@article{Kounev2017,
abstract = {This book provides formal and informal definitions and taxonomies for self-aware
computing systems, and explains how self-aware computing relates to many existing subfields of
computer science, especially software engineering. It describes architectures and algorithms for self-
aware systems as well as the benefits and pitfalls of self-awareness, and reviews much of the latest
relevant research across a wide array of disciplines, including open research challenges. The
chapters of this book are organized into five parts: Introduction, System Architectures, Methods and
Algorithms, Applications and Case Studies, and Outlook. Part I offers an introduction that defines
self-aware computing systems from multiple perspectives, and establishes a formal definition, a
taxonomy and a set of reference scenarios that help to unify the remaining chapters. Next, Part II
explores architectures for self-aware computing systems, such as generic concepts and notations
that allow a wide range of self-aware system architectures to be described and compared with both
isolated and interacting systems. It also reviews the current state of reference architectures,
architectural frameworks, and languages for self-aware systems. Part III focuses on methods and
algorithms for self-aware computing systems by addressing issues pertaining to system design, like
modeling, synthesis and verification. It also examines topics such as adaptation, benchmarks and
metrics. Part IV then presents applications and case studies in various domains including cloud
computing, data centers, cyber-physical systems, and the degree to which self-aware computing
approaches have been adopted within those domains. Lastly, Part V surveys open challenges and
future research directions for self-aware computing systems. It can be used as a handbook for
professionals and researchers working in areas related to self-aware computing, and can also serve
as an advanced textbook for lecturers and postgraduate students studying subjects like advanced
software engineering, autonomic computing, self-adaptive systems, and data-center resource
management. Each chapter is largely self-contained, and offers plenty of references for anyone
wishing to pursue the topic more deeply.},
author = {Kounev, Samuel and Kephart, Jeffrey O. and Milenkoski, Aleksandar and Zhu, Xiaoyun},
doi = {10.1007/978-3-319-47474-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-319-47474-8{\_}13.pdf:pdf},
isbn = {9783319474748},
pages = {1--722},
year = {2017}
}
@article{Saoud2020,
archivePrefix = {arXiv},
arxivId = {arXiv:2002.02014v1},
author = {Saoud, Adnane and Jagtap, Pushpak and Zamani, Majid and Girard, Antoine},
eprint = {arXiv:2002.02014v1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2002.02014.pdf:pdf},
number = {725144},
pages = {1--18},
year = {2020}
}
@article{Laghari2016,
doi = {10.1371/journal.pone.0146760},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/journal.pone.0146760.PDF:PDF},
issn = {19326203},
number = {1},
title = {{Modeling the internet of things, self-organizing and other complex adaptive communication
networks: A Cognitive Agent-based Computing approach}},
volume = {11},
year = {2016}
}
@article{Liu2020,
doi = {10.1016/j.jmsy.2019.11.001},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/j.jmsy.2019.11.001.pdf:pdf},
issn = {02786125},
pages = {24--34},
publisher = {Elsevier},
volume = {54},
year = {2020}
}
@article{Quesel2006,
abstract = {This paper presents an automatic verification method for combined temporal and spatial
properties of mobile real-time systems. We provide a translation of the Shape Calculus (SC), a spatio-
temporal extension of Duration Calculus, into weak second order logic of one successor (WS1S). A
prototypical implementation facilitates successful verification of spatio-temporal properties by
translating SC specifications into the syntax of the WS1S checker MONA. For demonstrating the
formalism and tool usage, we apply it to the benchmark case study "generalised railroad crossing"
(GRC) enriched by requirements inexpressible in non-spatial formalisms. {\textcopyright} Springer-
Verlag Berlin Heidelberg 2006.},
doi = {10.1007/11921240_24},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/qs.pdf:pdf},
isbn = {3540488154},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {347--361},
year = {2006}
}
@article{Motoyama2019,
archivePrefix = {arXiv},
arxivId = {1708.04851},
doi = {10.1109/tcns.2019.2890980},
eprint = {1708.04851},
file = {:C$\backslash$:/Users/Asus/Downloads/1708.04851.pdf:pdf},
issn = {2325-5870},
pages = {1--1},
year = {2019}
}
@article{Dokhanchi2016,
abstract = {A framework for the elicitation and debugging of formal specifications for Cyber-Physical
Systems is presented. The elicitation of specifications is handled through a graphical interface. Two
debugging algorithms are presented. The first checks for erroneous or incomplete temporal logic
specifications without considering the system. The second can be utilized for the analysis of reactive
requirements with respect to system test traces. The specification debugging framework is applied
on a number of formal specifications collected through a user study. The user study establishes that
requirement errors are common and that the debugging framework can resolve many insidious
specification errors.},
archivePrefix = {arXiv},
arxivId = {1607.02549},
eprint = {1607.02549},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}19.pdf:pdf},
number = {2},
title = {{Formal Requirement Debugging for Testing and Verification of Cyber-Physical Systems}},
url = {http://arxiv.org/abs/1607.02549},
volume = {17},
year = {2016}
}
@article{Alhakbani2014,
abstract = {{\textcopyright} Springer International Publishing Switzerland 2014. This paper discusses
the Internet of Things (IoT) within the cloud computing concepts and architectures. We review
different frameworks of combined IoT architecture with cloud being in the center. Then we
investigate adaptive interaction support concept. Finally, we propose a novel framework that
incorporates and supports adaptive interaction of the user with the IoT cloud architecture based on
the quality of context information and quality of services. The proposed framework increases user
satisfaction and reduces user annoyance towards the IoT cloud environment.},
author = {Alhakbani, Noura and Hassan, Mohammed Mehedi and Hossain, M. Anwar and Alnuem,
Mohammed},
doi = {10.1007/978-3-319-11692-1_12},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/alhakbani2014.pdf:pdf},
pages = {136--146},
title = {{A Framework of Adaptive Interaction Support in Cloud-Based Internet of Things (IoT)
Environment}},
year = {2014}
}
@book{Douligeris2019,
doi = {10.1007/978-3-030-29551-6},
file = {:C$\backslash$:/Users/Asus/Downloads/[email protected]:pdf},
isbn = {9783030295509},
year = {2019}
}
@article{Andrikopoulos2014,
abstract = {Large-scale systems comprising of multiple heterogeneous entities are directly influenced
by the interactions of their participating entities. Such entities, both physical and virtual, attempt to
satisfy their objectives by dynamically collaborating with each other, and thus forming collective
adaptive systems. These systems are subject to the dynamicity of the entities' objectives, and to
changes to the environment. In this work we focus on the latter, i.e. on providing the means for
entities in such systems to model, monitor and evaluate their perceived utility by participating in the
system. This allows for them to make informed decisions about their interactions with other entities
in the system. For this purpose we propose a utility-based approach for decision making, as well as
an architecture that allows for the support of this approach. Copyright {\textcopyright} 2014
SCITEPRESS - Science and Technology Publications.},
author = {Andrikopoulos, Vasilios and Bitsaki, Marina and S{\'{a}}ez, Santiago G{\'{o}}mez and
Karastoyanova, Dimka and Nikolaou, Christos and Psycharaki, Alina},
doi = {10.5220/0004937403080314},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/49374.pdf:pdf},
isbn = {9789897580192},
journal = {CLOSER 2014 - Proceedings of the 4th International Conference on Cloud Computing and
Services Science},
keywords = {Choreography,Collective adaptive systems,Decision making,Utility},
pages = {308--314},
year = {2014}
}
@article{Soeanu2015,
abstract = {Transportation and supply chain activities represent essential components in many
endeavors covering both public and private domains. However, the underlying transport networks
are complex and potentially fragile due to weather, natural disasters or other risk factors. Thus,
assessing transportation related risk represents a key decision support capability along with the
ability to evaluate contingency options for risk mitigation. In this paper, we address these issues by
adopting probabilistic model checking to evaluate the risk and contingency options related to
transportation tasks. In this pursuit, risk related properties are assessed for behavioral models
capturing the transport system. Moreover, we show the usefulness of constructing decision trees
that can provide insightful means of risk appraisal. The proposed approach can help decision makers
evaluate contingency options and determine lower and upper cost bounds for risky transportation
tasks such as those involved in humanitarian aid provision. The proposed approach is also illustrated
with a case study.},
author = {Soeanu, Andrei and Debbabi, Mourad and Alhadidi, Dima and Makkawi, Makram and
Allouche, Mohamad and B{\'{e}}langer, Micheline and L{\'{e}}chevin, Nicholas},
doi = {10.1016/j.eswa.2014.12.052},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}27.pdf:pdf},
issn = {09574174},
number = {9},
pages = {4410--4421},
url = {http://dx.doi.org/10.1016/j.eswa.2014.12.052},
volume = {42},
year = {2015}
}
@article{Pun1980,
journal = {Signal Processing},
abstract = {This paper describes an automatic threshold selection method for picture segmentation,
using the entropy of the grey level histogram. It is shown that, by an a priori maximization of an
entropy determined a posteriori, a picture can successfully be thresholded into a two-level image.
Several experimental results are presented to show the validity of the method. An extension to
multithresholding and to multidimensional histogram processing is also discussed. {\textcopyright} 1980.},
archivePrefix = {arXiv},
arxivId = {0734-I89X/85},
doi = {10.1016/0165-1684(80)90020-1},
eprint = {85},
isbn = {0165-1684},
issn = {01651684},
number = {3},
pages = {223--237},
pmid = {21691657},
primaryClass = {0734-I89X},
title = {{A new method for grey-level picture thresholding using the entropy of the histogram}},
volume = {2},
year = {1980}
}
@article{Vaandrager2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/MC-lecture.pdf:pdf},
year = {2015}
}
@article{Viroli2017,
archivePrefix = {arXiv},
arxivId = {arXiv:1711.08297v1},
eprint = {arXiv:1711.08297v1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1711.08297.pdf:pdf},
pages = {1--48},
year = {2017}
}
@article{Krings2018,
abstract = {We have implemented various symbolic model checking algorithms, such as BMC, k-
Induction and IC3 for B, Event-B and other modeling languages. The high-level nature of software
models accounts for complicated constraints arising in these symbolic analysis techniques. In this
article we suggest using static information stemming from proof obligations to simplify occurring
constraints. We show how to include proof information in the aforementioned algorithms. Using
different benchmarks we compare explicit state to symbolic model checking as well as techniques
with and without proof assistance. In particular for models with large branching factor, e.g., due to
complicated data values being manipulated, the symbolic techniques fare much better than explicit
state model checking. The inclusion of proof information results in further performance
improvements.},
doi = {10.1016/j.scico.2017.08.013},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/krings2017.pdf:pdf},
issn = {01676423},
pages = {41--63},
title = {{Proof assisted bounded and unbounded symbolic model checking of software and system
models}},
url = {http://dx.doi.org/10.1016/j.scico.2017.08.013},
volume = {158},
year = {2018}
}
@article{Dang1998,
abstract = {In this paper we discuss the problem of calculating the reachable states of a
dynamical system defined by ordinary differential equations or inclusions. We present
a prototype system for approximating this set and demonstrate some experimental
results.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}16.pdf:pdf},
isbn = {3540643583},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {96--109},
volume = {1386},
year = {1998}
}
@article{Svore2016b,
doi = {10.1016/j.ejcon.2016.04.009},
pages = {15--30},
title = {{Quantitative verification and strategy synthesis for stochastic games}},
volume = {30},
year = {2016}
}
@book{Herrmann2017,
doi = {10.1007/978-3-319-29363-9},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {978-3-319-29362-2},
pages = {253--269},
url = {http://link.springer.com/10.1007/978-3-319-29363-9},
volume = {100},
year = {2017}
}
@book{Belmonte2019,
author = {Belmonte, Gina and Ciancia, Vincenzo and Latella, Diego and Massink, Mieke},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030309848},
year = {2019}
}
@article{Kitzelmann2006,
abstract = {We describe an approach to the inductive synthesis of recursive equations from
input/output-examples which is based on the classical two-step approach to induction of functional
Lisp programs of Summers (1977). In a first step, I/O-examples are rewritten to traces which explain
the outputs given the respective inputs based on a datatype theory. These traces can be integrated
into one conditional expression which represents a non-recursive program. In a second step, this
initial program term is generalized into recursive equations by searching for syntactical regularities in
the term. Our approach extends the classical work in several aspects. The most important extensions
are that we are able to induce a set of recursive equations in one synthesizing step, the equations
may contain more than one recursive call, and additionally needed parameters are automatically
introduced.},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/kitzelmann06a.pdf:pdf},
issn = {15337928},
pages = {429--454},
volume = {7},
year = {2006}
}
@article{Zhu2019,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/odqocw1594713832.pdf:pdf},
title = {{A Categorical Approach to Model Formation of Reactive Autonomic Systems Framework}},
volume = {4},
year = {2019}
}
@article{Ramachandran2019,
abstract = {We address the problem of maintaining resource availability in a networked multi-robot
system performing distributed target tracking. In our model, robots are equipped with sensing and
computational resources enabling them to track a target's position using a Distributed Kalman Filter
(DKF). We use the trace of each robot's sensor measurement noise covariance matrix as a measure
of sensing quality. When a robot's sensing quality deteriorates, the system's communication graph is
modified by adding edges such that the robot with deteriorating sensor quality may share
information with other robots to improve the team's target tracking ability. This computation is
performed centrally and is designed to work without a large change in the number of active
communication links. We propose two mixed integer semi-definite programming formulations (an
'agent-centric' strategy and a 'team-centric' strategy) to achieve this goal. We implement both
formulations and a greedy strategy in simulation and show that the team-centric strategy
outperforms the agent-centric and greedy strategies.},
archivePrefix = {arXiv},
arxivId = {1910.01300},
author = {Ramachandran, Ragesh K. and Fronda, Nicole and Sukhatme, Gaurav S.},
eprint = {1910.01300},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1910.01300.pdf:pdf},
url = {http://arxiv.org/abs/1910.01300},
year = {2019}
}
@book{Surana2005,
abstract = {In this era, information technology is revolutionizing almost every domain of technology
and society, whereas the 'complexity revolution' is occurring in science at a silent pace. In this paper,
we look at the impact of the two, in the context of supply-chain networks. With the advent of
information technology, supply chains have acquired a complexity almost equivalent to that of
biological systems. However, one of the major challenges that we are facing in supply-chain
management is the deployment of coordination strategies that lead to adaptive, flexible and
coherent collective behaviour in supply chains. The main hurdle has been the lack of the principles
that govern how supply chains with complex organizational structure and function arise and
develop, and what organizations and functionality are attainable, given specific kinds of lower-level
constituent entities. The study of Complex Adaptive Systems (CAS), has been a research effort
attempting to find common characteristics and/or formal distinctions among complex systems
arising in diverse domains (like biology, social systems, ecology and technology) that might lead to a
better understanding of how com-plexity occurs, whether it follows any general scientific laws of
nature, and how it might be related to simplicity. In this paper, we argue that supply chains should
be treated as a CAS. With this recognition, we propose how various concepts, tools and techniques
used in the study of CAS can be exploited to characterize and model supply-chain networks. These
tools and techniques are based on the fields of nonlinear dynamics, statistical physics and
information theory.},
author = {Surana, Amit and Kumara, Soundar and Greaves, Mark and Raghavan, Usha Nandini},
doi = {10.1080/00207540500142274},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Supplychainnetworksacomplexadaptivesystemsperspective.pdf:pdf},
isbn = {0020754050014},
issn = {00207543},
number = {20},
pages = {4235--4265},
volume = {43},
year = {2005}
}
@article{Litoiu2018,
doi = {10.13140/RG.2.2.20701.26080},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Adaptation{\_}as{\_}a{\_}Service.pdf:pdf},
number = {October},
year = {2018}
}
@article{Liu2020a,
abstract = {This article addresses the challenge of collective decision making among the smart
components in medical waste transportation. We search for an optimized alternation to generate a
commitment according to the capabilities belonging to heterogeneous agents under ever-changing
context conditions. We propose a Goal-Capability-Commitment based Executable Tree (GCC-ETree)
approach to generate an adaptive protocol. We illustrate our approach in a real-world medical waste
automated guided vehicle transportation scenario and evaluate the feasibility of our approach by
comparison with other common approaches. Our work makes two main contributions. First, we
show how ontology-based matching and calculation can be used to enact a semantic understanding
of the alignment of environment, goal, capability and commitment. Second, we introduce run-time
goal modeling into collective decision making to capture the real-time requirements and provide
execution flow information for generating a decision flow at runtime.},
author = {Liu, Wei and Guo, Jingzhi and Yao, Feng and Chen, Deng},
doi = {10.1016/j.future.2020.04.003},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {0167739X},
pages = {167--180},
title = {{Adaptive protocol generation for group collaborative in smart medical waste
transportation}},
url = {https://doi.org/10.1016/j.future.2020.04.003},
volume = {110},
year = {2020}
}
@article{Lee2016,
author = {Lee, Kyoung Min and Lee, Eun Ji and Kim, Tae Woo and Kim, Hyunjoong},
doi = {10.1371/journal.pone.0147964},
isbn = {10.1371/journal.pone.0147964},
issn = {19326203},
journal = {PLoS ONE},
number = {1},
pages = {1--17},
pmid = {26812064},
title = {{Comparison of the abilities of SD-OCT and SS-OCT in evaluating the thickness of the macular
inner retinal layer for glaucoma diagnosis}},
volume = {11},
year = {2016}
}
@article{McKinnon2003,
abstract = {Glaucoma is a chronic neurodegeneration of the optic nerve and one of the leading
causes of vision loss in the world among the aging. Retinal ganglion cells (RGCs) have been shown to
die by apoptosis, or programmed cell death. Central to apoptosis is the activation of specific
proteases, termed caspases. Caspases are activated in chronic neurodegenerations such as
Alzheimer's disease (AD) as well as in RGCs after optic nerve transection. In rat glaucoma models we
have shown that caspase-3, a major effector of the apoptotic cascade, is activated in RGCs and
cleaves amyloid precursor protein (APP) to produce neurotoxic fragments that include amyloid-beta.
Caspase-8, which initiates apoptosis after activation of receptors of the tumor necrosis factor (TNF)
superfamily, is also activated in RGCs. This suggests a new hypothesis for RGC death in glaucoma
involving chronic amyloid-beta neurotoxicity, mimicking AD at the molecular level. With loss of the
protective effect of APP and upregulation of toxic APP fragments, RGCs die from chronic caspase
activation, loss of synaptic homeostasis, amyloid-beta cytotoxicity and excitotoxicity. The benefits
are that treatments for AD could be used to treat glaucoma, and strategies developed to treat
glaucoma could treat other neurodegenerations.},
doi = {10.2741/1172},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Article For FYP/1172.pdf:pdf},
issn = {10939946},
number = {6},
pages = {1172},
url = {https://www.bioscience.org/2003/v8/s/1172/list.htm},
volume = {8},
year = {2003}
}
@article{Rao2007,
abstract = {Two probabilistic-based models, namely the Ensemble-Dependent Matrix model [1][3]
and the Markov Random Field model [2], have been proposed to deal with faults in nanoscale
system. The MRF design can provide excellent noise tolerance in nanoscale circuit design. However,
it is complicated to be applied to model circuit behavior at system level. Ensemble dependent matrix
methodology is more effective and suitable for CAD tools development and to optimize nanoscale
circuit and system design. In this paper, we show that the ensemble-dependent matrices describe
the actual circuit performances when signal errors are present. We then propose a new criterion to
compare circuit error-tolerance capability. We also prove that the Matrix model and the Markov
model converge when signals are digital {\textcopyright} 2007 IEEE.},
author = {Rao, Huifei and Chen, Jie and Yu, Changhong and Ang, Woon Tiong and Wey, I. Chyn and
Wu, An Yeu and Zhao, Hong},
doi = {10.1109/iscas.2007.378023},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/CA68{\_}2007.pdf:pdf},
isbn = {1424409217},
issn = {02714310},
number = {c},
pages = {1803--1806},
year = {2007}
}
@book{Steinberg2008,
author = {Steinberg, Dave and Budinsky, Frank and Paternostro, Marcelo and Merks, Ed},
isbn = {9780321331885},
pages = {744},
year = {2008}
}
@inproceedings{Bures2015,
abstract = {{\textcopyright} 2015 ACM. Recently, several ensemble-based component models have
been created to address the dynamicity and complexity of designing cyber-physical systems.
Experience in applying these models to actual case studies has shown that there are still scenarios in
distributed organization that are hard to capture by utilizing only the concepts of these component
models. In this paper, we present a summary of issues encountered, based on the analysis of
selected case studies. We propose new concepts that build on those contained in ensemble-based
models. In particular, we introduce the ideas of ensemble nesting, dynamic role cardinalities and
ensemble fitness. These concepts and their support in the runtime framework aim at serving as a
bridge between high-level ensemble formation rules and low-level decentralized implementation.
These concepts are illustrated on one of the case studies, demonstrating a domain specific language
based on that used in the DEECo component model.},
author = {Bures, Tomas and Krijt, Filip and Plasil, Frantisek and Hnetynka, Petr and Jiracek, Zbynek},
doi = {10.1145/2797433.2797450},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/bures2015.pdf:pdf},
isbn = {9781450333931},
volume = {07-11-Sept},
year = {2015}
}
@inproceedings{Liu2008,
abstract = {System simulation and verification become more demanding as complexity grows. PAT is
developed as an interactive system to support composing, simulating and reasoning of process
algebra with various extensions like fairness events, global variables and parameterized processes.
PAT provides user friendly interfaces to support system modeling and simulation. Furthermore, it
embeds two complementing model checking techniques catering for different systems and
properties, namely, an explicit on-the-fly model checker which is designed to verify event-based
fairness constraints efficiently and a bounded model checker based on state-of-the-art SAT solvers.
The model checkers are capable of proving reachability, deadlock-freeness and the full set of Linear
Temporal Logic (LTL) properties. Compared to other model checkers, PAT has two key advantages.
Firstly, it supports an intuitive annotation of fairness constraints so that it handles large number of
fairness constraints efficiently. Secondly, the compositional encoding of system models as SAT
problems allows us to handle compositional process algebra effectively. The experimental results
show that PAT is capable of verifying systems with large number of states and outperforms the
state-of-the-art model checkers in some cases. {\textcopyright} May 10-18, 2008.},
author = {Liu, Yang and Sun, Jun and Dong, Jin Song},
doi = {10.1145/1370175.1370187},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/paticse08.pdf:pdf},
isbn = {9781605580791},
issn = {02705257},
pages = {919--920},
year = {2008}
}
@article{Salam2016,
abstract = {Glaucoma is a chronic disease often called "silent thief of sight" as it has no symptoms
and if not detected at an early stage it may cause permanent blindness. Glaucoma progression
precedes some structural changes in the retina which aid ophthalmologists to detect glaucoma at an
early stage and stop its progression. Fundoscopy is among one of the biomedical imaging techniques
to analyze the internal structure of retina. Our proposed technique provides a novel algorithm to
detect glaucoma from digital fundus image using a hybrid feature set. This paper proposes a novel
combination of structural (cup to disc ratio) and non-structural (texture and intensity) features to
improve the accuracy of automated diagnosis of glaucoma. The proposed method introduces a
suspect class in automated diagnosis in case of any conflict in decision from structural and non-
structural features. The evaluation of proposed algorithm is performed using a local database
containing fundus images from 100 patients. This system is designed to refer glaucoma cases from
rural areas to specialists and the motivation behind introducing suspect class is to ensure high
sensitivity of proposed system. The average sensitivity and specificity of proposed system are 100
and 87 {\%} respectively.},
author = {Salam, Anum A. and Khalil, Tehmina and Akram, M. Usman and Jameel, Amina and Basit,
Imran},
doi = {10.1186/s40064-016-3175-4},
isbn = {2193-1801},
issn = {2193-1801},
journal = {SpringerPlus},
number = {1},
pages = {1519},
title = {{Automated detection of glaucoma using structural and non structural features}},
url = {http://springerplus.springeropen.com/articles/10.1186/s40064-016-3175-4},
volume = {5},
year = {2016}
}
@article{Manuscript2016,
doi = {10.1016/j.neuroimage.2014.12.061},
pages = {214--224},
year = {2016}
}
@article{Forsati2014,
abstract = {This paper presents a new variant of ant colony optimization (ACO), called enRiched Ant
Colony Optimization (RACO). This variation tries to consider the previously traversed edges in the
earlier executions to adjust the pheromone values appropriately and prevent premature
convergence. Feature selection (FS) is the task of selecting relevant features or disregarding
irrelevant features from data. In order to show the efficacy of the proposed algorithm, RACO is then
applied to the feature selection problem. In the RACO-based feature selection (RACOFS) algorithm, it
might be assumed that the proposed algorithm considers later features with a higher priority. Hence
in another variation, the algorithm is integrated with a capability local search procedure to
demonstrate that this is not the case. The modified RACO algorithm is able to find globally optimal
solutions but suffers from entrapment in local optima. Hence, in the third variation, the algorithm is
integrated with a local search procedure to tackle this problem by searching the vicinity of the
globally optimal solution. To demonstrate the effectiveness of the proposed algorithms, experiments
were conducted using two measures, kappa statistics and classification accuracy, on several
standard datasets. The comparisons were made with a wide variety of other swarm-based
algorithms and other feature selection methods. The results indicate that the proposed algorithms
have superiorities over competitors. {\textcopyright} 2014 Elsevier B.V.},
author = {Forsati, Rana and Moayedikia, Alireza and Jensen, Richard and Shamsfard, Mehrnoush and
Meybodi, Mohammad Reza},
doi = {10.1016/j.neucom.2014.03.053},
issn = {18728286},
journal = {Neurocomputing},
pages = {354--371},
publisher = {Elsevier},
title = {{Enriched ant colony optimization and its application in feature selection}},
url = {http://dx.doi.org/10.1016/j.neucom.2014.03.053},
volume = {142},
year = {2014}
}
@article{Chapman2018,
author = {Chapman, Margaret P and Smith, Kevin M and Cheng, Victoria and Freyberg, David L and
Tomlin, Claire J},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Chapman{\_}SusTech{\_}final{\_}Sept142018.pdf:pdf},
year = {2018}
}
@book{Drechsler2017,
abstract = {This book provides readers with a comprehensive introduction to the formal verification
of hardware and software. World-leading experts from the domain of formal proof techniques show
the latest developments starting from electronic system level (ESL) descriptions down to the register
transfer level (RTL). The authors demonstrate at different abstraction layers how formal methods
can help to ensure functional correctness. Coverage includes the latest academic research results, as
well as descriptions of industrial tools and case studies. Formal Techniques for Verification and
Coverage Analysis of Analog Systems -- Verification of Incomplete Designs -- Probabilistic Model
Checking: Advances and Applications -- Software in a Hardware View -- Formal Verification -- The
Industrial Perspective.},
doi = {10.1007/978-3-319-57685-5},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/formal-system-verification-2018.pdf:pdf},
isbn = {9783319576855},
pages = {1--182},
year = {2017}
}
@article{Bosnacki2016,
doi = {10.1007/978-3-319-32582-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/khamespanah2016.pdf:pdf},
isbn = {9783319325811},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {165--181},
title = {{Model checking software: 23rd international symposium, SPIN 2016 co-located with ETAPS
2016 Eindhoven, the Netherlands, april 7–8, 2016 proceedings}},
volume = {9641},
year = {2016}
}
@article{Chaysri2020,
abstract = {In this work we investigate the use of a reinforcement learning (RL) framework for the
autonomous navigation of a group of mini-robots in a multi-agent collaborative environment. Each
mini-robot is driven by inertial forces provided by two vibration motors that are controlled by a
simple and efficient low-level speed controller. The action of the RL agent is the direction of each
mini-robot, and it is based on the position of each mini-robot, the distance between them and the
sign of the distance gradient between each mini-robot and the nearest one. Each mini-robot is
considered a moving obstacle that must be avoided by the others. We propose suitable state space
and reward function that result in an efficient collaborative RL framework. The classical and the
double Q-learning algorithms are employed, where the latter is considered to learn optimal policies
of mini-robots that offers more stable and reliable learning process. A simulation environment is
created, using the ROS framework, that include a group of four mini-robots. The dynamic model of
each mini-robot and of the vibration motors is also included. Several application scenarios are
simulated and the results are presented to demonstrate the performance of the proposed
approach.},
doi = {10.1080/01691864.2020.1757507},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/01691864.2020.1757507.pdf:pdf},
issn = {15685535},
number = {13},
pages = {902--916},
url = {https://doi.org/10.1080/01691864.2020.1757507},
volume = {34},
year = {2020}
}
@inproceedings{Jussien2010,
author = {Jussien, Narendra and Rochart, Guillaume and Lorca, Xavier},
booktitle = {CPAIOR'08 Workshop on Open-Source Software for Integer and Contraint Programming
(OSSICP'08)},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/choco-presentation.pdf:pdf},
year = {2008}
}
@article{Michael2019,
author = {Michael, Ellis and Anderson, Thomas and Ernst, Michael D and Tatlock, Zachary},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/dslabs-eurosys19.pdf:pdf},
isbn = {9781450362818},
year = {2019}
}
@book{Ciancia2016,
abstract = {Spatial aspects of computation are increasingly relevant in Computer Science, especially
in the field of collective adaptive systems and when dealing with systems distributed in physical
space. Traditional formal verification techniques are well suited to analyse the temporal evolution of
concurrent systems; however, properties of space are typically not explicitly taken into account. This
tutorial provides an introduction to recent work on a topology-inspired approach to formal
verification of spatial properties depending upon (physical) space. A logic is presented, stemming
from the tradition of topological interpretations of modal logics, dating back to earlier logicians such
as Tarski, where modalities describe neighbourhood. These topological definitions are lifted to the
more general setting of closure spaces, also encompassing discrete, graph-based structures. The
present tutorial illustrates the extension of the framework with a spatial surrounded operator,
leading to the spatial logic for closure spaces SLCS, and its combination with the temporal logic CTL,
leading to STLCS. The interplay of space and time permits one to define complex spatio-temporal
properties. Both for the spatial and the spatio-temporal fragment efficient model-checking
algorithms have been developed and their use on a number of case studies and examples is
illustrated.},
author = {Ciancia, Vincenzo and Latella, Diego and Loreti, Michele and Massink, Mieke},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
doi = {10.1007/978-3-319-34096-8_6},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/formal-methods-for-the-quantitative-evaluation-of-collective-ada-2016.pdf:pdf},
isbn = {9783319340951},
issn = {16113349},
pages = {156--201},
title = {{Spatial logic and spatial model checking for closure spaces}},
volume = {9700},
year = {2016}
}
@book{Lemos2013,
isbn = {9783319741826},
year = {2013}
}
@article{Zeppezauer2014,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/zeppephil-thesis.pdf:pdf},
title = {{Virtualizing Communication for Hybrid and Diversity-Aware Collective Adaptive Systems}},
volume = {2014},
year = {2014}
}
@article{a461346,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/a461346.pdf:pdf},
year = {1387}
}
@article{Chen2011,
abstract = {Multi-agent systems are an increasingly important software paradigm and in many of its
applications agents cooperate to achieve a particular goal. This requires the design of efficient
collaboration protocols, a typical example of which is team formation. In this paper, we illustrate
how probabilistic model checking, a technique for formal verification of probabilistic systems, can be
applied to the analysis, design and verification of such protocols. We start by analysing the
performance of an existing team formation protocol modelled as a discrete-time Markov chain.
Then, using a Markov decision process model, we construct optimal algorithms for team formation.
Finally, we use stochastic two-player games to analyse the competitive coalitional setting, in which
agents are split into cooperative and hostile classes. We present experimental results from these
models using the probabilistic model checking tool PRISM, which we have extended with support for
stochastic games. {\textcopyright} 2011 Springer-Verlag Berlin Heidelberg.},
author = {Chen, Taolue and Kwiatkowska, Marta and Parker, David and Simaitis, Aistis},
doi = {10.1007/978-3-642-22359-4_14},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/clima11.pdf:pdf},
isbn = {9783642223587},
issn = {03029743},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {190--207},
year = {2011}
}
@book{Estanol2015,
abstract = {This paper presents a way of checking the correctness of artifact-centric business process
models defined using the BAUML framework. To ensure that these models are free of errors, we
propose an approach to verify (i.e. there are no internal mistakes) and to validate them (i.e. the
model complies with the business requirements). This approach is based on translating these models
into logic and then encoding the desirable properties as satisfiability problems of derived predicates.
In this way, we can then use a tool to check if these properties are fulfilled.},
author = {Esta{\~{n}}ol, Montserrat and Sancho, Maria Ribera and Teniente, Ernest},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-030-49435-3.pdf:pdf},
isbn = {978-3-319-19068-6},
issn = {16113349},
pages = {434--449},
volume = {9097},
year = {2015}
}
@article{Chandrappa2015,
number = {10},
pages = {32--35},
title = {{Segmentation of Retinal Nerve Fiber Layer in Optical Coherence Tomography (OCT) Images
using Statistical Region Merging Technique for Glaucoma Screening}},
volume = {128},
year = {2015}
}
@article{Clark2014,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/978-3-540-68265-3{\_}11.pdf:pdf},
pages = {2014},
year = {2014}
}
@article{Craig2002,
doi = {10.1002/hec.654},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/craig2002.pdf:pdf},
pages = {33--42},
volume = {42},
year = {2002}
}
@article{Martinez-de-la-Casa2014,
abstract = {PURPOSE: To assess the capacity of internal retinal layer thickness measurements made
at the macula using new spectral-domain optical coherence tomography (OCT) software to
distinguish between healthy subjects and those with suspected glaucoma. The diagnostic
performance of such measurements also was compared with that of conventional peripapillary
retinal nerve fiber layer (RNFL) thickness measurements. METHODS: The
study included 38 subjects with suspected glaucoma and 38 age-matched healthy subjects. In one
randomly selected eye of each participant, thickness measurements at the level of the macula were
made of the nerve fiber layer (mRNFL), the ganglion cell layer (GCL), and the ganglion cell complex
(GCC; GCL + internal plexiform layer) through automated OCT segmentation. Peripapillary RNFL
thickness (pRNFL) also was determined using the conventional scan.
RESULTS: As the only variable showing intergroup variation, mRNFL in the glaucoma
suspects was significantly thinner in the quadrants inner inferior (P = 0.003), inner temporal (P =
0.010), and outer inferior (P = 0.017). The variable best able to discriminate between the two groups
was inner inferior mRNFL thickness, as indicated by an area below the receiver operating
characteristic (ROC) curve of 0.742. CONCLUSIONS: Macular RNFL
thickness measurements showed an improved diagnostic capacity over the other variables examined
to distinguish between healthy subjects and glaucoma suspects.},
author = {Martinez-de-la-Casa, Jose M. and Cifuentes-Canorea, Pilar and Berrozpe, Clara and Sastre,
Marina and Polo, Vicente and Moreno-Monta{\~{n}}es, Javier and Garcia-Feijoo, Julian},
doi = {10.1167/iovs.14-15501},
isbn = {0146-0404},
issn = {15525783},
number = {12},
pages = {8343--8348},
pmid = {25425301},
title = {{Diagnostic ability of macular nerve fiber layer thickness using new segmentation software in
glaucoma suspects}},
volume = {55},
year = {2014}
}
@article{Glazier2017,
author = {Glazier, T J and Schmerl, Bradley and C{\'{a}}mara, Javier and Garlan, David},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/CMU-ISR-17-119.pdf:pdf},
number = {December},
year = {2017}
}
@article{Joice2014,
author = {Joice, Regina and Nilsson, Sandra K and Montgomery, Jacqui and Dankwa, Selasi and
Morahan, Belinda and Seydel, Karl B and Bertuccini, Lucia and Alano, Pietro and Kim, C and
Duraisingh, Manoj T and Taylor, Terrie E and Milner, Danny A},
doi = {10.1126/scitranslmed.3008882},
isbn = {6176436508},
issn = {15378276},
number = {244},
pages = {1--16},
pmid = {1000000221},
volume = {6},
year = {2014}
}
@article{IBM2006,
abstract = {Autonomic Computing White Paper June 2006 Fourth Edition Page 2. Page2 Contents 1.
Introduction 3},
author = {{IBM}},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10.1.1.150.1011.pdf:pdf},
issn = {19448244},
number = {June},
pages = {34},
pmid = {20356171},
url = {http://scholar.google.com/scholar?hl=en{\&}btnG=Search{\&}q=intitle:An+architectural+blueprint+for+autonomic+computing+.{\#}0{\%}5Cnhttp://users.encs.concordia.ca/{~}ac/ac-resources/AC{\_}Blueprint{\_}White{\_}Paper{\_}4th.pdf},
volume = {36},
year = {2006}
}
@article{choco2011,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/choco{\_}2.pdf:pdf},
number = {18},
pages = {2017},
volume = {173},
year = {2011}
}
@article{Bach2013,
doi = {10.1007/s00441-013-1598-6},
issn = {0302766X},
number = {2},
pages = {287--296},
pmid = {23525754},
volume = {353},
year = {2013}
}
@book{B2018,
abstract = {This paper extends the leveled homomorphic encryption scheme for an approximate
arithmetic of Cheon et al. (ASIACRYPT 2017) to a fully homomorphic encryption, i.e., we propose a
new technique to refresh low-level ciphertexts based on Gentry's bootstrapping procedure. The
modular reduction operation is the main bottleneck in the homomorphic evaluation of the
decryption circuit. We exploit a scaled sine function as an approximation of the modular reduction
operation and present an efficient evaluation strategy. Our method requires only one homomorphic
multiplication for each of iterations and so the total computation cost grows linearly with the depth
of the decryption circuit. We also show how to recrypt packed ciphertexts on the RLWE construction
with an open-source implementation. For example, it takes 139.8 seconds to refresh a ciphertext
that encrypts 128 numbers with 12 bits of precision, yielding an amortized rate of 1.1 seconds per
slot.},
author = {Bloem, Roderick and Gross, Hannes and Iusupov, Rinat and K{\"{o}}nighofer, Bettina and
Mangard, Stefan and Winter, Johannes},
doi = {10.1007/978-3-319-78372-7},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/bloem2018.pdf:pdf},
isbn = {978-3-319-78371-0},
pages = {321--353},
url = {http://link.springer.com/10.1007/978-3-319-78372-7},
volume = {10822},
year = {2018}
}
@article{Peng2013,
abstract = {Image segmentation is a fundamental problem in computer vision. Despite many years of
research, general purpose image segmentation is still a very challenging task because segmentation
is inherently ill-posed. Among different segmentation schemes, graph theoretical ones have several
good features in practical applications. It explicitly organizes the image elements into
mathematically sound structures, and makes the formulation of the problem more flexible and the
computation more efficient. In this paper, we conduct a systematic survey of graph theoretical
methods for image segmentation, where the problem is modeled in terms of partitioning a graph
into several sub-graphs such that each of them represents a meaningful object of interest in the
image. These methods are categorized into five classes under a uniform notation: the minimal
spanning tree based methods, graph cut based methods with cost functions, graph cut based
methods on Markov random field models, the shortest path based methods and the other methods
that do not belong to any of these classes. We present motivations and detailed technical
descriptions for each category of methods. The quantitative evaluation is carried by using five
indices - Probabilistic Rand (PR) index, Normalized Probabilistic Rand (NPR) index, Variation of
Information (VI), Global Consistency Error (GCE) and Boundary Displacement Error (BDE) - on some
representative automatic and interactive segmentation methods. {\textcopyright} 2012 Elsevier
Ltd.},
doi = {10.1016/j.patcog.2012.09.015},
issn = {00313203},
number = {3},
pages = {1020--1038},
pmid = {12939454},
publisher = {Elsevier},
url = {http://dx.doi.org/10.1016/j.patcog.2012.09.015},
volume = {46},
year = {2013}
}
@article{Naeem2014,
isbn = {9781479947645},
keywords = {-risk},
number = {Cctot},
pages = {197--202},
title = {{Using V-Model Methodology, UML Process-Based Risk Assessment of Software and
Visualization}},
year = {2014}
}
@article{Tan2006,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/tan2006.pdf:pdf},
isbn = {0769527183},
year = {2006}
}
@article{Popyack2016,
isbn = {9783662448748},
issn = {13892576},
number = {2},
pages = {197--199},
title = {{Gusz Eiben and Jim Smith (Eds): Introduction to evolutionary computing: Springer, 2015,
299 pp, ISBN: 978-3-662-44874-8}},
volume = {17},
year = {2016}
}
@article{Ciancia2009,
abstract = {One of the biggest challenges in the RoboCup Soccer Standard Platform League (SPL) is
autonomously achieving and maintaining an accurate estimate of a robot's position and orientation
on the field. In other robotics applications many robust systems already exist for localization such as
visual simultaneous localization and mapping (SLAM) and LIDAR based SLAM. These approaches
either require special hardware or are very computationally expensive and are not suitable for the
Nao robot, the current robot of choice for the SPL. Therefore novel approaches to localization in the
RoboCup SPL environment are required. In this paper we present a new approach to localization in
the SPL which relies primarily on the information contained within white field markings while being
efficient enough to run in real time on board a Nao robot. {\textcopyright} 2012 Springer-Verlag.},
author = {Ciancia, Vincenzo and Latella, Diego and Massink, Mieke and Paskauskas, Rytis and Vandin,
Andrea},
doi = {10.1007/978-3-540-88479-8},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/video{\_}25.pdf:pdf},
isbn = {978-3-540-88478-1},
issn = {1865-0929},
journal = {ISoLA},
number = {600708},
pages = {769--781},
title = {{A Tool-Chain for Statistical Spatio-Temporal Model Checking of Bike Sharing Systems}},
url = {http://dblp.uni-trier.de/db/conf/isola/isola2008.html{\#}Attiogbe08},
volume = {17},
year = {2009}
}
@book{Margaria2018a,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030034238},
year = {2018}
}
@article{Sayama2013,
author = {Sayama, Hiroki and Pestov, Irene and Schmidt, Jeffrey and James, Benjamin and Wong,
Chun},
doi = {10.1016/j.camwa.2012.12.005},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S0898122112007018-main.pdf:pdf},
issn = {0898-1221},
number = {10},
pages = {1645--1664},
url = {http://dx.doi.org/10.1016/j.camwa.2012.12.005},
volume = {65},
year = {2013}
}
@book{Belmonte2019a,
abstract = {Spatial and spatio-temporal model checking techniques have a wide range of application
domains, among which large scale distributed systems and signal and image analysis. We explore a
new domain, namely (semi-)automatic contouring in Medical Imaging, introducing the tool
VoxLogicA which merges the state-of-the-art library of computational imaging algorithms ITK with
the unique combination of declarative specification and optimised execution provided by spatial
logic model checking. The result is a rapid, logic based analysis development methodology. The
analysis of an existing benchmark of medical images for segmentation of brain tumours shows that
simple VoxLogicA analysis can reach state-of-the-art accuracy, competing with best-in-class
algorithms, with the advantage of explainability and easy replicability. Furthermore, due to a two-
orders-of-magnitude speedup compared to the existing general-purpose spatio-temporal model
checker topochecker, VoxLogicA enables interactive development of analysis of 3D medical images,
which can greatly facilitate the work of professionals in this domain.},
archivePrefix = {arXiv},
arxivId = {1811.05677},
author = {Belmonte, Gina and Ciancia, Vincenzo and Latella, Diego and Massink, Mieke},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
doi = {10.1007/978-3-030-17462-0_16},
eprint = {1811.05677},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
isbn = {9783030174613},
issn = {16113349},
pages = {281--298},
url = {http://dx.doi.org/10.1007/978-3-030-17462-0{\_}16},
year = {2019}
}
@article{Sasieta1996,
abstract = {Five immunoassays for the determination of digoxin have been evaluated (Digoxin II,
Abbott; Cedia Digoxin XL, Microgenics; Coat-a-Count Digoxin, Diagnostic Procedure Corporation,
DPC; “Online” Digoxin, Roche Diagnostic Systems; EMIT 2000 Digoxin, Syva). Four of them required
no sample pre-treatment. The methods included a radioimmunoassay, fluoroimrnunoassay, two
enzyme-immunoassays and a turbidimetric immunoassay; the last three mentioned were adapted to
the Cobas{\textregistered} Mira Plus. The intra- and inter-assay precision was lower than 9{\%},
except for Microgenics. The calibration stability fluctuated from 120 days for Abbott to 27 days for
the Roche test, 7 days for the Syva assay and 2 days for Microgenics. The DPC test was not assayed
for calibration stability. The interference from “digoxinlike immunoreactive factor(s)” differed
according to the assay. The highest interference was seen with Abbott and Microgenics, and the
lowest with the DPC test. The comparison among all the methods offered values of “r” higher than
0.95 except between Microgenics and Syva assays where “r” was 0.896. The results obtained with
Roche and Microgenics were higher than 12{\%} of the remaining assays. {\textcopyright} 1996,
Walter de Gruyter. All rights reserved.},
doi = {10.1515/cclm.1996.34.11.935},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/arw2019-proc.pdf:pdf},
issn = {14374331},
number = {11},
pages = {935--940},
pmid = {19838543},
volume = {34},
year = {1996}
}
@article{Chen2015a,
archivePrefix = {arXiv},
arxivId = {arXiv:2001.08236v1},
author = {Chen, Tao and Li, Miqing and Li, Ke and Deb, Kalyanmoy},
eprint = {arXiv:2001.08236v1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2001.08236.pdf:pdf},
pages = {1--21},
title = {{Search-Based Software Engineering for Self-Adaptive Systems: One Survey, Five
Disappointments and Six Opportunities}},
year = {2015}
}
@article{Szenasi2014,
abstract = {Signal processing plays an important role in the work of pathologists; it is especially true
for image processing software products. High-resolution digital images have taken over the role of
traditional tissue slides on a glass plate. In addition to the direct effects of this advancement (sharing
images, remote access, etc.), a new option appeared: the possibility of using image processing
software for automatic (or semi-automatic) diagnostics. One of the most important tasks in this
procedure is the segmentation of the tissue images; we have to identify the main components (in
the case of colon tissue samples, these are the cell nuclei, glands and surface epithelium). There are
several traditional image segmentation methods for this purpose, but none of them provides both
acceptable accuracy and runtime. This paper presents a distributed region growing method
implemented on CPUs and GPGPUs.},
issn = {19984464},
pages = {173--181},
volume = {8},
year = {2014}
}
@article{Katoen2011,
author = {Katoen, Joost-pieter and Zapreev, Ivan S and Moritz, Ernst and Hermanns, Holger and
Jansen, David N},
doi = {10.1016/j.peva.2010.04.001},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/katoen2011.pdf:pdf},
issn = {0166-5316},
number = {2},
pages = {90--104},
title = {{The ins and outs of the probabilistic model checker MRMC}},
url = {http://dx.doi.org/10.1016/j.peva.2010.04.001},
volume = {68},
year = {2011}
}
@article{Abuseta2015,
abstract = {Self adaptation has been proposed to overcome the complexity of today's software
systems which results from the uncertainty issue. Aspects of uncertainty include changing systems
goals, changing resource availability and dynamic operating conditions. Feedback control loops have
been recognized as vital elements for engineering self-adaptive systems. However, despite their
importance, there is still a lack of systematic way of the design of the interactions between the
different components comprising one particular feedback control loop as well as the interactions
between components from different control loops . Most existing approaches are either domain
specific or too abstract to be useful. In addition, the issue of multiple control loops is often neglected
and consequently self adaptive systems are often designed around a single loop. In this paper we
propose a set of design patterns for modeling and designing self adaptive software systems based on
MAPE-K. Control loop of IBM architecture blueprint which takes into account the multiple control
loops issue. A case study is presented to illustrate the applicability of the proposed design patterns.},
doi = {10.5121/ijsea.2015.6402},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1508.01330.pdf:pdf},
issn = {09762221},
number = {4},
pages = {11--28},
volume = {6},
year = {2015}
}
@article{Afzal2019a,
author = {Afzal, Humaira and Rafiq, Muhammad and Awan, Irfan and Yousaf, Muhammad},
doi = {10.1016/j.jss.2019.01.053},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/afzal2019.pdf:pdf},
issn = {0164-1212},
keywords = {Discrete time Markov chain model,Cognitive radio,C,discrete time markov chain,model},
pages = {1--7},
journal = {Journal of Systems and Software},
title = {{Performance analysis of radio spectrum for cognitive radio wireless networks using
discrete time Markov chain}},
url = {https://doi.org/10.1016/j.jss.2019.01.053},
volume = {151},
year = {2019}
}
@article{Cervantes2018,
abstract = {In the real world, the assumption that, once generated, a pervasive system of systems
and the services it provides will remain static is false. Systems can leave or enter, and service
availability in systems can change along with the environmental conditions. The dynamic
composition and adaptation of pervasive systems of systems enable them to expand their
functionality through leverage resources in the user vicinity. Most of the adaptation approaches
based on ad hoc networks use service adaptation created from scratch. That is, each time the
environment changes, the system starts the composition process over again, resulting in the
depletion of system resources and network capacity. We present a protocol for dynamic
composition of a pervasive system of systems and their services in ad hoc networks. Our proposal
uses a dynamic and distributed constraint satisfaction approach to establish the pervasive-system-
of-systems requirements. In this paper, we deal with spatial and temporal constraints. However, any
other kind of restriction, for instance, quality, can be addressed in the same manner. Additionally, a
heuristic allowing recomposing services based on the identification of service components
disqualified due to the dynamism of the environment is presented. Thus, the adaptation of services
is not necessarily done from scratch. Finally, we present simulations that show the performance of
our proposal regarding the nearest work in the state of the art, both in time and in number of
messages consumed for adapting services.},
author = {Cervantes, Francisco and Ramos, F{\'{e}}lix and Guti{\'{e}}rrez, Luis F. and Occello, Michel
and Jamont, Jean Paul},
doi = {10.1109/JSYST.2017.2655031},
file = {:C$\backslash$:/Users/Asus/Downloads/cervantes2017.pdf:pdf},
issn = {19379234},
number = {2},
pages = {1709--1721},
title = {{A New Approach for the Composition of Adaptive Pervasive Systems}},
volume = {12},
year = {2018}
}
@article{Korukhova2007,
abstract = {The work deals with automatic deductive synthesis of functional programs. Formal
specification of a program is taken as a mathematical existence theorem and while proving it, we
derive a program and simultaneously prove that this program corresponds to given specification.
Several problems have to be resolved for automatic synthesis: the choice of synthesis rules that
allows us to derive the basic constructions of a functional program, order of rule application and
choice of a particular induction rule. The method proposed here is based on the deductive tableau
method. The basic method gives rules for functional program construction. To determine the proof
strategy we use some external heuristics, including rippling. And for the induction hypothesis
formation the combination of rippling and the deductive tableau method became very useful. The
proposed techniques are implemented in the system ALISA (Automatic Lisp Synthesizer) and used
for automatic synthesis of several functions in the Lisp language. {\textcopyright} 2007 Springer
Science+Business Media B.V.},
doi = {10.1007/s10472-007-9079-9},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/korukhova2007.pdf:pdf},
issn = {10122443},
number = {3-4},
pages = {255--271},
volume = {50},
year = {2007}
}
@inproceedings{Kwiatkowska2021,
abstract = {Automated verification techniques for stochastic games allow formal reasoning about
systems that feature competitive or collaborative behaviour among rational agents in uncertain or
probabilistic settings. Existing tools and techniques focus on turn-based games, where each state of
the game is controlled by a single player, and on zero-sum properties, where two players or
coalitions have directly opposing objectives. In this paper, we present automated verification
techniques for concurrent stochastic games (CSGs), which provide a more natural model of
concurrent decision making and interaction. We also consider (social welfare) Nash equilibria, to
formally identify scenarios where two players or coalitions with distinct goals can collaborate to
optimise their joint performance. We propose an extension of the temporal logic rPATL for
specifying quantitative properties in this setting and present corresponding algorithms for
verification and strategy synthesis for a variant of stopping games. For finite-horizon properties the
computation is exact, while for infinite-horizon it is approximate using value iteration. For zero-sum
properties it requires solving matrix games via linear programming, and for equilibria-based
properties we find social welfare or social cost Nash equilibria of bimatrix games via the method of
labelled polytopes through an SMT encoding. We implement this approach in PRISM-games, which
required extending the tool's modelling language for CSGs, and apply it to case studies from domains
including robotics, computer security and computer networks, explicitly demonstrating the benefits
of both CSGs and equilibria-based properties.},
author = {Kwiatkowska, Marta and Norman, Gethin and Parker, David and Santos, Gabriel},
doi = {10.1007/s10703-020-00356-y},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2008.04613.pdf:pdf},
url = {https://link.springer.com/article/10.1007/s10703-020-00356-y},
year = {2021}
}
@article{Kwiatkowska2016a,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Marta+Kwiatkowska,+
+Model+checking+and+strategy+synthesis+for+stochastic+games.pdf:pdf},
number = {Xxx},
pages = {1--17},
title = {{Model Checking and Strategy Synthesis for Stochastic Games: From Theory to Practice}},
year = {2016}
}
@article{Kwiatkowska2015,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/marta-kth15-p4.pdf:pdf},
number = {August},
url = {http://www.prismmodelchecker.org/lectures/},
year = {2015}
}
@book{Kwiatkowska2017,
abstract = {This book provides readers with a comprehensive introduction to the formal verification
of hardware and software. World-leading experts from the domain of formal proof techniques show
the latest developments starting from electronic system level (ESL) descriptions down to the register
transfer level (RTL). The authors demonstrate at different abstraction layers how formal methods
can help to ensure functional correctness. Coverage includes the latest academic research results, as
well as descriptions of industrial tools and case studies.},
doi = {10.1007/978-3-319-57685-5},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/kwiatkowska2017.pdf:pdf},
isbn = {9783319576855},
pages = {1--182},
publisher = {Springer},
year = {2017}
}
@techreport{Hillston2014,
author = {Hillston, Jane and Pitt, Jeremy and Wirsing, Martin and Zambonelli, Franco},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/dagrep{\_}v004{\_}i012{\_}p068{\
_}s14512.pdf:pdf},
isbn = {9780262326216},
number = {12},
title = {{Collective Adaptive Systems: Qualitative and Quantitative Modelling and Analysis}},
volume = {4},
year = {2014}
}
@inproceedings{Schmickl2011,
archivePrefix = {arXiv},
arxivId = {arXiv:1108.5643v1},
eprint = {arXiv:1108.5643v1},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1108.5643.pdf:pdf},
year = {2011}
}
@article{Gueye2014,
abstract = {Computing systems have become more and more distributed and heterogeneous, making
their manual administration difficult and error-prone. The Autonomic Computing approach has been
proposed to overcome this issue, by automating the administration of computing systems with the
help of control loops called autonomic managers. Many research works have investigated the
automation of the administration functions of computing systems and today many autonomic
managers are available. However the existing autonomic manages are mostly specialized in the
management of few administration concerns such as self-repair which handles server failures, and
self-sizing which deals with dynamic server allocation. This makes necessary the coexistence of
multiple autonomic managers for a complete system management. The coexistence of several such
managers is required to handle multiple concerns, yet requires coordination mechanisms to avoid
incoherent administration decisions. We investigate the use of control techniques for the design of
coordination controllers, for which we exercise synchronous programming that provide formal
semantics, and discrete controller synthesis to automate the construction of the controller. The
paper details an application of the latter approach for the design of a coordination controller to
orchestrate the execution of four self-repair and two self-sizing managers that address the
availability and performance of a multi-tier replication-based system. We evaluate and demonstrate
the benefits of our coordination solution by executing the RUBiS Benchmark web application. {\
textcopyright} 2014 Elsevier B.V. All rights reserved.},
author = {Gueye, Soguy Mak Kar{\'{e}} and {De Palma}, No{\"{e}}l and Rutten, {\'{E}}ric and Tchana,
Alain and Berthier, Nicolas},
doi = {10.1016/j.future.2013.12.037},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2014-Coordinating{\_}self-sizing{\_}and{\
_}self-repair{\_}managers{\_}for{\_}multi-tier{\_}systems.pdf:pdf},
issn = {0167739X},
pages = {14--26},
url = {http://dx.doi.org/10.1016/j.future.2013.12.037},
volume = {35},
year = {2014}
}
@article{Delaval2014,
abstract = {Complex computing systems are increasingly self-adaptive, with an autonomic computing
approach for their administration. Real systems require the co-existence of multiple autonomic
management loops, each complex to design. However their uncoordinated co-existence leads to
performance degradation and possibly to inconsistency. There is a need for methodological supports
facilitating the coordination of multiple autonomic managers. In this paper we propose a method
focusing on the discrete control of the interactions of managers. We follow a component-based
approach and explore modular discrete control, allowing to break down the combinatorial
complexity inherent to the state-space exploration technique. This improves scalability of the
approach and allows constructing a hierarchical control. It also allows re-using complex managers in
different contexts without modifying their control specifications. We build a component-based
coordination of managers, with introspection, adaptivity and reconfiguration. We validate our
method on a multiple-loop multi-tier system. Copyright {\textcopyright} 2014 ACM.},
author = {Delaval, Gwena{\"{e}}l and Gueye, Soguy Mak Kar{\'{e}} and {De Palma}, No{\"{e}}l and
Rutten, Eric},
doi = {10.1145/2602458.2602465},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2014-Modular{\_}coordination{\_}of{\
_}multiple{\_}autonomic{\_}managers.pdf:pdf},
isbn = {9781450325776},
journal = {CBSE 2014 - Proceedings of the 17th International ACM SIGSOFT Symposium on
Component-Based Software Engineering (Part of CompArch 2014)},
number = {Cbse},
pages = {3--12},
year = {2014}
}
@article{Eshuis2017,
doi = {10.1109/TSC.2015.2467395},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2017-Synthesizing{\_}Minimal{\
_}Protocol{\_}Adaptors{\_}for{\_}Asynchronously{\_}Interacting{\_}Services.pdf:pdf},
issn = {19391374},
number = {3},
pages = {461--474},
volume = {10},
year = {2017}
}
@article{Autili2017a,
abstract = {Software systems are often built by composing together software services distributed
over the Internet. Choreographies are a form of decentralized composition that models the external
interaction of the participant services by specifying peer-to-peer message exchanges from a global
perspective. Nowadays, very few approaches address the problem of actually realizing
choreographies in an automatic way. Most current approaches are rather static and are poorly
suited to the need of the Future Internet. In this chapter, we propose a method for the automatic
synthesis of evolving choreographies. Coordination software entities are synthesized in order to
proxify and control the participant services' interaction. When interposed among the services,
coordination entities enforce the collaboration specified by the choreography. The ability to evolve
the coordination logic in a modular way enables choreography evolution in response to possible
changes. We illustrate our method at work on a running example in the domain of Intelligent
Transportation Systems (ITS).},
author = {Autili, Marco and Inverardi, Paola and Perucci, Alexander and Tivoli, Massimo},
doi = {10.1007/978-3-319-74183-3_10},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2017-Synthesis{\_}of{\_}Distributed{\
_}and{\_}Adaptable{\_}Coordinators{\_}to{\_}Enable{\_}Choreography{\_}Evolution.pdf:pdf},
isbn = {9783319741826},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {282--306},
year = {2017}
}
@article{Berthier2016,
abstract = {The ever growing complexity of software systems has led to the emergence of automated
solutions for their management. The software assigned to this work is usually called an Autonomic
Management System (AMS). It is ordinarily designed as a composition of several managers, which
are pieces of software evaluating the dynamics of the system under management through
measurements (e.g., workload, memory usage), taking decisions, and acting upon it so that it stays in
a set of acceptable operating states. However, careless combination of managers may lead to
inconsistencies in the taken decisions, and classical approaches dealing with these coordination
problems often rely on intricate and ad hoc solutions. To tackle this problem, we take a global view
and underscore that AMSs are intrinsically reactive, as they react to flows of monitoring data by
emitting flows of reconfiguration actions. Therefore we propose a new approach for the design of
AMSs, based on synchronous programming and discrete controller synthesis techniques. They
provide us with high-level languages for modeling the system to manage, as well as means for
statically guaranteeing the absence of logical coordination problems. Hence, they suit our main
contribution, which is to obtain guarantees at design time about the absence of logical
inconsistencies in the taken decisions. We detail our approach, illustrate it by designing an AMS for a
realistic multi-tier application, and evaluate its practicality with an implementation.},
author = {Berthier, Nicolas and Rutten, {\'{E}}ric and {De Palma}, No{\"{e}}l and Gueye, Soguy Mak
Kar{\'{e}}},
doi = {10.1109/TSE.2015.2510004},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2016-Designing{\_}Autonomic{\
_}Management{\_}Systems{\_}by{\_}Using{\_}Reactive{\_}Control{\_}Techniques (1).pdf:pdf},
issn = {00985589},
number = {7},
pages = {640--657},
volume = {42},
year = {2016}
}
@article{Nostro2016,
abstract = {Our everyday life is pervaded by the use of a number of heterogeneous systems that are
continuously and dynamically available in the networked environment to interoperate to achieve
some goal. Goals may include both functional and non functional aspects and the evolving nature of
such environment requires automated solutions as means to reach the needed level of flexibility.
Achieving interoperability in such environment is a challenging problem. Even though some of such
systems may in principle interact since they have compatible functionalities and similar interaction
protocols, mismatches in their protocols and non functional issues arising from the environment
may undermine their seamless interoperability. In this paper, we propose an approach for the
automated synthesis of application layer connectors between heterogeneous networked systems
(NSs) addressing both functional and some non functional interoperability. Our contributions are: (i)
an automated connectors synthesis approach for NSs interoperability taking into account functional,
performance and dependability aspects spanning pre-deployment time and run-time; (ii) a
connector adaptation process, related to the performance and dependability aspects; and (iii) a
stochastic model-based implementation of the performance and dependability analysis. In addition,
we implemented, analyzed, and critically discussed a case study.},
author = {Nostro, Nicola and Spalazzese, Romina and Giandomenico, Felicita Di and Inverardi, Paola},
doi = {10.1016/j.jss.2015.09.038},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/2016-Achieving{\_}functional{\_}and{\
_}non{\_}functional{\_}interoperability{\_}through{\_}synthesized{\_}connectors.pdf:pdf},
issn = {01641212},
pages = {185--199},
title = {{Achieving functional and non functional interoperability through synthesized connectors}},
volume = {111},
year = {2016}
}
@article{Li2020,
abstract = {Multiagent systems (MASs) have received extensive attention in a variety of domains,
such as robotics and distributed control. This paper focuses on how independent learners (ILs,
structures used in decentralized reinforcement learning) decide on their individual behaviors to
achieve coherent joint behavior. To date, Reinforcement learning(RL) approaches for ILs have not
guaranteed convergence to the optimal joint policy in scenarios in which communication is difficult.
Especially in a decentralized algorithm, the proportion of credit for a single agent's action in a
multiagent system is not distinguished, which can lead to miscoordination of joint actions.
Therefore, it is highly significant to study the mechanisms of coordination between agents in MASs.
Most previous coordination mechanisms have been carried out by modeling the communication
mechanism and other agent policies. These methods are applicable only to a particular system, so
such algorithms do not offer generalizability, especially when there are dozens or more agents.
Therefore, this paper mainly focuses on the MAS contains more than a dozen agents. By combining
the method of parallel computation, the experimental environment is closer to the application
scene. By studying the paradigm of centralized training and decentralized execution(CTDE), a multi-
agent reinforcement learning algorithm for implicit coordination based on TD error is proposed. The
new algorithm can dynamically adjust the learning rate by deeply analyzing the dissonance problem
in the matrix game and combining it with a multiagent environment. By adjusting the dynamic
learning rate between agents, coordination of the agents' strategies can be achieved. Experimental
results show that the proposed algorithm can effectively improve the coordination ability of a MAS.
Moreover, the variance of the training results is more stable than that of the hysteretic Q
learning(HQL) algorithm. Hence, the problem of miscoordination in a MAS can be avoided to some
extent without additional communication. Our work provides a new way to solve the
miscoordination problem for reinforcement learning algorithms in the scale of dozens or more
number of agents. As a new IL structure algorithm, our results should be extended and further
studied.},
author = {Li, Meng Lin and Chen, Shaofei and Chen, Jing},
doi = {10.1109/ACCESS.2020.2997899},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/access.2020.2997899.pdf:pdf},
issn = {21693536},
pages = {99404--99421},
title = {{Adaptive Learning: A New Decentralized Reinforcement Learning Approach for Cooperative
Multiagent Systems}},
volume = {8},
year = {2020}
}
@article{Tao2020,
abstract = {Internet of Things (IoT) is a pervasive technology covering many applications areas (Smart
Mobility, Smart Industry, Smart Healthcare, Smart Building, etc.). Its success and the technology
evolution allow targeting more complex and critical applications such as the management of critical
infrastructures and cooperative service robotics, which requires real time operation and a higher
level of intelligence in the monitoring-control command for decision-making. Furthermore, these
applications type need to be fully validated in advance considering that bugs discovered during real
operation could cause significant damages. In order to avoid these drawbacks, IoT developers and
system integrators need advanced tools and methodologies. This paper presents a methodology and
a set of tools, defined and developed in the context of the BRAIN-IoT European Union (EU) project.
The overall framework includes both Open semantic models to enforce interoperable operations
and exchange of data and control features; and Model-based development tools to implement
Digital Twin solutions to facilitate the prototyping and integration of interoperable and reliable IoT
system solutions. After describing the solution developed, this paper also presents concrete use
cases based on the two critical systems mentioned above, leveraging the application scenarios used
to validate the concepts developed and results obtained by the BRAIN-IoT project.},
author = {Tao, Xu and Conzon, Davide and Ferrera, Enrico and Li, Shuai and Goetz, Juergen and
Maillet-Contoz, Laurent and Michel, Emmanuel and Diaz-Nava, Mario and Baouya, Abdel Hakim and
Chehida, Salim},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/paper{\_}11.pdf:pdf},
issn = {16130073},
pages = {80--89},
title = {{Model based methodology and framework for design and management of next-gen IoT
systems}},
volume = {2739},
year = {2020}
}
@article{Azadegan2021,
abstract = {The COVID-19 pandemic has illustrated the broad and diverse challenges that supply
networks face in preparing for and adapting to significant supply and demand disruptions. While
much has been written about resilience strategies, few consider resiliency from a network level
perspective. In this essay, we explain a typology of resiliency strategies linked to different types of
collaboration within and between supply networks. Existing literature focuses on two of these types,
micro- and macro-level supply network resilience. Micro-level resilience occurs when buyers and
suppliers coordinate directly on supply risk prevention and recovery. Macro-level resilience occurs
when corporations, including competitors, collaborate with institutions such as government or trade
associations to manage or regulate longer-term supply risks. This essay identifies a third type, meso-
level resilience. Meso-level resilience emerges when multiple supply networks collaborate on short-
to medium-term supply risks. These collaborations tend to be more opportunistic and ad hoc than
micro- or macro-level collaborations, and we argue that they can be viewed as complex adaptive
systems, exhibiting self-organization and dynamism. We identify a number of novel characteristics of
meso-level resilience and discuss research implications.},
doi = {10.1111/jscm.12256},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/[email protected]:pdf},
issn = {1745493X},
journal = {Journal of Supply Chain Management},
number = {1},
pages = {17--26},
title = {{A Typology of Supply Network Resilience Strategies: Complex Collaborations in a Complex
World}},
volume = {57},
year = {2021}
@article{Calinescu2020a,
abstract = {Ensuring that systems achieve their goals under uncertainty is a key driver for self-
adaptation. Nevertheless, the concept of uncertainty in self-adaptive systems (SAS) is still
insufficiently understood. Although several taxonomies of uncertainty have been proposed,
taxonomies alone cannot convey the SAS research community's perception of uncertainty. To
explore and to learn from this perception, we conducted a survey focused on the SAS ability to deal
with unanticipated change and to model uncertainty, and on the major challenges that limit this
ability. In this paper, we analyse the responses provided by the 51 participants in our survey. The
insights gained from this analysis include the view - held by 71{\%} of our participants - that SAS can
be engineered to cope with unanticipated change, e.g., through evolving their actions, synthesising
new actions, or using default actions to deal with such changes. To handle uncertainties that affect
SAS models, the participants recommended the use of confidence intervals and probabilities for
parametric uncertainty, and the use of multiple models with model averaging or selection for
structural uncertainty. Notwithstanding this positive outlook, the provision of assurances for safety-
critical SAS continues to pose major challenges according to our respondents. We detail these
findings in the paper, in the hope that they will inspire valuable future research on self-adaptive
systems.},
author = {Calinescu, Radu and Mirandola, Raffaela and Perez-Palacin, Diego and Weyns, Danny},
doi = {10.1109/ACSOS49614.2020.00047},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ACSOS49614.2020.00047.pdf:pdf},
isbn = {9781728172774},
journal = {Proceedings - 2020 IEEE International Conference on Autonomic Computing and Self-
Organizing Systems, ACSOS 2020},
pages = {242--251},
year = {2020}
}
@article{Klos2018,
doi = {10.1016/j.sysarc.2018.03.004},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/j.sysarc.2018.03.004.pdf:pdf},
issn = {13837621},
pages = {28--42},
volume = {85-86},
year = {2018}
}
@article{Gabor2020,
abstract = {Systems are becoming increasingly more adaptive, using techniques like machine
learning to enhance their behavior on their own rather than only through human developers
programming them. We analyze the impact the advent of these new techniques has on the discipline
of rigorous software engineering, especially on the issue of quality assurance. To this end, we
provide a general description of the processes related to machine learning and embed them into a
formal framework for the analysis of adaptivity, recognizing that to test an adaptive system a new
approach to adaptive testing is necessary. We introduce scenario coevolution as a design pattern
describing how system and test can work as antagonists in the process of software evolution. While
the general pattern applies to large-scale processes (including human developers further
augmenting the system), we show all techniques on a smaller-scale example of an agent navigating a
simple smart factory. We point out new aspects in software engineering for adaptive systems that
may be tackled naturally using scenario coevolution. This work is a substantially extended take on
Gabor et al. (International symposium on leveraging applications of formal methods, Springer, pp
137–154, 2018).},
author = {Gabor, Thomas and Sedlmeier, Andreas and Phan, Thomy and Ritz, Fabian and Kiermeier,
Marie and Belzner, Lenz and Kempter, Bernhard and Klein, Cornel and Sauer, Horst and Schmid,
Reiner and Wieghardt, Jan and Zeller, Marc and Linnhoff-Popien, Claudia},
doi = {10.1007/s10009-020-00560-5},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Gabor2020{\_}Article{\_}TheScenarioCoevolutionParadigm.pdf:pdf},
issn = {14332787},
journal = {International Journal on Software Tools for Technology Transfer},
number = {4},
pages = {457--476},
title = {{The scenario coevolution paradigm: adaptive quality assurance for adaptive systems}},
url = {https://doi.org/10.1007/s10009-020-00560-5},
volume = {22},
year = {2020}
}
@article{Botelho2020,
abstract = {Multi-Robot System (MRS) is composed of a group of robots that work cooperatively.
However, Multi-Agent System (MAS) is computational systems consisting of a group of agents that
interact with each other to solve a problem. The central difference between MRS and MAS is that in
the first case, the agent is a robot, and in the second, it is a software. Analyzing the scientific
literature, it is possible to notice that few studies address the integration between MAS and MRS. In
order to achieve the interdisciplinary integration, the theoretical background of these areas must be
considered in this paper, so that the integration can be applied using a case study of decentralized
MRS. The objective of this MRS is to track and surround a stationary target. Also, it has been
implemented and validated in the robot simulator called Virtual Robot Experimentation Platform (V-
REP). In the validation of the proposed MRS, a scenario with three robots and a stationary target
were defined. In the tracking task, the robot can detect the target whose position is not known a
priori. When the detection occurs, the V-REP informs the target position to the robot because the
environment is discretized into a grid of rectangular cells. After that, all the robots are directed to
the target, and the surround task is realized. In this task, a mathematical model with direct
communication between the robots was used to keep the robots equidistant therefrom and from
each other.},
author = {Botelho, Wagner Tanaka and Marietto, Maria Das Grac{\c{a}}s Bruno and Mendes,
Eduardo De Lima and Sousa, Daniel Rodrigues De and Pimentel, Edson Pinheiro and {Da Silva}, Vera
L{\'{u}}cia and {Dos Santos}, Tamires},
doi = {10.1017/S0269888920000375},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/S0269888920000375.pdf:pdf},
issn = {14698005},
journal = {The Knowledge Engineering Review},
pages = {1--24},
volume = {35},
year = {2020}
}
@article{Arcaini2017,
abstract = {Feedback control loops that monitor and adapt managed parts of a software system are
considered crucial for realizing self-adaptation in software systems. The MAPE-K (Monitor-Analyze-
Plan-Execute over a shared Knowledge) autonomic control loop is the most influential reference
control model for self-adaptive systems. The design of complex distributed self-adaptive systems
having decentralized adaptation control by multiple interacting MAPE components is among the
major challenges. In particular, formal methods for designing and assuring the functional correctness
of the decentralized adaptation logic are highly demanded. This article presents a framework for
formal modeling and analyzing self-adaptive systems. We contribute with a formalism, called self-
adaptive Abstract State Machines, that exploits the concept of multiagent Abstract State Machines
to specify distributed and decentralized adaptation control in terms of MAPE-K control loops, also
possible instances of MAPE patterns. We support validation and verification techniques for
discovering unexpected interfering MAPE-K loops, and for assuring correctness of MAPE
components interaction when performing adaptation.},
author = {Arcaini, Paolo and Riccobene, Elvinia and Scandurra, Patrizia},
doi = {10.1145/3019598},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/3019598.pdf:pdf},
issn = {15564703},
journal = {ACM Transactions on Autonomous and Adaptive Systems},
number = {4},
title = {{Formal design and verification of self-adaptive systems with decentralized control}},
volume = {11},
year = {2017}
}
@article{Vig2007,
abstract = {A problem that has recently attracted the attention of the research community is the
autonomous formation of robot teams to perform complex multi-robot tasks. The corresponding
problem for software agents is also known in the multi-agent community as the coalition formation
problem. Numerous algorithms for software agent coalition formation have been provided that
allow for efficient cooperation in both competitive and cooperative environments. However, despite
the plethora of relevant literature on the software agent coalition formation problem, and the
existence of similar problems in theoretical computer science, the multi-robot coalition formation
problem has not been sufficiently grounded for different tasks and task environments. In this paper,
comparisons are drawn to highlight the differences between software agents and robotics, and
parallel problems from theoretical computer science are identified. This paper further explores robot
coalition formation in different practical robotic environments. A heuristic-based coalition formation
algorithm from our previous work was extended to operate in precedence ordered cooperative
environments. In order to explore coalition formation in competitive environments, the paper also
studies the RACHNA system, a market based coalition formation system. Finally, the paper
investigates the notion of task preemption for complex multi-robot tasks in random allocation
environments. {\textcopyright} 2007 Springer Science+Business Media, Inc.},
doi = {10.1007/s10846-007-9150-0},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/s10846-007-9150-0.pdf:pdf},
issn = {09210296},
journal = {Journal of Intelligent and Robotic Systems},
number = {1},
pages = {85--118},
volume = {50},
year = {2007}
}
@inproceedings{Sheena2018,
booktitle = {2018 International Conference on Control, Power, Communication and Computing
Technologies, ICCPCCT 2018},
doi = {10.1109/ICCPCCT.2018.8574228},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/n2018.pdf:pdf},
isbn = {9781538607961},
pages = {352--355},
publisher = {IEEE},
title = {{A Review on Formal Verification of Basic Algorithms in Time Triggered Architecture}},
year = {2018}
}
@article{Hennicker2015,
abstract = {The Helena approach allows to specify dynamically evolving ensembles of collaborating
components. It is centered around the notion of roles which components can adopt in ensembles. In
this paper, we focus on the early verification of Helena models. We propose to translate Helena
specifications into Promela and check satisfaction of LTL properties with Spin [11]. To prove the
correctness of the translation, we consider an SOS semantics of (simplified variants of) Helena and
Promela and establish stutter trace equivalence between them. Thus, we can guarantee that a
Helena specification and its Promela translation satisfy the same LTL formulae (without next). Our
correctness proof relies on a new, general criterion for stutter trace equivalence.},
doi = {10.1007/978-3-319-23165-5_16},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/logic-rewriting-and-concurrency-2015{\_}2{\_}2.pdf:pdf},
isbn = {9783319231648},
issn = {16113349},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
pages = {331--360},
volume = {9200},
year = {2015}
}
@article{Hennicker2014,
doi = {10.1007/978-3-642-54624-2_18},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/specification-algebra-and-software-2014{\_}2.pdf:pdf},
journal = {Specification, Algebra, and Software},
pages = {359--381},
year = {2014}
}
@article{Lemos2011,
author = {Lemos, Rog{\'{e}}rio De and Giese, Holger and M{\"{u}}ller, Hausi A and Shaw, Mary and
Andersson, Jesper and Baresi, Luciano and Becker, Basil and Bencomo, Nelly and Brun, Yuriy and
Cukic, Bojan and Dustdar, Schahram and Engels, Gregor and Geihs, Kurt and Goeschka, Karl M and
Grassi, Vincenzo and Inverardi, Paola and Karsai, Gabor and Kramer, Jeff and Litoiu, Marin and
Magee, Jeff and Malek, Sam and Mankovskii, Serge and Mirandola, Raffaela and Mylopoulos, John
and Nierstrasz, Oscar and Pezz{\`{e}}, Mauro and Prehofer, Christian and Sch{\"{a}}fer, Wilhelm and
Schlichting, Rick and Smith, Dennis B and Sousa, Jo{\~{a}}o P and Tamura, Gabriel and Tahvildari,
Ladan and Norha, M and Vogel, Thomas and Weyns, Danny and Wong, Kenny and Wuttke, Jochen},
title = {{Software Engineering for Self-Adaptive Systems: A Second Research Roadmap (Draft
Version of May 20, 2011)}},
year = {2011}
}
@book{Anderson2013,
abstract = {This book is about understanding, designing, controlling, and governing adaptive
collective systems. It is intended for readers from master's students to Ph.D. students, from
engineers to decision makers, and anyone else who is interested in understanding how technologies
are changing the way we think and live. The},
author = {Anderson, Stuart and Bredeche, Nicolas and Eiben, Agoston Endre and Kampis, George and
van Steen, Marten},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/adaptive-collective-systems.pdf:pdf},
pages = {72},
title = {{Adaptive Collective Systems: Herding Black Sheep}},
url = {http://focas.eu/adaptive-collective-systems/},
year = {2013}
}
@inproceedings{Sanderson2015,
author = {Sanderson, David and Antzoulatos, Nikolas and Chaplin, Jack C and Pitt, Jeremy and
German, Carl and Norbury, Alan and Kelly, Emma and Ratchev, Svetan},
doi = {10.1109/SASOW.2015.15},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/sanderson2015.pdf:pdf},
isbn = {9781467384391},
year = {2015}
}
@inproceedings{Sharma2015,
doi = {10.1016/j.procs.2015.10.019},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1-s2.0-S187705091503183X-main.pdf:pdf},
journal = {Procedia Computer Science},
pages = {16--28},
volume = {70},
year = {2015}
}
@inproceedings{Petri2014,
booktitle = {EASE '14: Proceedings of the 18th International Conference on Evaluation and
Assessment in Software Engineering},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/a34-petric.pdf:pdf},
isbn = {9781450324762},
year = {2014}
}
@inproceedings{Qureshi2010,
abstract = {Requirements engineering (RE) for self-adaptive systems (SAS) is an emerging research
area. The key features of such systems are to be aware of the changes in both their operating and
external environments, while simultaneously remaining aware of their users' goals and preferences.
This is accomplished by evaluating such changes and adapting to a suitable alternative that can
satisfy those changes in the context of the user goals. Most current RE languages do not consider
this `reflective' and online component of requirements models. In this paper, we propose a new
framework for building SAS that is goal- and user-oriented. We sketch a framework to enable
continuous adaptive requirements engineering (CARE) for SAS that leverage requirements-aware
systems and exploits the Techne modeling language and reasoning system. We illustrate our
framework by showing how it can handle an adaptive scenario in the travel domain.},
doi = {10.1109/reruntime.2010.5628552},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/RERUNTIME.2010.5628552.pdf:pdf},
isbn = {9781424487998},
pages = {9--16},
year = {2010}
}
@book{Holzmann1991,
author = {Holzmann, Gerard J.},
doi = {10.1145/122419.1024051},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/x20v{\_}1991.pdf:pdf},
isbn = {978-0-13-539925-5},
pages = {1--539},
publisher = {Prentice Hall},
title = {{Design and Validation of Computer Protocols}},
year = {1991}
}
@article{Alam2014,
abstract = {Optimization based pattern discovery has emerged as an important field in knowledge
discovery and data mining (KDD), and has been used to enhance the efficiency and accuracy of
clustering, classification, association rules and outlier detection. Cluster analysis, which identifies
groups of similar data items in large datasets, is one of its recent beneficiaries. The increasing
complexity and large amounts of data in the datasets have seen data clustering emerge as a popular
focus for the application of optimization based techniques. Different optimization techniques have
been applied to investigate the optimal solution for clustering problems. Swarm intelligence (SI) is
one such optimization technique whose algorithms have successfully been demonstrated as
solutions for different data clustering domains. In this paper we investigate the growth of literature
in SI and its algorithms, particularly Particle Swarm Optimization (PSO). This paper makes two major
contributions. Firstly, it provides a thorough literature overview focusing on some of the most cited
techniques that have been used for PSO-based data clustering. Secondly, we analyze the reported
results and highlight the performance of different techniques against contemporary clustering
techniques. We also provide an brief overview of our PSO-based hierarchical clustering approach
(HPSO-clustering) and compare the results with traditional hierarchical agglomerative clustering
(HAC), K-means, and PSO clustering. {\textcopyright} 2014 Elsevier B.V.},
author = {Alam, Shafiq and Dobbie, Gillian and Koh, Yun Sing and Riddle, Patricia and {Ur Rehman},
Saeed},
doi = {10.1016/j.swevo.2014.02.001},
issn = {22106502},
journal = {Swarm and Evolutionary Computation},
pages = {1--13},
title = {{Research on particle swarm optimization based clustering: A systematic review of literature
and techniques}},
volume = {17},
year = {2014}
}
@article{Eiben2015,
abstract = {Evolution has provided a source of inspiration for algorithm designers since the birth of
computers. The resulting field, evolutionary computation, has been successful in solving engineering
tasks ranging in outlook from the molecular to the astronomical. Today, the field is entering a new
phase as evolutionary algorithms that take place in hardware are developed, opening up new
avenues towards autonomous machines that can adapt to their environment. We discuss how
evolutionary computation compares with natural evolution and what its benefits are relative to
other computing approaches, and we introduce the emerging area of artificial evolution in physical
systems.},
author = {Eiben, Agoston E. and Smith, Jim},
doi = {10.1038/nature14544},
issn = {0028-0836},
journal = {Nature},
number = {7553},
pages = {476--482},
pmid = {26017447},
title = {{From evolutionary computation to the evolution of things}},
url = {http://www.nature.com/doifinder/10.1038/nature14544},
volume = {521},
year = {2015}
}
@article{Cai2016,
abstract = {Uncovering community structures of a complex network can help us to understand how
the network functions. Over the past few decades, network community detection has attracted
growing research interest from many fields. Many community detection methods have been
developed. Network community structure detection can be modelled as optimisation problems. Due
to their inherent complexity, these problems often cannot be well solved by traditional optimisation
methods. For this reason, evolutionary algorithms have been adopted as a major tool for dealing
with community detection problems. This paper presents a survey on evolutionary algorithms for
network community detection. The evolutionary algorithms in this survey cover both single objective
and multiobjective optimisations. The network models involve weighted/unweighted,
signed/unsigned, overlapping/non-overlapping and static/dynamic ones. His current research
interests are in the area of computational intelligence and complex network analysis.},
author = {Cai, Qing and Ma, Lijia and Gong, Maoguo and Tian, Dayong},
doi = {10.1504/IJBIC.2016.076329},
journal = {International Journal of Bio-Inspired Computation},
number = {2},
pages = {84},
url = {http://www.inderscience.com/link.php?id=76329},
volume = {8},
year = {2016}
}
@article{Hu2015,
abstract = {Despite the wide application of evolutionary computation (EC) techniques to rule
discovery in stock algorithmic trading (AT), a comprehensive literature review on this topic is
unavailable. Therefore, this paper aims to provide the first systematic literature review on the state-
of-the-art application of EC techniques for rule discovery in stock AT. Out of 650 articles published
before 2013 (inclusive), 51 relevant articles from 24 journals were confirmed. These papers were
reviewed and grouped into three analytical method categories (fundamental analysis, technical
analysis, and blending analysis) and three EC technique categories (evolutionary algorithm, swarm
intelligence, and hybrid EC techniques). A significant bias toward the applications of genetic
algorithm-based (GA) and genetic programming-based (GP) techniques in technical trading rule
discovery is observed. Other EC techniques and fundamental analysis lack sufficient study.
Furthermore, we summarize the information on the evaluation scheme of selected papers and
particularly analyze the researches which compare their models with buy and hold strategy (B{\&}H).
We observe an interesting phenomenon where most of the existing techniques perform effectively
in the downtrend and poorly in the uptrend, and considering the distribution of research in the
classification framework, we suggest that this phenomenon can be attributed to the inclination of
factor selections and problem in transaction cost selections. We also observe the significant
influence of the transaction cost change on the margins of excess return. Other influenced factors
are also presented in detail. The absence of ways for market trend prediction and the selection of
transaction cost are two major limitations of the studies reviewed. In addition, the combination of
trading rule discovery techniques and portfolio selection is a major research gap. Our review reveals
the research focus and gaps in applying EC techniques for rule discovery in stock AT and suggests a
roadmap for future research.},
author = {Hu, Yong and Liu, Kang and Zhang, Xiangzhou and Su, Lijun and Ngai, E. W.T. and Liu, Mei},
doi = {10.1016/j.asoc.2015.07.008},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Article For FYP/2015{\_}ASOC{\_}Applicationofevolutionarycomputationforrulediscoveryinstockalgorithmictrading{\_}Aliteraturereview.pdf:pdf},
issn = {15684946},
journal = {Applied Soft Computing},
pages = {534--551},
title = {{Application of evolutionary computation for rule discovery in stock algorithmic trading: A
literature review}},
url = {http://dx.doi.org/10.1016/j.asoc.2015.07.008},
volume = {36},
year = {2015}
}
@article{Guzek2015,
abstract = {Cloud computing is significantly reshaping the computing industry. Individuals and small
organizations can benefit from using state-of-the-art services and infrastructure, while large
companies are attracted by the flexibility and the speed with which they can obtain the services.
Service providers compete to offer the most attractive conditions at the lowest prices. However, the
environmental impact and legal aspects of cloud solutions pose additional challenges. Indeed, the
new cloud-related techniques for resource virtualization and sharing and the corresponding service
level agreements call for new optimization models and solutions. It is important for computational
intelligence researchers to understand the novelties introduced by cloud computing. The current
survey highlights and classifies key research questions, the current state of the art, and open
problems.},
doi = {10.1109/MCI.2015.2405351},
issn = {1556603X},
journal = {IEEE Computational Intelligence Magazine},
pages = {53--67},
title = {{A survey of evolutionary computation for resource management of processing in cloud
computing [review article]}},
volume = {10},
year = {2015}
}
@article{Li2015,
abstract = {Multiobjective evolutionary algorithms (MOEAs) have been widely used in real-world
applications. However, most MOEAs based on Pareto-dominance handle many-objective problems
(MaOPs) poorly due to a high proportion of incomparable and thus mutually nondominated
solutions. Recently, a number of many-objective evolutionary algorithms (MaOEAs) have been
proposed to deal with this scalability issue. In this article, a survey of MaOEAs is reported. According
to the key ideas used, MaOEAs are categorized into seven classes: relaxed dominance based,
diversity-based, aggregation-based, indicator-based, reference set based, preference-based, and
dimensionality reduction approaches. Several future research directions in this field are also
discussed.},
author = {Li, Bingdong and Li, Jinlong and Tang, Ke and Yao, Xin},
doi = {10.1145/2792984},
issn = {0360-0300},
journal = {ACM Computing Surveys},
number = {1},
pages = {13:1--13:35},
title = {{Many-Objective Evolutionary Algorithms: A Survey}},
volume = {48},
year = {2015}
}
@article{Kim2017,
doi = {10.3390/en10101668},
file = {:C$\backslash$:/Users/Asus/Downloads/energies-10-01668-v2.pdf:pdf},
journal = {Energies},
pages = {1--20},
title = {{Learning-Based Adaptive Imputation Method with kNN Algorithm for Missing Power Data}},
volume = {10},
year = {2017}
}
@article{Gumble2017,
pages = {94--98},
title = {{Analysis {\&} Classification of Acute Lymphoblastic Leukemia using KNN Algorithm}},
year = {2017}
}
@article{Vandenbroucke1998,
doi = {10.1109/ICIP.1998.723452},
file = {:C$\backslash$:/Users/Asus/Downloads/vandenbroucke{\_}icip{\_}1998.pdf:pdf},
isbn = {0818688211},
number = {November},
title = {{Color Pixels Classification in an Hybrid Color Space}},
year = {1998}
}
@article{Science2018,
doi = {10.24327/IJRSR},
file = {:C$\backslash$:/Users/Asus/Downloads/10735-A-2018.pdf:pdf},
journal = {International Journal of Recent Scientific Research},
pages = {26126--26130},
title = {{Analysis of K-Nearest Neighbor Technique for Breast Cancer Disease Classification}},
volume = {9},
year = {2018}
}
@article{Hassan2010,
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/1106-1146-1-PB.pdf:pdf},
number = {5},
pages = {1--15},
volume = {6},
year = {2010}
}
@article{Chen2017,
abstract = {The current study identified the antecedents of being an Internet scam victim and how it
impacts online privacy concerns and privacy protection behaviors. Structural equation modeling on
data from a survey of 11,534 Internet users revealed that one indicator of weak self-control (i.e.,
willingness of risky investments) and two indicators of routine Internet activities (i.e., online
shopping and opening emails from unknown sources) positively predicted being an Internet scam
victim. Subsequently, being an Internet scam victim predicted increased online privacy concerns,
which, in turn, predicted elevated privacy protection behaviors. Moreover, we found that being an
Internet scam victim mediated the effects of routine Internet activities on privacy protection
behaviors and that online privacy concerns mediated the effect of being an Internet scam on privacy
protection behaviors. Unlike most Internet privacy studies using protection motivation theory only,
the current study contributes to the understanding of the Internet scam victimization by
incorporating three new theories—extended parallel process model, self-control theory, and routine
activity theory. The research findings provided valuable implications for theory and practice related
to Internet scam processes and prevention.},
author = {Chen, Hongliang and Beaudoin, Christopher E. and Hong, Traci},
doi = {10.1016/j.chb.2017.01.003},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/ChenBeaudoinHong2017.pdf:pdf},
issn = {07475632},
journal = {Computers in Human Behavior},
pages = {291--302},
title = {{Securing online privacy: An empirical test on Internet scam victimization, online privacy
concerns, and privacy protection behaviors}},
url = {http://dx.doi.org/10.1016/j.chb.2017.01.003},
volume = {70},
year = {2017}
}
@article{Reuter2013,
abstract = {Does online social media undermine authoritarianism? We examine the conditions under
which online social networks can increase public awareness of electoral fraud in non-democracies.
We argue that a given online social network will only increase political awareness if it is first
politicized by elites. Using survey data from the 2011 Russian parliamentary elections, we show that
usage of Twitter and Facebook, which were politicized by opposition elites, significantly increased
respondents' perceptions of electoral fraud, while usage of Russia's domestic social networking
platforms, Vkontakte and Odnoklassniki, which were not politicized by opposition activists, had no
effect on perceptions of fraud. Our study elucidates the causes of post-election protest by
uncovering a mechanism through which knowledge of electoral fraud can become widespread.},
doi = {10.1017/S0007123413000203},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/10PS2012.pdf:pdf},
journal = {British Journal of Political Science},
number = {1},
pages = {29--51},
year = {2013}
}
@article{Shillair2015,
abstract = {Serious and pervasive threats confront all Internet users. Despite frequent reports of
losses due to computer security breaches, many individuals still do not follow basic safety
precautions. Understanding the mental processes that motivate users to follow safe practices is key
to strengthening this weak link in the security chain. Using protection motivation theory (PMT), a
model within the class of social cognitive theories (SCT), we develop and assess the value of
interventions strategies to enhance safe online behaviors. Furthermore, we integrate the concept of
personal responsibility within the PMT approach to better understand what motivates safe, online
behaviors. The online safety interventions were tested using a 2 (intervention strategy: manipulated)
× 2 (personal responsibility: manipulated) × 2 (knowledge: measured and blocked), between subjects
with random assignment to experimental conditions and online safety behavior intentions as the
targeted outcome. Based on SCT principles of behavior change, two intervention strategies were
developed, one that semantically explained behaviors, and one that offered the user an enactive
mastery exercise. The sample was cross-sectional and representative of Internet users. Results
showed a significant three-way interaction effect among personal responsibility, the intervention
strategy and prior knowledge. Enhancing a user's sense of personal responsibility appears to be a
necessary precursor to effective online safety interventions, but not necessarily sufficient; the
intervention strategy should match the knowledge level of the user to enhance online safety
behaviors. Potential strategies for designing effective online safety messages are discussed.},
author = {Shillair, Ruth and Cotten, Shelia R. and Tsai, Hsin Yi Sandy and Alhabash, Saleem and
Larose, Robert and Rifon, Nora J.},
doi = {10.1016/j.chb.2015.01.046},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/OnlinesafetybeginswithyouandmeConvincingInternetuserstoprotectthemselvesCHB.pdf:pdf},
issn = {07475632},
journal = {Computers in Human Behavior},
pages = {199--207},
title = {{Online safety begins with you and me: Convincing Internet users to protect themselves}},
url = {http://dx.doi.org/10.1016/j.chb.2015.01.046},
volume = {48},
year = {2015}
}
@inproceedings{Cross2016,
abstract = {Online fraud is a global problem. Millions of individuals worldwide are losing money and
experiencing the devastation associated with becoming a victim of online fraud. In 2014, Australians
reported losses of {\$}82 million as a result of online fraud to the Australian Competition and
Consumer Commission (ACCC). Given that the ACCC is one of many agencies that receives victim
complaints, and the extent of under‐reporting of online fraud, this figure is likely to represent only a
fraction of the actual monetary losses incurred. The successful policing of online fraud is hampered
by its transnational nature, the prevalence of false/stolen identities used by offenders, and a lack of
resources available to investigate offences. In addition, police are restricted by the geographical
boundaries of their own jurisdictions which conflicts with the lack of boundaries afforded to
offenders by the virtual world. In response to this, Australia is witnessing the emergence of victim‐
oriented policing approaches to counter online fraud victimisation. This incorporates the use of
financial intelligence as a tool to proactively notify potential victims of online fraud. Using a variety
of Australian examples, this paper documents the history to this new approach and considers the
significance that such a shift represents to policing in a broader context. It also details the value that
this approach can have to both victims and law enforcement agencies. Overall, it is argued that a
victim‐oriented approach to policing online fraud can have substantial benefits to police and victims
alike.},
author = {Cross, C.},
booktitle = {Crime, Justice and Social Democracy: Proceedings of the 3rd International Conference
2015},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/Cross{\_}CJRC{\_}Conference{\_}paper.pdf:pdf},
pages = {1--12},
publisher = {Crime and Justice Research Centre, QUT},
title = {{Policing online fraud in Australia: The emergence of a victim-oriented approach}},
volume = {1},
year = {2016}
}
@article{Mundra2014,
doi = {10.1007/978-81-322-1665-0},
file = {:C$\backslash$:/Users/Asus/Downloads/Documents/OHMforOnlineFraudPreventionandDetection.pdf:pdf},
isbn = {978-81-322-1664-3},
issn = {2194-5357},
keywords = {auction fraud, credit card fraud, HMM, identity theft fraud},
url = {http://link.springer.com/10.1007/978-81-322-1665-0},
volume = {243},
year = {2014}
}
@article{Adel2015,
abstract = {The computer has become indispensable in today's life, and it is widely used in many
fields of life such as commerce, education, industry{\ldots}etc. The computer saves time in regarding
to help solving complex, long, repeated processes in a short time and high speed. As the software
programs need to handle these features, many companies produce software programs to facilitate
the works for administrations, banks, offices, etc. Moreover, software has been in used for analyzing
information or solving problems for more than four decades. Creating a suitable work to develop
programs of high quality is the main goal of the software engineering. Usually, clients seek the
assistance from computer and software engineers to solve and handle their problems. There are
various models have been widely in used to develop software products. Common models will be
described in this paper.},
issn = {1694-0784},
journal = {International Journal of Computer Science Issues},
number = {1},
pages = {106--111},
title = {{A Comparison Between Three SDLC Models Waterfall Model, Spiral Model, and
Incremental/Iterative Model}},
url = {https://www.academia.edu/10793943/A{\_}Comparison{\_}Between{\_}Three{\_}SDLC{\_}Models{\_}Waterfall{\_}Model{\_}Spiral{\_}Model{\_}and{\_}Incremental{\_}Iterative{\_}Model},
volume = {12},
year = {2015}
}
@article{Bahrudin2016,
author = {Bahrudin, Ida Aryanie},
title = {A Comparative Study of User Acceptance Testing Between Modified Waterfall Model and Extreme Programming in Small-Scale Project},
year = {2016},
comment = {NOTE(review): the original title field also contained the author name and the cover-page text "Status Confirmation for Master's Dissertation"; this appears to be a master's dissertation rather than an article -- confirm entry type and add school if so.}
}
@article{Adenowo2013,
abstract = {This paper discusses two main software engineering methodologies to system
development, the waterfall model and the object- oriented approach. A review of literature reveals
that waterfall model uses linear approach and is only suitable for sequential or procedural design. In
waterfall, errors can only be detected at the end of the whole process and it may be difficult going
back to repeat the entire process because the processes are sequential. Also, software based on
waterfall approach is difficult to maintain and upgrade due to lack of integration between software
components. On the other hand, the Object Oriented approach enables software systems to be
developed as integration of software objects that work together to make a holistic and functional
system. The software objects are independent of each other, allowing easy upgrading and
maintenance of software codes. The paper also highlighted the merits and demerits of each of the
approaches. This work concludes with the appropriateness of each approach in relation to the
complexity of the problem domain.},
issn = {2229-5518},
number = {7},
pages = {427--434},
title = {Software Engineering Methodologies: A Review of the Waterfall Model and Object-Oriented Approach},
url = {http://www.ijser.org/researchpaper{\%}5CSoftware-Engineering-Methodologies-A-Review-of-the-Waterfall-Model-and-ObjectOriented-Approach.pdf},
volume = {4},
year = {2013}
}
@article{Bindal2015,
author = {Bindal, Nancy and Mehta, Aanchal},
number = {03},
pages = {100--109},
volume = {02},
year = {2015},
comment = {NOTE(review): title and journal are missing from the exported record -- recover from the source.}
}
@article{CongVinh2016,
abstract = {New computing systems are currently at crucial point in their evolution: autonomic
systems (ASs), which are inspired by the human autonomic nervous system. Autonomic computing
(AC) is characterized by self-∗ such as self-configuration, self-healing, self-optimization, self-
protection and more which run simultaneously in ASs. Hence, self-∗ is a form of concurrent
processing in ASs. Taking advantage of categorical structures we establish, in this paper, a firm
formal basis for specifying concurrency of self-∗ in ASs.},
doi = {10.1016/j.future.2015.04.017},
file = {:C\:/Users/Hp/Downloads/j.future.2015.04.017.pdf:pdf},
issn = {0167739X},
journal = {Future Generation Computer Systems},
pages = {140--152},
url = {http://dx.doi.org/10.1016/j.future.2015.04.017},
volume = {56},
year = {2016}
}
@inproceedings{Miller2018,
author = {Miller, A. and Giaquinta, R. and Hoffmann, R. and Ireland, M. and Norman, G.},
file = {:C\:/Users/Hp/Downloads/d4cb8206-f37c-0ed4-8cc2-df820868aac1.pdf:pdf},
booktitle = {Tenth NASA Formal Methods Symposium (NFM 2018), Newport News, VA, USA, 17-19 Apr 2018},
number = {August},
pages = {17--19},
year = {2018},
comment = {NOTE(review): pages 17--19 duplicates the event dates and is likely scraper junk; title is missing -- verify against the proceedings.}
}
@article{Fraser2020,
abstract = {We show how detailed simulation models and abstract Markov models can be developed
collaboratively to generate and implement effective controllers for autonomous agent search and
retrieve missions. We introduce a concrete simulation model of an Unmanned Aerial Vehicle (UAV).
We then show how the probabilistic model checker PRISM is used for optimal strategy synthesis for a
sequence of scenarios relevant to UAVs and potentially other autonomous agent systems. For each
scenario we demonstrate how it can be modelled using PRISM, give model checking statistics and
present the synthesised optimal strategies. We then show how our strategies can be returned to
the controller for the simulation model and provide experimental results to demonstrate the
effectiveness of one such strategy. Finally we explain how our models can be adapted, using
symmetry, for use on larger search areas, and demonstrate the feasibility of this approach.},
author = {Fraser, Douglas and Giaquinta, Ruben and Hoffmann, Ruth and Ireland, Murray and Miller,
Alice and Norman, Gethin},
doi = {10.1007/s00165-020-00508-1},
file = {:C\:/Users/Hp/Downloads/cc1bc2cd-fc41-0c45-ab5e-0072aaa984aa.pdf:pdf},
issn = {1433299X},
journal = {Formal Aspects of Computing},
number = {2-3},
pages = {157--186},
url = {https://doi.org/10.1007/s00165-020-00508-1},
volume = {32},
year = {2020}
}
@article{Carmichael2019,
abstract = {Complex Adaptive Systems (CAS) is a framework for studying, explaining, and
understanding systems of agents that collectively combine to form emergent, global level properties.
These agents can be nearly anything, from ants or bees, to brain cells, to water particles in a weather
pattern, to groups of cars or people in a city or town. These agents produce emergent patterns via
correlated feedbacks throughout the system, feedbacks that create and fortify a basin of attraction:
a persistent pattern of behavior that itself is outside of equilibrium. There is also an ever-growing
understanding that similar features in complex systems across a diversity of domains may indicate
similar fundamental principles at work, and as such there is often utility in using the key features of
one system to gain insight into the workings of seemingly distinct fields. Here we also include a brief
review of multiple models that attempt to do exactly this, including some of our previous work.
Though there is not complete agreement on all aspects and definitions in this field, this introduction
also summarizes our understanding of what defines a CAS, including the concepts of complexity,
agents, adaptation, feedbacks, emergence, and self-organization; and places this definition and its
key features in a historical context. Finally we briefly discuss two of the common biases often found
that the tools of CAS can help counteract: the hierarchical bias, assuming a strong top-down
organization; and the complexity bias, the tendency to assign complicated features to agents that
turn out to be quite simple.},
doi = {10.1007/978-3-030-20309-2_1},
file = {:C\:/Users/Hp/Downloads/Documents/978-3-030-20309-2_1.pdf:pdf},
isbn = {9783030203092},
issn = {18600840},
pages = {1--16},
year = {2019}
}
@inproceedings{Porter2020,
abstract = {Major research venues on autonomic and self-adaptive systems have been active for 16
years, exploring and building on the seminal vision of autonomic computing in 2003. We study the
current trajectory and progress of the research field towards this vision, surveying the research
questions that are asked by researchers and the methodological practice that they employ in order
to answer these questions. We survey contributions under this lens across the three main venues for
primary research in autonomic and self-adaptive systems work: ICAC, SASO, and SEAMS. We
examine the last three years of contributions from each venue, totaling 210 publications, to gain an
understanding of the dominant current research questions and methodological practice - and what
this shows us about the progress of the field. Our major findings include: (i) most research questions
still focus one level below the highest autonomy level vision; (ii) methodological practice is split
almost evenly between real-world experiments and simulation; (iii) a high level of positive results
bias exists in publications; and (iv) there are low levels of repeatability across most contributions.},
author = {Porter, Barry and Filho, Roberto Rodrigues and Dean, Paul},
doi = {10.1109/ACSOS49614.2020.00039},
file = {:C\:/Users/Hp/Downloads/Documents/ACSOS49614.2020.00039.pdf:pdf},
isbn = {9781728172774},
booktitle = {Proceedings - 2020 IEEE International Conference on Autonomic Computing and Self-Organizing Systems, ACSOS 2020},
pages = {168--177},
year = {2020}
}
@article{Viroli2018,
abstract = {Collective adaptive systems are an emerging class of networked computational systems
particularly suited for application domains such as smart cities, complex sensor networks, and the
Internet of Things. These systems tend to feature large-scale, heterogeneity of communication
model (including opportunistic peer-to-peer wireless interaction) and require inherent self-
adaptiveness properties to address unforeseen changes in operating conditions. In this context, it is
extremely difficult (if not seemingly intractable) to engineer reusable pieces of distributed behaviour
to make them provably correct and smoothly composable. Building on the field calculus, a
computational model (and associated toolchain) capturing the notion of aggregate network-level
computation, we address this problem with an engineering methodology coupling formal theory and
computer simulation. On the one hand, functional properties are addressed by identifying the
largest-to-date field calculus fragment generating self-stabilising behaviour, guaranteed to
eventually attain a correct and stable final state despite any transient perturbation in state or
topology and including highly reusable building blocks for information spreading, aggregation, and
time evolution. On the other hand, dynamical properties are addressed by simulation, empirically
evaluating the different performances that can be obtained by switching between implementations
of building blocks with provably equivalent functional properties. Overall, our methodology sheds
light on how to identify core building blocks of collective behaviour and how to select
implementations that improve system performance while leaving overall system function and
resiliency properties unchanged.},
archivePrefix = {arXiv},
arxivId = {1711.08297},
author = {Viroli, Mirko and Audrito, Giorgio and Beal, Jacob and Damiani, Ferruccio and Pianini,
Danilo},
doi = {10.1145/3177774},
eprint = {1711.08297},
file = {:C\:/Users/Hp/Downloads/Documents/3177774.pdf:pdf},
issn = {15581195},
journal = {ACM Transactions on Modeling and Computer Simulation},
number = {2},
volume = {28},
year = {2018}
}
@article{Aldini2018,
abstract = {Collective adaptive systems (CAS) often adopt cooperative operating strategies to run
distributed decision-making mechanisms. Sometimes, their effectiveness massively relies on the
collaborative nature of individuals' behavior. Stimulating cooperation while preventing selfish and
malicious behaviors is the main objective of trust and reputation models. These models are largely
used in distributed, peer-to-peer environments and, therefore, represent an ideal framework for
improving the robustness, as well as security, of CAS. In this article, we propose a formal framework
for modeling and verifying trusted CAS. From the modeling perspective, mobility, adaptiveness, and
trust-based interaction represent the main ingredients used to define a flexible and easy-to-use
paradigm. Concerning analysis, formal automated techniques based on equivalence and model
checking support the prediction of the CAS behavior and the verification of the underlying trust and
reputation models, with the specific aim of estimating robustness with respect to the typical attacks
conducted against webs of trust.},
doi = {10.1145/3155337},
file = {:C\:/Users/Hp/Downloads/Documents/3155337.pdf:pdf},
issn = {15581195},
journal = {ACM Transactions on Modeling and Computer Simulation},
number = {2},
volume = {28},
year = {2018}
}
@incollection{Gervasi2019,
abstract = {A long stream of research in RE has been devoted to analyzing the occurrences and
consequences of ambiguity in requirements documents. Ambiguity often occurs in documents, most
often in natural language (NL) ones, but occasionally also in formal specifications, be it because of
abstraction, or of imprecise designation of which real-world entities are denotated by certain
expressions. In many of those studies, ambiguity has been considered a defect to be avoided. In this
paper, we investigate the nature of ambiguity, and advocate that the simplistic view of ambiguity as
merely a defect in the document does not do justice to the complexity of this phenomenon. We
offer a more extensive analysis, based on the multiple linguistic sources of ambiguity, and present a
list of real-world cases, both in written matter and in oral interviews, that we analyze based on our
framework. We hope that a better understanding of the phenomenon can help in the analysis of
practical experiences and in the design of more effective methods to detect, mark and handle
ambiguity.},
author = {Gervasi, Vincenzo and Ferrari, Alessio and Zowghi, Didar and Spoletini, Paola},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
doi = {10.1007/978-3-030-30985-5_12},
file = {:C\:/Users/Hp/Downloads/Documents/978-3-030-30985-5.pdf:pdf},
isbn = {9783030309848},
issn = {16113349},
pages = {191--210},
year = {2019}
}
@inproceedings{Farahani2016,
abstract = {In this paper we analyze the self-∗ properties in the collective systems with respect to its
relationship with other concepts like emergence and propagation, then we will consider the
manipulations that should be made in order to make the MAPE-K loop compliant with the
characteristics of collective systems.},
doi = {10.1145/2968219.2979127},
file = {:C\:/Users/Hp/Downloads/Documents/2968219.2979127.pdf:pdf},
isbn = {9781450344623},
booktitle = {UbiComp 2016 Adjunct - Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing},
pages = {1309--1314},
year = {2016}
}
@article{Szabo2017,
abstract = {Undesired or unexpected properties are frequent, as large-scale complex systems with
nonlinear interactions are being designed and implemented to answer real-life scenarios. Identifying
these behaviors as they happen as well as determining whether these behaviors are beneficial for
the system is crucial to highlight potential faults or undesired side effects early in the development
of a system, thus promising significant cost reductions. Beyond the inherent challenges in identifying
these behaviors, the problem of validating the observed emergent behavior remains challenging, as
this behavior is, by definition, not expected or envisaged by system designers. This chapter presents
an overview of existing work for the automated detection of emergent behavior and discusses some
potential solutions to the challenge of validating emergent behavior. Building on the idea of
comparing an identified emergent behavior with previously seen behaviors, we propose a two-step
process for validating emergent behavior. Our initial experiments using a Flock of Birds model show
the promise of this approach but also highlight future avenues of research.},
doi = {10.1007/978-3-319-64182-9_4},
isbn = {9783319641829},
pages = {47--62},
year = {2017},
comment = {NOTE(review): the abstract says "This chapter presents", so this is likely a book chapter (@incollection) -- confirm booktitle before changing the entry type.}
}
@article{Kwiatkowska2022,
abstract = {The design and control of autonomous systems that operate in uncertain or adversarial
environments can be facilitated by formal modeling and analysis. Probabilistic model checking is a
technique to automatically verify, for a given temporal logic specification, that a system model
satisfies the specification, as well as to synthesize an optimal strategy for its control. This method
has recently been extended to multiagent systems that exhibit competitive or cooperative behavior
modeled via stochastic games and synthesis of equilibria strategies. In this article, we provide an
overview of probabilistic model checking, focusing on models supported by the PRISM and PRISM-
games model checkers. This overview includes fully observable and partially observable Markov
decision processes, as well as turn-based and concurrent stochastic games, together with associated
probabilistic temporal logics. We demonstrate the applicability of the framework through illustrative
examples from autonomous systems. Finally, we highlight research challenges and suggest directions
for future work in this area.},
archivePrefix = {arXiv},
arxivId = {2111.10630},
doi = {10.1146/annurev-control-042820-010947},
eprint = {2111.10630},
file = {:C\:/Users/Hp/Downloads/Documents/c69d3324-8e9c-07ce-8b8d-5b990f4a3e27.pdf:pdf},
issn = {2573-5144},
journal = {Annual Review of Control, Robotics, and Autonomous Systems},
number = {1},
pages = {1--26},
volume = {5},
year = {2022}
}
@article{Liu2010,
abstract = {During the last two decades, model checking has emerged as an effective system
analysis technique complementary to simulation and testing. Many model checking algorithms and
state space reduction techniques have been proposed. Although it is desirable to have dedicated
model checkers for every language (or application domain), implementing one with effective
reduction techniques is rather challenging. In this work, we present a generic and extensible
framework PAT, which facilitates users to build customized model checkers. PAT provides a library of
state-of-art model checking algorithms as well as support for customizing language syntax,
semantics, state space reduction techniques, graphic user interfaces, and even domain specific
abstraction techniques. Based on this design, model checkers for concurrent systems, real-time
systems, probabilistic systems and Web Services are developed inside the PAT framework, which
demonstrates the practicality and scalability of our approach.},
author = {Liu, Yang and Sun, Jun and Dong, Jin Song},
isbn = {9780199732579},
number = {1},
pages = {237--256},
volume = {3},
year = {2010}
}
@inproceedings{Michiels2002,
abstract = {A major problem in todays Internet servers is that they suffer from extreme peak loads.
Traditional (operating) systems are designed to perform extremely well under heavy load conditions.
However, it is not feasible to over-provision resources only to support peak loads. A key factor to
deal with such peak loads is internal concurrency control. We have developed a component based
architecture (DMonA), which allows to adapt, internal concurrency according to measured
throughput. Performance tests show that DMonA outperforms traditional approaches, while it, is
still very manageable thanks to the underlying DiPS component architecture.},
author = {Michiels, Sam and Desmet, Lieven and Janssens, Nico and Mahieu, Tom and Verbaeten,
Pierre},
file = {:C\:/Users/Hp/Downloads/582128.582137.pdf:pdf},
isbn = {1581136099},
booktitle = {Proceedings of the first ACM SIGSOFT Workshop on Self-Healing Systems (WOSS'02)},
pages = {43--48},
year = {2002}
}
@article{DeLaIglesia2015,
abstract = {Mobile technologies have emerged as facilitators in the learning process, extending
traditional classroom activities. However, engineering mobile learning applications for outdoor
usage poses severe challenges. The requirements of these applications are challenging, as many
different aspects need to be catered, such as resource access and sharing, communication between
peers, group management, activity flow, etc. Robustness is particularly important for learning
scenarios to guarantee undisturbed and smooth user experiences, pushing the technological aspects
in the background. Despite significant research in the field of mobile learning, very few efforts have
focused on collaborative mobile learning requirements from a software engineering perspective.
This paper focuses on aspects of the software architecture, aiming to address the challenges related
to resource sharing in collaborative mobile learning activities. This includes elements such as
autonomy for personal interactive learning, richness for large group collaborative learning (indoor
and outdoor), as well as robustness of the learning system. Additionally, we present self-adaptation
as a solution to mitigate risks of resource unavailability and organization failures that arise from
environment and system dynamism. Our evaluation provides indications regarding the system
correctness with respect to resource sharing and collaboration concerns, and offers qualitative
evidence of self-adaptation benefits for collaborative mobile learning applications.},
author = {{De La Iglesia}, Didac Gil and Calder{\'{o}}n, Juan Felipe and Weyns, Danny and Milrad,
Marcelo and Nussbaum, Miguel},
doi = {10.1109/TLT.2014.2367493},
file = {:C\:/Users/Hp/Downloads/TLT.2014.2367493.pdf:pdf},
issn = {19391382},
journal = {IEEE Transactions on Learning Technologies},
number = {2},
pages = {158--172},
title = {A Self-Adaptive Multi-Agent System Approach for Collaborative Mobile Learning},
volume = {8},
year = {2015}
}
@article{Petrillo2018,
abstract = {The development of automated and coordinated driving systems (platooning) is an hot
topic today for vehicles and it represents a challenging scenario that heavily relies on distributed
control in the presence of wireless communication network. To actuate platooning in a safe way it is
necessary to design controllers able to effectively operate on informations exchanged via Inter-
Vehicular Communication (IVC) systems despite the presence of unavoidable communication
impairments, such as multiple time-varying delays that affect communication links. To this aim in this
paper we propose a novel distributed adaptive collaborative control strategy that exploits
information coming from connected vehicles to achieve leader synchronization and we analytically
demonstrate its stability with a Lyapunov-Krasovskii approach. The effectiveness of the proposed
strategy is shown via numerical simulations in PLEXE, a state of the art IVC and mobility simulator
that includes basic building blocks for platooning.},
author = {Petrillo, Alberto and Salvi, Alessandro and Santini, Stefania and Valente, Antonio Saverio},
doi = {10.1016/j.trc.2017.11.009},
file = {:C\:/Users/Hp/Downloads/petrillo2018.pdf:pdf},
issn = {0968090X},
journal = {Transportation Research Part C: Emerging Technologies},
pages = {372--392},
publisher = {Elsevier},
title = {Adaptive Multi-Agents Synchronization for Collaborative Driving of Autonomous Vehicles with Multiple Communication Delays},
url = {https://doi.org/10.1016/j.trc.2017.11.009},
volume = {86},
year = {2018}
}
@article{BenMahfoudh2020,
abstract = {Context-aware, pervasive systems, mobile devices, intelligent virtual assistants activating
services or controlling connected devices are pervading our everyday life. These systems rely on
centralized services provided by servers in a cloud gathering all requests, performing pre-defined
computations and involving pre-defined devices. Large-scale scenarios, involving unanticipated
devices, adaptation to dynamically changing conditions, call for alternative solutions favoring edge
computing and decentralized behavior. For several years, we have worked on a new type of
applications, built and spontaneously composed on-demand. Applications arise from the interactions
of multiple sensors and devices, working together as a decentralized collective adaptive system. Our
solution relies on a learning-based coordination model providing decentralized communication
platforms among agents working on behalf of heterogeneous devices. Each device provides few
simple services and data regarding itself (properties and capabilities). In this article, we discuss first
the design of complex services, arising from the spontaneous self-composition of simpler services.
Second, we present our learning-based coordination model combining coordination and
reinforcement learning, and how this approach ensures reliable self-composition of services in terms
of functionality and expected quality of services. On the basis of a humanitarian scenario, we show
the feasibility of the approach and discuss our current implementation. Preliminary results show
convergence toward learning and correct functionality. Spontaneous self-composition and learning
provide a self-adaptive solution for creating on-demand complex services evolving in highly dynamic
scenarios comprising large numbers of connected devices.},
author = {{Ben Mahfoudh}, Houssem and {Di Marzo Serugendo}, Giovanna and Naja, Nabil and
Abdennadher, Nabil},
doi = {10.1007/s10009-020-00557-0},
file = {:C\:/Users/Hp/Downloads/s10009-020-00557-0.pdf:pdf},
issn = {14332787},
number = {4},
pages = {417--436},
url = {https://doi.org/10.1007/s10009-020-00557-0},
volume = {22},
year = {2020}
}
@comment{Removed duplicate entry Liu2010: an identical entry with the same citation key appears earlier in this file; duplicate keys are an error for BibTeX/Biber.}
@comment{Removed duplicate entry CongVinh2016: an identical entry with the same citation key appears earlier in this file.}
@comment{Removed duplicate entry Miller2018: an identical entry with the same citation key appears earlier in this file.}
@comment{Removed duplicate entry Fraser2020: an identical entry with the same citation key appears earlier in this file.}
@comment{Removed duplicate entry Carmichael2019: the same citation key appears earlier in this file with a superset of these fields (the earlier copy also carries the doi).}
@comment{Removed duplicate entry Porter2020: an identical entry with the same citation key appears earlier in this file.}
@article{Aldini2018,
abstract = {Collective adaptive systems (CAS) often adopt cooperative operating strategies to run
distributed decision-making mechanisms. Sometimes, their effectiveness massively relies on the
collaborative nature of individuals' behavior. Stimulating cooperation while preventing selfish and
malicious behaviors is the main objective of trust and reputation models. These models are largely
used in distributed, peer-to-peer environments and, therefore, represent an ideal framework for
improving the robustness, as well as security, of CAS. In this article, we propose a formal framework
for modeling and verifying trusted CAS. From the modeling perspective, mobility, adaptiveness, and
trust-based interaction represent the main ingredients used to define a flexible and easy-to-use
paradigm. Concerning analysis, formal automated techniques based on equivalence and model
checking support the prediction of the CAS behavior and the verification of the underlying trust and
reputation models, with the specific aim of estimating robustness with respect to the typical attacks
conducted against webs of trust.},
doi = {10.1145/3155337},
file = {:C\:/Users/Hp/Downloads/Documents/3155337.pdf:pdf},
issn = {15581195},
number = {2},
volume = {28},
year = {2018}
}
@article{Viroli2018,
abstract = {Collective adaptive systems are an emerging class of networked computational systems
particularly suited for application domains such as smart cities, complex sensor networks, and the
Internet of Things. These systems tend to feature large-scale, heterogeneity of communication
model (including opportunistic peer-to-peer wireless interaction) and require inherent self-
adaptiveness properties to address unforeseen changes in operating conditions. In this context, it is
extremely difficult (if not seemingly intractable) to engineer reusable pieces of distributed behaviour
to make them provably correct and smoothly composable. Building on the field calculus, a
computational model (and associated toolchain) capturing the notion of aggregate network-level
computation, we address this problem with an engineering methodology coupling formal theory and
computer simulation. On the one hand, functional properties are addressed by identifying the
largest-to-date field calculus fragment generating self-stabilising behaviour, guaranteed to
eventually attain a correct and stable final state despite any transient perturbation in state or
topology and including highly reusable building blocks for information spreading, aggregation, and
time evolution. On the other hand, dynamical properties are addressed by simulation, empirically
evaluating the different performances that can be obtained by switching between implementations
of building blocks with provably equivalent functional properties. Overall, our methodology sheds
light on how to identify core building blocks of collective behaviour and how to select
implementations that improve system performance while leaving overall system function and
resiliency properties unchanged.},
archivePrefix = {arXiv},
arxivId = {1711.08297},
author = {Viroli, Mirko and Audrito, Giorgio and Beal, Jacob and Damiani, Ferruccio and Pianini,
Danilo},
doi = {10.1145/3177774},
eprint = {1711.08297},
file = {:C\:/Users/Hp/Downloads/Documents/3177774.pdf:pdf},
issn = {15581195},
number = {2},
volume = {28},
year = {2018}
}
@incollection{Gervasi2019,
abstract = {A long stream of research in RE has been devoted to analyzing the occurrences and
consequences of ambiguity in requirements documents. Ambiguity often occurs in documents, most
often in natural language (NL) ones, but occasionally also in formal specifications, be it because of
abstraction, or of imprecise designation of which real-world entities are denotated by certain
expressions. In many of those studies, ambiguity has been considered a defect to be avoided. In this
paper, we investigate the nature of ambiguity, and advocate that the simplistic view of ambiguity as
merely a defect in the document does not do justice to the complexity of this phenomenon. We
offer a more extensive analysis, based on the multiple linguistic sources of ambiguity, and present a
list of real-world cases, both in written matter and in oral interviews, that we analyze based on our
framework. We hope that a better understanding of the phenomenon can help in the analysis of
practical experiences and in the design of more effective methods to detect, mark and handle
ambiguity.},
author = {Gervasi, Vincenzo and Ferrari, Alessio and Zowghi, Didar and Spoletini, Paola},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial
Intelligence and Lecture Notes in Bioinformatics)},
doi = {10.1007/978-3-030-30985-5_12},
file = {:C\:/Users/Hp/Downloads/Documents/978-3-030-30985-5.pdf:pdf},
isbn = {9783030309848},
issn = {16113349},
pages = {191--210},
year = {2019}
}
@article{Moreno1999,
abstract = {Purpose – This paper aims to focus on the relationships between the levels of knowledge
and the type of knowledge transfer approaches, and the relationships between the types of
knowledge and the knowledge transfer approaches which were adopted in a study of knowledge
transfer from a US-based technical support center to an offshore support center in China.
Design/methodology/approach – The research was conducted as an interpretive case study. Three
techniques (i.e. document review, participant observation, and semi-structured interviews) were
employed for data collection in the field. Findings – The findings indicate that the lower the level of
recipient absorptive and retentive capacity, the more difficulty the recipient will have in acquiring
tacit and complex types of knowledge, and the more formal structured knowledge transfer approach
the recipient will need to adopt. The results identify that “structured transfer stages” was used by
novices to transfer embrained and encoded knowledge, while “unstructured copy” was widely
adopted by advanced beginners to transfer encoded and embodied knowledge, “unstructured
adaptation” was mainly utilized by those at the competence level to transfer embodied and
embedded knowledge, and “unstructured fusion” was preferred by recipients at the proficiency level
to transfer embodied and embedded knowledge as well. Practical implications – The findings
contribute to an understanding of the knowledge transfer processes required when US-based firms
outsource business processes to offshore countries with significantly different cultural contexts. The
findings also reflect the testing of possible analytical structures for understanding the processes of
knowledge transfer, and the mechanisms for knowledge transfer in a cross-cultural business context.
Originality/value – The paper provides new insights into the knowledge transfer process for different
levels of knowledge acquisition in a cross-cultural business context},
doi = {10.1108/09593849910301612},
file = {:C\:/Users/Hp/Downloads/Documents/MTS.2020.3012324.pdf:pdf},
issn = {0959-3845},
number = {4},
pages = {359--389},
volume = {12},
year = {1999}
}
@article{Farahani2016,
abstract = {In this paper we analyze the self-* properties in the collective systems with respect to its
relationship with other concepts like emergence and propagation, then we will consider the
manipulations that should be made in order to make the MAPE-K loop compliant with the
characteristics of collective systems.},
doi = {10.1145/2968219.2979127},
file = {:C\:/Users/Hp/Downloads/Documents/2968219.2979127.pdf:pdf},
isbn = {9781450344623},
journal = {UbiComp 2016 Adjunct - Proceedings of the 2016 ACM International Joint Conference on
Pervasive and Ubiquitous Computing},
pages = {1309--1314},
year = {2016}
}
@article{Szabo2017,
abstract = {Undesired or unexpected properties are frequent, as large-scale complex systems with
nonlinear interactions are being designed and implemented to answer real-life scenarios. Identifying
these behaviors as they happen as well as determining whether these behaviors are beneficial for
the system is crucial to highlight potential faults or undesired side effects early in the development
of a system, thus promising significant cost reductions. Beyond the inherent challenges in identifying
these behaviors, the problem of validating the observed emergent behavior remains challenging, as
this behavior is, by definition, not expected or envisaged by system designers. This chapter presents
an overview of existing work for the automated detection of emergent behavior and discusses some
potential solutions to the challenge of validating emergent behavior. Building on the idea of
comparing an identified emergent behavior with previously seen behaviors, we propose a two-step
process for validating emergent behavior. Our initial experiments using a Flock of Birds model show
the promise of this approach but also highlight future avenues of research.},
doi = {10.1007/978-3-319-64182-9_4},
isbn = {9783319641829},
pages = {47--62},
year = {2017}
}
@article{Kwiatkowska2022,
abstract = {The design and control of autonomous systems that operate in uncertain or adversarial
environments can be facilitated by formal modeling and analysis. Probabilistic model checking is a
technique to automatically verify, for a given temporal logic specification, that a system model
satisfies the specification, as well as to synthesize an optimal strategy for its control. This method
has recently been extended to multiagent systems that exhibit competitive or cooperative behavior
modeled via stochastic games and synthesis of equilibria strategies. In this article, we provide an
overview of probabilistic model checking, focusing on models supported by the PRISM and PRISM-
games model checkers. This overview includes fully observable and partially observable Markov
decision processes, as well as turn-based and concurrent stochastic games, together with associated
probabilistic temporal logics. We demonstrate the applicability of the framework through illustrative
examples from autonomous systems. Finally, we highlight research challenges and suggest directions
for future work in this area.Expected final online publication date for the Annual Review of Control,
Robotics, and Autonomous Systems, Volume 5 is May 2022. Please see
http://www.annualreviews.org/page/journal/pubdates for revised estimates.},
archivePrefix = {arXiv},
arxivId = {2111.10630},
doi = {10.1146/annurev-control-042820-010947},
eprint = {2111.10630},
file = {:C\:/Users/Hp/Downloads/Documents/c69d3324-8e9c-07ce-8b8d-5b990f4a3e27.pdf:pdf},
issn = {2573-5144},
number = {1},
pages = {1--26},
volume = {5},
year = {2022}