### 2015

Adeleke JA, Moodley D. An Ontology for Proactive Indoor Environmental Quality Monitoring and Control. In: The 2015 Annual Conference of the South African Institute of Computer Scientists and Information Technologists (SAICSIT '15). New York, NY, USA; 2015.

Proactive monitoring and control of indoor air quality in homes where there are pregnant mothers and infants is essential for healthy development and well-being of children. This is especially true in low income households where cooking practices and exposure to harmful pollutants produced by nearby industries can negatively impact on a healthy home environment. Interdisciplinary expert knowledge is required to make sense of dynamic and complex environmental phenomena from multivariate low level sensor observations and high level human activities to detect health risks and enact decisions about control. We have developed an ontology for indoor environmental quality monitoring and control based on an ongoing real world case study in Durban, South Africa. We implemented an Indoor Air Quality Index and a thermal comfort index which can be automatically determined by reasoning on the ontology. We evaluated the ontology by populating it with test sensor data and showing how it can be queried to analyze health risk situations and determine control actions. Our evaluation shows that the ontology can be used for real world indoor monitoring and control applications in resource constrained settings.

@inproceedings{127,
author = {Jude Adeleke and Deshen Moodley},
title = {An Ontology for Proactive Indoor Environmental Quality Monitoring and Control},
abstract = {Proactive monitoring and control of indoor air quality in homes where there are pregnant mothers and infants is essential for healthy development and well-being of children. This is especially true in low income households where cooking practices and exposure to harmful pollutants produced by nearby industries can negatively impact on a healthy home environment. Interdisciplinary expert knowledge is required to make sense of dynamic and complex environmental phenomena from multivariate low level sensor observations and high level human activities to detect health risks and enact decisions about control. We have developed an ontology for indoor environmental quality monitoring and control based on an ongoing real world case study in Durban, South Africa. We implemented an Indoor Air Quality Index and a thermal comfort index which can be automatically determined by reasoning on the ontology. We evaluated the ontology by populating it with test sensor data and showing how it can be queried to analyze health risk situations and determine control actions. Our evaluation shows that the ontology can be used for real world indoor monitoring and control applications in resource constrained settings.},
year = {2015},
booktitle = {The 2015 Annual Conference of the South African Institute of Computer Scientists and Information Technologists (SAICSIT '15)},
month = sep,
isbn = {978-1-4503-3683-3},
}

Fischer B, Greene GJ. Interactive tag cloud visualization of software version control repositories. Software Visualization (VISSOFT). 2015. http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7332415&isnumber=7332403.

No Abstract

@inproceedings{126,
author = {Bernd Fischer and G. J. Greene},
title = {Interactive tag cloud visualization of software version control repositories},
abstract = {No Abstract},
year = {2015},
booktitle = {Software Visualization (VISSOFT)},
pages = {56--65},
url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7332415&isnumber=7332403},
}

Britz K, Klarman S. Ontology learning from interpretations in lightweight description logics. In: 25th International Conference on Inductive Logic Programming. ; 2015.

Data-driven elicitation of ontologies from structured data is a well-recognized knowledge acquisition bottleneck. The development of efficient techniques for (semi-)automating this task is therefore practically vital --- yet, hindered by the lack of robust theoretical foundations. In this paper, we study the problem of learning Description Logic TBoxes from interpretations, which naturally translates to the task of ontology learning from data. In the presented framework, the learner is provided with a set of positive interpretations (i.e., logical models) of the TBox adopted by the teacher. The goal is to correctly identify the TBox given this input. We characterize the key constraints on the models that warrant finite learnability of TBoxes expressed in selected fragments of the Description Logic $\mathcal{EL}$ and define corresponding learning algorithms.

@inproceedings{125,
author = {Katarina Britz and Simon Klarman},
title = {Ontology learning from interpretations in lightweight description logics},
abstract = {Data-driven elicitation of ontologies from structured data is a well-recognized knowledge acquisition bottleneck. The development of efficient techniques for (semi-)automating this task is therefore practically vital --- yet, hindered by the lack of robust theoretical foundations. In this paper, we study the problem of learning Description Logic TBoxes from interpretations, which naturally translates to the task of ontology learning from data. In the presented framework, the learner is provided with a set of positive interpretations (i.e., logical models) of the TBox adopted by the teacher. The goal is to correctly identify the TBox given this input. We characterize the key constraints on the models that warrant finite learnability of TBoxes expressed in selected fragments of the Description Logic $\mathcal{EL}$ and define corresponding learning algorithms.},
year = {2015},
booktitle = {25th International Conference on Inductive Logic Programming},
month = aug,
}

de Vries M, Gerber A, van der Merwe A. The enterprise engineering domain. In: Advances in Enterprise Engineering IX. Springer; 2015. http://link.springer.com/chapter/10.1007%2F978-3-319-19297-0_4.

No Abstract

@incollection{124,
author = {Marne de Vries and Aurona Gerber and Alta van der Merwe},
title = {The enterprise engineering domain},
abstract = {No Abstract},
year = {2015},
booktitle = {Advances in Enterprise Engineering IX},
publisher = {Springer},
isbn = {978-3-319-19296-3},
url = {http://link.springer.com/chapter/10.1007/978-3-319-19297-0_4},
}

van der Merwe A, Naidoo R, Gerber A. Understanding familiarization processes with Design Science Research: A social representation analysis. SACJ. 2015;65.

No Abstract

@article{123,
author = {Alta van der Merwe and Rennie Naidoo and Aurona Gerber},
title = {Understanding familiarization processes with Design Science Research: A social representation analysis},
abstract = {No Abstract},
year = {2015},
journal = {SACJ},
volume = {65},
issn = {2313-7835},
}

Kotzé P, van der Merwe A, Gerber A. Design Science Research as Research Approach in Doctoral Studies. In: AMCIS 2015, the 2015 Americas Conference on Information Systems. ; 2015.

No Abstract

@inproceedings{122,
author = {Paula Kotzé and Alta van der Merwe and Aurona Gerber},
title = {Design Science Research as Research Approach in Doctoral Studies},
abstract = {No Abstract},
year = {2015},
booktitle = {AMCIS 2015, the 2015 Americas Conference on Information Systems},
month = aug,
}

Gerber MC, Gerber A, van der Merwe A. The Conceptual Framework for Financial Reporting as a Domain Ontology. In: AMCIS 2015, the 2015 Americas Conference on Information Systems. ; 2015.

No Abstract

@inproceedings{121,
author = {Mathinus Gerber and Aurona Gerber and Alta van der Merwe},
title = {The Conceptual Framework for Financial Reporting as a Domain Ontology},
abstract = {No Abstract},
year = {2015},
booktitle = {AMCIS 2015, the 2015 Americas Conference on Information Systems},
month = aug,
}

Lapalme J, Gerber A, van der Merwe A, Zachman J, de Vries M, Hinkelmann K. Exploring the future of enterprise architecture: A Zachman perspective. Computers in Industry. 2015. http://www.sciencedirect.com/science/article/pii/S0166361515300166.

Today, and for the foreseeable future, organizations will face ever-increasing levels of complexity and uncertainty. Many believe that enterprise architecture (EA) will help organizations address such difficult terrain by guiding the design of adaptive and resilient enterprises and their information systems. This paper presents the “Grand Challenges” that we believe will challenge organizations in the future and need to be addressed by enterprise architecture. As a first step in using enterprise architecture as a solution for overcoming identified challenges, the Zachman Enterprise Architecture Framework is used to guide and structure the discussion. The paper presents the “Grand Challenges” and discusses promising theories and models for addressing them. In addition, current advances in the field of enterprise architecture that have begun to address the challenges will be presented. In conclusion, final thoughts on the future of enterprise architecture as a research field and a profession are offered.

@article{120,
author = {James Lapalme and Aurona Gerber and Alta van der Merwe and John Zachman and Marne de Vries and Knut Hinkelmann},
title = {Exploring the future of enterprise architecture: A {Zachman} perspective},
abstract = {Today, and for the foreseeable future, organizations will face ever-increasing levels of complexity and uncertainty. Many believe that enterprise architecture (EA) will help organizations address such difficult terrain by guiding the design of adaptive and resilient enterprises and their information systems. This paper presents the “Grand Challenges” that we believe will challenge organizations in the future and need to be addressed by enterprise architecture. As a first step in using enterprise architecture as a solution for overcoming identified challenges, the Zachman Enterprise Architecture Framework is used to guide and structure the discussion. The paper presents the “Grand Challenges” and discusses promising theories and models for addressing them. In addition, current advances in the field of enterprise architecture that have begun to address the challenges will be presented. In conclusion, final thoughts on the future of enterprise architecture as a research field and a profession are offered.},
year = {2015},
journal = {Computers in Industry},
publisher = {Elsevier},
url = {http://www.sciencedirect.com/science/article/pii/S0166361515300166},
}

Hinkelmann K, Gerber A, Karagiannis D, Thoenssen B, van der Merwe A, Woitsch R. A new paradigm for the continuous alignment of business and IT: Combining enterprise architecture modelling and enterprise ontology. Computers in Industry. 2015. http://www.sciencedirect.com/science/article/pii/S0166361515300270.

The paper deals with Next Generation Enterprise Information Systems in the context of Enterprise Engineering. The continuous alignment of business and IT in a rapidly changing environment is a grand challenge for today's enterprises. The ability to react timeously to continuous and unexpected change is called agility and is an essential quality of the modern enterprise. Being agile has consequences for the engineering of enterprises and enterprise information systems. In this paper a new paradigm for next generation enterprise information systems is proposed, which shifts the development approach of model-driven engineering to continuous alignment of business and IT for the agile enterprise. It is based on a metamodelling approach, which supports both human-interpretable graphical enterprise architecture and machine-interpretable enterprise ontologies. Furthermore, next generation enterprise information systems are described, which embed modelling tools and algorithms for model analysis

@article{119,
author = {Knut Hinkelmann and Aurona Gerber and Dimitris Karagiannis and Barbara Thoenssen and Alta van der Merwe and Robert Woitsch},
title = {A new paradigm for the continuous alignment of business and {IT}: Combining enterprise architecture modelling and enterprise ontology},
abstract = {The paper deals with Next Generation Enterprise Information Systems in the context of Enterprise Engineering. The continuous alignment of business and IT in a rapidly changing environment is a grand challenge for today's enterprises. The ability to react timeously to continuous and unexpected change is called agility and is an essential quality of the modern enterprise. Being agile has consequences for the engineering of enterprises and enterprise information systems. In this paper a new paradigm for next generation enterprise information systems is proposed, which shifts the development approach of model-driven engineering to continuous alignment of business and IT for the agile enterprise. It is based on a metamodelling approach, which supports both human-interpretable graphical enterprise architecture and machine-interpretable enterprise ontologies. Furthermore, next generation enterprise information systems are described, which embed modelling tools and algorithms for model analysis},
year = {2015},
journal = {Computers in Industry},
publisher = {Elsevier},
url = {http://www.sciencedirect.com/science/article/pii/S0166361515300270},
}

Thomas A, Gerber A, van der Merwe A. Visual Syntax of UML Class and Package Diagram Constructs as an Ontology. In: KEOD 2015, the 7th International Joint Conference on Knowledge Discovery, Knowledge Engineering and Knowledge Management. ; 2015.

Diagrams are often studied as visual languages with an abstract and a concrete syntax (concrete syntax is often referred to as visual syntax), where the latter contains the visual representations of the concepts in the former. A formal specification of the concrete syntax is useful in diagram processing applications as well as in achieving unambiguous understanding of diagrams. Unified Modeling Language (UML) is a commonly used modeling language to represent software models using its diagrams. Class and package diagrams are two diagrams of UML. The motivation for this work is twofold; UML lacks a formal visual syntax specification and ontologies are under-explored for visual syntax specifications. The work in this paper, therefore, explores using ontologies for visual syntax specifications by specifying the visual syntax of a set of UML class and package diagram constructs as an ontology in the Web ontology language, OWL. The reasoning features of the ontology reasoners are then used to verify the visual syntax specification. Besides formally encoding the visual syntax of numerous UML constructs, the work also demonstrates the general value of using OWL for visual syntax specifications.

@inproceedings{118,
author = {Anitta Thomas and Aurona Gerber and Alta van der Merwe},
title = {Visual Syntax of {UML} Class and Package Diagram Constructs as an Ontology},
abstract = {Diagrams are often studied as visual languages with an abstract and a concrete syntax (concrete syntax is often referred to as visual syntax), where the latter contains the visual representations of the concepts in the former. A formal specification of the concrete syntax is useful in diagram processing applications as well as in achieving unambiguous understanding of diagrams. Unified Modeling Language (UML) is a commonly used modeling language to represent software models using its diagrams. Class and package diagrams are two diagrams of UML. The motivation for this work is twofold; UML lacks a formal visual syntax specification and ontologies are under-explored for visual syntax specifications. The work in this paper, therefore, explores using ontologies for visual syntax specifications by specifying the visual syntax of a set of UML class and package diagram constructs as an ontology in the Web ontology language, OWL. The reasoning features of the ontology reasoners are then used to verify the visual syntax specification. Besides formally encoding the visual syntax of numerous UML constructs, the work also demonstrates the general value of using OWL for visual syntax specifications.},
year = {2015},
booktitle = {KEOD 2015, the 7th International Joint Conference on Knowledge Discovery, Knowledge Engineering and Knowledge Management},
month = nov,
isbn = {978-989-758-158-8},
}

Casini G, Straccia U, Meyer T. A Polynomial Time Subsumption Algorithm for EL⊥ under Rational Closure. 2015.

No Abstract

@misc{117,
author = {Giovanni Casini and Umberto Straccia and Thomas Meyer},
title = {A Polynomial Time Subsumption Algorithm for {$\mathcal{EL}_{\bot}$} under Rational Closure},
abstract = {No Abstract},
year = {2015},
}

Britz K, Klarman S. Towards unsupervised ontology learning from data. 2015. http://ceur-ws.org/Vol-1423/.

Data-driven elicitation of ontologies from structured data is a well-recognized knowledge acquisition bottleneck. The development of efficient techniques for (semi-)automating this task is therefore practically vital --- yet, hindered by the lack of robust theoretical foundations. In this paper, we study the problem of learning Description Logic TBoxes from interpretations, which naturally translates to the task of ontology learning from data. In the presented framework, the learner is provided with a set of positive interpretations (i.e., logical models) of the TBox adopted by the teacher. The goal is to correctly identify the TBox given this input. We characterize the key constraints on the models that warrant finite learnability of TBoxes expressed in selected fragments of the Description Logic EL and define corresponding learning algorithms.

@misc{116,
author = {Katarina Britz and Simon Klarman},
title = {Towards unsupervised ontology learning from data},
abstract = {Data-driven elicitation of ontologies from structured data is a well-recognized knowledge acquisition bottleneck. The development of efficient techniques for (semi-)automating this task is therefore practically vital --- yet, hindered by the lack of robust theoretical foundations. In this paper, we study the problem of learning Description Logic TBoxes from interpretations, which naturally translates to the task of ontology learning from data. In the presented framework, the learner is provided with a set of positive interpretations (i.e., logical models) of the TBox adopted by the teacher. The goal is to correctly identify the TBox given this input. We characterize the key constraints on the models that warrant finite learnability of TBoxes expressed in selected fragments of the Description Logic EL and define corresponding learning algorithms.},
year = {2015},
howpublished = {CEUR Workshop Proceedings, Vol. 1423},
issn = {1613-0073},
url = {http://ceur-ws.org/Vol-1423/},
}

Booth R. On the Entailment Problem for a Logic of Typicality. In: IJCAI 2015. ; 2015.

Propositional Typicality Logic (PTL) is a recently proposed logic, obtained by enriching classical propositional logic with a typicality operator. In spite of the non-monotonic features introduced by the semantics adopted for the typicality operator, the obvious Tarskian definition of entailment for PTL remains monotonic and is therefore not appropriate. We investigate different (semantic) versions of entailment for PTL, based on the notion of Rational Closure as defined by Lehmann and Magidor for KLM-style conditionals, and constructed using minimality. Our first important result is an impossibility theorem showing that a set of proposed postulates that at first all seem appropriate for a notion of entailment with regard to typicality cannot be satisfied simultaneously. Closer inspection reveals that this result is best interpreted as an argument for advocating the development of more than one type of PTL entailment. In the spirit of this interpretation, we define two primary forms of entailment for PTL and discuss their advantages and disadvantages.

@inproceedings{115,
author = {Richard Booth},
title = {On the Entailment Problem for a Logic of Typicality},
abstract = {Propositional Typicality Logic (PTL) is a recently proposed logic, obtained by enriching classical propositional logic with a typicality operator. In spite of the non-monotonic features introduced by the semantics adopted for the typicality operator, the obvious Tarskian definition of entailment for PTL remains monotonic and is therefore not appropriate.
We investigate different (semantic) versions of entailment for PTL, based on the notion of Rational Closure as defined by Lehmann and Magidor for KLM-style conditionals, and constructed using minimality. Our first important result is an impossibility theorem showing that a set of proposed postulates that at first all seem appropriate for a notion of entailment with regard to typicality cannot be satisfied simultaneously. Closer inspection reveals that
this result is best interpreted as an argument for advocating the development of more than one type of PTL entailment. In the spirit of this interpretation, we define two primary forms of entailment for PTL and discuss their advantages and disadvantages.},
year = {2015},
booktitle = {IJCAI 2015},
month = jul,
}

Casini G, Meyer T, Moodley K, Varzinczak I, Sattler U. Introducing Defeasibility into OWL Ontologies. In: The International Semantic Web Conference. ; 2015.

In recent years, various approaches have been developed for representing and reasoning with exceptions in OWL. The price one pays for such capabilities, in terms of practical performance, is an important factor that is yet to be quantified comprehensively. A major barrier is the lack of naturally occurring ontologies with defeasible features - the ideal candidates for evaluation. Such data is unavailable due to absence of tool support for representing defeasible features. In the past, defeasible reasoning implementations have favoured automated generation of defeasible ontologies. While this suffices as a preliminary approach, we posit that a method somewhere in between these two would yield more meaningful results. In this work, we describe a systematic approach to modify real-world OWL ontologies to include defeasible features, and we apply this to the Manchester OWL Repository to generate defeasible ontologies for evaluating our reasoner DIP (Defeasible-Inference Platform). The results of this evaluation are provided together with some insights into where the performance bottle-necks lie for this kind of reasoning. We found that reasoning was feasible on the whole, with surprisingly few bottle-necks in our evaluation.

@inproceedings{113,
author = {Giovanni Casini and Thomas Meyer and Kody Moodley and Ivan Varzinczak and U. Sattler},
title = {Introducing Defeasibility into {OWL} Ontologies},
abstract = {In recent years, various approaches have been developed for representing and reasoning with exceptions in OWL. The price one pays for such capabilities, in terms of practical performance, is an important factor that is yet to be quantified comprehensively. A major barrier is the lack of naturally occurring ontologies with defeasible features - the ideal candidates for evaluation. Such data is unavailable due to absence of tool support for representing defeasible features. In the past, defeasible reasoning implementations have favoured automated generation of defeasible ontologies. While this suffices as a preliminary approach, we posit that a method somewhere in between these two would yield more meaningful results. In this work, we describe a systematic approach to modify real-world OWL ontologies to include defeasible features, and we apply this to the Manchester OWL Repository to generate defeasible ontologies for evaluating our reasoner DIP (Defeasible-Inference Platform). The results of this evaluation are provided together with some insights into where the performance bottle-necks lie for this kind of reasoning. We found that reasoning was feasible on the whole, with surprisingly few bottle-necks in our evaluation.},
year = {2015},
booktitle = {The International Semantic Web Conference},
month = oct,
}

Rens G, Meyer T. A New Approach to Probabilistic Belief Change. In: International Florida AI Research Society Conference. ; 2015.

One way for an agent to deal with uncertainty about its beliefs is to maintain a probability distribution over the worlds it believes are possible. A belief change operation may recommend some previously believed worlds to become impossible and some previously disbelieved worlds to become possible. This work investigates how to redistribute probabilities due to worlds being added to and removed from an agent’s belief-state. Two related approaches are proposed and analyzed.

@inproceedings{111,
author = {Gavin Rens and Thomas Meyer},
title = {A New Approach to Probabilistic Belief Change},
abstract = {One way for an agent to deal with uncertainty about its beliefs is to maintain a probability distribution over the worlds it believes are possible. A belief change operation may recommend some previously believed worlds to become impossible and some previously disbelieved worlds to become possible. This work investigates how to redistribute probabilities due to worlds being added to and removed from an agent’s belief-state. Two related approaches are proposed and analyzed.},
year = {2015},
booktitle = {International Florida AI Research Society Conference},
pages = {582--587},
month = may,
isbn = {978-1-57735-730-8},
}

Crichton R, Pillay A, Moodley D. The Open Health Information Mediator: an Architecture for Enabling Interoperability in Low to Middle Income Countries. 2015;MSc.

Interoperability and system integration are central problems that limit the effective use of health information systems to improve efficiency and effectiveness of health service delivery. There is currently no proven technology that provides a general solution in low and middle income countries where the challenges are especially acute. Engineering health information systems in low resource environments have several challenges that include poor infrastructure, skills shortages, fragmented and piecemeal applications deployed and managed by multiple organisations as well as low levels of resourcing. An important element of modern solutions to these problems is a health information exchange that enable disparate systems to share health information. It is a challenging task to develop systems as complex as health information exchanges that will have wide applicability in low and middle income countries. This work takes a case study approach and uses the development of a health information exchange in Rwanda as the case study. This research reports on the design, implementation and analysis of an architecture, the Health Information Mediator, that is a central component of a health information exchange. While such architectures have been used successfully in high income countries their efficacy has not been demonstrated in low and middle income countries. The Rwandan case study was used to understand and identify the challenges and requirements for health information exchange in low and middle income countries. These requirements were used to derive a set of key concerns for the architecture that were then used to drive its design. 
Novel features of the architecture include: the ability to mediate messages at both the service provider and service consumer interfaces; support for multiple internal representations of messages to facilitate the adoption of new and evolving standards; and the provision of a general method for mediating health information exchange transactions agnostic of the type of transactions. The architecture is shown to satisfy the key concerns and was validated by implementing and deploying a reference application, the OpenHIM, within the Rwandan health information exchange. The architecture is also analysed using the Architecture Trade-off Analysis Method. It has also been successfully implemented in other low and middle income countries with relatively minor configuration changes which demonstrates the architectures generalizability.

@mastersthesis{110,
author = {Ryan Crichton and Anban Pillay and Deshen Moodley},
title = {The Open Health Information Mediator: an Architecture for Enabling Interoperability in Low to Middle Income Countries},
abstract = {Interoperability and system integration are central problems that limit the effective use of health information systems to improve efficiency and effectiveness of health service delivery. There is currently no proven technology that provides a general solution in low and middle income countries where the challenges are especially acute. Engineering health information systems in low resource environments have several challenges that include poor infrastructure, skills shortages, fragmented and piecemeal applications deployed and managed by multiple organisations as well as low levels of resourcing. An important element of modern solutions to these problems is a health information exchange that enable disparate systems to share health information.
It is a challenging task to develop systems as complex as health information exchanges that will have wide applicability in low and middle income countries. This work takes a case study approach and uses the development of a health information exchange in Rwanda as the case study. This research reports on the design, implementation and analysis of an architecture, the Health Information Mediator, that is a central component of a health information exchange. While such architectures have been used successfully in high income countries their efficacy has not been demonstrated in low and middle income countries. The Rwandan case study was used to understand and identify the challenges and requirements for health information exchange in low and middle income countries. These requirements were used to derive a set of key concerns for the architecture that were then used to drive its design. Novel features of the architecture include: the ability to mediate messages at both the service provider and service consumer interfaces; support for multiple internal representations of messages to facilitate the adoption of new and evolving standards; and the provision of a general method for mediating health information exchange transactions agnostic of the type of transactions.
The architecture is shown to satisfy the key concerns and was validated by implementing and deploying a reference application, the OpenHIM, within the Rwandan health information exchange. The architecture is also analysed using the Architecture Trade-off Analysis Method. It has also been successfully implemented in other low and middle income countries with relatively minor configuration changes which demonstrates the architectures generalizability.},
year = {2015},
type = {{MSc} thesis},
internal-note = {school/institution not recorded in source -- confirm and add},
}

Ruttkamp-Bloem E, Casini G, Meyer T. A Non-Classical Logical Foundation for Naturalised Realism. In: Logica Yearbook 2014. Unknown; 2015.

In this paper, by suggesting a formal representation of science based on recent advances in logic-based Artificial Intelligence (AI), we show how three serious concerns around the realisation of traditional scientific realism (the theory/observation distinction, over-determination of theories by data, and theory revision) can be overcome such that traditional realism is given a new guise as ‘naturalised’. We contend that such issues can be dealt with (in the context of scientific realism) by developing a formal representation of science based on the application of the following tools from Knowledge Representation: the family of Description Logics, an enrichment of classical logics via defeasible statements, and an application of the preferential interpretation of the approach to Belief Revision.

@incollection{109,
author = {Ruttkamp-Bloem, Emma and Casini, Giovanni and Meyer, Thomas},
title = {A Non-Classical Logical Foundation for Naturalised Realism},
abstract = {In this paper, by suggesting a formal representation of science based on recent advances in logic-based Artificial Intelligence (AI), we show how three serious concerns around the realisation of traditional scientific realism (the theory/observation distinction, over-determination of theories by data, and theory revision) can be overcome such that traditional realism is given a new guise as ‘naturalised’. We contend that such issues can be dealt with (in the context of scientific realism) by developing a formal representation of science based on the application of the following tools from Knowledge Representation: the family of Description Logics, an enrichment of classical logics via defeasible statements, and an application of the preferential interpretation of the approach to Belief Revision.},
year = {2015},
booktitle = {Logica Yearbook 2014},
publisher = {Unknown},
}

Booth R, Casini G, Meyer T, Varzinczak I. What Does Entailment for PTL Mean? In: Commonsense 2015. ; 2015.

We continue recent investigations into the problem of reasoning about typicality. We do so in the framework of Propositional Typicality Logic (PTL), which is obtained by enriching classical propositional logic with a typicality operator and characterized by a preferential semantics a la KLM. In this paper we study different notions of entailment for PTL. We take as a starting point the notion of Rational Closure defined for KLM-style conditionals. We show that the additional expressivity of PTL results in different versions of Rational Closure for PTL — versions that are equivalent with respect to the conditional language originally proposed by KLM.

@inproceedings{108,
author = {Booth, Richard and Casini, Giovanni and Meyer, Thomas and Varzinczak, Ivan},
title = {What Does Entailment for {PTL} Mean?},
abstract = {We continue recent investigations into the problem of reasoning about typicality. We do so in the framework of Propositional Typicality Logic (PTL), which is obtained by enriching classical propositional logic with a typicality operator and characterized by a preferential semantics a la KLM. In this paper we study different notions of entailment for PTL. We take as a starting point the notion of Rational Closure defined for KLM-style conditionals. We show that the additional expressivity of PTL results in different versions of Rational Closure for PTL — versions that are equivalent with respect to the conditional language originally proposed by KLM.},
year = {2015},
booktitle = {Commonsense 2015},
month = {23/03-25/03},
}

Ongoma N, Keet M. Temporal Attributes: Status and Subsumption. In: Asia-Pacific Conference on Conceptual Modelling. Sydney, Australia; 2015.

Representing data that changes over time in conceptual data models is required by various application domains, and requires a language that is expressive enough to fully capture the operational semantics of the time-varying information. Temporal modelling languages typically focus on representing and reasoning over temporal classes and relationships, but have scant support for temporal attributes, if at all. This prevents one to fully utilise a temporal conceptual data model, which, however, is needed to model not only evolving objects (e.g., an employee’s role), but also its attributes, such as changes in salary and bonus payouts. To characterise temporal attributes precisely, we use the DLRUS Description Logic language to provide its model-theoretic semantics, therewith essentially completing the temporal ER language ERVT. The new notion of status attribute is introduced to capture the possible changes, which results in several logical implications they entail, including their interaction with temporal classes to ensure correct behaviour in subsumption hierarchies, paving the way to verify automatically whether a temporal conceptual data model is consistent.

@inproceedings{105,
author = {Ongoma, Nasubo and Keet, Maria},
title = {Temporal Attributes: Status and Subsumption},
abstract = {Representing data that changes over time in conceptual data models is required by various application domains, and requires a language that is expressive enough to fully capture the operational semantics of the time-varying information. Temporal modelling languages typically focus on representing and reasoning over temporal classes and relationships, but have scant support for temporal attributes, if at all. This prevents one to fully utilise a temporal conceptual data model, which, however, is needed to model not only evolving objects (e.g., an employee’s role), but also its attributes, such as changes in salary and bonus payouts. To characterise temporal attributes precisely, we use the DLRUS Description Logic language to provide its model-theoretic semantics, therewith essentially completing the temporal ER language ERVT. The new notion of status attribute is introduced to capture the possible changes, which results in several logical implications they entail, including their interaction with temporal classes to ensure correct behaviour in subsumption hierarchies, paving the way to verify automatically whether a temporal conceptual data model is consistent.},
year = {2015},
booktitle = {Asia-Pacific Conference on Conceptual Modelling},
pages = {61--70},
month = {27/01-30/01},
isbn = {978-1-921770-47-0},
}

Rens G. Speeding up Online POMDP Planning: Unification of Observation Branches by Belief-state Compression via Expected Feature Values. In: International Conference on Agents and Artificial Intelligence (ICAART) Vol. 2. ; 2015.

A novel algorithm to speed up online planning in partially observable Markov decision processes (POMDPs) is introduced. I propose a method for compressing nodes in belief-decision-trees while planning occurs. Whereas belief-decision-trees branch on actions and observations, with my method, they branch only on actions. This is achieved by unifying the branches required due to the nondeterminism of observations. The method is based on the expected values of domain features. The new algorithm is experimentally compared to three other online POMDP algorithms, outperforming them on the given test domain.

@inproceedings{104,
author = {Rens, Gavin},
title = {Speeding up Online {POMDP} Planning: Unification of Observation Branches by Belief-state Compression via Expected Feature Values},
abstract = {A novel algorithm to speed up online planning in partially observable Markov decision processes (POMDPs) is introduced. I propose a method for compressing nodes in belief-decision-trees while planning occurs. Whereas belief-decision-trees branch on actions and observations, with my method, they branch only on actions. This is achieved by unifying the branches required due to the nondeterminism of observations. The method is based on the expected values of domain features. The new algorithm is experimentally compared to three other online POMDP algorithms, outperforming them on the given test domain.},
year = {2015},
booktitle = {International Conference on Agents and Artificial Intelligence (ICAART) Vol. 2},
pages = {241--246},
month = {10/01-12/01},
isbn = {978-989-758-074-1},
}

Rens G, Meyer T. Hybrid POMDP-BDI: An Agent Architecture with Online Stochastic Planning and Desires with Changing Intensity Levels. In: International Conference on Agents and Artificial Intelligence (ICAART) Vol. 1. ; 2015.

Partially observable Markov decision processes (POMDPs) and the belief-desire-intention (BDI) framework have several complimentary strengths. We propose an agent architecture which combines these two powerful approaches to capitalize on their strengths. Our architecture introduces the notion of intensity of the desire for a goal’s achievement. We also define an update rule for goals’ desire levels. When to select a new goal to focus on is also defined. To verify that the proposed architecture works, experiments were run with an agent based on the architecture, in a domain where multiple goals must continually be achieved. The results show that (i) while the agent is pursuing goals, it can concurrently perform rewarding actions not directly related to its goals, (ii) the trade-off between goals and preferences can be set effectively and (iii) goals and preferences can be satisfied even while dealing with stochastic actions and perceptions. We believe that the proposed architecture furthers the theory of high-level autonomous agent reasoning.

@inproceedings{103,
author = {Rens, Gavin and Meyer, Thomas},
title = {Hybrid {POMDP-BDI}: An Agent Architecture with Online Stochastic Planning and Desires with Changing Intensity Levels},
abstract = {Partially observable Markov decision processes (POMDPs) and the belief-desire-intention (BDI) framework have several complimentary strengths. We propose an agent architecture which combines these two powerful approaches to capitalize on their strengths. Our architecture introduces the notion of intensity of the desire for a goal’s achievement. We also define an update rule for goals’ desire levels. When to select a new goal to focus on is also defined. To verify that the proposed architecture works, experiments were run with an agent based on the architecture, in a domain where multiple goals must continually be achieved. The results show that (i) while the agent is pursuing goals, it can concurrently perform rewarding actions not directly related to its goals, (ii) the trade-off between goals and preferences can be set effectively and (iii) goals and preferences can be satisfied even while dealing with stochastic actions and perceptions. We believe that the proposed architecture furthers the theory of high-level autonomous agent reasoning.},
year = {2015},
booktitle = {International Conference on Agents and Artificial Intelligence (ICAART) Vol. 1},
pages = {5--14},
month = {10/01-12/01},
isbn = {978-989-758-073-4},
}

Rens G, Meyer T, Lakemeyer G. A Modal Logic for the Decision-Theoretic Projection Problem. In: International Conference on Agents and Artificial Intelligence (ICAART) Vol. 2. ; 2015.

We present a decidable logic in which queries can be posed about (i) the degree of belief in a propositional sentence after an arbitrary finite number of actions and observations and (ii) the utility of a finite sequence of actions after a number of actions and observations. Another contribution of this work is that a POMDP model specification is allowed to be partial or incomplete with no restriction on the lack of information specified for the model. The model may even contain information about non-initial beliefs. Essentially, entailment of arbitrary queries (expressible in the language) can be answered. A sound, complete and terminating decision procedure is provided.

@inproceedings{102,
author = {Rens, Gavin and Meyer, Thomas and Lakemeyer, Gerhard},
title = {A Modal Logic for the Decision-Theoretic Projection Problem},
abstract = {We present a decidable logic in which queries can be posed about (i) the degree of belief in a propositional sentence after an arbitrary finite number of actions and observations and (ii) the utility of a finite sequence of actions after a number of actions and observations. Another contribution of this work is that a POMDP model specification is allowed to be partial or incomplete with no restriction on the lack of information specified for the model. The model may even contain information about non-initial beliefs. Essentially, entailment of arbitrary queries (expressible in the language) can be answered. A sound, complete and terminating decision procedure is provided.},
year = {2015},
booktitle = {International Conference on Agents and Artificial Intelligence (ICAART) Vol. 2},
pages = {5--16},
month = {10/01-12/01},
isbn = {978-989-758-074-1},
}


### 2014

Klarman S, Meyer T. Complexity of Temporal Query Abduction in DL-Lite. 27th International Workshop on Description Logics (DL 2014). 2014. http://ceur-ws.org/Vol-1193/paper_45.pdf.

Temporal query abduction is the problem of hypothesizing a minimal set of temporal data which, given some fixed background knowledge, warrants the entailment of the query. This problem formally underlies a variety of forms of explanatory and diagnostic reasoning in the context of time series data, data streams, or otherwise temporally annotated structured information. In this paper, we consider (temporally ordered) data represented in Description Logics from the popular DLLite family and Temporal Query Language, based on the combination of LTL with conjunctive queries. In this defined setting, we study the complexity of temporal query abduction, assuming different restrictions on the problem and minimality criteria for abductive solutions. As a result, we draw several revealing demarcation lines between NP-, DP- and PSpace-complete variants of the problem.

@inproceedings{365,
author = {Klarman, Szymon and Meyer, Thomas},
title = {Complexity of Temporal Query Abduction in {DL-Lite}},
abstract = {Temporal query abduction is the problem of hypothesizing a minimal set of temporal data which, given some fixed background knowledge, warrants the entailment of the query. This problem formally underlies a variety of forms of explanatory and diagnostic reasoning in the context of time series data, data streams, or otherwise temporally annotated structured information. In this paper, we consider (temporally ordered) data represented in Description Logics from the popular DLLite family and Temporal Query Language, based on the combination of LTL with conjunctive queries. In this defined setting, we study the complexity of temporal query abduction, assuming different restrictions on the problem and minimality criteria for abductive solutions. As a result, we draw several revealing demarcation lines between NP-, DP- and PSpace-complete variants of the problem.},
year = {2014},
booktitle = {27th International Workshop on Description Logics (DL 2014)},
month = {17/07 - 20/07},
url = {http://ceur-ws.org/Vol-1193/paper_45.pdf},
}