Publications
Core Project Publications
2020
van de Poel, Ibo; Taebi, Behnam; de Wildt, Tristan: Accounting for Values in the Development and Design of New Nuclear Reactors. Journal Article. The Bridge, 50(3), pp. 59–65, 2020. URL: https://www.nae.edu/239186/Accounting-for-Values-in-the-Development-and-Design-of-New-Nuclear-Reactors
Steinert, Steffen: Corona and value change. The role of social media and emotional contagion. Journal Article. Ethics and Information Technology, 2020. DOI: 10.1007/s10676-020-09545-z

People share their emotions on social media and evidence suggests that in times of crisis people are especially motivated to post emotional content. The current Coronavirus pandemic is such a crisis. The online sharing of emotional content during the Coronavirus crisis may contribute to societal value change. Emotion sharing via social media could lead to emotional contagion, which in turn could facilitate an emotional climate in a society. In turn, the emotional climate of a society can influence society’s value structure. The emotions that spread in the current Coronavirus crisis are predominantly negative, which could result in a negative emotional climate. Based on the dynamic relations of values to each other and the way that emotions relate to values, a negative emotional climate can contribute to societal value change towards values related to security preservation and threat avoidance. As a consequence, a negative emotional climate and the shift in values could lead to a change in political attitudes that has implications for rights, freedom, privacy and moral progress. Considering the impact of social media in terms of emotional contagion and a longer-lasting value change is an important perspective in thinking about the ethical long-term impact of social media technology.
Klenk, Michael: How Do Technological Artefacts Embody Moral Values? Journal Article. Philosophy & Technology, 2020. DOI: 10.1007/s13347-020-00401-y

According to some philosophers of technology, technology embodies moral values in virtue of its functional properties and the intentions of its designers. But this paper shows that such an account makes the values supposedly embedded in technology epistemically opaque and that it does not allow for values to change. Therefore, to overcome these shortcomings, the paper introduces the novel Affordance Account of Value Embedding as a superior alternative. Accordingly, artefacts bear affordances, that is, artefacts make certain actions likelier given the circumstances. Based on an interdisciplinary perspective that invokes recent moral anthropology, I conceptualize affordances as response-dependent properties. That is, they depend on intrinsic as well as extrinsic properties of the artefact. We have reason to value these properties. Therefore, artefacts embody values and are not value-neutral, which has practical implications for the design of new technologies.
van de Poel, Ibo: Core Values and Value Conflicts in Cybersecurity: Beyond Privacy Versus Security. Book Chapter. In: Christen, Markus; Gordijn, Bert; Loi, Michele (Eds.): The Ethics of Cybersecurity, pp. 45–71, Springer International Publishing, Cham, 2020. ISBN: 978-3-030-29053-5. DOI: 10.1007/978-3-030-29053-5_3

This chapter analyses some of the main values, and value conflicts, in relation to cybersecurity by distinguishing four important value clusters that should be considered when deciding on cybersecurity measures. These clusters are security, privacy, fairness and accountability. Each cluster consists of a range of further values, which can be viewed as articulating specific moral reasons relevant when devising cybersecurity measures. In addition to the four value clusters, domain-specific values that are served by computer systems, such as health, are important. Following a detailed discussion of the four relevant value clusters, potential value conflicts and value tensions are considered. The relationships of five pairs of values (privacy-security, privacy-fairness, privacy-accountability, security-accountability and security-fairness) are analysed in terms of whether they are largely supportive or conflicting. In addition, possible methods for addressing these potential value conflicts are discussed. It is concluded that values, and value conflicts, in cybersecurity should be considered in context, also taking into account the specific computer systems at play, to enable the use of nuanced and fine-grained methods for addressing the relevant value conflicts.
Boenink, Marianne; Kudina, Olya: Values in responsible research and innovation: from entities to practices. Journal Article. Journal of Responsible Innovation, 7(3), pp. 450–470, 2020. DOI: 10.1080/23299460.2020.1806451

This article explores the understanding of values in Responsible Research and Innovation (RRI). First, it analyses how two mainstream RRI approaches, the largely substantial one by Von Schomberg and the procedural one by Stilgoe and colleagues, identify and conceptualize values. We argue that by treating values as relatively stable entities, directly available for reflection, both fall into an entity trap. As a result, the hermeneutic work required to identify values is overlooked. We therefore seek to bolster a practice-based take on values, which approaches values as the evolving results of valuing processes. We highlight how this approach views values as lived realities, interactive and dynamic, discuss methodological implications for RRI, and explore potential limitations. Overall, the strength of this approach is that it enables RRI scholars and practitioners to better acknowledge the complexities involved in valuing.
2018
van de Poel, Ibo: Design for value change. Journal Article. Ethics and Information Technology, 2018. DOI: 10.1007/s10676-018-9461-9

In the value sensitive design (VSD) literature, there has been little attention to how values may change during the adoption and use of a sociotechnical system, and what that implies for design. A value change taxonomy is proposed, as well as a number of technical features that allow dealing with value change.
Other Publications
A selection of further publications by members of the project’s research team:
2021
Klenk, Michael; van de Poel, Ibo: COVID-19, uncertainty, and moral experiments. Journal Article. History and Philosophy of the Life Sciences, 43(1), 2021. DOI: 10.1007/s40656-020-00360-9

Pandemics like COVID-19 confront us with decisions about life and death that come with great uncertainty, factual as well as moral. How should policy makers deal with such uncertainty? We suggest that, rather than deliberating until they have found the right course of action, they had better conduct moral experiments that generate relevant experiences to enable more reliable moral evaluations and rational decisions.
2020
van de Poel, Ibo: Embedding Values in Artificial Intelligence (AI) Systems. Journal Article. Minds and Machines, 2020. DOI: 10.1007/s11023-020-09537-4

Organizations such as the EU High-Level Expert Group on AI and the IEEE have recently formulated ethical principles and (moral) values that should be adhered to in the design and deployment of artificial intelligence (AI). These include respect for autonomy, non-maleficence, fairness, transparency, explainability, and accountability. But how can we ensure and verify that an AI system actually respects these values? To help answer this question, I propose an account for determining when an AI system can be said to embody certain values. This account understands embodied values as the result of design activities intended to embed those values in such systems. AI systems are here understood as a special kind of sociotechnical system that, like traditional sociotechnical systems, are composed of technical artifacts, human agents, and institutions but, in addition, contain artificial agents and certain technical norms that regulate interactions between artificial agents and other elements of the system. The specific challenges and opportunities of embedding values in AI systems are discussed, and some lessons for better embedding values in AI systems are drawn.
van de Poel, Ibo: Three philosophical perspectives on the relation between technology and society, and how they affect the current debate about artificial intelligence. Journal Article. Human Affairs, 30(4), p. 499, 2020. DOI: 10.1515/humaff-2020-0042

Three philosophical perspectives on the relation between technology and society are distinguished and discussed: (1) technology as an autonomous force that determines society; (2) technology as a human construct that can be shaped by human values; and (3) a co-evolutionary perspective on technology and society where neither of them determines the other. The historical evolution of the three perspectives is discussed and it is argued that all three are still present in current debates about technological change and how it may affect society. This is illustrated for the case of Artificial Intelligence (AI). It is argued that each of the three perspectives contributes to the debate about AI but that the third has the strongest potential to uncover blind spots in the current debate.
de Reuver, Mark; van Wynsberghe, Aimee; Janssen, Marijn; van de Poel, Ibo: Digital platforms and responsible innovation: expanding value sensitive design to overcome ontological uncertainty. Journal Article. Ethics and Information Technology, 2020. DOI: 10.1007/s10676-020-09537-z

In this paper, we argue that the characteristics of digital platforms challenge the fundamental assumptions of value sensitive design (VSD). Traditionally, VSD methods assume that we can identify relevant values during the design phase of new technologies. The underlying assumption is that there is only epistemic uncertainty about which values will be impacted by a technology. VSD methods suggest that one can predict which values will be affected by new technologies by increasing knowledge about how values are interpreted or understood in context. In contrast, digital platforms exhibit a novel form of uncertainty, namely, ontological uncertainty: even with full information and overview, it cannot be foreseen what users or developers will do with digital platforms. Hence, predictions about which values are affected might not hold. In this paper, we suggest expanding VSD methods to account for value dynamism resulting from ontological uncertainty. Our expansions involve (1) extending VSD to the entire lifecycle of a platform, (2) broadening VSD through the addition of reflexivity, i.e. second-order learning about what values to aim at, and (3) adding specific tools of moral sandboxing and moral prototyping to enhance such reflexivity. While we illustrate our approach with a short case study about ride-sharing platforms such as Uber, our approach is relevant for other technologies exhibiting ontological uncertainty as well, such as machine learning, robotics and artificial intelligence.
de Wildt, Tristan; Chappin, Emile; van de Kaa, Geerten; Herder, Paulien; van de Poel, Ibo: Conflicted by decarbonisation: Five types of conflict at the nexus of capabilities and decentralised energy systems identified with an agent-based model. Journal Article. Energy Research & Social Science, 64, 101451, 2020. DOI: 10.1016/j.erss.2020.101451

This paper explores capability conflicts in the deployment of decentralised energy systems and identifies the affected population. These systems have positive societal impacts in terms of sustainability and consumer empowerment, but they are not accessible to all and their deployment may increase socio-economic inequalities. The societal impacts of decentralised energy systems can be understood in terms of conflicting capabilities; for some citizens capabilities may increase, whereas for others they may decrease. While problematic, capability conflicts may not be inherent. They may only occur in certain neighbourhoods, for example, where both affluent and less affluent populations coexist. By understanding why these capability conflicts occur, we may be able to anticipate whether these decentralised energy projects could result in societal problems. We use agent-based modelling and the scenario discovery technique to identify capability conflicts and the populations that may be affected. We distinguish five classes of conflicts, which can be used to anticipate social acceptance issues. Affected populations can be involved in the decision-making process to foster acceptance of decentralised energy systems. This work contributes to the growing political and scientific debate on issues of energy justice and inclusiveness related to the energy transition. Additionally, we contribute to the operationalisation of such capabilities, as this is one of the first papers to formalise the Capability Approach using an agent-based model.
Klenk, Michael: Charting moral psychology's significance for bioethics: Routes to bioethical progress, its limits, and lessons from moral philosophy. Journal Article. Diametros, 17(64), pp. 36–55, 2020. DOI: 10.33392/diam.1520

Empirical moral psychology is sometimes dismissed as normatively insignificant because it plays no decisive role in settling ethical disputes. But that conclusion, even if it is valid for normative ethics, does not extend to bioethics. First, in contrast to normative ethics, bioethics can legitimately proceed from a presupposed moral framework. Within that framework, moral psychology can be shown to play four significant roles: it can improve bioethicists' understanding of (1) the decision situation, (2) the origin and legitimacy of their moral concepts, (3) efficient options for implementing (legitimate) decisions, and (4) how to change and improve some parts of their moral framework. Second, metaethical considerations suggest that moral psychology may lead to the radical revision of entire moral frameworks and thus prompt the radical revision of entire moral frameworks in bioethics. However, I show that bioethics must either relinquish these radical implications of moral psychology and accept that there are limits to progress in bioethics based on moral psychology or establish an epistemic framework that guides radical revision.
Klenk, Michael; Duijf, Hein: Ethics of digital contact tracing and COVID-19: who is (not) free to go? Journal Article. Ethics and Information Technology, 2020. DOI: 10.1007/s10676-020-09544-0

Digital tracing technologies are heralded as an effective way of containing SARS-CoV-2 faster than it is spreading, thereby allowing the possibility of easing draconic measures of population-wide quarantine. But existing technological proposals risk addressing the wrong problem. The proper objective is not solely to maximise the ratio of people freed from quarantine but to also ensure that the composition of the freed group is fair. We identify several factors that pose a risk for fair group composition along with an analysis of general lessons for a philosophy of technology. Policymakers, epidemiologists, and developers can use these risk factors to benchmark proposal technologies, curb the pandemic, and keep public trust.
Muishout, Chantal E.; Coggins, Tom N.; Schipper, Roel H.: More Than Meets the Eye? Robotisation and Normativity in the Dutch Construction Industry. Conference Proceedings. In: Second RILEM International Conference on Concrete and Digital Fabrication (Digital Concrete 2020, 6–8 July 2020), RILEM Bookseries, pp. 839–851, Springer, 2020. ISBN: 978-3-030-49915-0. DOI: 10.1007/978-3-030-49916-7_82

Construction robots are becoming more common in the Netherlands, but remain rarities in contexts aside from state-of-the-art factories owned by wealthy or technologically-orientated companies. In its current state, the construction industry would have to change significantly to make room for robots. To understand whether these changes are welcome or not, this paper presents qualitative, exploratory research concerning 10 stakeholders' perspectives of robotisation and construction robots in the Dutch construction industry.
Steinert, Steffen: Unleashing the Constructive Potential of Emotions: Some Critical Comments on Risk, Technology and Moral Emotions by Sabine Roeser. Journal Article. Science & Engineering Ethics, 26(4), pp. 1913–1920, 2020. DOI: 10.1007/s11948-020-00195-4
Hayes, Paul; van de Poel, Ibo; Steen, Marc: Algorithms and Values in Justice and Security. Journal Article. AI & Society: The Journal of Human-Centered Systems and Machine Intelligence, 35(3), pp. 533–555, 2020. DOI: 10.1007/s00146-019-00932-9

This article presents a conceptual investigation into the value impacts and relations of algorithms in the domain of justice and security. As a conceptual investigation, it represents one step in a value sensitive design based methodology (not incorporated here are empirical and technical investigations). Here, we explicate and analyse the expression of values of accuracy, privacy, fairness and equality, property and ownership, and accountability and transparency in this context. We find that values are sensitive to disvalue if algorithms are designed, implemented or deployed inappropriately or without sufficient consideration for their value impacts, potentially resulting in problems including discrimination and constrained autonomy. Furthermore, we outline a framework of conceptual relations of values indicated by our analysis, and potential value tensions in their implementation and deployment with a view towards supporting future research, and supporting the value sensitive design of algorithms in justice and security.
Ortt, Roland; van Putten, David; Kamp, Linda; van de Poel, Ibo: Conclusions: How can responsible innovation be defined and how to do it? Book Chapter. In: Ortt, Roland J.; van Putten, David; Kamp, Linda M.; van de Poel, Ibo (Eds.): Responsible Innovation in Large Technological Systems, 1st edition, Routledge, 2020. ISBN: 9780367895815

Large technological systems such as sluices and seaports in our water system and nuclear power systems, wind turbines and shale gas exploitation technologies in our energy system provide vital societal functions. For such large technological systems in society, the innovation process itself can also impact many stakeholder groups. Responsibility refers to a range of aspects: privacy of individuals, security, respect, a fair division of wealth, sustainability and so on. Some of these aspects conflict with each other: maximum security against terrorism, for example, can only be created at the expense of privacy. Before decision-making starts and responsible innovation processes are initiated, a comprehensive set of basic information is required. The economic way to balance values is to weigh costs versus benefits. For large technological systems in society it is important to represent all stakeholders and explore the costs and benefits that they experience from the system.
2019
Steinert, Steffen; Roeser, Sabine: Passion for the Art of Morally Responsible Technology Development. Journal Article. Royal Institute of Philosophy Supplement, 85, pp. 87–109, 2019. DOI: 10.1017/S135824611800070X

In this article, we discuss the importance of emotions for ethical reflection on technological developments, as well as the role that art can play in this. We review literature that argues that emotions can and should play an important role in the assessment and acceptance of technological risk and in designing morally responsible technologies. We then investigate how technologically engaged art can contribute to critical, emotional-moral reflection on technological risks. The role of art that engages with technology is unexplored territory and gives rise to many fascinating philosophical questions that have not yet been sufficiently addressed in the literature.
Steinert, Steffen; Bublitz, Christoph; Jox, Ralf; Friedrich, Orsolya: Doing Things with Thoughts: Brain-Computer Interfaces and Disembodied Agency. Journal Article. Philosophy & Technology, 32(3), pp. 457–482, 2019. DOI: 10.1007/s13347-018-0308-4

Connecting human minds to various technological devices and applications through brain-computer interfaces (BCIs) affords intriguingly novel ways for humans to engage and interact with the world. Not only do BCIs play an important role in restorative medicine, they are also increasingly used outside of medical or therapeutic contexts (e.g., gaming or mental state monitoring). A striking peculiarity of BCI technology is that the kind of actions it enables seems to differ from paradigmatic human actions, because effects in the world are brought about by devices such as robotic arms, prostheses, or other machines, and their execution runs through a computer directed by brain signals. In contrast to usual forms of action, the sequence does not need to involve bodily or muscle movements at all. A motionless body, the epitome of inaction, might be acting. How do theories of action relate to such BCI-mediated forms of changing the world? We wish to explore this question through the lenses of three perspectives on agency: subjective experience of agency, philosophical action theory, and legal concepts of action. Our analysis pursues three aims: First, we shall discuss whether and which BCI-mediated events qualify as actions, according to the main concepts of action in philosophy and law. Secondly, en passant, we wish to highlight the ten most interesting novelties or peculiarities of BCI-mediated movements. Thirdly, we seek to explore whether these novel forms of movement may have consequences for concepts of agency. More concretely, we think that convincing assessments of BCI-movements require more fine-grained accounts of agency and a distinction between various forms of control during movements. In addition, we show that the disembodied nature of BCI-mediated events causes troubles for the standard legal account of actions as bodily movements. In an exchange with views from philosophy, we wish to propose that the law ought to reform its concept of action to include some, but not all, BCI-mediated events and sketch some of the wider implications this may have, especially for the venerable legal idea of the right to freedom of thought. In this regard, BCIs are an example of the way in which technological access to yet largely sealed-off domains of the person may necessitate adjusting normative boundaries between the personal and the social sphere.