2024
Catanzariti, Benedetta
Investigating the Impact of Facial Expression Recognition in Healthcare Miscellaneous
2024.
@misc{Catanzariti2024,
title = {Investigating the Impact of Facial Expression Recognition in Healthcare},
author = {Benedetta Catanzariti},
url = {https://zenodo.org/records/13132860},
doi = {https://doi.org/10.5281/zenodo.13132860},
year = {2024},
date = {2024-07-30},
urldate = {2024-07-30},
abstract = {Facial expression recognition (FER) is a technology designed to track and classify facial expressive behaviours and produce some form of meaningful knowledge about people based on those behaviours. This report investigates the impact of FER systems in healthcare contexts. Here, these tools are designed to detect and assess mental health conditions (depression and anxiety), neurodevelopmental disorders (autism, bipolar disorders, and attention deficit hyperactivity disorder or ADHD), and assess pain levels on the basis of facial expressions. Many have pointed to the limitations of these systems, as well as the privacy risks associated with their use in high-stakes contexts. This report highlights the broader societal impact of these technologies and offers recommendations to identify and address harm.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Ganesh, Bhargavi
Policy Approaches for Building a Responsible Ecosystem: Contextualising AI Governance Challenges Within Other Regulatory/Governance Sectors and Histories Miscellaneous
2024.
@misc{Ganesh2024,
title = {Policy Approaches for Building a Responsible Ecosystem: Contextualising AI Governance Challenges Within Other Regulatory/Governance Sectors and Histories},
author = {Bhargavi Ganesh},
url = {https://zenodo.org/records/13132896},
doi = {https://doi.org/10.5281/zenodo.13132896},
year = {2024},
date = {2024-07-30},
urldate = {2024-07-30},
abstract = {This report explains the historical role that governments have assumed in creating a more responsible ecosystem around new technologies, and protecting the safety and fundamental rights of citizens. This role is particularly crucial in the context of AI, given how ubiquitously it is used and how it is now almost impossible to opt out of its impacts. Analogies to past governance challenges, such as regulations targeting smog/air pollution, can help policymakers understand their role in the presence of externalised risks. Additionally, examining past governance responses to harms and risks associated with new technologies, enables policymakers to build AI governance mechanisms based on existing governance tools, rather than approaching AI governance as an unprecedented and daunting task. Moreover, governance remains an iterative exercise, in which initial governance efforts are typically crucial in enabling governments to revise and improve subsequent efforts.
Governance responses to AI are important because the failure to meet the concerns of affected parties risks creating responsibility gaps, or outcomes for which society bears the costs but no-one ultimately faces the consequences. It also inhibits the relational practice of responsibility, in which affected parties can consistently feed into the process of improving AI systems. The ability to have concerns heard and responded to is the hallmark of any working democracy. Persistent responsibility gaps threaten social trust and solidarity, by diminishing the trust that individuals and communities have both in institutions and each other. Although the concept of trust is more often used to describe the loss of trust by entities looking to use or adopt AI, the impacts of losing social trust and solidarity present even greater challenges, by threatening broader social well-being and political stability. },
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Simkute, Auste
Explainability in Expert Contexts: Challenges and Limitations in Supporting Domain Experts in AI-driven Decision-making Miscellaneous
2024.
@misc{Simkute2024,
title = {Explainability in Expert Contexts: Challenges and Limitations in Supporting Domain Experts in AI-driven Decision-making},
author = {Auste Simkute},
url = {https://zenodo.org/records/13132939},
doi = {https://doi.org/10.5281/zenodo.13132939},
year = {2024},
date = {2024-07-30},
urldate = {2024-07-30},
abstract = {The public sector increasingly relies on artificial intelligence (AI) to inform decision making across various domains, including policing, healthcare, social work, and immigration services. AI decision support systems (DSSs) can process large amounts of data (1) and generate outputs, such as predictions of medical diagnoses (2) or potential outcomes of a visa application (3). AI support could make processes within the public sector not only more efficient but also fairer by reducing the potential for human biases (4, 5).
However, AI-driven systems lack contextual sensitivity and cannot account for unique cases. They can also be trained on biased or incomplete data. Given that most of the decisions are highly sensitive, it is crucial that domain experts (e.g. social workers) maintain agency when making AI-supported decisions. Ideally, AI would automate mundane, repetitive tasks and allow experts to focus on higher-level and creative ones (6). Unfortunately, domain experts often cannot understand and evaluate whether they should trust AI systems and their generated outputs (7).
This report provides a broad overview of challenges faced when DSSs inform decision-making. It explores critical blockages for effective expert–AI collaborations and discusses potential solutions. It also considers the role of explainability in supporting experts and outlines recommendations for how explanations could be made more effective and usable in each expert context.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Jones, Elliot; Hardalupas, Mahi; Agnew, William
Under the radar? Examining the evaluation of foundation models Miscellaneous
2024.
@misc{nokey,
title = {Under the radar? Examining the evaluation of foundation models},
author = {Elliot Jones and Mahi Hardalupas and William Agnew},
url = {https://www.adalovelaceinstitute.org/report/under-the-radar/},
year = {2024},
date = {2024-07-25},
urldate = {2024-07-25},
abstract = {Global policy proposals for ensuring the safety of advanced artificial intelligence (AI) systems have centred on foundation model evaluations as an important method to identify and mitigate the risks these systems pose. The core goals of foundation model evaluations are to understand the foundation model and / or its impacts, including the model’s capabilities, risks, performance, behaviour and social impact.
Policymakers are seeking to use evaluations to provide clarity on appropriate and responsible uses of foundation models. They are incorporating evaluations into emerging regulatory proposals in the EU, UK and USA, and creating both voluntary and legally mandated requirements for developers to evaluate AI systems for different kinds of risks.
The EU’s newly passed AI Act requires developers of foundation models and general-purpose AI models to evaluate these systems for ‘systemic risks’. The Act has established an AI Office, which also has a mandate to evaluate general purpose AI models.
In the USA and UK, governments have secured voluntary commitments from major AI companies to allow external evaluations of their foundation models by newly established national AI safety institutes. France, Canada, Japan and Singapore have their own AI safety institutes with similar mandates to develop and run evaluations of foundation models.
Both governments and technology companies have described evaluations as a necessary component of effective foundation model governance. Many foundation model developers have hired dedicated evaluation teams to construct evaluations and test their models, and there is also a growing third-party evaluation industry in which contracted third parties can construct evaluations and test models on behalf of a developer.
However, our research indicates that evaluations alone are not sufficient for determining the safety of foundation models, the systems built from them and their applications for people and society in real-world conditions. There is no agreed terminology or set of methods for evaluating foundation models, and evaluations need to be used alongside other tools including codes of practice, incident reporting and post-market monitoring. In practice, AI model evaluations are currently voluntary and subject to company discretion, leading to inconsistencies in quality and limited access for evaluators without pre-existing company relationships. Current policy proposals allow companies to selectively choose what evaluations to conduct, and fail to ensure evaluation results lead to meaningful action that prevents unsafe products from entering the market.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Callaghan, S; Bouich, A; Gooding, P; Spooner, R; Thorpe, K; Booker, L; Wright, R; Galassi, M
iREAL: Inclusive Requirements Elicitation for AI in Libraries to Support Respectful Management of Indigenous Knowledges Presentation
16.07.2024.
@misc{nokey,
title = {iREAL: Inclusive Requirements Elicitation for AI in Libraries to Support Respectful Management of Indigenous Knowledges},
author = {S Callaghan and A Bouich and P Gooding and R Spooner and K Thorpe and L Booker and R Wright and M Galassi},
url = {https://eprints.gla.ac.uk/332226/},
year = {2024},
date = {2024-07-16},
urldate = {2024-07-16},
keywords = {},
pubstate = {published},
tppubtype = {presentation}
}
Drury, M. R. F.; Miles, O; Brundell, P; Farina, L; Webb, H; Giannachi, G; Moore, J; Benford, S; Jordan, S; Stahl, B; Vallejos, E Perez; Vear, C
CReAting a Dynamic archive of responsibLe AI Ecosystems in the context of Creative AI (CRADLE): Workshop Report 3: additional case studies Workshop
2024.
@workshop{nokey,
title = {CReAting a Dynamic archive of responsibLe AI Ecosystems in the context of Creative AI (CRADLE): Workshop Report 3: additional case studies},
author = {M.R.F. Drury and O Miles and P Brundell and L Farina and H Webb and G Giannachi and J Moore and S Benford and S Jordan and B Stahl and E Perez Vallejos and C Vear},
url = {https://www.nottingham.ac.uk/humanities/documents/philosophy/ai-ecosystems/workshop-report-3-5-additional-case-studies.pdf},
year = {2024},
date = {2024-03-31},
urldate = {2024-03-31},
keywords = {},
pubstate = {published},
tppubtype = {workshop}
}
Tollon, Fabio
Technology and the Situationist Challenge to Virtue Ethics Journal Article
In: Science and Engineering Ethics, vol. 30, iss. 10, 2024.
@article{Tollon2024,
title = {Technology and the Situationist Challenge to Virtue Ethics},
author = {Fabio Tollon},
url = {https://link.springer.com/article/10.1007/s11948-024-00474-4},
doi = {https://doi.org/10.1007/s11948-024-00474-4},
year = {2024},
date = {2024-03-27},
urldate = {2024-03-27},
journal = {Science and Engineering Ethics},
volume = {30},
issue = {10},
abstract = {In this paper, I introduce a “promises and perils” framework for understanding the “soft” impacts of emerging technology, and argue for a eudaimonic conception of well-being. This eudaimonic conception of well-being, however, presupposes that we have something like stable character traits. I therefore defend this view from the “situationist challenge” and show that instead of viewing this challenge as a threat to well-being, we can incorporate it into how we think about living well with technology. Human beings are susceptible to situational influences and are often unaware of the ways that their social and technological environment influence not only their ability to do well, but even their ability to know whether they are doing well. Any theory that attempts to describe what it means for us to be doing well, then, needs to take these contextual features into account and bake them into a theory of human flourishing. By paying careful attention to these contextual factors, we can design systems that promote human flourishing.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Drury, M. R. F.; Miles, O; Brundell, P; Farina, L; Webb, H; Giannachi, G; Moore, J; Benford, S; Jordan, S; Stahl, B; Vallejos, E Perez; Vear, C
CReAting a Dynamic archive of responsibLe AI Ecosystems in the context of Creative AI (CRADLE): Workshop Report 2: Cat Royale Workshop
2024.
@workshop{nokey,
title = {CReAting a Dynamic archive of responsibLe AI Ecosystems in the context of Creative AI (CRADLE): Workshop Report 2: Cat Royale},
author = {M.R.F. Drury and O Miles and P Brundell and L Farina and H Webb and G Giannachi and J Moore and S Benford and S Jordan and B Stahl and E Perez Vallejos and C Vear},
url = {https://www.nottingham.ac.uk/humanities/documents/philosophy/ai-ecosystems/workshop-report-2-cat-royale.pdf},
year = {2024},
date = {2024-03-25},
urldate = {2024-03-25},
keywords = {},
pubstate = {published},
tppubtype = {workshop}
}
Gooding, P; Booker, L; Bouich, A; Callaghan, S; Spooner, R; Thorpe, K
iREAL: Indigenising Requirements Elicitation for Artificial Intelligence in Libraries Presentation
27.02.2024.
@misc{nokey,
title = {iREAL: Indigenising Requirements Elicitation for Artificial Intelligence in Libraries},
author = {P Gooding and L Booker and A Bouich and S Callaghan and R Spooner and K Thorpe},
url = {https://eprints.gla.ac.uk/332213/},
year = {2024},
date = {2024-02-27},
urldate = {2024-02-27},
keywords = {},
pubstate = {published},
tppubtype = {presentation}
}
Miles, Oliver; Webb, Helena; Farina, Lydia; Giannachi, Gabriella; Benford, Steve; Moore, John; Jordan, Spencer; Stahl, Bernd; Perez-Vallejos, Elvira; Vear, Craig
CReAting a Dynamic archive of responsibLe Ecosystems in the context of creative AI (CRADLE): Workshop Report 1: JESS+ A Digiscore Workshop
2024.
@workshop{nokey,
title = {CReAting a Dynamic archive of responsibLe Ecosystems in the context of creative AI (CRADLE): Workshop Report 1: JESS+ A Digiscore},
author = {Oliver Miles and Helena Webb and Lydia Farina and Gabriella Giannachi and Steve Benford and John Moore and Spencer Jordan and Bernd Stahl and Elvira Perez-Vallejos and Craig Vear},
url = {https://www.nottingham.ac.uk/humanities/documents/philosophy/ai-ecosystems/workshop-report-1-jess-a-digiscore.pdf},
year = {2024},
date = {2024-02-14},
urldate = {2024-02-14},
keywords = {},
pubstate = {published},
tppubtype = {workshop}
}
2023
Brennan, Jenny; Groves, Lara; Jones, Elliot; Strait, Andrew
AI assurance? Assessing and mitigating risks across the AI lifecycle Miscellaneous
2023.
@misc{nokey,
title = {AI assurance? Assessing and mitigating risks across the AI lifecycle},
author = {Jenny Brennan and Lara Groves and Elliot Jones and Andrew Strait},
url = {https://www.adalovelaceinstitute.org/report/risks-ai-systems/},
year = {2023},
date = {2023-07-18},
urldate = {2023-07-18},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Jones, Elliot; Birtwistle, Michael; Reeve, Octavia
Keeping an eye on AI: Approaches to government monitoring of the AI landscape Miscellaneous
2023.
@misc{nokey,
title = {Keeping an eye on AI: Approaches to government monitoring of the AI landscape},
author = {Elliot Jones and Michael Birtwistle and Octavia Reeve},
url = {https://www.adalovelaceinstitute.org/report/keeping-an-eye-on-ai/},
year = {2023},
date = {2023-07-18},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Brown, Ian
Allocating accountability in AI supply chains: a UK-centred regulatory perspective Miscellaneous
2023, ISBN: 978-1-7392615-2-8.
@misc{nokey,
title = {Allocating accountability in AI supply chains: a UK-centred regulatory perspective},
author = {Ian Brown},
url = {https://www.adalovelaceinstitute.org/resource/ai-supply-chains/},
isbn = {978-1-7392615-2-8},
year = {2023},
date = {2023-06-29},
urldate = {2023-06-29},
abstract = {Creating an artificial intelligence (AI) system is a collaborative effort that involves many actors and sources of knowledge. Whether simple or complex, built in-house or by an external developer, AI systems often rely on complex supply chains, each involving a network of actors responsible for various aspects of the system’s training and development.
As policymakers seek to develop a regulatory framework for AI technologies, it will be crucial for them to understand how these different supply chains work, and how to assign relevant, distinct responsibilities to the appropriate actor in each supply chain. Policymakers must also recognise that not all actors in supply chains will be equally resourced, and regulation will need to take account of these realities.
Depending on the supply chain, some companies (perhaps UK small businesses) supplying services directly to customers will not have the power, access or capability to address or mitigate all risks or harms that may arise.
This paper aims to help policymakers and regulators explore the challenges and nuances of different AI supply chains, and provides a conceptual framework for how they might apply different responsibilities in the regulation of AI systems.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Jones, Bronwyn; Luger, Ewa; Jones, Rhia
Generative AI & journalism: A rapid risk-based review Miscellaneous
2023.
@misc{Jones2023,
title = {Generative AI & journalism: A rapid risk-based review},
author = {Bronwyn Jones and Ewa Luger and Rhia Jones},
url = {https://www.pure.ed.ac.uk/ws/portalfiles/portal/372212564/GenAI_Journalism_Rapid_Risk_Review_June_2023_BJ_RJ_EL.pdf
https://www.research.ed.ac.uk/en/publications/generative-ai-amp-journalism-a-rapid-risk-based-review},
year = {2023},
date = {2023-06-06},
urldate = {2023-06-06},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}