2024
Gregory, Karen; Gallagher, Cailean
Mitigating Harms in On-Demand Delivery Platforms: AI Regulations, Data Protection, and Workers' Tools Miscellaneous
2024.
Abstract | Links | Tags: Report
@misc{Gregory_Gallagher2024,
title = {Mitigating Harms in On-Demand Delivery Platforms: AI Regulations, Data Protection, and Workers' Tools},
author = {Karen Gregory and Cailean Gallagher},
url = {https://zenodo.org/records/13144353},
doi = {https://doi.org/10.5281/zenodo.13144353},
year = {2024},
date = {2024-07-31},
urldate = {2024-07-31},
abstract = {The regulation of employment and artificial intelligence (AI) in the platform economy matters at both an individual and social level. The Royal Society for the encouragement of Arts, Manufactures and Commerce (RSA) estimates that there are 1.1 million people in Britain’s gig economy. These workers increasingly operate in the growing e-commerce environment and with “rapid” (or “quick”) commerce infrastructure. The Covid-19 pandemic accelerated both participation in the gig economy and the growth of rapid commerce. The ways we work and consume are changing, and innovations in AI, machine learning, and algorithmic management are central to both economies. However, on-demand delivery work can be dangerous work. On-demand platforms are marked by “exploitative practices, which have become widespread and institutionalised”. Intervening in these work-related risks and exploitative practices requires a concerted effort to bridge the gap between employment rights and the proposed regulation of AI in the UK, as well as strong data protection rights.},
keywords = {Report},
pubstate = {published},
tppubtype = {misc}
}
Zerilli, John; Goñi, Iñaki; Placci, Matilde Masetti
Automation Bias and Procedural Fairness: A short guide for the public sector Miscellaneous
2024.
Abstract | Links | Tags: Report
@misc{Zerelli_et_al2024,
title = {Automation Bias and Procedural Fairness: A short guide for the public sector},
author = {John Zerilli and Iñaki Goñi and Matilde Masetti Placci},
url = {https://zenodo.org/records/13132781},
doi = {https://doi.org/10.5281/zenodo.13132781},
year = {2024},
date = {2024-07-30},
urldate = {2024-07-30},
abstract = {The use of advanced artificial intelligence (AI) and data-driven automation in the public sector poses several organisational, practical, and ethical challenges. One that is easy to underestimate is automation bias, which, in turn, has underappreciated legal consequences. Automation bias is an attitude in which the operator of an autonomous system defers to its outputs to the point of overlooking or ignoring evidence that the system is failing. The legal problem arises when statutory office-holders (or their employees) either fetter their discretion to in-house algorithms or improperly delegate their discretion to third-party software developers – something automation bias may facilitate. A synthesis of previous research suggests that an easy way to mitigate the risks of automation bias and its potential legal ramifications is for those responsible for procurement decisions to adhere to a simple checklist that ensures that the pitfalls of automation are avoided as much as possible.},
keywords = {Report},
pubstate = {published},
tppubtype = {misc}
}
Kasirzadeh, Atoosa; Bird, Charlotte; Ungless, Eddie
Policy Report on Generative Artificial Intelligence Miscellaneous
2024.
Abstract | Links | Tags: Report
@misc{Kasirzadeh_et_al2024,
title = {Policy Report on Generative Artificial Intelligence},
author = {Atoosa Kasirzadeh and Charlotte Bird and Eddie Ungless},
url = {https://zenodo.org/records/13124532},
year = {2024},
date = {2024-07-30},
urldate = {2024-07-30},
abstract = {Our study is based on a comprehensive literature review of text-to-image generative models, identifying four high-priority risks associated with generative artificial intelligence (AI):
1. At-scale production of discriminatory content.
2. At-scale toxic and harmful (mis)use.
3. Rapid and cheap production of misinformation and disinformation.
4. Privacy and copyright infringement.
Recognising the importance of a well-informed and holistic approach to AI development and regulation, we show how the UK’s pro-innovation framework for AI regulation can be adapted to regulate generative AI models and offset the aforementioned risks.
We propose that the UK’s financial support for generative AI model development aligns with the regulatory recommendations outlined in this report. Specifically, we recommend that a portion of this investment should be allocated to the implementation of socio-technical safeguards that mitigate the high-priority risks.
We argue that establishing strong connections among academic, policy, and regulatory institutions is essential for effective knowledge sharing and application. This ensures that the integrity of all knowledge forms is maintained, contributing to a well-rounded and informed strategy for generative AI development and regulation.},
keywords = {Report},
pubstate = {published},
tppubtype = {misc}
}
Catanzariti, Benedetta
Investigating the Impact of Facial Expression Recognition in Healthcare Miscellaneous
2024.
Abstract | Links | Tags: Report
@misc{Catanzariti2024,
title = {Investigating the Impact of Facial Expression Recognition in Healthcare},
author = {Benedetta Catanzariti},
url = {https://zenodo.org/records/13132860},
doi = {https://doi.org/10.5281/zenodo.13132860},
year = {2024},
date = {2024-07-30},
urldate = {2024-07-30},
abstract = {Facial expression recognition (FER) is a technology designed to track and classify facial expressive behaviours and produce some form of meaningful knowledge about people based on those behaviours. This report investigates the impact of FER systems in healthcare contexts. Here, these tools are designed to detect and assess mental health conditions (depression and anxiety), neurodevelopmental disorders (autism, bipolar disorders, and attention deficit hyperactivity disorder or ADHD), and assess pain levels on the basis of facial expressions. Many have pointed to the limitations of these systems, as well as the privacy risks associated with their use in high-stakes contexts. This report highlights the broader societal impact of these technologies and offers recommendations to identify and address harm.},
keywords = {Report},
pubstate = {published},
tppubtype = {misc}
}
Ganesh, Bhargavi
Policy Approaches for Building a Responsible Ecosystem: Contextualising AI Governance Challenges Within Other Regulatory/Governance Sectors and Histories Miscellaneous
2024.
Abstract | Links | Tags: Report
@misc{Ganesh2024,
title = {Policy Approaches for Building a Responsible Ecosystem: Contextualising AI Governance Challenges Within Other Regulatory/Governance Sectors and Histories},
author = {Bhargavi Ganesh},
url = {https://zenodo.org/records/13132896},
doi = {https://doi.org/10.5281/zenodo.13132896},
year = {2024},
date = {2024-07-30},
urldate = {2024-07-30},
abstract = {This report explains the historical role that governments have assumed in creating a more responsible ecosystem around new technologies and in protecting the safety and fundamental rights of citizens. This role is particularly crucial in the context of AI, given how ubiquitously it is used and how it is now almost impossible to opt out of its impacts. Analogies to past governance challenges, such as regulations targeting smog/air pollution, can help policymakers understand their role in the presence of externalised risks. Additionally, examining past governance responses to harms and risks associated with new technologies enables policymakers to build AI governance mechanisms based on existing governance tools, rather than approaching AI governance as an unprecedented and daunting task. Moreover, governance remains an iterative exercise, in which initial governance efforts are typically crucial in enabling governments to revise and improve subsequent efforts.
Governance responses to AI are important because the failure to meet the concerns of affected parties risks creating responsibility gaps, or outcomes for which society bears the costs but no-one ultimately faces the consequences. It also inhibits the relational practice of responsibility, in which affected parties can consistently feed into the process of improving AI systems. The ability to have concerns heard and responded to is the hallmark of any working democracy. Persistent responsibility gaps threaten social trust and solidarity, by diminishing the trust that individuals and communities have both in institutions and each other. Although the concept of trust is more often used to describe the loss of trust by entities looking to use or adopt AI, the impacts of losing social trust and solidarity present even greater challenges, by threatening broader social well-being and political stability. },
keywords = {Report},
pubstate = {published},
tppubtype = {misc}
}
Simkute, Auste
Explainability in Expert Contexts: Challenges and Limitations in Supporting Domain Experts in AI-driven Decision-making Miscellaneous
2024.
Abstract | Links | Tags: Report
@misc{Simkute2024,
title = {Explainability in Expert Contexts: Challenges and Limitations in Supporting Domain Experts in AI-driven Decision-making},
author = {Auste Simkute},
url = {https://zenodo.org/records/13132939},
doi = {https://doi.org/10.5281/zenodo.13132939},
year = {2024},
date = {2024-07-30},
urldate = {2024-07-30},
abstract = {The public sector increasingly relies on artificial intelligence (AI) to inform decision making across various domains, including policing, healthcare, social work, and immigration services. AI decision support systems (DSSs) can process large amounts of data (1) and generate outputs, such as predictions of medical diagnoses (2) or potential outcomes of a visa application (3). AI support could make processes within the public sector not only more efficient but also fairer by reducing the potential for human biases (4, 5).
However, AI-driven systems lack contextual sensitivity and cannot account for unique cases. They can also be trained on biased or incomplete data. Given that most of the decisions are highly sensitive, it is crucial that domain experts (e.g. social workers) maintain agency when making AI-supported decisions. Ideally, AI would automate mundane, repetitive tasks and allow experts to focus on higher-level and creative ones (6). Unfortunately, domain experts often cannot understand and evaluate whether they should trust AI systems and their generated outputs (7).
This report provides a broad overview of challenges faced when DSSs inform decision-making. It explores critical blockages for effective expert–AI collaborations and discusses potential solutions. It also considers the role of explainability in supporting experts and outlines recommendations for how explanations could be made more effective and usable in each expert context.},
keywords = {Report},
pubstate = {published},
tppubtype = {misc}
}
Jones, Elliot; Hardalupas, Mahi; Agnew, William
Under the radar? Examining the evaluation of foundation models Miscellaneous
2024.
@misc{nokey,
title = {Under the radar? Examining the evaluation of foundation models},
author = {Elliot Jones and Mahi Hardalupas and William Agnew},
url = {https://www.adalovelaceinstitute.org/report/under-the-radar/},
year = {2024},
date = {2024-07-25},
urldate = {2024-07-25},
abstract = {Global policy proposals for ensuring the safety of advanced artificial intelligence (AI) systems have centred on foundation model evaluations as an important method to identify and mitigate the risks these systems pose. The core goals of foundation model evaluations are to understand the foundation model and/or its impacts, including the model’s capabilities, risks, performance, behaviour and social impact.
Policymakers are seeking to use evaluations to provide clarity on appropriate and responsible uses of foundation models. They are incorporating evaluations into emerging regulatory proposals in the EU, UK and USA, and creating both voluntary and legally mandated requirements for developers to evaluate AI systems for different kinds of risks.
The EU’s newly passed AI Act requires developers of foundation models and general-purpose AI models to evaluate these systems for ‘systemic risks’. The Act has established an AI Office, which also has a mandate to evaluate general purpose AI models.
In the USA and UK, governments have secured voluntary commitments from major AI companies to allow external evaluations of their foundation models by newly established national AI safety institutes. France, Canada, Japan and Singapore have their own AI safety institutes with similar mandates to develop and run evaluations of foundation models.
Both governments and technology companies have described evaluations as a necessary component of effective foundation model governance. Many foundation model developers have hired dedicated evaluation teams to construct evaluations and test their models, and there is also a growing third-party evaluation industry in which contracted third parties construct evaluations and test models on behalf of a developer.
However, our research indicates that evaluations alone are not sufficient for determining the safety of foundation models, the systems built from them and their applications for people and society in real-world conditions. There is no agreed terminology or set of methods for evaluating foundation models, and evaluations need to be used alongside other tools including codes of practice, incident reporting and post-market monitoring. In practice, AI model evaluations are currently voluntary and subject to company discretion, leading to inconsistencies in quality and limited access for evaluators without pre-existing company relationships. Current policy proposals allow companies to selectively choose what evaluations to conduct, and fail to ensure evaluation results lead to meaningful action that prevents unsafe products from entering the market.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Tollon, Fabio
Technology and the Situationist Challenge to Virtue Ethics Journal Article
In: Science and Engineering Ethics, vol. 30, iss. 10, 2024.
Abstract | Links | Tags: Journal Article
@article{Tollon2024,
title = {Technology and the Situationist Challenge to Virtue Ethics},
author = {Fabio Tollon},
url = {https://link.springer.com/article/10.1007/s11948-024-00474-4},
doi = {https://doi.org/10.1007/s11948-024-00474-4},
year = {2024},
date = {2024-03-27},
urldate = {2024-03-27},
journal = {Science and Engineering Ethics},
volume = {30},
issue = {10},
abstract = {In this paper, I introduce a “promises and perils” framework for understanding the “soft” impacts of emerging technology, and argue for a eudaimonic conception of well-being. This eudaimonic conception of well-being, however, presupposes that we have something like stable character traits. I therefore defend this view from the “situationist challenge” and show that instead of viewing this challenge as a threat to well-being, we can incorporate it into how we think about living well with technology. Human beings are susceptible to situational influences and are often unaware of the ways that their social and technological environment influence not only their ability to do well, but even their ability to know whether they are doing well. Any theory that attempts to describe what it means for us to be doing well, then, needs to take these contextual features into account and bake them into a theory of human flourishing. By paying careful attention to these contextual factors, we can design systems that promote human flourishing.},
keywords = {Journal Article},
pubstate = {published},
tppubtype = {article}
}
2023
Brennan, Jenny; Groves, Lara; Jones, Elliot; Strait, Andrew
AI assurance? Assessing and mitigating risks across the AI lifecycle Miscellaneous
2023.
@misc{nokey,
title = {AI assurance? Assessing and mitigating risks across the AI lifecycle},
author = {Jenny Brennan and Lara Groves and Elliot Jones and Andrew Strait},
url = {https://www.adalovelaceinstitute.org/report/risks-ai-systems/},
year = {2023},
date = {2023-07-18},
urldate = {2023-07-18},
keywords = {Report},
pubstate = {published},
tppubtype = {misc}
}
Jones, Elliot; Birtwistle, Michael; Reeve, Octavia
Keeping an eye on AI: Approaches to government monitoring of the AI landscape Miscellaneous
2023.
@misc{nokey,
title = {Keeping an eye on AI: Approaches to government monitoring of the AI landscape},
author = {Elliot Jones and Michael Birtwistle and Octavia Reeve},
url = {https://www.adalovelaceinstitute.org/report/keeping-an-eye-on-ai/},
year = {2023},
date = {2023-07-18},
keywords = {Report},
pubstate = {published},
tppubtype = {misc}
}
Jones, Bronwyn; Luger, Ewa; Jones, Rhia
Generative AI & journalism: A rapid risk-based review Miscellaneous
2023.
@misc{Jones2023,
title = {Generative AI & journalism: A rapid risk-based review},
author = {Bronwyn Jones and Ewa Luger and Rhia Jones},
url = {https://www.pure.ed.ac.uk/ws/portalfiles/portal/372212564/GenAI_Journalism_Rapid_Risk_Review_June_2023_BJ_RJ_EL.pdf
https://www.research.ed.ac.uk/en/publications/generative-ai-amp-journalism-a-rapid-risk-based-review},
year = {2023},
date = {2023-06-06},
urldate = {2023-06-06},
keywords = {Review},
pubstate = {published},
tppubtype = {misc}
}