2025
Jones, B; Jones, R; Luger, E
Power Asymmetries in Public Service Journalism: Artificial Intelligence and the Intelligibility-Agency Problem Book Chapter
In: D’Arma, Alessandro; Michalis, Maria; Lowe, Gregory Ferrel; Zita, Michael-Bernhard (Ed.): Chapter 7, Challenges and Developments in Public Service Journalism, University of Westminster Press, 2025.
@inbook{jones2025power,
title = {Power Asymmetries in Public Service Journalism: Artificial Intelligence and the Intelligibility-Agency Problem},
author = {Jones, B. and Jones, R. and Luger, E.},
editor = {D’Arma, Alessandro and Michalis, Maria and Lowe, Gregory Ferrel and Zita, Michael-Bernhard},
doi = {10.16997/14610450},
year = {2025},
date = {2025-02-27},
urldate = {2025-02-27},
booktitle = {Challenges and Developments in Public Service Journalism},
publisher = {University of Westminster Press},
chapter = {7},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
Sichani, Anna-Maria; Westenberger, Paula; Bryan-Kinns, Nick; Bunz, Mercedes; Collett, Clementine; Baravi, Bahareh; Miltner, Kate; Morruzzi, Caterina; Townsend, Beverley Alice
BRAID researchers' response to UK Government copyright and AI consultation Miscellaneous
2025.
@misc{sichani2025braid,
title = {{BRAID} researchers' response to {UK} Government copyright and {AI} consultation},
author = {Anna-Maria Sichani and Paula Westenberger and Nick Bryan-Kinns and Mercedes Bunz and Clementine Collett and Bahareh Baravi and Kate Miltner and Caterina Morruzzi and Beverley Alice Townsend},
doi = {10.5281/zenodo.14945987},
year = {2025},
date = {2025-02-26},
urldate = {2025-02-26},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Wagg, David J; Burr, Christopher; Shepherd, Jason; Conti, Zack Xuereb; Enzer, Mark; Niederer, Steven
The philosophical foundations of digital twinning Journal Article
In: Data-Centric Engineering, vol. 6, 2025.
@article{wagg2025philosophical,
title = {The philosophical foundations of digital twinning},
author = {David J Wagg and Christopher Burr and Jason Shepherd and Zack Xuereb Conti and Mark Enzer and Steven Niederer},
doi = {10.1017/dce.2025.4},
year = {2025},
date = {2025-02-10},
urldate = {2025-02-10},
journal = {Data-Centric Engineering},
volume = {6},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jones, Bronwyn; Strait, Andrew; Parnell, Brid-Aine; Horzyk, Amanda Maria; Perez, Jorge
Journalism and generative AI: data, deals and disruption in the news media Workshop
2025.
@workshop{jones2025journalism,
title = {Journalism and generative {AI}: data, deals and disruption in the news media},
author = {Bronwyn Jones and Andrew Strait and Brid-Aine Parnell and Amanda Maria Horzyk and Jorge Perez},
url = {https://zenodo.org/records/14968195},
doi = {10.5281/zenodo.14968195},
year = {2025},
date = {2025-02-03},
urldate = {2025-02-03},
keywords = {},
pubstate = {published},
tppubtype = {workshop}
}
Jones, Bronwyn; Jones, Rhianne
Action research at the BBC: Interrogating artificial intelligence with journalists to generate actionable insights for the newsroom Journal Article
In: 2025.
@article{jones2025action,
title = {Action research at the {BBC}: Interrogating artificial intelligence with journalists to generate actionable insights for the newsroom},
author = {Bronwyn Jones and Rhianne Jones},
doi = {10.1177/14648849251317150},
year = {2025},
date = {2025-01-29},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bennett, S J; Catanzariti, Benedetta; Tollon, Fabio
“Everybody knows what a pothole is”: representations of work and intelligence in AI practice and governance Journal Article
In: AI & Society, 2025.
@article{bennett2025pothole,
title = {“Everybody knows what a pothole is”: representations of work and intelligence in {AI} practice and governance},
author = {S J Bennett and Benedetta Catanzariti and Fabio Tollon},
url = {https://link.springer.com/article/10.1007/s00146-024-02162-0#Abs1},
doi = {10.1007/s00146-024-02162-0},
year = {2025},
date = {2025-01-27},
journal = {AI \& Society},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Glover, Jonny; Hood, Beverley
BRAID AI and the Arts: Who's Responsible (Curatorial event) - Visual Minutes Miscellaneous
2025.
@misc{glover2025curatorialminutes,
title = {{BRAID} {AI} and the Arts: Who's Responsible (Curatorial event) - Visual Minutes},
author = {Jonny Glover and Beverley Hood},
doi = {10.5281/zenodo.14710565},
year = {2025},
date = {2025-01-21},
urldate = {2024-01-19},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Hood, Beverley
BRAID AI and the Arts: Who's Responsible (Artists' event) – Consensus Statement Miscellaneous
2025.
@misc{hood2025artistsstatement,
title = {{BRAID} {AI} and the Arts: Who's Responsible (Artists' event) – Consensus Statement},
author = {Beverley Hood},
doi = {10.5281/zenodo.14711008},
year = {2025},
date = {2025-01-21},
urldate = {2024-01-18},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Glover, Jonny; Hood, Beverley
BRAID AI and the Arts: Who's Responsible (Artists' event) - Visual Minutes Miscellaneous
2025.
@misc{glover2025artistsminutes,
title = {{BRAID} {AI} and the Arts: Who's Responsible (Artists' event) - Visual Minutes},
author = {Jonny Glover and Beverley Hood},
doi = {10.5281/zenodo.14710975},
year = {2025},
date = {2025-01-21},
urldate = {2024-01-18},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Hood, Beverley
BRAID AI and the Arts: Who's Responsible (Curatorial event) – Consensus Statement Miscellaneous
2025.
@misc{hood2025curatorialstatement,
title = {{BRAID} {AI} and the Arts: Who's Responsible (Curatorial event) – Consensus Statement},
author = {Beverley Hood},
doi = {10.5281/zenodo.14710921},
year = {2025},
date = {2025-01-21},
urldate = {2024-01-19},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Ruszev, S; Rogers, L; Gurgun, S; Stockton-Brown, M; Gee, M; Davis, T; Yang, X; Griffiths, C; Zhang, K; Cheng, B; Slaymaker, J; Prajitna, S
Shared-posthuman imagination: Human-AI collaboration in media creation Technical Report
2025.
@techreport{ruszev2025posthuman,
title = {Shared-posthuman imagination: Human-{AI} collaboration in media creation},
author = {S Ruszev and L Rogers and S Gurgun and M Stockton-Brown and M Gee and T Davis and X Yang and C Griffiths and K Zhang and B Cheng and J Slaymaker and S Prajitna},
url = {https://eprints.bournemouth.ac.uk/40681/},
doi = {10.18746/epxn-da67},
year = {2025},
date = {2025-01-17},
urldate = {2025-01-17},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Newman-Griffis, Denis
AI Thinking: a framework for rethinking artificial intelligence in practice Journal Article
In: Royal Society Open Science, 2025.
@article{newmangriffis2025aithinking,
title = {{AI} Thinking: a framework for rethinking artificial intelligence in practice},
author = {Denis Newman-Griffis},
doi = {10.1098/rsos.241482},
year = {2025},
date = {2025-01-08},
urldate = {2025-01-08},
journal = {Royal Society Open Science},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2024
Arnold, Ken; Bandelli, Andrea; Ellis, Rachel; Henderson, Jane; Henning, Michelle; Mellor, Rebecca; Moussouri, Theano; Nanoru, Michal; Redler-Hawes, Hannah; Snelson, Tim
Ten out of ten: a review of the last decade Journal Article
In: Science Museum Group Journal, 2024.
@article{arnold2024decade,
title = {Ten out of ten: a review of the last decade},
author = {Ken Arnold and Andrea Bandelli and Rachel Ellis and Jane Henderson and Michelle Henning and Rebecca Mellor and Theano Moussouri and Michal Nanoru and Hannah Redler-Hawes and Tim Snelson},
url = {https://journal.sciencemuseum.ac.uk/article/ten-out-of-ten-a-review-of-the-last-decade/},
doi = {10.15180/242202},
year = {2024},
date = {2024-12-17},
urldate = {2024-12-17},
journal = {Science Museum Group Journal},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tollon, F; Hattiangadi, A
‘Agency’, in The Philosophical Glossary of AI Miscellaneous
2024.
@misc{tollon2024agency,
title = {‘Agency’, in The Philosophical Glossary of {AI}},
author = {F Tollon and A Hattiangadi},
editor = {Alex Grzankowski and Benjamin Henke},
url = {https://www.aiglossary.co.uk/index/agency},
year = {2024},
date = {2024-12-05},
urldate = {2024-12-05},
howpublished = {The Philosophical Glossary of AI},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Steyn, B; Tollon, F
'Moral Responsibility', in The Philosophical Glossary of AI Miscellaneous
2024.
@misc{steyn2024moralresponsibility,
title = {'Moral Responsibility', in The Philosophical Glossary of {AI}},
author = {B Steyn and F Tollon},
editor = {Alex Grzankowski and Benjamin Henke},
url = {https://www.aiglossary.co.uk/index/moral-responsibility},
year = {2024},
date = {2024-12-05},
urldate = {2024-12-05},
howpublished = {The Philosophical Glossary of AI},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Burr, C; Anderson, J; Aran, S; Block, D; Byrne, J; Goodman, R; Gavidia-Calderon, C; Habli, I; Moreira, N; Niederer, S; Polo, N; Wagg, D
Trustworthy and Ethical Assurance of Digital Twins: Putting the Gemini Principles into Practice Technical Report
2024.
@techreport{burr2024assurance,
title = {Trustworthy and Ethical Assurance of Digital Twins: Putting the {Gemini} Principles into Practice},
author = {C Burr and J Anderson and S Aran and D Block and J Byrne and R Goodman and C Gavidia-Calderon and I Habli and N Moreira and S Niederer and N Polo and D Wagg},
doi = {10.5281/zenodo.14216050},
year = {2024},
date = {2024-12-02},
urldate = {2024-12-02},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Jones, Bronwyn; Galanos, Vassilis
Generative AI and Journalism: Mapping the Risk Landscape Technical Report
2024.
@techreport{jones2024risklandscape,
title = {Generative {AI} and Journalism: Mapping the Risk Landscape},
author = {Bronwyn Jones and Vassilis Galanos},
doi = {10.5281/zenodo.14968183},
year = {2024},
date = {2024-12-01},
urldate = {2024-12-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Farina, L; Webb, H; Giannachi, G; Benford, S; Moore, J; Stahl, B; Vallejos, E Perez; Jordan, S; Vear, C; Drury, R. F. M; Miles, O; Brundell, P
CReAting a Dynamic archive of responsibLe AI Ecosystems in the context of Creative AI (CRADLE): Structuring a Dynamic Archive of AI ecosystems in the context of the creative industries Technical Report
2024.
@techreport{farina2024cradle,
title = {{CReAting} a Dynamic archive of {responsibLe} {AI} Ecosystems in the context of Creative {AI} ({CRADLE}): Structuring a Dynamic Archive of {AI} ecosystems in the context of the creative industries},
author = {L Farina and H Webb and G Giannachi and S Benford and J Moore and B Stahl and E Perez Vallejos and S Jordan and C Vear and R.F.M Drury and O Miles and P Brundell},
url = {https://www.nottingham.ac.uk/humanities/documents/philosophy/ai-ecosystems/structuring-a-dynamic-archive-of-ai-ecosystems-in-the-context-of-the-creative-industries.pdf},
year = {2024},
date = {2024-11-01},
urldate = {2024-11-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Smakman, Julia; Davies, Matt
New rules? Lessons for AI regulation from the governance of other high-tech sectors Miscellaneous
2024, ISBN: 978-1-7395236-8-8.
@misc{smakman2024newrules,
title = {New rules? Lessons for {AI} regulation from the governance of other high-tech sectors},
author = {Julia Smakman and Matt Davies},
url = {https://www.adalovelaceinstitute.org/report/new-rules-ai-regulation/},
isbn = {978-1-7395236-8-8},
year = {2024},
date = {2024-10-31},
urldate = {2024-10-31},
abstract = {The UK has seen a surge in the adoption of AI technologies across the private and public sectors in recent years, including in domains like education, healthcare and criminal justice. As AI is increasingly used to make highly consequential decisions about people’s lives, governments around the globe have started to propose and pass legislation to regulate these technologies.
While regulating AI brings its own challenges, it is not the first time that policymakers have grappled with governing highly complex technologies that play a central role in society and the economy. However, many of the current policy debates around AI typically seek to start from first principles rather than drawing on the lessons from previous attempts to regulate other domains.
This report looks at the regulatory structures, approaches and objectives of three other UK regulatory regimes that are commonly compared with AI in policy discussions:
Pharmaceuticals for human use
Financial services (with a focus on consumer protection and financial stability)
Climate change mitigation (specifically the carbon emissions regime established by the Climate Change Act 2008)},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
While regulating AI brings its own challenges, it is not the first time that policymakers have grappled with governing highly complex technologies that play a central role in society and the economy. However, many of the current policy debates around AI typically seek to start from first principles rather than drawing on the lessons from previous attempts to regulate other domains.
This report looks at the regulatory structures, approaches and objectives of three other UK regulatory regimes that are commonly compared with AI in policy discussions:
Pharmaceuticals for human use
Financial services (with a focus on consumer protection and financial stability)
Climate change mitigation (specifically the carbon emissions regime established by the Climate Change Act 2008)
What's at stake? Young people's take on AI and education Booklet
2024.
@booklet{whatsatstake2024,
title = {What's at stake? Young people's take on {AI} and education},
url = {https://ai-and-education.shorthandstories.com/zine/index.html},
year = {2024},
date = {2024-10-31},
urldate = {2024-10-31},
month = oct,
keywords = {},
pubstate = {published},
tppubtype = {booklet}
}
Manzini, A; Keeling, G; Alberts, L; Vallor, S; Morris, M. R.; Gabriel, I
The Code That Binds Us: Navigating the Appropriateness of Human-AI Assistant Relationships Proceedings Article
In: Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society, pp. 943-957, 2024.
@inproceedings{manzini2024code,
title = {The Code That Binds Us: Navigating the Appropriateness of Human-{AI} Assistant Relationships},
author = {A Manzini and G Keeling and L Alberts and S Vallor and M.R. Morris and I Gabriel},
url = {https://doi.org/10.1609/aies.v7i1.31694},
doi = {10.1609/aies.v7i1.31694},
year = {2024},
date = {2024-10-16},
urldate = {2024-10-16},
booktitle = {Proceedings of the {AAAI/ACM} Conference on {AI}, Ethics, and Society},
volume = {7},
number = {1},
pages = {943--957},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Oswald, Marion; Paterson-Young, Claire; McBride, Pauline; Maher, Michael; Calder, Muffy; Gill, Gitanjali; Tiarks, Elizabeth; Noble, William
Ethical review to support Responsible Artificial Intelligence (AI) in policing: A preliminary study of West Midlands Police's specialist data ethics review committee Miscellaneous
2024.
@misc{oswald2024ethicalreview,
title = {Ethical review to support Responsible Artificial Intelligence ({AI}) in policing: A preliminary study of {West Midlands Police's} specialist data ethics review committee},
author = {Marion Oswald and Claire Paterson-Young and Pauline McBride and Michael Maher and Muffy Calder and Gitanjali Gill and Elizabeth Tiarks and William Noble},
url = {https://researchportal.northumbria.ac.uk/en/publications/ethical-review-to-support-responsible-artificial-intelligence-ai-},
year = {2024},
date = {2024-09-11},
urldate = {2024-09-11},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Gregory, Karen; Gallagher, Cailean
Mitigating Harms in On-Demand Delivery Platforms: AI Regulations, Data Protection, and Workers' Tools Miscellaneous
2024.
@misc{Gregory_Gallagher2024,
title = {Mitigating Harms in On-Demand Delivery Platforms: {AI} Regulations, Data Protection, and Workers' Tools},
author = {Karen Gregory and Cailean Gallagher},
url = {https://zenodo.org/records/13144353},
doi = {10.5281/zenodo.13144353},
year = {2024},
date = {2024-07-31},
urldate = {2024-07-31},
abstract = {The regulation of employment and artificial intelligence (AI) in the platform economy matters at both an individual and social level. The Royal Society for the encouragement of Arts, Manufactures and Commerce (RSA) estimates that there are 1.1 million people in Britain’s gig economy.22 These workers increasingly operate in the growing e-commerce environment and with “rapid” (or “quick”) commerce infrastructure.23 The Covid-19 pandemic exacerbated both participation in the gig economy and the growth of rapid commerce.24 The way we work and consume are changing, and innovations in AI, machine learning, and algorithmic management are central to both economies. However, on-demand delivery work can be dangerous work. On-demand platforms are marked by “exploitative practices, which have become widespread and institutionalised”.25 Intervening in these work-related risks and exploitative practices requires a concerted effort to bridge the gap between employment rights and the proposed regulation of AI in the UK, as well as strong data protection rights.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Zerelli, John; Goñi, Iñaki; Placci, Matilde Masetti
Automation Bias and Procedural Fairness: A short guide for the public sector Miscellaneous
2024.
@misc{Zerelli_et_al2024,
title = {Automation Bias and Procedural Fairness: A short guide for the public sector},
author = {John Zerelli and Iñaki Goñi and Matilde Masetti Placci},
url = {https://zenodo.org/records/13132781},
doi = {10.5281/zenodo.13132781},
year = {2024},
date = {2024-07-30},
urldate = {2024-07-30},
abstract = {The use of advanced artificial intelligence (AI) and data-driven automation in the public sector poses several organisational, practical, and ethical challenges. One that is easy to underestimate is automation bias, which, in turn, has underappreciated legal consequences. Automation bias is an attitude in which the operator of an autonomous system will defer to its outputs to the point where they overlook or ignore evidence that the system is failing. The legal problem arises when statutory office-holders (or their employees) either fetter their discretion to in-house algorithms or improperly delegate their discretion to third-party software developers – something automation bias may facilitate. A synthesis of previous research suggests an easy way to mitigate the risks of automation bias and its potential legal ramifications is for those responsible for procurement decisions to adhere to a simple checklist that ensures that the pitfalls of automation are avoided as much as possible.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Kasirzadeh, Atoosa; Bird, Charlotte; Ungless, Eddie
Policy Report on Generative Artificial Intelligence Miscellaneous
2024.
@misc{Kasirzadeh_et_al2024,
title = {Policy Report on Generative Artificial Intelligence},
author = {Atoosa Kasirzadeh and Charlotte Bird and Eddie Ungless},
url = {https://zenodo.org/records/13124532},
doi = {10.5281/zenodo.13124532},
year = {2024},
date = {2024-07-30},
urldate = {2024-07-30},
abstract = {Our study is based on a comprehensive literature review of text-to-image generative models, identifying four high-priority risks associated with generative artificial intelligence (AI):
1. At-scale production of discriminatory content.
2. At-scale toxic and harmful (mis)use.
3. Rapid and cheap production of misinformation and disinformation.
4. Privacy and copyright infringement.
Recognising the importance of a well-informed and holistic approach to AI development and regulation, we show how the UK’s pro-innovation framework for AI regulation can be adapted to regulate generative AI models and offset the aforementioned risks.
We propose that the UK’s financial support for generative AI model development aligns with the regulatory recommendations outlined in this report. Specifically, we recommend that a portion of this investment should be allocated to the implementation of socio-technical safeguards that mitigate the high-priority risks.
We argue that establishing strong connections among academic, policy, and regulatory institutions is essential for effective knowledge sharing and application. This ensures that the integrity of all knowledge forms is maintained, contributing to a well-rounded and informed strategy for generative AI development and regulation.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
1. At-scale production of discriminatory content.
2. At-scale toxic and harmful (mis)use.
3. Rapid and cheap production of misinformation and disinformation.
4. Privacy and copyright infringement.
Recognising the importance of a well-informed and holistic approach to AI development and regulation, we show how the UK’s pro-innovation framework for AI regulation can be adapted to regulate generative AI models and offset the aforementioned risks.
We propose that the UK’s financial support for generative AI model development aligns with the regulatory recommendations outlined in this report. Specifically, we recommend that a portion of this investment should be allocated to the implementation of socio-technical safeguards that mitigate the high-priority risks.
We argue that establishing strong connections among academic, policy, and regulatory institutions is essential for effective knowledge sharing and application. This ensures that the integrity of all knowledge forms is maintained, contributing to a well-rounded and informed strategy for generative AI development and regulation.