author    Jan Aalmoes <jan.aalmoes@inria.fr>  2024-08-16 16:05:32 +0200
committer Jan Aalmoes <jan.aalmoes@inria.fr>  2024-08-16 16:05:32 +0200
commit    506c5f8857957aaf23e43c60ef25ae0596463a0f (patch)
tree      cc6ff0826b5728108ca32da27c0f496d6b993e73 /biblio.bib
parent    d9b9d68dc038479a85a2ba869957ca2ec5c87bf8 (diff)
Bias bibliography. WHO report
Diffstat (limited to 'biblio.bib')
-rw-r--r--  biblio.bib  67
1 file changed, 67 insertions, 0 deletions
diff --git a/biblio.bib b/biblio.bib
index b8e60a8..6e5b65c 100644
--- a/biblio.bib
+++ b/biblio.bib
@@ -47,6 +47,9 @@ abstract = {Statistical algorithms can outperform human predictions of recidivis
publisher={MDPI}
}
+
+####################################
+#Medicine
@article{gulshan2016development,
title={Development and validation of a deep learning algorithm for detection of diabetic retinopathy in retinal fundus photographs},
author={Gulshan, Varun and Peng, Lily and Coram, Marc and Stumpe, Martin C and Wu, Derek and Narayanaswamy, Arunachalam and Venugopalan, Subhashini and Widner, Kasumi and Madams, Tom and Cuadros, Jorge and others},
@@ -68,6 +71,9 @@ abstract = {Statistical algorithms can outperform human predictions of recidivis
publisher={Elsevier}
}
+
+##################################
+#Recruitment
@misc{fortune500,
title={Fortune 500},
howpublished={\url{https://fortune.com/ranking/global500/}},
@@ -103,6 +109,60 @@ abstract = {Statistical algorithms can outperform human predictions of recidivis
primaryClass={cs.AI},
url={https://arxiv.org/abs/2009.01534},
}
+@article{Hardt2016equality,
+  author     = {Moritz Hardt and Eric Price and Nathan Srebro},
+  title      = {Equality of Opportunity in Supervised Learning},
+  journal    = {CoRR},
+  volume     = {abs/1610.02413},
+  year       = {2016},
+  url        = {http://arxiv.org/abs/1610.02413},
+  eprinttype = {arXiv},
+  eprint     = {1610.02413}
+}
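+
+# For quick reference, the paper's central definitions, restated here in
+# LaTeX (the notation \hat{Y}, A, Y follows the paper): a predictor
+# satisfies equalized odds if
+#   P(\hat{Y}=1 \mid A=0, Y=y) = P(\hat{Y}=1 \mid A=1, Y=y), \quad y \in \{0,1\},
+# and equality of opportunity if the condition is required only for y = 1,
+# i.e. equal true positive rates across groups.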
+
+@misc{Dwork2011fairness,
+  author    = {Dwork, Cynthia and Hardt, Moritz and Pitassi, Toniann and Reingold, Omer and Zemel, Rich},
+  title     = {Fairness Through Awareness},
+  year      = {2011},
+  publisher = {arXiv},
+  doi       = {10.48550/ARXIV.1104.3913},
+  url       = {https://arxiv.org/abs/1104.3913},
+  keywords  = {Computational Complexity (cs.CC), Computers and Society (cs.CY), FOS: Computer and information sciences},
+  copyright = {arXiv.org perpetual, non-exclusive license}
+}
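+
+# For quick reference, the paper's core condition, restated here: a
+# randomized classifier M is fair with respect to a task-specific
+# similarity metric d on individuals if it satisfies the Lipschitz
+# condition
+#   D(M(x), M(y)) \le d(x, y) \quad \text{for all individuals } x, y,
+# where D is a distance between the output distributions M(x) and M(y).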
+
+@inproceedings{10.1145/3278721.3278779,
+  author    = {Zhang, Brian Hu and Lemoine, Blake and Mitchell, Margaret},
+  title     = {Mitigating Unwanted Biases with Adversarial Learning},
+  year      = {2018},
+  isbn      = {9781450360128},
+  publisher = {Association for Computing Machinery},
+  address   = {New York, NY, USA},
+  url       = {https://doi.org/10.1145/3278721.3278779},
+  doi       = {10.1145/3278721.3278779},
+  abstract  = {Machine learning is a tool for building models that accurately represent input training data. When undesired biases concerning demographic groups are in the training data, well-trained models will reflect those biases. We present a framework for mitigating such biases by including a variable for the group of interest and simultaneously learning a predictor and an adversary. The input to the network X, here text or census data, produces a prediction Y, such as an analogy completion or income bracket, while the adversary tries to model a protected variable Z, here gender or zip code. The objective is to maximize the predictor's ability to predict Y while minimizing the adversary's ability to predict Z. Applied to analogy completion, this method results in accurate predictions that exhibit less evidence of stereotyping Z. When applied to a classification task using the UCI Adult (Census) Dataset, it results in a predictive model that does not lose much accuracy while achieving very close to equality of odds (Hardt, et al., 2016). The method is flexible and applicable to multiple definitions of fairness as well as a wide range of gradient-based learning models, including both regression and classification tasks.},
+  booktitle = {Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society},
+  pages     = {335--340},
+  numpages  = {6},
+  keywords  = {multi-task learning, debiasing, adversarial learning, unbiasing},
+  location  = {New Orleans, LA, USA},
+  series    = {AIES '18}
+}
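+
+# The abstract's min-max objective, restated here in simplified form
+# (this omits the paper's gradient-projection term, and \lambda is a
+# trade-off weight not named in the abstract): the adversary, which reads
+# the predictor's output and emits \hat{z}, minimizes L_A(\hat{z}, z),
+# while the predictor minimizes
+#   L_P(\hat{y}, y) - \lambda \, L_A(\hat{z}, z),
+# improving its prediction of y while degrading the adversary's recovery
+# of the protected variable z.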
+
@@ -433,3 +493,10 @@ abstract = {This paper explores the use of metaphorical personification (anthrop
publisher={Cambridge University Press}
}
+@misc{oms,
+  title        = {Report of the World Health Organization},
+  howpublished = {\url{https://www.who.int/fr/news/item/28-06-2021-who-issues-first-global-report-on-ai-in-health-and-six-guiding-principles-for-its-design-and-use}},
+  author       = {OMS},
+  year         = {2021}
+}
+