summaryrefslogtreecommitdiff
path: root/ACSAC/proofs
diff options
context:
space:
mode:
authorJan Aalmoes <jan.aalmoes@inria.fr>2024-09-13 00:07:42 +0200
committerJan Aalmoes <jan.aalmoes@inria.fr>2024-09-13 00:07:42 +0200
commitfaa07a8f3337c5d191597ea9b9587cc0969d663c (patch)
treea46440db847ce447917abecb7971d90db4a1f150 /ACSAC/proofs
parent7fc151d6a198d13dc9e1374522ec396d72905d3f (diff)
avnacé aia, remerciement notations, notes
Diffstat (limited to 'ACSAC/proofs')
-rw-r--r--ACSAC/proofs/proof_advdebias.tex30
-rw-r--r--ACSAC/proofs/proof_egd_dp.tex33
-rw-r--r--ACSAC/proofs/proof_egd_eo.tex35
3 files changed, 98 insertions, 0 deletions
diff --git a/ACSAC/proofs/proof_advdebias.tex b/ACSAC/proofs/proof_advdebias.tex
new file mode 100644
index 0000000..06212a0
--- /dev/null
+++ b/ACSAC/proofs/proof_advdebias.tex
@@ -0,0 +1,30 @@
+Definition~\ref{def:dp} of \dempar can be written synthetically as the following property:
+$P_{\hat{Y},S}=P_{\hat{Y}}\otimes P_{S}$.
+Where $P_{\hat{Y}}\otimes P_{S}$ is the product measure defined as the unique measure on
+$\mathcal{P}(\mathcal{Y})\times\mathcal{P}(\mathcal{S})$ such that
+$\forall y\in\mathcal{P}(\mathcal{Y})\forall s\in\mathcal{P}(\mathcal{S})\quad P_{\hat{Y}}\otimes P_{S}(y\times s) = P_{\hat{Y}}(y)P_{S}(s)$.
+This is equivalent to Definition~\ref{def:dp} for binary labels and sensitive attributes, but is more general: when $\hat{Y}$ is not binary, as with soft labels, this new definition remains well defined.
+% We write formally
+\begin{definition}
+\label{def:dps}
+ $\hat{Y}$ satisfies extended \dempar for $S$ if and only if: $P_{\hat{Y},S}=P_{\hat{Y}}\otimes P_{S}$.
+\end{definition}
+This definition is the same as the statistical parity introduced for fair regression~\cite{fairreg}.
+Note that we can not derive a quantity similar to \demparlevel with this definition, but this extended \dempar ensures indistinguishability of the sensitive attribute when looking at the soft labels.
+We have the following theorem:
+\begin{theorem}\label{th:advdebias}
+ The following propositions are equivalent: ``$\hat{Y}_s$ is independent of $S$'' and ``Balanced accuracy of \aia in \ref{tm:soft} is $\frac{1}{2}$''
+\end{theorem}
+\begin{proof}
+Let us show that it is equivalent to say ``all attack models have a balanced accuracy of 0.5'' and ``the target model satisfies extended demographic parity''.
+{\footnotesize
+ \begin{align*}
+ &\forall a~P(\hat{Y}\in a^{-1}(\{0\})|S=0)+P(\hat{Y}\in a^{-1}(\{1\})|S=1) = 1\\
+ \Leftrightarrow&\forall a~P(\hat{Y}\in a^{-1}(\{0\})|S=0)=P(\hat{Y}\in a^{-1}(\{0\})|S=1)\\
+	\Leftrightarrow&\forall A~P(\hat{Y}\in A|S=0)=P(\hat{Y}\in A|S=1) \\
+ \Leftrightarrow &P_{\hat{Y},S}=P_{\hat{Y}}\otimes P_{S}
+ \end{align*}
+ }
+\end{proof}
+
+%In conclusion, with extended \dempar we can not unconditionally bound the balanced accuracy of the attack without introducing distances in the space of distributions, but it gives us a condition to protect the sensitive attribute in case of an adversary gaining access to soft labels (AS). \ No newline at end of file
diff --git a/ACSAC/proofs/proof_egd_dp.tex b/ACSAC/proofs/proof_egd_dp.tex
new file mode 100644
index 0000000..d0f23f6
--- /dev/null
+++ b/ACSAC/proofs/proof_egd_dp.tex
@@ -0,0 +1,33 @@
+\begin{theorem}
+\label{th:dpgood}
+Maximum attack accuracy achievable by \aia in \ref{tm:hard} is equal to $\frac{1}{2}(1+\text{\demparlevel of }\targetmodel)$.
+\end{theorem}
+\begin{proof}
+The set $B$ of functions from $\{0,1\}$ to $\{0,1\}$ contains four elements: $b_0=0$, $b_1=id$, $b_2=1-id$ and $b_3=1$, where $\forall x, id(x) = x$.
+For every $b\in B$ the balanced \aia accuracy is
+$BA(b) = \frac{1}{2}(P(b\circ \hat{Y}=0|S=0) + P(b\circ \hat{Y}=1|S=1))$.
+We have $BA(b_0) = BA(b_3) = \frac{1}{2}$, hence, we can discard those elements when solving the attack optimisation problem.
+This problem writes $\max_{b\in B}BA(b) = \max(BA(b_1), BA(b_2))$.
+We remark that $b_1\circ \hat{Y}=\hat{Y}$ and $b_2\circ \hat{Y}=1 - \hat{Y}$.
+Hence,
+{\footnotesize
+\begin{align*}
+ BA(b_1) &= \frac{1}{2}(P(\hat{Y}=0|S=0) + P(\hat{Y}=1|S=1))\\
+ &=\frac{1}{2}(1+P(\hat{Y}=1|S=1) - P(\hat{Y}=1|S=0))\\
+ BA(b_2)&=\frac{1}{2}(1+P(\hat{Y}=1|S=0) - P(\hat{Y}=1|S=1))
+\end{align*}
+}
+Thus,
+{\footnotesize
+\begin{align*}
+    &\max_{b\in B}BA(b)
+    = \frac{1}{2}\left(1+\max\left(
+    \begin{matrix}
+    P(\hat{Y}=1|S=1) - P(\hat{Y}=1|S=0)\\
+    P(\hat{Y}=1|S=0) - P(\hat{Y}=1|S=1)
+    \end{matrix}
+    \right)\right)\\
+    =&\frac{1}{2}(1+|P(\hat{Y}=1|S=1) - P(\hat{Y}=1|S=0)|)
+\end{align*}
+}
+\end{proof}
diff --git a/ACSAC/proofs/proof_egd_eo.tex b/ACSAC/proofs/proof_egd_eo.tex
new file mode 100644
index 0000000..435add2
--- /dev/null
+++ b/ACSAC/proofs/proof_egd_eo.tex
@@ -0,0 +1,35 @@
+\begin{theorem}
+\label{th:eoo}
+If $\hat{Y}$ satisfies \eo for $Y$ and $S$ then the balanced accuracy of \aia in \ref{tm:hard} is $\frac{1}{2}$ iff $Y$ is independent of $S$ or $\hat{Y}$ is independent of $Y$.
+\end{theorem}
+
+\begin{proof}
+Let $\attackmodel$ be the attack model trained for AS: $\hat{S}=\attackmodel\circ \hat{Y}$.
+By the total probability formula
+{\footnotesize
+\begin{align*}
+P(\hat{S}=0|S=0)=&P(\hat{S}=0|S=0,Y=0)P(Y=0|S=0)\\
++&P(\hat{S}=0|S=0,Y=1)P(Y=1|S=0)
+\end{align*}
+}
+and as well
+{\footnotesize
+\begin{align*}
+P(\hat{S}=1|S=1)=&P(\hat{S}=1|S=1,Y=0)P(Y=0|S=1)\\
+ +&P(\hat{S}=1|S=1,Y=1)P(Y=1|S=1)
+\end{align*}
+}
+Then we substitute those terms in the definition of the balanced accuracy of $\attackmodel$.
+{\footnotesize
+\begin{align*}
+ &\frac{P(\hat{S}=0|S=0)+P(\hat{S}=1|S=1)}{2}\\
+ =&\frac{1}{2}+\frac{1}{2}\left(P(Y=0|S=0)-P(Y=0|S=1)\right)\\
+    &\left(P(\hat{Y}\in \attackmodel^{-1}(\{1\})|S=1,Y=0) -
+    P(\hat{Y}\in \attackmodel^{-1}(\{1\})|S=1,Y=1)\right)
+\end{align*}
+}
+The balanced accuracy is equal to 0.5 if and only if $P(Y=0|S=0)=P(Y=0|S=1)$
+or $\forall \attackmodel~P(\hat{Y}\in \attackmodel^{-1}(\{1\})|S=1,Y=0)=P(\hat{Y}\in \attackmodel^{-1}(\{1\})|S=1,Y=1)$.
+The first condition indicates that $Y$ is independent of $S$; the second indicates that, conditioned on $S=1$, the attack model $\attackmodel$ has only random-guess utility.
+We can do the same computation for $S=0$ and obtain a similar conclusion.
+\end{proof} \ No newline at end of file