Abstract:

Algorithmic recourse, or enabling individuals to reverse a negative outcome, has gained attention as a means of supporting human agency in interactions with artificial intelligence (AI) systems. However, recent work has shown that even if a decision-making classifier is fair according to reasonable criteria, recourse itself may be unfair. Members of disadvantaged groups may have to work harder than their more privileged peers to reverse a negative outcome. In this paper, we introduce effort-aware fairness, which treats algorithmic recourse through the lens of substantive equality of opportunity. It acknowledges that individuals from different groups may not be comparably well-equipped to act on their recourse, resulting in disparities in their chances of reversing unfavorable outcomes. These disparities, shaped by differing effort distributions among demographic groups, can be exacerbated over time. We provide a formal definition of effort-aware fairness, propose fairness metrics, and then develop an intervention that improves recourse fairness by rewarding effort. Through empirical comparison with existing strategies, we demonstrate that this intervention successfully mitigates disparities. Our conceptual framework and experimental evaluation build upon prior work that uses an agent-based model for simulating real-world recourse over time.


Citation

Bell, Andrew, Fonseca, Joao, Abrate, Carlo, Bonchi, Francesco, and Stoyanovich, Julia. 2025. “How Much Effort Is Enough? Fairness in Algorithmic Recourse Through the Lens of Substantive Equality of Opportunity.” Proceedings of the 5th ACM Conference on Equity and Access in Algorithms, Mechanisms, and Optimization: 170–184. https://doi.org/10.1145/3757887.3763014.

@inproceedings{bell2025much,
  author = {Bell, Andrew and Fonseca, Joao and Abrate, Carlo and Bonchi, Francesco and Stoyanovich, Julia},
  title = {How Much Effort Is Enough? Fairness in Algorithmic Recourse Through the Lens of Substantive Equality of Opportunity},
  booktitle = {Proceedings of the 5th ACM Conference on Equity and Access in Algorithms, Mechanisms, and Optimization},
  year = {2025},
  pages = {170--184},
  doi = {10.1145/3757887.3763014},
  url = {https://doi.org/10.1145/3757887.3763014},
  address = {New York, NY, USA},
  isbn = {9798400721403},
  keywords = {algorithmic recourse, fairness, equality, reliability, temporal data, simulation},
  numpages = {15},
  publisher = {Association for Computing Machinery},
  series = {EAAMO '25}
}