-
Benchmarking Partial Observability in Reinforcement Learning with a Suite of Memory-Improvable Domains.
R.Y. Tao, K. Guo, C. Allen, G. Konidaris. Accepted, Reinforcement Learning Conference, August 2025.
[Preprint] [Bibtex]
@article{tao2025pobax,
title = {Benchmarking Partial Observability in Reinforcement Learning with a Suite of Memory-Improvable Domains},
author = {Tao, Ruo Yu and Guo, Kaicheng and Allen, Cameron and Konidaris, George},
journal = {Reinforcement Learning Journal},
volume = {2},
year = {2025}
}
-
Focused Skill Discovery: Using Per-Factor Empowerment to Control State Variables.
J.C. Carr, Q. Sun, C. Allen. Accepted, Reinforcement Learning Conference, August 2025.
[Preprint] [Bibtex]
@article{carr2025focused_skill_discovery,
title = {Focused Skill Discovery: Using Per-Factor Empowerment to Control State Variables},
author = {Carr, Jonathan Cola\c{c}o and Sun, Qianyi and Allen, Cameron},
journal = {Reinforcement Learning Journal},
volume = {2},
year = {2025}
}
- Also accepted, Multidisciplinary Conference on Reinforcement Learning and Decision Making, June 2025.
-
Memory as State Abstraction over Trajectories.
A. Kirtland, A. Ivanov, C. Allen, M. Littman, G. Konidaris. Accepted, Multidisciplinary Conference on Reinforcement Learning and Decision Making, June 2025.
- Selected for spotlight presentation (20 of 339).
-
Learning Transferable Sub-Goals by Hypothesizing Generalizing Features.
A. de Mello Koch, A. Bagaria, B. Huo, Z. Zhou, C. Allen, G. Konidaris. At the AAAI Workshop on Generalization in Planning, March 2025.
[Code] [Bibtex]
@inproceedings{demellokoch2025learning,
title = {Learning Transferable Sub-Goals by Hypothesizing Generalizing Features},
author = {De Mello Koch, Anita and Bagaria, Akhil and Huo, Bingnan and Zhou, Zhiyuan and Allen, Cameron and Konidaris, George},
booktitle = {AAAI Workshop on Generalization in Planning},
year = {2025}
}
-
Mitigating Partial Observability in Sequential Decision Processes via the Lambda Discrepancy.
C. Allen*, A. Kirtland*, R.Y. Tao*, S. Lobel, D. Scott, N. Petrocelli, O. Gottesman, R. Parr, M.L. Littman, G. Konidaris. In Advances in Neural Information Processing Systems, December 2024.
[Blog] [Video] [Poster] [Code] [Bibtex]
@inproceedings{allenkirtlandtao2024lambdadiscrep,
title = {Mitigating Partial Observability in Sequential Decision Processes via the Lambda Discrepancy},
author = {Allen, Cameron and Kirtland, Aaron and Tao, Ruo Yu and Lobel, Sam and Scott, Daniel and Petrocelli, Nicholas and Gottesman, Omer and Parr, Ronald and Littman, Michael L. and Konidaris, George},
booktitle = {Advances in Neural Information Processing Systems},
volume = {37},
year = {2024}
}
- Selected for oral presentation (3 of 72) at the ICML Foundations of Reinforcement Learning and Control Workshop, July 2024.
- Also a workshop paper at the RLC Finding the Frame Workshop, August 2024.
-
Evidence of Learned Look-Ahead in a Chess-Playing Neural Network.
E. Jenner, S. Kapur, V. Georgiev, C. Allen, S. Emmons, S. Russell. In Advances in Neural Information Processing Systems, December 2024.
[Blog] [Poster] [Code] [Bibtex]
@inproceedings{jenner2024chessinterp,
title = {Evidence of Learned Look-Ahead in a Chess-Playing Neural Network},
author = {Jenner, Erik and Kapur, Shreyas and Georgiev, Vasil and Allen, Cameron and Emmons, Scott and Russell, Stuart},
booktitle = {Advances in Neural Information Processing Systems},
volume = {37},
year = {2024}
}
-
Task Scoping: Generating Task-Specific Simplifications of Open-Scope Planning Problems.
M. Fishman, N. Kumar, C. Allen, N. Danas, M. Littman, S. Tellex, and G. Konidaris. Presented at the IJCAI Workshop on Bridging the Gap Between AI Planning and Reinforcement Learning, August 2023.
[Bibtex]
@inproceedings{fishman2023taskscoping,
title = {Task Scoping: Generating Task-Specific Simplifications of Open-Scope Planning Problems},
author = {Fishman, Michael and Kumar, Nishanth and Allen, Cameron and Danas, Natasha and Littman, Michael and Tellex, Stefanie and Konidaris, George},
booktitle = {IJCAI Workshop on Bridging the Gap Between AI Planning and Reinforcement Learning},
year = {2023}
}
-
Coarse-Grained Smoothness for Reinforcement Learning in Metric Spaces.
O. Gottesman, K. Asadi, C. Allen, S. Lobel, G. Konidaris, and M. Littman. In Proceedings of the 26th International Conference on Artificial Intelligence and Statistics, April 2023.
[Bibtex]
@inproceedings{gottesman2023coarse_grained,
title = {Coarse-Grained Smoothness for Reinforcement Learning in Metric Spaces},
author = {Gottesman, Omer and Asadi, Kavosh and Allen, Cameron and Lobel, Sam and Konidaris, George and Littman, Michael},
booktitle = {Proceedings of the 26th International Conference on Artificial Intelligence and Statistics},
pages = {1390--1410},
year = {2023}
}
-
Characterizing the Action-Generalization Gap in Deep Q-Learning.
Z. Zhou, C. Allen, K. Asadi, and G. Konidaris. In the 5th Multidisciplinary Conference on Reinforcement Learning and Decision Making, June 2022.
[Code] [Bibtex]
@inproceedings{zhou2022actgen_rldm,
title = {Characterizing the Action-Generalization Gap in Deep Q-Learning},
author = {Zhou, Zhiyuan and Allen, Cameron and Asadi, Kavosh and Konidaris, George},
booktitle = {5th Multidisciplinary Conference on Reinforcement Learning and Decision Making},
year = {2022}
}
-
Optimistic Initialization for Exploration in Continuous Control.
S. Lobel, O. Gottesman, C. Allen, A. Bagaria, and G. Konidaris. In Proceedings of the Thirty-Sixth AAAI Conference on Artificial Intelligence, February 2022.
[Code] [Bibtex]
@inproceedings{lobel2022optinit,
title = {Optimistic Initialization for Exploration in Continuous Control},
author = {Lobel, Sam and Gottesman, Omer and Allen, Cameron and Bagaria, Akhil and Konidaris, George},
booktitle = {Proceedings of the Thirty-Sixth AAAI Conference on Artificial Intelligence},
pages = {7612--7619},
year = {2022}
}
-
Learning Markov State Abstractions for Deep Reinforcement Learning.
C. Allen, N. Parikh, O. Gottesman, and G. Konidaris. In Advances in Neural Information Processing Systems, December 2021.
[Blog] [Talk] [Poster] [Code] [Bibtex]
@inproceedings{allen2021markov_abstractions,
title = {Learning {M}arkov State Abstractions for Deep Reinforcement Learning},
author = {Allen, Cameron and Parikh, Neev and Gottesman, Omer and Konidaris, George},
booktitle = {Advances in Neural Information Processing Systems},
volume = {34},
pages = {8229--8241},
year = {2021}
}
- Also a workshop paper at the NeurIPS Deep Reinforcement Learning Workshop, December 2020. [Bibtex]
@inproceedings{allen2020markov_abstractions_ws,
title = {Learning {M}arkov State Abstractions for Deep Reinforcement Learning},
author = {Allen, Cameron and Parikh, Neev and Konidaris, George},
booktitle = {NeurIPS Deep Reinforcement Learning Workshop},
year = {2020}
}
-
Efficient Black-Box Planning Using Macro-Actions with Focused Effects.
C. Allen, M. Katz, T. Klinger, G. Konidaris, M. Riemer, and G. Tesauro. In Proceedings of the 30th International Joint Conference on Artificial Intelligence, August 2021.
[Blog] [Talk] [Poster] [Code] [Bibtex]
@inproceedings{allen2021focused_macros,
title = {Efficient Black-Box Planning Using Macro-Actions with Focused Effects},
author = {Allen, Cameron and Katz, Michael and Klinger, Tim and Konidaris, George and Riemer, Matthew and Tesauro, Gerald},
booktitle = {Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence},
pages = {4024--4031},
year = {2021}
}
- Also a workshop paper at the ICAPS Workshop on Heuristics and Search for Domain-independent Planning, August 2021. [Bibtex]
@inproceedings{allen2021focused_macros_ws,
title = {Efficient Black-Box Planning Using Macro-Actions with Focused Effects},
author = {Allen, Cameron and Katz, Michael and Klinger, Tim and Konidaris, George and Riemer, Matthew and Tesauro, Gerald},
booktitle = {ICAPS Workshop on Heuristics and Search for Domain-independent Planning},
year = {2021}
}
-
Bad-Policy Density: A Measure of Reinforcement Learning Hardness.
D. Abel, C. Allen, D. Arumugam, D. E. Hershkowitz, M. Littman, L. L. S. Wong. In the ICML Workshop on Reinforcement Learning Theory, July 2021.
[Bibtex]
@inproceedings{abel2021bpd,
title = {Bad-Policy Density: A Measure of Reinforcement Learning Hardness},
author = {Abel, David and Allen, Cameron and Arumugam, Dilip and Hershkowitz, D. Ellis and Littman, Michael L. and Wong, Lawson L.S.},
booktitle = {ICML Workshop on Reinforcement Learning Theory},
year = {2021}
}
-
Mean Actor Critic.
C. Allen*, K. Asadi*, M. Roderick, A. Mohamed, G. Konidaris, and M. Littman. arXiv:1709.00503 [stat.ML], September 2017.
[Code 1] [Code 2] [Bibtex]
@article{allen2017mac,
title = {Mean Actor Critic},
author = {Allen, Cameron and Asadi, Kavosh and Roderick, Melrose and Mohamed, Abdel-rahman and Konidaris, George and Littman, Michael},
journal = {arXiv},
eid = {1709.00503},
year = {2017}
}