-
Skill-Driven Neurosymbolic State Abstractions.
A. Ahmetoglu, S. James, C. Allen, S. Lobel, D. Abel, and G. Konidaris. In Advances in Neural Information Processing Systems, December 2025.
[Bibtex]
@inproceedings{ahmetoglu2025skilldriven_neurosym,
  title     = {Skill-Driven Neurosymbolic State Abstractions},
  author    = {Ahmetoglu, Alper and James, Steven and Allen, Cameron and Lobel, Sam and Abel, David and Konidaris, George},
  booktitle = {Advances in Neural Information Processing Systems},
  volume    = {38},
  year      = {2025},
}
-
Benchmarking Partial Observability in Reinforcement Learning with a Suite of Memory-Improvable Domains.
R.Y. Tao, K. Guo, C. Allen, G. Konidaris. Reinforcement Learning Journal, August 2025.
[Code] [Bibtex]
@article{tao2025pobax,
  title   = {Benchmarking Partial Observability in Reinforcement Learning with a Suite of Memory-Improvable Domains},
  author  = {Tao, Ruo Yu and Guo, Kaicheng and Allen, Cameron and Konidaris, George},
  journal = {Reinforcement Learning Journal},
  volume  = {6},
  year    = {2025},
}
-
Focused Skill Discovery: Learning to Control Specific State Variables while Minimizing Side Effects.
J.C. Carr, Q. Sun, C. Allen. Reinforcement Learning Journal, August 2025.
[Blog] [Poster] [Code] [Bibtex]
@article{carr2025focused_skill_discovery,
  title   = {Focused Skill Discovery: Learning to Control Specific State Variables while Minimizing Side Effects},
  author  = {Carr, Jonathan Cola\c{c}o and Sun, Qinyi and Allen, Cameron},
  journal = {Reinforcement Learning Journal},
  volume  = {6},
  year    = {2025},
}
- Also an extended abstract at the 6th Multidisciplinary Conference on Reinforcement Learning and Decision Making, June 2025.
-
Mitigating Partial Observability in Sequential Decision Processes via the Lambda Discrepancy.
C. Allen*, A. Kirtland*, R.Y. Tao*, S. Lobel, D. Scott, N. Petrocelli, O. Gottesman, R. Parr, M.L. Littman, G. Konidaris. In Advances in Neural Information Processing Systems, December 2024.
[Blog] [Video] [Poster] [Code] [Bibtex]
@inproceedings{allenkirtlandtao2024lambdadiscrep,
  title     = {Mitigating Partial Observability in Sequential Decision Processes via the Lambda Discrepancy},
  author    = {Allen, Cameron and Kirtland, Aaron and Tao, Ruo Yu and Lobel, Sam and Scott, Daniel and Petrocelli, Nicholas and Gottesman, Omer and Parr, Ronald and Littman, Michael L. and Konidaris, George},
  booktitle = {Advances in Neural Information Processing Systems},
  volume    = {37},
  year      = {2024},
}
- Selected for oral presentation (3 of 72) at the ICML Foundations of Reinforcement Learning and Control Workshop, July 2024.
- Also a workshop paper at the RLC Finding the Frame Workshop, August 2024.
-
Evidence of Learned Look-Ahead in a Chess-Playing Neural Network.
E. Jenner, S. Kapur, V. Georgiev, C. Allen, S. Emmons, S. Russell. In Advances in Neural Information Processing Systems, December 2024.
[Blog] [Poster] [Code] [Bibtex]
@inproceedings{jenner2024chessinterp,
  title     = {Evidence of Learned Look-Ahead in a Chess-Playing Neural Network},
  author    = {Jenner, Erik and Kapur, Shreyas and Georgiev, Vasil and Allen, Cameron and Emmons, Scott and Russell, Stuart},
  booktitle = {Advances in Neural Information Processing Systems},
  volume    = {37},
  year      = {2024},
}
-
Coarse-Grained Smoothness for Reinforcement Learning in Metric Spaces.
O. Gottesman, K. Asadi, C. Allen, S. Lobel, G. Konidaris, and M. Littman. In Proceedings of the 26th International Conference on Artificial Intelligence and Statistics, April 2023.
[Bibtex]
@inproceedings{gottesman2023coarse_grained,
  title     = {Coarse-Grained Smoothness for Reinforcement Learning in Metric Spaces},
  author    = {Gottesman, Omer and Asadi, Kavosh and Allen, Cameron and Lobel, Sam and Konidaris, George and Littman, Michael L.},
  booktitle = {Proceedings of the 26th International Conference on Artificial Intelligence and Statistics},
  pages     = {1390--1410},
  year      = {2023},
}
-
Optimistic Initialization for Exploration in Continuous Control.
S. Lobel, O. Gottesman, C. Allen, A. Bagaria, and G. Konidaris. In Proceedings of the Thirty-Sixth AAAI Conference on Artificial Intelligence, February 2022.
[Code] [Bibtex]
@inproceedings{lobel2022optinit,
  title     = {Optimistic Initialization for Exploration in Continuous Control},
  author    = {Lobel, Sam and Gottesman, Omer and Allen, Cameron and Bagaria, Akhil and Konidaris, George},
  booktitle = {Proceedings of the Thirty-Sixth AAAI Conference on Artificial Intelligence},
  pages     = {7612--7619},
  year      = {2022},
}
-
Learning Markov State Abstractions for Deep Reinforcement Learning.
C. Allen, N. Parikh, O. Gottesman, and G. Konidaris. In Advances in Neural Information Processing Systems, December 2021.
[Blog] [Talk] [Poster] [Code] [Bibtex]
@inproceedings{allen2021markov_abstractions,
  title     = {Learning {Markov} State Abstractions for Deep Reinforcement Learning},
  author    = {Allen, Cameron and Parikh, Neev and Gottesman, Omer and Konidaris, George},
  booktitle = {Advances in Neural Information Processing Systems},
  volume    = {34},
  pages     = {8229--8241},
  year      = {2021},
}
@inproceedings{allen2020markov_abstractions_ws,
  title     = {Learning {Markov} State Abstractions for Deep Reinforcement Learning},
  author    = {Allen, Cameron and Parikh, Neev and Konidaris, George},
  booktitle = {NeurIPS Deep Reinforcement Learning Workshop},
  year      = {2020},
}
-
Efficient Black-Box Planning Using Macro-Actions with Focused Effects.
C. Allen, M. Katz, T. Klinger, G. Konidaris, M. Riemer, and G. Tesauro. In Proceedings of the 30th International Joint Conference on Artificial Intelligence, August 2021.
[Blog] [Talk] [Poster] [Code] [Bibtex]
@inproceedings{allen2021focused_macros,
  title     = {Efficient Black-Box Planning Using Macro-Actions with Focused Effects},
  author    = {Allen, Cameron and Katz, Michael and Klinger, Tim and Konidaris, George and Riemer, Matthew and Tesauro, Gerald},
  booktitle = {Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence},
  pages     = {4024--4031},
  year      = {2021},
}
- Also a workshop paper at the ICAPS Workshop on Heuristics and Search for Domain-independent Planning, August 2021. [Bibtex]
@inproceedings{allen2021focused_macros_ws,
  title     = {Efficient Black-Box Planning Using Macro-Actions with Focused Effects},
  author    = {Allen, Cameron and Katz, Michael and Klinger, Tim and Konidaris, George and Riemer, Matthew and Tesauro, Gerald},
  booktitle = {ICAPS Workshop on Heuristics and Search for Domain-independent Planning},
  year      = {2021},
}
-
Neural Manifold Geometry Encodes Feature Fields.
J. Yocum, C. Allen, B. Olshausen, S. Russell. At the NeurIPS Workshop on Symmetry and Geometry in Neural Representations (NeurReps), December 2025.
[Bibtex]
@inproceedings{yocum2025featurefields,
  title     = {Neural Manifold Geometry Encodes Feature Fields},
  author    = {Yocum, Julian and Allen, Cameron and Olshausen, Bruno and Russell, Stuart},
  booktitle = {NeurIPS Workshop on Symmetry and Geometry in Neural Representations},
  year      = {2025},
}
-
Transformers Represent Causal Abstractions.
E. Altuzar, J. Yocum, C. Allen. At the NeurIPS Workshop on Symmetry and Geometry in Neural Representations (NeurReps), December 2025.
[Bibtex]
@inproceedings{altuzar2025emergence,
  title     = {Transformers Represent Causal Abstractions},
  author    = {Altuzar, Emiliano and Yocum, Julian and Allen, Cameron},
  booktitle = {NeurIPS Workshop on Symmetry and Geometry in Neural Representations},
  year      = {2025},
}
-
Echo of Bayes: Learned Memory Functions Can Recover Belief States.
J. Liévano-Karim, P. Koepernik, G. Konidaris, C. Allen. At the NeurIPS Workshop on Unifying Representations in Neural Models, December 2025.
[Bibtex]
@inproceedings{lievano2025beliefprobes,
  title     = {Echo of Bayes: Learned Memory Functions Can Recover Belief States},
  author    = {Li{\'e}vano-Karim, Juan and Koepernik, Peter and Konidaris, George and Allen, Cameron},
  booktitle = {NeurIPS Workshop on Unifying Representations in Neural Models},
  year      = {2025},
}
-
The Influence of Scaffolds on Coordination Scaling Laws in LLM Agents.
M. Meireles*, R. Bhati*, N. Lauffer, C. Allen. At the NeurIPS Workshop on Scaling Environments for Agents, December 2025.
[Bibtex]
@inproceedings{meirelesbhati2025scaffolds,
  title     = {The Influence of Scaffolds on Coordination Scaling Laws in LLM Agents},
  author    = {Meireles, Mariana and Bhati, Rupali and Lauffer, Niklas and Allen, Cameron},
  booktitle = {NeurIPS Workshop on Scaling Environments for Agents},
  year      = {2025},
}
- Also at the NeurIPS Workshop on Multi-Turn Interactions in Large Language Models, December 2025.
-
Disentangling Independently Controllable Factors in Reinforcement Learning.
R. Rodriguez-Sanchez, C. Allen, G. Konidaris. At the New York Reinforcement Learning Workshop, September 2025.
[Bibtex]
@inproceedings{rodriguezsanchez2025acf_nyrl,
  title     = {Disentangling Independently Controllable Factors in Reinforcement Learning},
  author    = {Rodriguez-Sanchez, Rafael and Allen, Cameron and Konidaris, George},
  booktitle = {New York Reinforcement Learning Workshop},
  year      = {2025},
}
-
General Value Discrepancies Mitigate Partial Observability in Reinforcement Learning.
P. Koepernik*, R.Y. Tao*, R. Parr, G. Konidaris, C. Allen. At the RLC Finding the Frame Workshop, August 2025.
[Bibtex]
@inproceedings{koepernik_tao2025general_value_discrepancies,
  title     = {General Value Discrepancies Mitigate Partial Observability in Reinforcement Learning},
  author    = {Koepernik, Peter and Tao, Ruo Yu and Parr, Ronald and Konidaris, George and Allen, Cameron},
  booktitle = {RLC Finding the Frame Workshop},
  year      = {2025},
}
-
Improving Reward Learning by Estimating Annotator Expertise.
P. Czempin, R. Freedman, E. Novoseller, V.J. Lawhern, C. Allen, E. Bıyık. At the RSS Workshop on Continual Robot Learning from Humans, June 2025.
[Bibtex]
@inproceedings{czempin2025expertise,
  title     = {Improving Reward Learning by Estimating Annotator Expertise},
  author    = {Czempin, Pavel and Freedman, Rachel and Novoseller, Ellen and Lawhern, Vernon J. and Allen, Cameron and B{\i}y{\i}k, Erdem},
  booktitle = {RSS Workshop on Continual Robot Learning from Humans},
  year      = {2025},
}
-
Memory as State Abstraction over Trajectories.
A. Kirtland*, A. Ivanov*, C. Allen, M. Littman, G. Konidaris. At the 6th Multidisciplinary Conference on Reinforcement Learning and Decision Making, June 2025.
[Preprint] [Blog] [Poster] [Bibtex]
@inproceedings{kirtland_ivanov2025memory_as_abstr,
  title     = {Memory as State Abstraction over Trajectories},
  author    = {Kirtland, Aaron and Ivanov, Alexander and Allen, Cameron and Littman, Michael L. and Konidaris, George},
  booktitle = {6th Multidisciplinary Conference on Reinforcement Learning and Decision Making},
  year      = {2025},
}
- Selected for spotlight presentation (20 of 339).
-
Learning Transferable Sub-Goals by Hypothesizing Generalizing Features.
A. de Mello Koch, A. Bagaria, B. Huo, Z. Zhou, C. Allen, G. Konidaris. At the AAAI Workshop on Generalization in Planning, March 2025.
[Code] [Bibtex]
@inproceedings{demellokoch2025learning,
  title     = {Learning Transferable Sub-Goals by Hypothesizing Generalizing Features},
  author    = {de Mello Koch, Anita and Bagaria, Akhil and Huo, Bingnan and Zhou, Zhiyuan and Allen, Cameron and Konidaris, George},
  booktitle = {AAAI Workshop on Generalization in Planning},
  year      = {2025},
}
-
Task Scoping: Generating Task-Specific Simplifications of Open-Scope Planning Problems.
M. Fishman, N. Kumar, C. Allen, N. Danas, M. Littman, S. Tellex, and G. Konidaris. Presented at the IJCAI Workshop on Bridging the Gap Between AI Planning and Reinforcement Learning, August 2023.
[Bibtex]
@inproceedings{fishman2023taskscoping,
  title     = {Task Scoping: Generating Task-Specific Simplifications of Open-Scope Planning Problems},
  author    = {Fishman, Michael and Kumar, Nishanth and Allen, Cameron and Danas, Natasha and Littman, Michael L. and Tellex, Stefanie and Konidaris, George},
  booktitle = {IJCAI Workshop on Bridging the Gap Between AI Planning and Reinforcement Learning},
  year      = {2023},
}
-
Characterizing the Action-Generalization Gap in Deep Q-Learning.
Z. Zhou, C. Allen, K. Asadi, and G. Konidaris. At the 5th Multidisciplinary Conference on Reinforcement Learning and Decision Making, June 2022.
[Code] [Bibtex]
@inproceedings{zhou2022actgen_rldm,
  title     = {Characterizing the Action-Generalization Gap in Deep Q-Learning},
  author    = {Zhou, Zhiyuan and Allen, Cameron and Asadi, Kavosh and Konidaris, George},
  booktitle = {5th Multidisciplinary Conference on Reinforcement Learning and Decision Making},
  year      = {2022},
}
-
Bad-Policy Density: A Measure of Reinforcement Learning Hardness.
D. Abel, C. Allen, D. Arumugam, D. E. Hershkowitz, M. Littman, L. L. S. Wong. In the ICML Workshop on Reinforcement Learning Theory, July 2021.
[Bibtex]
@inproceedings{abel2021bpd,
  title     = {Bad-Policy Density: A Measure of Reinforcement Learning Hardness},
  author    = {Abel, David and Allen, Cameron and Arumugam, Dilip and Hershkowitz, D. Ellis and Littman, Michael L. and Wong, Lawson L. S.},
  booktitle = {ICML Workshop on Reinforcement Learning Theory},
  year      = {2021},
}