-
C. Allen*, A. Kirtland*, R. Y. Tao*, S. Lobel, D. Scott, N. Petrocelli, O. Gottesman, R. Parr, M. L. Littman, G. Konidaris. Mitigating Partial Observability in Sequential Decision Processes via the Lambda Discrepancy. To appear at NeurIPS, December 2024. [Preprint] [Blog] [Bibtex]
@article{allenkirtlandtao2024lambdadiscrep,
title = {Mitigating Partial Observability in Sequential Decision Processes via the Lambda Discrepancy},
author = {Allen, Cameron and Kirtland, Aaron and Tao, Ruo Yu and Lobel, Sam and Scott, Daniel and Petrocelli, Nicholas and Gottesman, Omer and Parr, Ronald and Littman, Michael L. and Konidaris, George},
journal = {arXiv},
eid = {2407.07333},
year = {2024}
}
- Selected for oral presentation at the ICML Foundations of Reinforcement Learning and Control Workshop, July 2024.
- Also a workshop paper at the RLC Finding the Frame Workshop, August 2024.
-
E. Jenner, S. Kapur, V. Georgiev, C. Allen, S. Emmons, S. Russell. Evidence of Learned Look-Ahead in a Chess-Playing Neural Network. To appear at NeurIPS, December 2024. [Preprint] [Blog] [Bibtex]
@article{jenner2024chessinterp,
title = {Evidence of Learned Look-Ahead in a Chess-Playing Neural Network},
author = {Jenner, Erik and Kapur, Shreyas and Georgiev, Vasil and Allen, Cameron and Emmons, Scott and Russell, Stuart},
journal = {arXiv},
eid = {2406.00877},
year = {2024}
}
-
M. Fishman, N. Kumar, C. Allen, N. Danas, M. Littman, S. Tellex, and G. Konidaris. Task Scoping: Generating Task-Specific Simplifications of Open-Scope Planning Problems. Presented at the IJCAI Workshop on Bridging the Gap Between AI Planning and Reinforcement Learning, August 2023. [Bibtex]
@inproceedings{fishman2023taskscoping,
title = {Task Scoping: Generating Task-Specific Simplifications of Open-Scope Planning Problems},
author = {Fishman, Michael and Kumar, Nishanth and Allen, Cameron and Danas, Natasha and Littman, Michael and Tellex, Stefanie and Konidaris, George},
booktitle = {IJCAI Workshop on Bridging the Gap Between AI Planning and Reinforcement Learning},
year = {2023}
}
-
O. Gottesman, K. Asadi, C. Allen, S. Lobel, G. Konidaris, and M. Littman. Coarse-Grained Smoothness for Reinforcement Learning in Metric Spaces. In Proceedings of the 26th International Conference on Artificial Intelligence and Statistics, April 2023. [Bibtex]
@inproceedings{gottesman2023coarse_grained,
title = {Coarse-Grained Smoothness for Reinforcement Learning in Metric Spaces},
author = {Gottesman, Omer and Asadi, Kavosh and Allen, Cameron and Lobel, Sam and Konidaris, George and Littman, Michael},
booktitle = {Proceedings of the 26th International Conference on Artificial Intelligence and Statistics},
pages = {1390--1410},
year = {2023}
}
-
Z. Zhou, C. Allen, K. Asadi, and G. Konidaris. Characterizing the Action-Generalization Gap in Deep Q-Learning. In the 5th Multidisciplinary Conference on Reinforcement Learning and Decision Making, June 2022. [Bibtex]
@inproceedings{zhou2022actgen_rldm,
title = {Characterizing the Action-Generalization Gap in Deep Q-Learning},
author = {Zhou, Zhiyuan and Allen, Cameron and Asadi, Kavosh and Konidaris, George},
booktitle = {5th Multidisciplinary Conference on Reinforcement Learning and Decision Making},
year = {2022}
}
-
S. Lobel, O. Gottesman, C. Allen, A. Bagaria, and G. Konidaris. Optimistic Initialization for Exploration in Continuous Control. In Proceedings of the Thirty-Sixth AAAI Conference on Artificial Intelligence, February 2022. [Bibtex]
@inproceedings{lobel2022optinit,
title = {Optimistic Initialization for Exploration in Continuous Control},
author = {Lobel, Sam and Gottesman, Omer and Allen, Cameron and Bagaria, Akhil and Konidaris, George},
booktitle = {Proceedings of the Thirty-Sixth AAAI Conference on Artificial Intelligence},
pages = {7612--7619},
year = {2022}
}
-
C. Allen, N. Parikh, O. Gottesman, and G. Konidaris. Learning Markov State Abstractions for Deep Reinforcement Learning. In Advances in Neural Information Processing Systems, December 2021. [Blog] [Bibtex]
@inproceedings{allen2021markov_abstractions,
title = {Learning {M}arkov State Abstractions for Deep Reinforcement Learning},
author = {Allen, Cameron and Parikh, Neev and Gottesman, Omer and Konidaris, George},
booktitle = {Advances in Neural Information Processing Systems},
volume = {34},
pages = {8229--8241},
year = {2021}
}
- Also a workshop paper at the NeurIPS Deep Reinforcement Learning Workshop, December 2020. [Bibtex]
@inproceedings{allen2020markov_abstractions_ws,
title = {Learning Markov State Abstractions for Deep Reinforcement Learning},
author = {Allen, Cameron and Parikh, Neev and Konidaris, George},
booktitle = {NeurIPS Deep Reinforcement Learning Workshop},
year = {2020}
}
-
C. Allen, M. Katz, T. Klinger, G. Konidaris, M. Riemer, and G. Tesauro. Efficient Black-Box Planning Using Macro-Actions with Focused Effects. In Proceedings of the 30th International Joint Conference on Artificial Intelligence, August 2021. [Blog] [Bibtex]
@inproceedings{allen2021focused_macros,
title = {Efficient Black-Box Planning Using Macro-Actions with Focused Effects},
author = {Allen, Cameron and Katz, Michael and Klinger, Tim and Konidaris, George and Riemer, Matthew and Tesauro, Gerald},
booktitle = {Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence},
pages = {4024--4031},
year = {2021}
}
- Also a workshop paper at the ICAPS Workshop on Heuristics and Search for Domain-independent Planning, August 2021. [Bibtex]
@inproceedings{allen2021focused_macros_ws,
title = {Efficient Black-Box Planning Using Macro-Actions with Focused Effects},
author = {Allen, Cameron and Katz, Michael and Klinger, Tim and Konidaris, George and Riemer, Matthew and Tesauro, Gerald},
booktitle = {ICAPS Workshop on Heuristics and Search for Domain-independent Planning},
year = {2021}
}
-
D. Abel, C. Allen, D. Arumugam, D. E. Hershkowitz, M. Littman, L. L. S. Wong. Bad-Policy Density: A Measure of Reinforcement Learning Hardness. In the ICML Workshop on Reinforcement Learning Theory, July 2021. [Bibtex]
@inproceedings{abel2021bpd,
author = {Abel, David and Allen, Cameron and Arumugam, Dilip and Hershkowitz, D. Ellis and Littman, Michael L. and Wong, Lawson L.S.},
title = {Bad-Policy Density: A Measure of Reinforcement Learning Hardness},
booktitle = {ICML Workshop on Reinforcement Learning Theory},
year = {2021}
}
-
C. Allen*, K. Asadi*, M. Roderick, A. Mohamed, G. Konidaris, and M. Littman. Mean Actor Critic. arXiv:1709.00503 [stat.ML], September 2017. [Bibtex]
@article{allen2017mac,
title = {Mean Actor Critic},
author = {Allen, Cameron and Asadi, Kavosh and Roderick, Melrose and Mohamed, Abdel-rahman and Konidaris, George and Littman, Michael},
journal = {arXiv},
eid = {1709.00503},
year = {2017}
}