Intelligent Robot Learning Laboratory (IRL Lab) Books & Book Chapters

2013

  • Haitham Bou Ammar, Matthew E. Taylor, Karl Tuyls, and Gerhard Weiss. Reinforcement Learning Transfer using a Sparse Coded Inter-Task Mapping. In LNAI Post-proceedings of the European Workshop on Multi-agent Systems. Springer-Verlag, 2013.
    @inproceedings{LNAI13-Amar,
    author={Haitham Bou Ammar and Matthew E. Taylor and Karl Tuyls and Gerhard Weiss},
    title={{Reinforcement Learning Transfer using a Sparse Coded Inter-Task Mapping}},
    booktitle={{LNAI Post-proceedings of the European Workshop on Multi-agent Systems}},
    year={2013},
    publisher={Springer-Verlag},
    bib2html_rescat={Transfer Learning, Reinforcement Learning},
    bib2html_pubtype={Refereed Book Chapter},
    }
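    A minimal LaTeX sketch of how an entry like the one above is cited once saved to a .bib file; the irllab.bib filename here is illustrative:
    % Minimal sketch: assumes the entry above is stored in irllab.bib (illustrative name).
    \documentclass{article}
    \begin{document}
    Transfer via sparse coded inter-task mappings~\cite{LNAI13-Amar}.
    \bibliographystyle{plain}  % any standard bibliography style works
    \bibliography{irllab}      % BibTeX resolves the \cite key against this file
    \end{document}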

2012

  • Anestis Fachantidis, Ioannis Partalas, Matthew E. Taylor, and Ioannis Vlahavas. Transfer Learning via Multiple Inter-Task Mappings. In Scott Sanner and Marcus Hutter, editors, Recent Advances in Reinforcement Learning, volume 7188 of Lecture Notes in Artificial Intelligence, pages 225-236. Springer-Verlag, Berlin, 2012.
    @incollection{LNAI11-Fachantidis,
    author={Anestis Fachantidis and Ioannis Partalas and Matthew E. Taylor and Ioannis Vlahavas},
    title={{Transfer Learning via Multiple Inter-Task Mappings}},
    booktitle={{Recent Advances in Reinforcement Learning}},
    editor={Scott Sanner and Marcus Hutter},
    address={Berlin},
    year={2012},
    series={Lecture Notes in Artificial Intelligence},
    volume={7188},
    pages={225--236},
    isbn={978-3-642-29945-2},
    publisher={Springer-Verlag},
    bib2html_rescat={Transfer Learning, Reinforcement Learning},
    bib2html_pubtype={Refereed Book Chapter},
    }

2011

  • Matthew E. Taylor, Manish Jain, Christopher Kiekintveld, Jun-young Kwak, Rong Yang, Zhengyu Yin, and Milind Tambe. Two Decades of Multiagent Teamwork Research: Past, Present, and Future. In C. Guttmann, F. Dignum, and M. Georgeff, editors, Collaborative Agents – Research and Development (CARE) 2009-2010, volume 6066 of Lecture Notes in Artificial Intelligence. Springer-Verlag, 2011.
    @incollection{11CARE-Taylor,
    author={Matthew E. Taylor and Manish Jain and Christopher Kiekintveld and Jun-young Kwak and Rong Yang and Zhengyu Yin and Milind Tambe},
    title={Two Decades of Multiagent Teamwork Research: Past, Present, and Future},
    editor={C. Guttmann and F. Dignum and M. Georgeff},
    booktitle={Collaborative Agents - REsearch and Development {(CARE)} 2009-2010},
    publisher={Springer-Verlag},
    series={Lecture Notes in Artificial Intelligence},
    volume={6066},
    year={2011},
    bib2html_pubtype={Invited Book Chapter},
    bib2html_rescat={DCOP},
    }

  • Matthew E. Taylor, Christopher Kiekintveld, and Milind Tambe. Evaluating Deployed Decision Support Systems for Security: Challenges, Arguments, and Approaches. In Milind Tambe, editor, Security Games: Theory, Deployed Applications, Lessons Learned, pages 254-283. Cambridge University Press, 2011.
    @incollection{11Evaluation-Taylor,
    author={Matthew E. Taylor and Christopher Kiekintveld and Milind Tambe},
    title={{Evaluating Deployed Decision Support Systems for Security: Challenges, Arguments, and Approaches}},
    editor={Milind Tambe},
    booktitle={{Security Games: Theory, Deployed Applications, Lessons Learned}},
    publisher={Cambridge University Press},
    year={2011},
    pages={254--283},
    isbn={978-1-107-09642-4},
    bib2html_pubtype={Invited Book Chapter},
    bib2html_rescat={Security},
    }

2010

  • Marc Ponsen, Matthew E. Taylor, and Karl Tuyls. Abstraction and Generalization in Reinforcement Learning. In Matthew E. Taylor and Karl Tuyls, editors, Adaptive Agents and Multi-Agent Systems IV, volume 5924, pages 1-33. Springer-Verlag, 2010.
    @incollection{Ponsen10,
    author={Marc Ponsen and Matthew E. Taylor and Karl Tuyls},
    title={{Abstraction and Generalization in Reinforcement Learning}},
    booktitle={{Adaptive Agents and Multi-Agent Systems {IV}}},
    editor={Matthew E. Taylor and Karl Tuyls},
    publisher={Springer-Verlag},
    year={2010},
    pages={1--33},
    volume={5924},
    bib2html_pubtype={Invited Book Chapter},
    bib2html_rescat={Reinforcement Learning},
    }

2006

  • Peter Stone, Gregory Kuhlmann, Matthew E. Taylor, and Yaxin Liu. Keepaway Soccer: From Machine Learning Testbed to Benchmark. In Itsuki Noda, Adam Jacoff, Ansgar Bredenfeld, and Yasutake Takahashi, editors, RoboCup-2005: Robot Soccer World Cup IX, volume 4020, pages 93-105. Springer-Verlag, Berlin, 2006. 28% acceptance rate at RoboCup-2005.

    Keepaway soccer has been previously put forth as a testbed for machine learning. Although multiple researchers have used it successfully for machine learning experiments, doing so has required a good deal of domain expertise. This paper introduces a set of programs, tools, and resources designed to make the domain easily usable for experimentation without any prior knowledge of RoboCup or the Soccer Server. In addition, we report on new experiments in the Keepaway domain, along with performance results designed to be directly comparable with future experimental results. Combined, the new infrastructure and our concrete demonstration of its use in comparative experiments elevate the domain to a machine learning benchmark, suitable for use by researchers across the field.

    @incollection{ROBOCUP05-stone,
    author={Peter Stone and Gregory Kuhlmann and Matthew E. Taylor and Yaxin Liu},
    title={{Keepaway Soccer: From Machine Learning Testbed to Benchmark}},
    booktitle={{{R}obo{C}up-2005: Robot Soccer World Cup {IX}}},
    editor={Itsuki Noda and Adam Jacoff and Ansgar Bredenfeld and Yasutake Takahashi},
    publisher={Springer-Verlag},
    address={Berlin},
    year={2006},
    volume={4020},
    pages={93--105},
    abstract={Keepaway soccer has been previously put forth as a \emph{testbed} for machine learning. Although multiple researchers have used it successfully for machine learning experiments, doing so has required a good deal of domain expertise. This paper introduces a set of programs, tools, and resources designed to make the domain easily usable for experimentation without any prior knowledge of RoboCup or the Soccer Server. In addition, we report on new experiments in the Keepaway domain, along with performance results designed to be directly comparable with future experimental results. Combined, the new infrastructure and our concrete demonstration of its use in comparative experiments elevate the domain to a machine learning \emph{benchmark}, suitable for use by researchers across the field.},
    note={28\% acceptance rate at {R}obo{C}up-2005},
    wwwnote={Some <a href="http://www.cs.utexas.edu/users/AustinVilla/sim/keepaway/">simulations of keepaway</a> referenced in the paper and keepaway software.<br>Official version from <a href="http://dx.doi.org/10.1007/11780519_9">Publisher's Webpage</a> &copy; Springer-Verlag},
    bib2html_pubtype={Refereed Book Chapter},
    bib2html_rescat={Simulated Robot Soccer, Reinforcement Learning},
    bib2html_funding={NSF, ONR, DARPA}
    }