@COMMENT This file was generated by bib2html.pl version 0.94
@COMMENT written by Patrick Riley
@COMMENT This file came from Samuel Barrett's publication pages at
@COMMENT http://www.cs.utexas.edu/~sbarrett/publications
@inproceedings{AAAI11-Lin,
  author    = {Lin, Raz and Kraus, Sarit and Agmon, Noa and Barrett, Samuel and Stone, Peter},
  title     = {Comparing Agents: Success against People in Security Domains},
  booktitle = {Proceedings of the Twenty-Fifth {AAAI} Conference on Artificial Intelligence},
  location  = {San Francisco, CA},
  month     = aug,
  year      = {2011},
  abstract  = {The interaction of people with autonomous agents has become increasingly
    prevalent. Some of these settings include security domains, where people can
    be characterized as uncooperative, hostile, manipulative, and tending to take
    advantage of the situation for their own needs. This makes it challenging to
    design proficient agents to interact with people in such environments.
    Evaluating the success of the agents automatically before evaluating them
    with people or deploying them could alleviate this challenge and result in
    better designed agents. In this paper we show how Peer Designed Agents (PDAs)
    - computer agents developed by human subjects - can be used as a method for
    evaluating autonomous agents in security domains. Such evaluation can reduce
    the effort and costs involved in evaluating autonomous agents interacting
    with people to validate their efficacy. Our experiments included more than 70
    human subjects and 40 PDAs developed by students. The study provides
    empirical support that PDAs can be used to compare the proficiency of
    autonomous agents when matched with people in security domains.},
}