Emergent Tangled Graph Representations for Atari Game Playing Agents

@InProceedings{Kelly:2017:EuroGP,
  author =       "Stephen Kelly and Malcolm I. Heywood",
  title =        "Emergent Tangled Graph Representations for Atari Game
                 Playing Agents",
  booktitle =    "EuroGP 2017: Proceedings of the 20th European
                 Conference on Genetic Programming",
  year =         "2017",
  month =        "19-21 " # apr,
  editor =       "Mauro Castelli and James McDermott and 
                 Lukas Sekanina",
  series =       "LNCS",
  volume =       "10196",
  publisher =    "Springer-Verlag",
  address =      "Amsterdam",
  pages =        "64--79",
  note =         "best paper",
  keywords =     "genetic algorithms, genetic programming",
  DOI =          "10.1007/978-3-319-55696-3_5",
  abstract =     "Organizing code into coherent programs and relating
                 different programs to each other represents an
                 underlying requirement for scaling genetic programming
                 to more difficult task domains. A model in which
                 policies are defined by teams of programs, with team
                 and program represented by independent, coevolved
                 populations, has previously been shown to support the
                 development of variable-sized teams. In this work, we
                 generalize the approach to provide a complete
                 framework for organizing multiple teams into
                 arbitrarily deep/wide structures through a process of
                 continuous evolution; hereafter the Tangled Program
                 Graph (TPG). Benchmarking is conducted using a subset
                 of 20 games from the Arcade Learning Environment
                 (ALE), an Atari 2600 video game emulator. The games
                 considered here correspond to those in which deep
                 learning was unable to reach a threshold of play
                 consistent with that of a human. Information provided
                 to the learning agent is limited to that which a human
                 would experience: screen-capture sensory input, Atari
                 joystick actions, and game score. The performance of
                 the proposed approach exceeds that of deep learning in
                 15 of the 20 games, with 7 of the 15 also exceeding
                 the level associated with human competence. Moreover,
                 in contrast to solutions from deep learning, solutions
                 discovered by TPG are also very `sparse'. Rather than
                 assuming that all of the state space contributes to
                 every decision, each action in TPG is resolved
                 following execution of only a subset of an
                 individual's graph. This results in significantly
                 lower computational requirements for model building
                 than is presently the case for deep learning.",
  notes =        "Part of \cite{Castelli:2017:GP} EuroGP'2017 held in
                 conjunction with EvoCOP 2017, EvoMusArt 2017 and
                 EvoApplications 2017",
}
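
The abstract's central mechanism (teams of bidding programs linked
into a graph, with each action resolved by executing only a subset of
that graph) can be illustrated with a short sketch. The Python below
is a minimal, hypothetical reconstruction from the abstract's
description, not the authors' implementation; all class and function
names are invented for illustration.

import random

class Program:
    # A program pairs an evolved bid function with an action, which is
    # either an atomic action id or a reference to another Team (the
    # cross-team references are what make the graph "tangled").
    def __init__(self, action, n_inputs=4):
        # Random linear weights stand in for an evolved bid program.
        self.weights = [random.uniform(-1, 1) for _ in range(n_inputs)]
        self.action = action

    def bid(self, state):
        # Each program reads only its own inputs, so a single decision
        # touches just a subset of the state space ("sparse" execution).
        return sum(w * s for w, s in zip(self.weights, state))

class Team:
    def __init__(self, programs):
        # In TPG every team keeps at least one atomic-action program,
        # which guarantees graph traversal always terminates.
        self.programs = programs

    def act(self, state, visited=None):
        # Follow the highest-bidding program; recurse into referenced
        # teams, skipping teams already visited to avoid cycles.
        visited = set() if visited is None else visited
        visited.add(id(self))
        candidates = [p for p in self.programs
                      if not isinstance(p.action, Team)
                      or id(p.action) not in visited]
        winner = max(candidates, key=lambda p: p.bid(state))
        if isinstance(winner.action, Team):
            return winner.action.act(state, visited)
        return winner.action

# Toy usage: a two-team graph selecting among three atomic actions
# from a four-dimensional state vector.
leaf = Team([Program(a) for a in range(3)])
root = Team([Program(0), Program(leaf)])
print(root.act([0.2, -0.5, 0.9, 0.1]))

Only the programs along the path from the root to the winning atomic
action execute per decision, so evaluation cost grows with the visited
subgraph rather than the full state space; this is the `sparsity' the
abstract credits for TPG's low model-building cost.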
