Genetic Network Programming with Updating Rule Accumulation

@InProceedings{Wang:2011:GNPwURA,
  title =        "Genetic Network Programming with Updating Rule
                 Accumulation",
  author =       "Lutao Wang and Shingo Mabu and Kotaro Hirasawa",
  pages =        "2259--2266",
  booktitle =    "Proceedings of the 2011 IEEE Congress on Evolutionary
                 Computation",
  year =         "2011",
  editor =       "Alice E. Smith",
  month =        "5-8 " # jun,
  address =      "New Orleans, USA",
  organization = "IEEE Computational Intelligence Society",
  publisher =    "IEEE Press",
  ISBN =         "0-7803-8515-2",
  keywords =     "genetic algorithms, genetic programming, genetic
                 network programming, Evolutionary computation theory,
                 Evolutionary simulation-based optimisation,
                 Evolutionary games and multi-agent systems",
  DOI =          "10.1109/CEC.2011.5949895",
  abstract =     "Conventional evolutionary computation methods aim to
                 find elite individuals as the optimal solutions. The
                 rule accumulation method instead extracts good
                 experiences from individuals over the generations and
                 stores them as decision rules, which are regarded as
                 the solutions. Genetic Network Programming (GNP) is
                 well suited to dynamic environments because of its
                 directed graph structure, the reusability of its
                 nodes, and its handling of partially observable
                 processes. A GNP-based rule accumulation method has
                 been studied and applied to the stock trading
                 problem. However, as the environment changes, the old
                 rules in the rule pool become inadequate for guiding
                 the agents' actions, so updating these rules becomes
                 necessary. This paper proposes a new method to update
                 the accumulated rules in accordance with
                 environmental changes. Sarsa learning, an effective
                 on-line learning policy, is combined with off-line
                 evolution to generate better individuals and update
                 the rules in the rule pool. The Tileworld problem, a
                 well-known benchmark for multi-agent systems, is used
                 as the simulation environment. Simulation results
                 demonstrate the efficiency and effectiveness of the
                 proposed method in dealing with changing
                 environments.",
  notes =        "CEC2011 sponsored by the IEEE Computational
                 Intelligence Society, and previously sponsored by the
                 EPS and the IET.",
}
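
The abstract's core mechanism, combining Sarsa's on-line
temporal-difference update with an off-line evolved rule pool, can be
illustrated with a short sketch. The Python fragment below is a minimal
illustration, not the authors' implementation: the Rule and RulePool
classes, the strength field, and the alpha, gamma, and threshold
parameters are all assumptions introduced here for exposition. Only the
Sarsa update itself, Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') -
Q(s,a)), is the standard textbook rule.

  # Minimal sketch (not the paper's code): Sarsa-style strength
  # updates for rules accumulated in a rule pool. All names and
  # parameter values are illustrative assumptions.
  from dataclasses import dataclass

  @dataclass
  class Rule:
      antecedent: tuple        # state attributes under which the rule fires
      action: str              # action the rule recommends
      strength: float = 0.0    # Sarsa-style value estimate for this rule

  class RulePool:
      def __init__(self, alpha=0.1, gamma=0.9):
          self.alpha = alpha   # learning rate (assumed value)
          self.gamma = gamma   # discount factor (assumed value)
          self.rules = {}      # antecedent -> Rule

      def add(self, rule):
          self.rules[rule.antecedent] = rule

      def sarsa_update(self, rule, reward, next_rule):
          # Standard Sarsa update applied to rule strength:
          # Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') - Q(s,a))
          td_error = reward + self.gamma * next_rule.strength - rule.strength
          rule.strength += self.alpha * td_error

      def prune(self, threshold=0.0):
          # Drop rules whose strength has decayed below the threshold,
          # so the pool tracks the current environment (illustrative policy).
          self.rules = {a: r for a, r in self.rules.items()
                        if r.strength > threshold}

Under this reading, rules that keep earning reward in the changed
environment gain strength, while stale rules decay and are pruned,
which matches the abstract's stated goal of keeping the rule pool
competent as the environment drifts.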
