Representing Communication and Learning in Femtocell Pilot Power Control Algorithms

Created by W.Langdon from gp-bibliography.bib Revision:1.3963

@InCollection{Hemberg:2012:GPTP,
  author =       "Erik Hemberg and Lester Ho and Michael O'Neill and 
                 Holger Claussen",
  title =        "Representing Communication and Learning in Femtocell
                 Pilot Power Control Algorithms",
  booktitle =    "Genetic Programming Theory and Practice X",
  year =         "2012",
  series =       "Genetic and Evolutionary Computation",
  editor =       "Rick Riolo and Ekaterina Vladislavleva and 
                 Marylyn D. Ritchie and Jason H. Moore",
  publisher =    "Springer",
  chapter =      "15",
  pages =        "223--238",
  address =      "Ann Arbor, USA",
  month =        "12-14 " # may,
  keywords =     "genetic algorithms, genetic programming, Grammatical
                 evolution, Femtocell, Symbolic regression",
  isbn13 =       "978-1-4614-6845-5",
  URL =          "https://doi.org/10.1007/978-1-4614-6846-2_15",
  DOI =          "10.1007/978-1-4614-6846-2_15",
  abstract =     "The overall goal of evolving algorithms for femtocells
                 is to create a continuous on-line evolution of the
                 femtocell pilot power control algorithm to optimise
                 their coverage. Two aspects of intelligence are used
                 for increasing the complexity of the input and the
                 behaviour, communication and learning. In this initial
                 study we investigate how to evolve more complex
                 behaviour in decentralised control algorithms by
                 changing the representation of communication and
                 learning. The communication is addressed by allowing
                 the femtocell to identify its neighbours and take the
                 values of its neighbours into account when making
                 decisions regarding the increase or decrease of pilot
                 power. Learning is considered in two variants: the use
                 of input parameters and the implementation of a
                 built-in reinforcement procedure. The reinforcement
                 allows learning during the simulation in addition to
                 the execution of fixed commands. The experiments
                 compare the new representation in the form of different
                 terminal symbols in a grammar. The results show that
                 there are differences between the communication and
                 learning combinations and that the best solution uses
                 both communication and learning.",
  notes =        "Also known as \cite{Hemberg:2013:GPTP}

                 part of \cite{Riolo:2012:GPTP} published after the
                 workshop in 2013",
}

Genetic Programming entries for Erik Hemberg Lester T W Ho Michael O'Neill Holger Claussen

Citations