1 package net
.adaptivebox
.deps
;
/**
 * Description: The description of agent with hybrid differential evolution and particle swarm.
 *
 * @ Author        Create/Modi     Note
 * Xiaofeng Xie    Jun 10, 2004
 * Xiaofeng Xie    Jul 01, 2008
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * Please acknowledge the author(s) if you use this code in any way.
 *
 * Reference(s):
 * [1] Zhang W J, Xie X F. DEPSO: hybrid particle swarm with differential
 * evolution operator. IEEE International Conference on Systems, Man & Cybernetics,
 * Washington D C, USA, 2003: 3816-3821
 * [2] X F Xie, W J Zhang. SWAF: swarm algorithm framework for numerical
 * optimization. Genetic and Evolutionary Computation Conference (GECCO),
 * Seattle, WA, USA, 2004: 238-250
 * -> an agent perspective
 */
35 import net
.adaptivebox
.deps
.behavior
.AbsGTBehavior
;
36 import net
.adaptivebox
.deps
.behavior
.DEGTBehavior
;
37 import net
.adaptivebox
.deps
.behavior
.PSGTBehavior
;
38 import net
.adaptivebox
.global
.RandomGenerator
;
39 import net
.adaptivebox
.goodness
.IGoodnessCompareEngine
;
40 import net
.adaptivebox
.knowledge
.ILibEngine
;
41 import net
.adaptivebox
.knowledge
.Library
;
42 import net
.adaptivebox
.knowledge
.SearchPoint
;
43 import net
.adaptivebox
.problem
.ProblemEncoder
;
44 import net
.adaptivebox
.space
.BasicPoint
;
46 public class DEPSAgent
{
// Describes the problem to be solved (also the factory for fresh search points)
private ProblemEncoder problemEncoder;

// Forms the goodness landscape: defines how two points' qualities are compared
private IGoodnessCompareEngine qualityComparator;

// store the point that generated in current learning cycle
private SearchPoint trailPoint;

// The behavior (DE or PS) chosen for the current cycle; set in generatePoint()
// and reused by learn() so the same behavior that generated the point tests it.
private AbsGTBehavior selectGTBehavior;

// the own memory: store the point that generated in old learning cycle
private BasicPoint pold_t;

// the own memory: store the point that generated in last learning cycle
private BasicPoint pcurrent_t;

// the own memory: store the personal best point
private SearchPoint pbest_t;

// Generate-and-test behaviors.
private DEGTBehavior deGTBehavior;
private PSGTBehavior psGTBehavior;

// Probability used to pick between the two behaviors each cycle
// (compared against a uniform [0,1) draw in getGTBehavior()).
private double switchP = 0.5;
/**
 * Creates an agent that alternates between a differential-evolution and a
 * particle-swarm generate-and-test behavior over a shared private memory.
 *
 * @param encoder      describes the problem and supplies fresh search points
 * @param deGTBehavior the differential-evolution behavior
 * @param psGTBehavior the particle-swarm behavior
 * @param switchP      probability used to select between the two behaviors
 * @param comparer     comparison engine forming the goodness landscape
 * @param pbest        the personal-best point this agent should track
 */
public DEPSAgent(ProblemEncoder encoder, DEGTBehavior deGTBehavior, PSGTBehavior psGTBehavior,
    double switchP, IGoodnessCompareEngine comparer, SearchPoint pbest) {
  this.switchP = switchP;
  problemEncoder = encoder;
  qualityComparator = comparer;

  trailPoint = problemEncoder.getFreshSearchPoint();
  pold_t = problemEncoder.getFreshSearchPoint();
  pcurrent_t = problemEncoder.getFreshSearchPoint();
  // FIX: store the supplied personal-best point. Without this assignment the
  // pbest parameter was unused and pbest_t remained null, so both
  // setMemPoints calls below would hand a null personal-best slot to the
  // behaviors.
  pbest_t = pbest;

  this.deGTBehavior = deGTBehavior;
  this.deGTBehavior.setMemPoints(pbest_t, pcurrent_t, pold_t);
  this.psGTBehavior = psGTBehavior;
  this.psGTBehavior.setMemPoints(pbest_t, pcurrent_t, pold_t);
}
95 public void setSpecComparator(IGoodnessCompareEngine comparer
) {
96 qualityComparator
= comparer
;
99 private AbsGTBehavior
getGTBehavior() {
100 if (RandomGenerator
.doubleZeroOneRandom() < switchP
) {
107 public void setGTBehavior(AbsGTBehavior gtBehavior
) {
108 gtBehavior
.setMemPoints(pbest_t
, pcurrent_t
, pold_t
);
111 public void generatePoint() {
112 // generates a new point in the search space (S) based on
113 // its memory and the library
114 selectGTBehavior
= getGTBehavior();
115 selectGTBehavior
.generateBehavior(trailPoint
, problemEncoder
);
117 // evaluate into goodness information
118 problemEncoder
.evaluate(trailPoint
);
121 public void learn() {
122 selectGTBehavior
.testBehavior(trailPoint
, qualityComparator
);
125 public SearchPoint
getMGState() {