package net.adaptivebox.deps;

/* Description: The description of agent with hybrid differential evolution and particle swarm.
 *
 * @ Author        Create/Modi     Note
 * Xiaofeng Xie    Jun 10, 2004
 * Xiaofeng Xie    Jul 01, 2008
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * Please acknowledge the author(s) if you use this code in any way.
 *
 * @ References:
 * [1] Zhang W J, Xie X F. DEPSO: hybrid particle swarm with differential
 *     evolution operator. IEEE International Conference on Systems, Man & Cybernetics,
 *     Washington D C, USA, 2003: 3816-3821
 * [2] Xie X F, Zhang W J. SWAF: swarm algorithm framework for numerical
 *     optimization. Genetic and Evolutionary Computation Conference (GECCO),
 *     Seattle, WA, USA, 2004: 238-250
 *     -> an agent perspective
 */

import net.adaptivebox.deps.behavior.*;
import net.adaptivebox.goodness.IGoodnessCompareEngine;
import net.adaptivebox.knowledge.*;
import net.adaptivebox.problem.*;
import net.adaptivebox.space.*;

public class DEPSAgent implements ILibEngine {

  //Describes the problem to be solved
  protected ProblemEncoder problemEncoder;

  //Forms the goodness landscape
  protected IGoodnessCompareEngine qualityComparator;

  //Stores the point generated in the current learning cycle
  protected SearchPoint trailPoint;
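
  //The generate-and-test behavior selected for the current learning cycle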
  private AbsGTBehavior selectGTBehavior;

  //The referred library
  protected Library socialLib;
  //own memory: stores the point generated in an earlier learning cycle
  protected BasicPoint pold_t;
  //own memory: stores the point generated in the last learning cycle
  protected BasicPoint pcurrent_t;
  //own memory: stores the personal best point
  protected SearchPoint pbest_t;

  //Generate-and-test Behaviors
  protected DEGTBehavior deGTBehavior;
  protected PSGTBehavior psGTBehavior;
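
  //Probability of selecting the DE behavior in a learning cycle (PS otherwise)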
  public double switchP = 0.5;
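
  //Binds the shared library to both generate-and-test behaviors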
  public void setLibrary(Library lib) {
    socialLib = lib;
    deGTBehavior.setLibrary(socialLib);
    psGTBehavior.setLibrary(socialLib);
  }
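
  //Binds the problem definition and allocates the agent's working points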
  public void setProblemEncoder(ProblemEncoder encoder) {
    problemEncoder = encoder;
    trailPoint = problemEncoder.getFreshSearchPoint();
    pold_t = problemEncoder.getFreshSearchPoint();
    pcurrent_t = problemEncoder.getFreshSearchPoint();
  }

  public void setSpecComparator(IGoodnessCompareEngine comparer) {
    qualityComparator = comparer;
  }

  public void setPbest(SearchPoint pbest) {
    pbest_t = pbest;
  }
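
  //Selects the generate-and-test behavior for the current cycle:
  //DE with probability switchP, PS otherwise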
  protected AbsGTBehavior getGTBehavior() {
    if (Math.random() < switchP) {
      return deGTBehavior;
    } else {
      return psGTBehavior;
    }
  }
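
  //Registers a generate-and-test behavior and wires it to the agent's own memory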
  public void setGTBehavior(AbsGTBehavior gtBehavior) {
    if (gtBehavior instanceof DEGTBehavior) {
      deGTBehavior = (DEGTBehavior) gtBehavior;
      deGTBehavior.setPbest(pbest_t);
      return;
    }
    if (gtBehavior instanceof PSGTBehavior) {
      psGTBehavior = (PSGTBehavior) gtBehavior;
      psGTBehavior.setMemPoints(pbest_t, pcurrent_t, pold_t);
      return;
    }
  }

  public void generatePoint() {
    //generates a new point in the search space (S) based on
    //its own memory and the library
    selectGTBehavior = this.getGTBehavior();
    selectGTBehavior.generateBehavior(trailPoint, problemEncoder);
    //evaluates the trial point into goodness information
    problemEncoder.evaluate(trailPoint);
  }
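
  //Tests the trial point and updates the agent's memory accordingly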
  public void learn() {
    selectGTBehavior.testBehavior(trailPoint, qualityComparator);
  }
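
  //Returns the trial point generated in the current learning cycle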
  public SearchPoint getMGState() {
    return trailPoint;
  }
}
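
/* A minimal usage sketch (not part of the original class): one possible way to
 * drive the agent. It assumes no-arg constructors for DEGTBehavior and
 * PSGTBehavior, and leaves construction of the ProblemEncoder, Library, and
 * IGoodnessCompareEngine to the caller, since their signatures are not shown
 * in this file. Note the ordering constraints: setPbest must precede
 * setGTBehavior (which wires pbest_t into the behaviors), and setLibrary must
 * follow setGTBehavior (it dereferences both behavior fields).
 */
class DEPSAgentUsageSketch {
  static SearchPoint runCycles(ProblemEncoder encoder, Library lib,
                               IGoodnessCompareEngine comparator, int cycles) {
    DEPSAgent agent = new DEPSAgent();
    agent.setProblemEncoder(encoder);               //bind problem, allocate working points
    agent.setSpecComparator(comparator);            //goodness landscape
    agent.setPbest(encoder.getFreshSearchPoint());  //personal best memory
    agent.setGTBehavior(new DEGTBehavior());        //assumed no-arg constructor
    agent.setGTBehavior(new PSGTBehavior());        //assumed no-arg constructor
    agent.setLibrary(lib);                          //shared social library
    for (int t = 0; t < cycles; t++) {
      agent.generatePoint();  //propose and evaluate a trial point
      agent.learn();          //test the trial point against the landscape
    }
    return agent.getMGState();  //trial point of the final cycle
  }
}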