Claw 1.7.3
game_ai.tpp
/*
  CLAW - a C++ Library Absolutely Wonderful

  CLAW is a free library without any particular aim but being useful to
  anyone.

  Copyright (C) 2005 Sébastien Angibaud
  Copyright (C) 2005-2011 Julien Jorge

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

  contact: julien.jorge@gamned.org
*/
/**
 * \file game_ai.tpp
 * \brief Implementation of the artificial intelligence functions.
 * \author Julien Jorge & Sébastien Angibaud
 */
#include <claw/max_vector.hpp>

#include <algorithm> // std::max and std::min, used by alpha_beta
#include <cstdlib>

//**************************** gamestate **************************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Destructor.
 */
template<typename Action, typename Numeric>
claw::ai::game::game_state<Action, Numeric>::~game_state()
{
  // nothing to do
} // game_state::~game_state()

/*---------------------------------------------------------------------------*/
/**
 * \brief Get the minimal score a state can get.
 */
template<typename Action, typename Numeric>
typename claw::ai::game::game_state<Action, Numeric>::score
claw::ai::game::game_state<Action, Numeric>::min_score()
{
  return s_min_score;
} // game_state::min_score()

/*---------------------------------------------------------------------------*/
/**
 * \brief Get the maximal score a state can get.
 */
template<typename Action, typename Numeric>
typename claw::ai::game::game_state<Action, Numeric>::score
claw::ai::game::game_state<Action, Numeric>::max_score()
{
  return s_max_score;
} // game_state::max_score()

/*---------------------------------------------------------------------------*/
/**
 * \brief Truncate a score so that it fits in the closed range
 *        [min_score(), max_score()].
 * \param score_val The value to fit.
 */
template<typename Action, typename Numeric>
typename claw::ai::game::game_state<Action, Numeric>::score
claw::ai::game::game_state<Action, Numeric>::fit( score score_val ) const
{
  if ( s_max_score < score_val )
    return s_max_score;
  else if ( score_val < s_min_score )
    return s_min_score;
  else
    return score_val;
} // game_state::fit()

//**************************** action_eval ************************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Constructor.
 * \param a The evaluated action.
 * \param e The evaluation of the action.
 */
template<typename Action, typename Numeric>
claw::ai::game::action_eval<Action, Numeric>::action_eval
( const Action& a, const Numeric& e )
  : action(a), eval(e)
{

} // action_eval::action_eval()

/*---------------------------------------------------------------------------*/
/**
 * \brief Compare with another action.
 * \param ae The other action.
 * \return true if the evaluation of this action is lower than the evaluation
 *         of \a ae.
 */
template<typename Action, typename Numeric>
bool claw::ai::game::action_eval<Action, Numeric>::operator<
  ( const action_eval& ae ) const
{
  return eval < ae.eval;
} // action_eval::operator<()

#if 0
/*---------------------------------------------------------------------------*/
/**
 * \brief Equality of two actions.
 * \return true if this->eval == ae.eval.
 */
template<typename Action, typename Numeric>
bool claw::ai::game::action_eval<Action, Numeric>::operator==
  ( const action_eval& ae ) const
{
  return eval == ae.eval;
} // action_eval::operator==()
#endif

//********************************* min_max ***********************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Apply the min-max algorithm to compute the score of the best
 *        reachable state.
 * \param depth Depth of the search subtree we are allowed to explore.
 * \param current_state The state of the game.
 * \param computer_turn Tell if the next action is done by the computer.
 */
template<typename State>
typename claw::ai::game::min_max<State>::score
claw::ai::game::min_max<State>::operator()
  ( int depth, const state& current_state, bool computer_turn ) const
{
  score score_val;

  // we have reached a final state or we are not allowed to search deeper
  if ( current_state.final() || (depth == 0) )
    score_val = current_state.evaluate();
  else
    {
      std::list<action> next_actions;
      typename std::list<action>::const_iterator it;
      state* new_state;

      // get all the actions available from the current state
      current_state.next_actions( next_actions );

      if ( next_actions.empty() )
        score_val = current_state.evaluate();
      else if ( computer_turn )
        {
          score_val = current_state.min_score();

          for ( it = next_actions.begin(); it != next_actions.end(); ++it )
            {
              new_state = static_cast<state*>( current_state.do_action(*it) );

              // evaluate the subtree in which the human player answers
              const score s = (*this)( depth - 1, *new_state, false );

              // and keep the best score the computer can force
              if ( s > score_val )
                score_val = s;

              delete new_state;
            }
        }
      else // human player's turn
        {
          score_val = current_state.max_score();

          for ( it = next_actions.begin(); it != next_actions.end(); ++it )
            {
              new_state = static_cast<state*>( current_state.do_action(*it) );

              // evaluate the subtree in which the computer answers
              const score s = (*this)( depth - 1, *new_state, true );

              // and keep the worst score for the computer
              if ( s < score_val )
                score_val = s;

              delete new_state;
            }
        }
    }

  return score_val;
} // min_max::operator()
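
/*---------------------------------------------------------------------------*/
/*
  Illustrative sketch, not part of the library: a hypothetical state type
  showing the interface min_max relies on above. The game (a single pile of
  tokens where taking the last one wins), the action type (int, the number
  of tokens taken) and the evaluation are all made up for the example; only
  the member functions that min_max actually calls are meaningful here.
*/
#if 0
class pile_state : public claw::ai::game::game_state<int, int>
{
public:
  pile_state( int tokens, bool computer_moved_last )
    : m_tokens(tokens), m_computer_moved_last(computer_moved_last)
  { }

  // the game is over when the pile is empty
  bool final() const { return m_tokens == 0; }

  // scores are always given from the computer's point of view
  int evaluate() const
  {
    if ( !final() )
      return 0;

    return m_computer_moved_last ? max_score() : min_score();
  }

  // a player may take one or two tokens
  void next_actions( std::list<int>& l ) const
  {
    for ( int a = 1; (a <= 2) && (a <= m_tokens); ++a )
      l.push_back(a);
  }

  // min_max deletes the returned state, so it must be heap-allocated
  pile_state* do_action( const int& a ) const
  {
    return new pile_state( m_tokens - a, !m_computer_moved_last );
  }

private:
  int m_tokens;
  bool m_computer_moved_last;
};

// score of the best line for the computer, searching six plies ahead:
//   claw::ai::game::min_max<pile_state> mm;
//   const int value = mm( 6, pile_state(6, false), true );
#endif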

//******************************** alpha_beta *********************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Apply the alpha-beta algorithm to compute the score of the best
 *        reachable state.
 * \param depth Depth of the search subtree we are allowed to explore.
 * \param current_state The state of the game.
 * \param computer_turn Tell if the next action is done by the computer.
 */
template<typename State>
typename State::score claw::ai::game::alpha_beta<State>::operator()
  ( int depth, const state& current_state, bool computer_turn ) const
{
  // start with the widest possible window; compute() narrows it as better
  // lines are found
  return this->compute
    ( depth, current_state, computer_turn, current_state.min_score(),
      current_state.max_score() );
} // alpha_beta::operator()

/*---------------------------------------------------------------------------*/
/**
 * \brief Find the score of the best action using the alpha-beta algorithm.
 * \param depth Depth of the search subtree we are allowed to explore.
 * \param current_state The state of the game.
 * \param computer_turn Tell if the next action is done by the computer.
 * \param alpha Lower bound of the search window: the best score the computer
 *        player is already guaranteed elsewhere in the tree.
 * \param beta Upper bound of the search window: the best score the human
 *        player is already guaranteed elsewhere in the tree.
 */
template<typename State>
typename claw::ai::game::alpha_beta<State>::score
claw::ai::game::alpha_beta<State>::compute
( int depth, const state& current_state, bool computer_turn, score alpha,
  score beta ) const
{
  score score_val;

  // we have reached a final state or we are not allowed to search deeper
  if ( current_state.final() || (depth == 0) )
    score_val = current_state.evaluate();
  else
    {
      std::list<action> next_actions;
      typename std::list<action>::const_iterator it;
      state* new_state;

      // get all the actions available from the current state
      current_state.next_actions( next_actions );

      if ( next_actions.empty() )
        score_val = current_state.evaluate();
      else if ( computer_turn )
        {
          score_val = current_state.min_score();

          it = next_actions.begin();

          // stop as soon as score_val reaches beta: the human player would
          // never let the game come to this state
          while ( (it != next_actions.end()) && (score_val < beta) )
            {
              new_state = static_cast<state*>( current_state.do_action(*it) );

              // evaluate the subtree in which the human player answers
              const score s = compute
                ( depth - 1, *new_state, false, std::max(alpha, score_val),
                  beta );

              // and keep the best score the computer can force
              if ( s > score_val )
                score_val = s;

              delete new_state;

              ++it;
            }
        }
      else // human player's turn
        {
          score_val = current_state.max_score();

          it = next_actions.begin();

          // stop as soon as score_val reaches alpha: the computer would
          // never let the game come to this state
          while ( (it != next_actions.end()) && (score_val > alpha) )
            {
              new_state = static_cast<state*>( current_state.do_action(*it) );

              // evaluate the subtree in which the computer answers
              const score s = compute
                ( depth - 1, *new_state, true, alpha,
                  std::min(beta, score_val) );

              // and keep the worst score for the computer
              if ( s < score_val )
                score_val = s;

              delete new_state;

              ++it;
            }
        }
    }

  return score_val;
} // alpha_beta::compute()
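
/*---------------------------------------------------------------------------*/
/*
  Illustrative check, not part of the library: when compute() is called with
  the full window (min_score(), max_score()), as operator() does above, the
  pruning only skips branches that cannot change the result, so alpha_beta
  returns exactly the same score as min_max. pile_state is the hypothetical
  state type sketched after min_max.
*/
#if 0
#include <cassert>

void check_alpha_beta_equals_min_max()
{
  claw::ai::game::min_max<pile_state> mm;
  claw::ai::game::alpha_beta<pile_state> ab;
  const pile_state s( 6, false );

  assert( mm( 6, s, true ) == ab( 6, s, true ) );
}
#endif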

//***************************** select_action *********************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Select an action using the given method.
 * \param depth Maximum depth of the search tree.
 * \param current_state The state of the game.
 * \param new_action (in/out) Best known action; left unchanged if no action
 *        scores better than min_score().
 * \param computer_turn Tell if the action is done by the computer.
 */
template<typename Method>
void claw::ai::game::select_action<Method>::operator()
  ( int depth, const state& current_state, action& new_action,
    bool computer_turn ) const
{
  std::list<action> l;
  typename std::list<action>::iterator it;
  score best_eval;
  Method method;

  // get all the actions available from the current state
  current_state.next_actions( l );
  best_eval = current_state.min_score();

  for ( it = l.begin(); it != l.end(); ++it )
    {
      state* new_state;
      score eval;

      // try and evaluate each action
      new_state = static_cast<state*>( current_state.do_action(*it) );
      eval = method( depth - 1, *new_state, !computer_turn );

      delete new_state;

      // keep the first action having the best evaluation
      if ( eval > best_eval )
        {
          best_eval = eval;
          new_action = *it;
        }
    }
} // select_action::operator()

//*************************** select_random_action ****************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Select a random action among the best ones.
 * \param depth Maximum depth of the search tree.
 * \param current_state The state of the game.
 * \param new_action (in/out) Best known action.
 * \param computer_turn Tell if the action is done by the computer.
 */
template<typename Method>
void claw::ai::game::select_random_action<Method>::operator()
  ( int depth, const state& current_state, action& new_action,
    bool computer_turn ) const
{
  std::list<action> l;
  typename std::list<action>::iterator it;
  action_eval<action, score> eval( new_action, current_state.min_score() );
  Method method;
  max_vector< action_eval<action, score> > events( eval );

  // get all the actions available from the current state
  current_state.next_actions( l );

  for ( it = l.begin(); it != l.end(); ++it )
    {
      state* new_state;

      // try and evaluate each action
      new_state = static_cast<state*>( current_state.do_action(*it) );

      eval.action = *it;
      eval.eval = method( depth - 1, *new_state, !computer_turn );

      delete new_state;

      // keep only the best actions
      events.add( eval );
    }

  // pick one of the best actions at random; dividing by RAND_MAX + 1.0
  // avoids an integer overflow and guarantees i < events.get_v().size()
  const std::size_t i =
    (double)rand() / (RAND_MAX + 1.0) * events.get_v().size();
  new_action = events.get_v()[i].action;
} // select_random_action::operator()
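
/*---------------------------------------------------------------------------*/
/*
  Illustrative usage, not part of the library: selecting the computer's move
  with either selector, still using the hypothetical pile_state sketched
  after min_max. select_random_action draws from rand(), so the caller is
  expected to seed the generator if varied games are wanted.
*/
#if 0
#include <cstdlib>
#include <ctime>

int choose_move( const pile_state& current )
{
  std::srand( std::time(NULL) );

  // fallback value, kept if no action scores better than min_score()
  int move = 1;

  // deterministic: the first action with the best alpha-beta evaluation
  claw::ai::game::select_action< claw::ai::game::alpha_beta<pile_state> >
    select;
  select( 6, current, move, true );

  // or: a uniformly random action among the best scoring ones
  claw::ai::game::select_random_action
    < claw::ai::game::alpha_beta<pile_state> > select_random;
  select_random( 6, current, move, true );

  return move;
}
#endif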
403