Author: Dann Corbit
Date: 14:30:36 03/19/04
Go up one level in this thread
On March 19, 2004 at 16:35:29, martin fierz wrote:
>i'm using crafty as one of the sparring partners for my engine. it's not much of
>a contest of course, but like with humans, the best way to improve is to play
>higher-rated opponents.
>
>recently, i peeked in the crafty directory and noticed lots of learn files. so i
>added "learn 0" to crafty.rc expecting it to stop generating those files. but
>it's still generating them.
>
>so: how do i disable learning in crafty? or is it just producing the files but
>not actually using them when "learn 0" is set?
learn <n>.................enables/disables learning.
bookw weight <v>.......................sets weight for book ordering.
(weights are freq (frequency), eval (evaluation)
and learn (learned scores)).
Clues in the source:
E:\crafty>grepcarl learn *.c
book.c ( 39): * 32 bits: learned value (floating point).
*
book.c ( 58): static int book_status[200], evaluations[200],
bs_learn[200], bs_CAP[200];
book.c ( 190): bs_learn[nmoves] = (int) (book_buffer[i].learn
* 100.0);
book.c ( 192): bs_learn[nmoves] *= -1;
book.c ( 226): * if any moves have a very bad or a very good learn
*
book.c ( 236): if (bs_learn[i] <= LEARN_COUNTER_BAD &&
!bs_percent[i] &&
book.c ( 248): if (bs_learn[i] >= LEARN_COUNTER_GOOD &&
!(book_status[i] & 003))
book.c ( 272): minlv = Min(minlv, bs_learn[i]);
book.c ( 273): maxlv = Max(maxlv, bs_learn[i]);
book.c ( 295): (bs_learn[i] - minlv) / (float) (maxlv -
book.c ( 296): minlv) * 1000.0 * book_weight_learn;
book.c ( 351): temp = bs_learn[i];
book.c ( 352): bs_learn[i] = bs_learn[i + 1];
book.c ( 353): bs_learn[i + 1] = temp;
book.c ( 384): " move played %% score learn
CAP sortv P%% P\n");
book.c ( 404): Print(128, "%9.2f", (float) bs_learn[i] /
100.0);
book.c ( 423): * moves with bad learned results.
*
book.c ( 1203): current.learn = 0.0;
book.c ( 1231): next.learn = 0.0;
book.c ( 1258): current.learn = 0.0;
crafty.c ( 38): #include "learn.c"
data.c ( 57): int book_learn_eval[LEARN_INTERVAL];
data.c ( 58): int book_learn_depth[LEARN_INTERVAL];
data.c ( 404): float book_weight_learn = 1.0;
data.c ( 409): int learning = 7;
data.c ( 410): int learning_cutoff = -2*PAWN_VALUE;
data.c ( 411): int learning_trigger = PAWN_VALUE/3;
init.c ( 147): Print(128, "learning is disabled\n");
init.c ( 175): Print(128, "unable to open book learning file
[%s/book.lrn].\n", book_path);
init.c ( 176): Print(128, "learning disabled.\n");
init.c ( 177): learning &= ~(book_learning + result_learning);
init.c ( 179): if (learning & position_learning) {
init.c ( 197): Print(128, "unable to open position learning
file [%s/position.bin].\n",
init.c ( 199): Print(128, "learning disabled.\n");
init.c ( 200): learning &= ~position_learning;
init.c ( 209): Print(128, "unable to open position learning
file [%s/position.bin].\n",
init.c ( 211): Print(128, "learning disabled.\n");
init.c ( 212): learning &= ~position_learning;
learn.c ( 16): * LearnBook() is used to accumulate the evaluations
for the first N moves *
learn.c ( 19): * or not. (N is set by the #define LEARN_INTERVAL
definition.) *
learn.c ( 23): * move will have its "learn" value reduced to
discourage playing this move *
learn.c ( 24): * again. (2) if the evaluation is even after N
moves, then the learn *
learn.c ( 27): * good after N moves, the learn value will be
increased by a large amount *
learn.c ( 32): void LearnBook(TREE * RESTRICT tree, int wtm, int
search_value,
learn.c ( 43): * learning array (book_learn_eval[]) for use later.
*
learn.c ( 49): if (!(learning & book_learning) && force != 2)
learn.c ( 51): if (!(learning & result_learning) && force == 2)
learn.c ( 53): if (moves_out_of_book <= LEARN_INTERVAL && !force) {
learn.c ( 55): book_learn_eval[moves_out_of_book - 1] =
search_value;
learn.c ( 56): book_learn_depth[moves_out_of_book - 1] =
search_depth;
learn.c ( 65): * better or worse, we need to update the learning
count. *
learn.c ( 69): else if (moves_out_of_book == LEARN_INTERVAL + 1 ||
force) {
learn.c ( 70): int move, i, j, learn_value, read;
learn.c ( 80): float book_learn[512], t_learn_value;
learn.c ( 84): Print(128, "LearnBook() executed\n");
learn.c ( 86): learning &= ~book_learning;
learn.c ( 88): learning &= ~result_learning;
learn.c ( 89): interval = Min(LEARN_INTERVAL, moves_out_of_book);
learn.c ( 94): if (book_learn_eval[i] > best_eval) {
learn.c ( 95): best_eval = book_learn_eval[i];
learn.c ( 98): if (book_learn_eval[i] < worst_eval) {
learn.c ( 99): worst_eval = book_learn_eval[i];
learn.c ( 105): if (book_learn_eval[i] < worst_after_best_eval)
learn.c ( 106): worst_after_best_eval = book_learn_eval[i];
learn.c ( 108): worst_after_best_eval = book_learn_eval[interval -
1];
learn.c ( 112): if (book_learn_eval[i] > best_after_worst_eval)
learn.c ( 113): best_after_worst_eval = book_learn_eval[i];
learn.c ( 115): best_after_worst_eval = book_learn_eval[interval -
1];
learn.c ( 118): Print(128, "Learning analysis ...\n");
learn.c ( 122): Print(128, "%d(%d) ", book_learn_eval[i],
book_learn_depth[i]);
learn.c ( 148): learn_value = best_eval;
learn.c ( 150): if (learn_value == book_learn_eval[i])
learn.c ( 151): search_depth = Max(search_depth,
book_learn_depth[i]);
learn.c ( 163): learn_value = worst_eval;
learn.c ( 165): if (learn_value == book_learn_eval[i])
learn.c ( 166): search_depth = Max(search_depth,
book_learn_depth[i]);
learn.c ( 178): learn_value = 0;
learn.c ( 181): learn_value += book_learn_eval[i];
learn.c ( 182): search_depth += book_learn_depth[i];
learn.c ( 184): learn_value /= interval;
learn.c ( 188): learn_value =
learn.c ( 189): LearnFunction(learn_value, search_depth,
learn.c ( 190): crafty_rating - opponent_rating, learn_value <
0);
learn.c ( 191): learn_value *= (crafty_is_white) ? 1 : -1;
learn.c ( 193): learn_value = search_value;
learn.c ( 240): book_buffer[j].learn > (float)
LEARN_COUNTER_BAD / 100.0) {
learn.c ( 254): * now we build a vector of book learning results. we
*
learn.c ( 256): * were alternatives 100% of the learned score. We
give *
learn.c ( 257): * the book move played at that point 100% of the
learned *
learn.c ( 258): * score as well. then we divide the learned score by
*
learn.c ( 265): t_learn_value = ((float) learn_value) / 100.0;
learn.c ( 272): book_learn[i] = t_learn_value * thisply / nplies;
learn.c ( 278): * book move learned value based on the computation we
*
learn.c ( 300): * now call LearnBookUpdate() to find this position in
*
learn.c ( 301): * the book database and update the learn stuff.
*
learn.c ( 305): temp_value = book_learn[i];
learn.c ( 306): LearnBookUpdate(tree, wtm, move, temp_value);
learn.c ( 344): fprintf(book_lrn_file, "%s {%d %d %d}\n", buff,
learn_value, search_depth,
learn.c ( 363): * LearnBookUpdate() is called to find the current
position in the book and *
learn.c ( 364): * update the learn counter. if it is supposed to
mark a move as not to be *
learn.c ( 366): * in the database, it returns (0) which will force
LearnBook() to back up *
learn.c ( 372): void LearnBookUpdate(TREE * RESTRICT tree, int wtm, int
move, float learn_value)
learn.c ( 404): if (book_buffer[move_index].learn == 0.0)
learn.c ( 405): book_buffer[move_index].learn = learn_value;
learn.c ( 407): book_buffer[move_index].learn =
learn.c ( 408): (book_buffer[move_index].learn +
learn_value) / 2.0;
learn.c ( 420): * LearnFunction() is called to compute the adjustment
value added to the *
learn.c ( 421): * learn counter in the opening book. it takes three
pieces of information *
learn.c ( 429): int LearnFunction(int sv, int search_depth, int
rating_difference,
learn.c ( 455): * LearnImport() is used to read in a learn data file
(*.lrn) and apply *
learn.c ( 458): * as needed, without losing all of the "learned"
openings in the database. *
learn.c ( 463): * to participate in this) "learn" what other crafty's
have already found out *
learn.c ( 467): * for LearnBook(), then set things up so that
LearnBook() can be called and *
learn.c ( 468): * it will behave just as though this book line was
just "learned". if the *
learn.c ( 474): * LearnImport() also will import data from the C.A.P.
project by Dan Corbitt *
learn.c ( 480): void LearnImport(TREE * RESTRICT tree, int nargs, char
**args)
learn.c ( 482): FILE *learn_in;
learn.c ( 490): * learned book lines.
*
learn.c ( 501): learn_in = fopen(*args, "r");
learn.c ( 502): if (learn_in == NULL) {
learn.c ( 506): eof = fscanf(learn_in, "%s", text);
learn.c ( 507): fclose(learn_in);
learn.c ( 511): LearnImportPosition(tree, nargs, args);
learn.c ( 513): LearnImportBook(tree, nargs, args);
learn.c ( 515): LearnImportCAP(tree, nargs, args);
learn.c ( 523): * LearnImportBook() is used to import book learning
and save it in the *
learn.c ( 524): * book.bin file (see LearnBook for details.)
*
learn.c ( 529): void LearnImportBook(TREE * RESTRICT tree, int nargs,
char **args)
learn.c ( 531): FILE *learn_in;
learn.c ( 533): int wtm, learn_value, depth, rating_difference, move =
0, i, added_lines = 0;
learn.c ( 539): * the entire book and clear every learned value.
*
learn.c ( 543): learn_in = fopen(args[0], "r");
learn.c ( 558): book_buffer[j].learn = 0.0;
learn.c ( 588): eof = fgets(text, 80, learn_in);
learn.c ( 626): nextc = fgetc(learn_in);
learn.c ( 630): ungetc(nextc, learn_in);
learn.c ( 631): move = ReadChessMove(tree, learn_in, wtm, 1);
learn.c ( 642): fscanf(learn_in, "%d %d %d}\n", &learn_value,
&depth, &rating_difference);
learn.c ( 643): moves_out_of_book = LEARN_INTERVAL + 1;
learn.c ( 644): move_number += LEARN_INTERVAL + 1 - wtm;
learn.c ( 645): for (i = 0; i < LEARN_INTERVAL; i++)
learn.c ( 646): book_learn_eval[i] = learn_value;
learn.c ( 649): learning |= book_learning;
learn.c ( 650): LearnBook(tree, wtm, learn_value, depth, 1, 1);
learn.c ( 654): Print(4095, "\nadded %d learned book lines to
book.bin\n", added_lines);
learn.c ( 661): * LearnImportCAP() is used to import data from Dan
Corbitt's C.A.P. project *
learn.c ( 682): void LearnImportCAP(TREE * RESTRICT tree, int nargs,
char **args)
learn.c ( 850): * LearnImportPosition() is used to import positions
and save them in the *
learn.c ( 851): * position.bin file. (see LearnPosition for
details.) *
learn.c ( 855): void LearnImportPosition(TREE * RESTRICT tree, int
nargs, char **args)
learn.c ( 863): FILE *learn_in;
learn.c ( 874): learn_in = fopen(args[0], "r");
learn.c ( 875): eof = fgets(text, 80, learn_in);
learn.c ( 891): printf("unable to open position learning file
[%s/position.lrn].\n",
learn.c ( 905): printf("unable to open position learning file
[%s/position.bin].\n",
learn.c ( 921): eof = fgets(text, 80, learn_in);
learn.c ( 954): eof = fgets(text, 80, learn_in);
learn.c ( 969): eof = fgets(text, 80, learn_in);
learn.c ( 1077): * LearnPosition() is the driver for the second phase
of Crafty's learning *
learn.c ( 1083): * were learned.
*
learn.c ( 1097): * the file will, by default, hold 65536 learned
positions. the first word *
learn.c ( 1105): void LearnPosition(TREE * RESTRICT tree, int wtm, int
last_value, int value)
learn.c ( 1116): * is there anything to learn? if we are already
behind *
learn.c ( 1118): * learning. otherwise if the score drops by 1/3 of a
*
learn.c ( 1120): * book, learning won't help either, as the position
will *
learn.c ( 1125): if (!(learning & position_learning))
learn.c ( 1129): if (last_value < learning_cutoff)
learn.c ( 1131): if (last_value < value + learning_trigger)
learn.c ( 1143): Print(128, "learning position, wtm=%d value=%d\n",
wtm, value);
learn.c ( 1216): * simply read from the learn.bin file, and stuffed
into the correct table. *
learn.c ( 1220): void LearnPositionLoad(void)
learn.c ( 1229): * If position learning file not accessible: exit.
also, *
learn.c ( 1234): if (!(learning & position_learning))
learn.c ( 1243): * first, find out how many learned positions are in
the *
learn.c ( 1256): * first, find out how many learned positions are in
the *
main.c ( 1426): * 11.8 first stage of "book learning" implemented.
Crafty monitors the *
main.c ( 1428): * computes a "learn value" if it thinks this
set of values shows *
main.c ( 1430): * is added to a "learn count" for each move
in the set of book *
main.c ( 1431): * moves it played, with the last move getting
the full learn value, *
main.c ( 1433): * percentage. (see learn.c for more
details). these values are *
main.c ( 1458): * White and Date PGN tags just for reference.
next learning mod is *
main.c ( 1459): * another "book random n" option, n=3. this
will use the "learned" *
main.c ( 1462): * been good, even if the learn count hasn't
reached the current *
main.c ( 1463): * threshold of 1,000. this makes learning
"activate" faster. this *
main.c ( 1464): * has one hole in it, in that once crafty
learns that one move has *
main.c ( 1465): * produced a positive learn value, it will
keep playing that move *
main.c ( 1467): * loses enough to take the learn value below
zero. this will be *
main.c ( 1470): * 11.11 new "book random 4" mode that simply takes
*all* learning scores *
main.c ( 1472): * worst value is +1 (by adding
-Min(all_scores)+1 to every learn *
main.c ( 1474): * follow book lines that it has learned to be
good, while still *
main.c ( 1475): * letting it try openings that it has not yet
tried (learn value *
main.c ( 1477): * learned to be bad. minor evaluation
tweaking for king placement *
main.c ( 1481): * 11.12 LearnFunction() now keeps positional score,
rather than using a *
main.c ( 1482): * constant value for scores < PAWN_VALUE, to
improve learning *
main.c ( 1483): * accuracy. book learn <filename> [clear]
command implemented to *
main.c ( 1484): * import learning files from other Crafty
programs. *
main.c ( 1489): * learning "curve" modified. it was
accidentally left in a sort *
main.c ( 1490): * of "geometric" shape, with the last move
getting the full learn *
main.c ( 1496): * 11.14 minor modification to book learn 4 code so
that when you start *
main.c ( 1497): * off with *no* learned data, Book() won't
exclude moves from the *
main.c ( 1511): * move affect the learned-value sorting
algorithm somewhat. minor *
main.c ( 1529): * 11.15 modified LearnBook() so that when Crafty
terminates, or resigns, *
main.c ( 1530): * or sees a mate, it will force LearnBook()
to execute the learning *
main.c ( 1532): * book. Crafty now trusts large positive
evals less when learning *
main.c ( 1535): * second learning stage implemented. Crafty
maintains a permanent *
main.c ( 1542): * import <filename> [clear] command imports
all learning data now, *
main.c ( 1543): * eliminating the old book learn command.
LearnFunction() modified *
main.c ( 1556): * problem fixed in learning code. the depth
used in LearnFunction *
main.c ( 1557): * was not the depth for the search that
produced the "learn value" *
main.c ( 1558): * the LearnBook() chooses, it was the depth
for the search last *
main.c ( 1561): * to "learn", the corresponding (correct)
depth is also now used. *
main.c ( 1562): * these learn values are now limited to
+6000/-6000 because in one *
main.c ( 1565): * distribute a "mate score" as the learned
value, it will instantly *
main.c ( 1570): * not correct if Crafty got mated, which
would avoid learning in *
main.c ( 1579): * before we toss in the towel. LearnBook()
now learns on all games *
main.c ( 1583): * result was that learning values became more
and more negative as *
main.c ( 1585): * the learning equation to help pull some of
these negative values *
main.c ( 1589): * position learning is turned off after 15
moves have passed since *
main.c ( 1590): * leaving book, to avoid learning things that
won't be useful at a *
main.c ( 1592): * entries so that the learned stuff can be
copied into the hash *
main.c ( 1602): * open a file(s) on the enemy king.
BookLearn() call was moved in *
main.c ( 1603): * Resign() so that it first learns, then
resigns, so that it won't *
main.c ( 1604): * get zapped by a SIGTERM right in the middle
of learning when *
main.c ( 1731): * caused odd problems, such as learning
positions after 1. e4. the *
main.c ( 1754): * would make learning add odd PV's to the
.lrn file. development *
main.c ( 1864): * vantage is not pretty solid. note that old
book learning will *
main.c ( 1865): * not work now, since the evals are wrong.
old position learning *
main.c ( 1867): * what must be done about book learning
results. new egtb=n option *
main.c ( 1897): * learning slightly modified to react
quicker. also scores are now *
main.c ( 1909): * null-move search to tell it so. book
learning greatly modified *
main.c ( 1910): * in the way it "distributes" the learned
values, in an effort to *
main.c ( 1911): * make learning happen faster. I now
construct a sort of "tree" *
main.c ( 1913): * at each book move it played. I then assign
the whole learn value *
main.c ( 1915): * and also assign the whole learn value to
the move where there *
main.c ( 1916): * was a choice. I then divide the learn
value by the number of *
main.c ( 1921): * the book line was completely forced. all
in an effort to learn *
main.c ( 1929): * book learning code. this is now caught and
avoided. BookUp() *
main.c ( 1941): * 14.6 minor change to book random 4 (use learned
value.) if all the *
main.c ( 1942): * learned values are 0, then use the book
random 3 option instead. *
main.c ( 1943): * bug in 50-move counter fixed. Learn()
could cause this to be set *
main.c ( 1983): * Crafty. a new type of learning, based on
results, has been added *
main.c ( 1984): * and is optional (learn=7 now enables every
type of learning.) *
main.c ( 1985): * this type of learning uses the win/lose
game result to flag an *
main.c ( 1989): * ratio; static evaluation; and learned
score. There are four *
main.c ( 2098): * LearnResult() that would crash crafty if no
book was being used, *
main.c ( 2112): * controls that much better. fixed an ugly
learning bug. if the *
main.c ( 2115): * a second time, which would break things
after learning was done. *
main.c ( 2577): * to make "position learning" remember the
current position and the *
main.c ( 2618): * the book. bug in position learning fixed
also. this was caused *
main.c ( 2621): * about how it might affect the position
learning since it is also *
main.c ( 2630): * 17.9 LearnPosition() called with wrong arguments
from main() which *
main.c ( 2631): * effectively disabled position learning.
this was broken in 17.7 *
main.c ( 2739): * -DRECAPTURE when compiling search.c and
option.c. LearnBook() *
main.c ( 2740): * has been modified to 'speed up' learning.
see the comments in *
main.c ( 2741): * that module to see how it was changed.
LearnResult() has been *
main.c ( 2742): * removed. LearnBook() is now used to do the
same thing, except *
main.c ( 2748): * with the new xboard/winboard 4.2.2
versions. book learning was *
main.c ( 2756): * LearnImportBook() confused the learn value
sign, due to the other *
main.c ( 2786): * 18.9 Book() modified to increase the
responsiveness of book learning. *
main.c ( 2788): * now make Crafty learn very aggressively and
repeat good opening *
main.c ( 2971): * files. new mode option "match" which sets
an aggressive learning *
main.c ( 2972): * mode, which is now the default. the old
learning mode can be set *
main.c ( 3012): * for those wanting to play with it. new
"learn" command option to *
main.c ( 3013): * allow the user to set the position learning
parameters that are *
main.c ( 3014): * used to trigger position learning and
disable position learning *
main.c ( 3016): * "learn trigger cutoff" and are expressed as
fractions of a pawn *
main.c ( 3017): * with a decimel point. the default is learn
.33 -2.0 which says *
main.c ( 3018): * to do position learning when the score
drops 1/3 of a pawn, but *
main.c ( 3020): * lost and learning won't help at this point.
this is the CCT-6 *
main.c ( 3568): * now execute LearnPosition() to determine if the
*
main.c ( 3573): LearnPosition(tree, wtm, last_search_value,
value);
main.c ( 3627): * now execute LearnBook() to determine if the book
line *
main.c ( 3628): * was bad or good. then follow up with LearnResult()
if *
main.c ( 3634): LearnBook(tree, wtm, last_value, last_pv.pathd +
2, 0, 0);
main.c ( 3639): LearnBook(tree, wtm, val, 0, 1, 2);
option.c ( 344): else if (!strcmp("learn", args[1]))
option.c ( 345): book_weight_learn = atof(args[2]);
option.c ( 351): Print(128, "learning
(learn)..............%4.2f\n", book_weight_learn);
option.c ( 781): LearnBook(tree, wtm, last_search_value, 0, 0, 1);
option.c ( 1236): LearnPositionLoad();
option.c ( 1416): printf(" and learn (learned scores).\n");
option.c ( 1700): printf("import <filename>.........imports learning
data (.lrn files).\n");
option.c ( 1709): printf("learn <n>.................enables/disables
learning.\n");
option.c ( 1800): * position.bin) just as though it had learned those
*
option.c ( 1812): LearnImport(tree, nargs - 1, args + 1);
option.c ( 1873): Print(128, "learning (learn)..............%4.2f\n",
book_weight_learn);
option.c ( 1898): * "learn" command enables/disables the learning
*
option.c ( 1903): * 000 -> no learning enabled.
*
option.c ( 1904): * 001 -> learn which book moves are good and bad.
*
option.c ( 1905): * 010 -> learn middlegame positions.
*
option.c ( 1906): * 100 -> learn from game "results" (win/lose).
*
option.c ( 1910): * forms of learning.
*
option.c ( 1913): * stead of one. it is used to control position
learning *
option.c ( 1915): * limit that shuts position learning off after a game
is *
option.c ( 1918): * learn trigger-value cutoff-value
*
option.c ( 1921): * position learning is triggered. the default is 1/3
of *
option.c ( 1925): * position learning is turned off. the default is
-2.0 *
option.c ( 1926): * and says that once the score is -2, do not learn
any *
option.c ( 1932): else if (OptionMatch("learn", *args)) {
option.c ( 1934): learning = atoi(args[1]);
option.c ( 1935): if (learning & book_learning)
option.c ( 1936): Print(128, "book learning enabled\n");
option.c ( 1938): Print(128, "book learning disabled\n");
option.c ( 1939): if (learning & result_learning)
option.c ( 1940): Print(128, "result learning enabled\n");
option.c ( 1942): Print(128, "result learning disabled\n");
option.c ( 1943): if (learning & position_learning)
option.c ( 1944): Print(128, "position learning enabled\n");
option.c ( 1946): Print(128, "position learning disabled\n");
option.c ( 1948): learning_trigger = atof(args[1]) * 100;
option.c ( 1949): learning_cutoff = atof(args[2]) * 100;
option.c ( 1950): Print(128, "learning trigger = %s\n",
DisplayEvaluation(learning_trigger,
option.c ( 1952): Print(128, "learning cutoff = %s\n",
DisplayEvaluation(learning_cutoff,
option.c ( 2562): book_weight_learn = 1.0;
option.c ( 2568): book_weight_learn = 1.0;
option.c ( 2575): book_weight_learn = 1.0;
option.c ( 3103): * the opponent's rating, which is used by the
learning *
option.c ( 3367): * the result of the current game. if learning
routines *
option.c ( 3377): LearnBook(tree, wtm, 300, 0, 1, 2);
option.c ( 3381): LearnBook(tree, wtm, -300, 0, 1, 2);
option.c ( 3829): * position.bin (position learning) file. this will
end *
option.c ( 3848): learning |= position_learning;
option.c ( 3855): LearnPosition(tree, wtm, Max(score + 100, 0),
score);
preeval.c ( 192): * now install the learned position information *
preeval.c ( 197): LearnPositionLoad();
preeval.c ( 213): LearnPositionLoad();
resign.c ( 101): LearnBook(tree, crafty_is_white, val, 0, 1, 2);
utility.c ( 1038): static int save_learning = 0;
utility.c ( 1050): save_learning = learning;
utility.c ( 1053): if (learning & book_learning && moves_out_of_book) {
utility.c ( 1056): LearnBook(tree, crafty_is_white, val, 0, 0, 1);
utility.c ( 1109): learning = save_learning;
This page took 0 seconds to execute
Last modified: Thu, 15 Apr 21 08:11:13 -0700
Current Computer Chess Club Forums at Talkchess. This site by Sean Mintz.