-
Notifications
You must be signed in to change notification settings - Fork 0
/
l2opt
executable file
·234 lines (214 loc) · 5.51 KB
/
l2opt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
#!/usr/bin/env coffee
fs = require 'fs'
{ N_PHASES } = require './pattern'
minibatch = require './minibatch.coffee'
coeffs_to_weights = require './coeffs_to_weights.coffee'
match = require './match.coffee'
{ watch_file } = require './util'
# Command-line interface, parsed eagerly at load time.
# `.strict()` rejects unknown flags; `.version false` disables the
# built-in --version flag. Defaults for the training/match knobs are
# taken from the minibatch and match modules so the three tools agree.
argv = require 'yargs'
  .options
    min:
      desc: 'Minimum value to search'
      default: 0.001
      type: 'number'
      requiresArg: true
    max:
      desc: 'Maximum value to search'
      default: 10
      type: 'number'
      requiresArg: true
    p:
      alias: 'precision'
      desc: 'Search precision'
      default: 0.1
      type: 'number'
      requiresArg: true
    b:
      alias: 'book'
      desc: 'Database file'
      default: 'book.db'
      requiresArg: true
    l:
      alias: 'logistic'
      desc: 'Logistic regression'
      type: 'boolean'
    e:
      alias: 'epochs'
      desc: 'Train epochs'
      type: 'number'
      default: minibatch.defaults.epochs
    loop:
      desc: 'Loop forever'
      type: 'boolean'
    prefix:
      desc: 'Prefix of coeffs'
      default: 'tmp/l2opt/coeffs'
    weights:
      desc: 'Temporary weights file'
      default: 'tmp/l2opt/weights.json'
      requiresArg: true
    default_l2:
      desc: 'Default L2 value'
      default: minibatch.defaults.l2
      requiresArg: true
    s:
      alias: 'search'
      desc: 'Number of UCT search of match'
      default: match.defaults.search
    w:
      alias: 'wld'
      desc: 'WLD search depth'
      default: match.defaults.wld
    f:
      alias: 'full'
      desc: 'Full search depth'
      default: match.defaults.full
    phase:
      desc: 'Phase to optimize (default all)'
      type: 'number'
      requiresArg: true
    skip_cv:
      desc: 'Skip CV and use existing L2/* for starter'
      type: 'boolean'
    match_range:
      desc: 'Match-search range'
      default: 2
      requiresArg: true
    # NOTE(review): -h is aliased to 'help' but yargs' .help() is not
    # visibly enabled in this chunk — confirm help output works.
    h:
      alias: 'help'
  .strict()
  .version false
  .argv
# Ensure a coefficients file exists for `phase` under argv.prefix.
# Reuses tmp/coeffs<phase> verbatim when it exists; otherwise trains a
# fresh set with minibatch, using the L2 value stored in L2/<phase>
# (falling back to --default_l2 when that file is absent).
prepare_coeffs = (phase) ->
  orig_file = "tmp/coeffs#{phase}"
  dest = "#{argv.prefix}#{phase}"
  return fs.copyFileSync(orig_file, dest) if fs.existsSync(orig_file)
  l2file = "L2/#{phase}"
  l2 = if fs.existsSync(l2file) then parseFloat(fs.readFileSync(l2file)) else argv.default_l2
  process.stdout.write "Preparing coeffs for phase #{phase} l2=#{l2}: "
  minibatch
    l2: l2
    book: argv.book
    phase: phase
    logistic: argv.logistic
    epochs: argv.epochs
    outfile: dest
    verbose: false
  process.stdout.write "done\n"
# Make sure every OTHER phase has a coeffs file under argv.prefix, so a
# full weights file can be assembled while only `phase` is re-trained.
# Existing files are kept as-is.
prepare = (phase) ->
  for p in [0...N_PHASES]
    if p isnt phase and not fs.existsSync("#{argv.prefix}#{p}")
      prepare_coeffs p
# Choose the L2 value for `phase` by minibatch's cross-validation
# search over [--min, --max] and persist it to L2/<phase>.
# With --skip_cv, just assert that a stored value already exists.
optimize_cv = (phase) ->
  l2file = "L2/#{phase}"
  if argv.skip_cv
    unless fs.existsSync(l2file)
      throw new Error "L2/#{phase} not found"
    return
  training = minibatch.load_samples
    book: argv.book
    phase: phase
    logistic: argv.logistic
    verbose: true
  best = minibatch
    search: true
    search_precision: 1 + argv.precision
    search_min: argv.min
    search_max: argv.max
    samples: training
    phase: phase
    logistic: argv.logistic
    epochs: argv.epochs
  fs.writeFileSync l2file, "#{best}\n"
# Train coefficients for `phase` with L2 `value`, rebuild the combined
# weights file, and score the result by self-play matches.
# `max` is the best score seen so far; it is printed against, and also
# handed to match as `min` (presumably an early-cutoff threshold —
# confirm against match.coffee). The score blends winrate with a tiny
# average-score tiebreak. Returns { score, coeffs }.
try_value = (phase, value, max, samples) ->
  process.stdout.write "L2 #{value}: "
  coeffs = minibatch
    l2: value
    samples: samples
    phase: phase
    logistic: argv.logistic
    epochs: argv.epochs
    outfile: "#{argv.prefix}#{phase}"
    verbose: false
  coeffs_to_weights
    prefix: argv.prefix
    outfile: argv.weights
    verbose: false
  result = match
    weights: argv.weights
    search: argv.search
    wld: argv.wld
    full: argv.full
    quiet: true
    openings: match.defaults.openings
    min: max
  score = result.winrate + result.avg * 0.0001
  process.stdout.write "score #{Math.round(100000*score)/100000}"
  process.stdout.write if score > max then ' *\n' else '\n'
  { score, coeffs }
# Tune the L2 value of `phase` by match play: seed with a coarse CV
# search (or the stored L2/<phase> value under --skip_cv), then
# hill-climb multiplicatively around the seed, shrinking the step
# geometrically until it is finer than --precision. Persists the best
# L2 to L2/<phase> and the best coefficients to the prefix file.
optimize_match = (phase) ->
  # Other phases need coeffs on disk so try_value can build full weights.
  prepare phase
  samples = minibatch.load_samples
    book: argv.book
    phase: phase
    logistic: argv.logistic
    verbose: true
  value =
    if argv.skip_cv
      parseFloat(fs.readFileSync("L2/#{phase}"))
    else
      # Coarse CV search; the sqrt(match_range) margins keep the later
      # [value/range, value*range] bracket near the --min/--max bounds.
      minibatch
        search: true
        search_precision: 1.4
        search_min: argv.min * Math.sqrt(argv.match_range)
        search_max: argv.max / Math.sqrt(argv.match_range)
        samples: samples
        phase: phase
        logistic: argv.logistic
        epochs: argv.epochs
  ubound = value * argv.match_range
  lbound = value / argv.match_range
  # Initial multiplicative step: quarter of the bracket (in log space);
  # start from the geometric mean of the bracket.
  step = (ubound / lbound) ** .25
  best = (lbound * ubound) ** .5
  # Baseline: -Infinity guarantees the first score is accepted.
  { score, coeffs } = try_value(phase, best, -Infinity, samples)
  max = score
  best_coeffs = coeffs
  loop
    # Probe upward first; only if that fails, probe downward.
    tmp = best * step
    { score, coeffs } = try_value(phase, tmp, max, samples)
    if score > max
      max = score
      best = tmp
      best_coeffs = coeffs
    else
      tmp = best / step
      { score, coeffs } = try_value(phase, tmp, max, samples)
      if score > max
        max = score
        best = tmp
        best_coeffs = coeffs
    # Stop once the multiplicative step is within --precision of 1;
    # otherwise halve it in log space and refine.
    break if step - 1 < argv.precision
    step **= .5
  console.log "Best value #{best}"
  fs.writeFileSync "L2/#{phase}", "#{best}\n"
  fs.writeFileSync "#{argv.prefix}#{phase}", JSON.stringify(best_coeffs)
# Entry point: sweep all phases (or just --phase), using CV for the
# early phases (<= 2) and match-based search for the rest. Repeats
# while --loop is set, stopping when the .reload-l2 watch file changes.
do ->
  reload = watch_file('.reload-l2')
  until reload()
    for phase in [0...N_PHASES]
      continue if argv.phase? and phase != argv.phase
      console.log "Phase #{phase}"
      if phase <= 2
        optimize_cv phase
      else
        optimize_match phase
      # NOTE(review): indentation was lost in this copy; this rebuild
      # regenerates the weights file after every phase — confirm it was
      # not intended to run once per full sweep instead.
      coeffs_to_weights
        prefix: argv.prefix
        outfile: argv.weights
    break unless argv.loop