-- donkey.lua (forked from soumith/imagenet-multiGPU.torch)
--
-- Copyright (c) 2014, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree. An additional grant
-- of patent rights can be found in the PATENTS file in the same directory.
--
require 'image'
require('pl.stringx').import()
paths.dofile('dataset.lua')
paths.dofile('util.lua')
-- This file contains the data-loading logic and details.
-- It is run by each data-loader thread.
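-- The loaders built below (trainLoader/testLoader) are consumed by the training script
-- through the donkey thread pool. A rough sketch of that call pattern (names such as
-- `donkeys`, `trainBatch` and `opt.batchSize` follow the upstream imagenet-multiGPU.torch
-- code and may differ in this fork):
--
--    donkeys:addjob(
--       function() return trainLoader:sample(opt.batchSize) end, -- runs in a donkey thread
--       trainBatch)                                              -- runs in the main thread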
------------------------------------------
-- cache files for the training/test metadata (created if they don't exist yet)
local trainCache = paths.concat(opt.cache, 'trainCache.t7')
local testCache = paths.concat(opt.cache, 'testCache.t7')
local meanstdCache = paths.concat(opt.cache, 'meanstdCache.t7')
-- Check that opt.data exists and is a directory (paths.dirp is used here instead of
-- the original os.execute('cd ...') check, whose return value is not a boolean under LuaJIT)
if not paths.dirp(opt.data) then
   error(("could not find data directory '%s'"):format(opt.data))
end
local loadSize = {3, opt.imageSize, opt.imageSize}
local sampleSize = {3, opt.cropSize, opt.cropSize}
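-- Both sizes are {channels, height, width}: images are first rescaled so their shorter
-- side equals opt.imageSize (loadSize), then cropped to opt.cropSize squares (sampleSize)
-- by the hooks defined below.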
if opt.imgExtInsensitive then
   -- Overwrite image.load(): load an image regardless of its file extension by
   -- sniffing the magic bytes and picking the matching JPEG/PNG decoder
   function image.load(p, n, t)
      local img, ok
      local f = io.open(p, 'r')
      if f then
         local h = f:read(4)
         f:close()
         if h and h:startswith(string.char(0xff, 0xd8, 0xff)) then
            ok, img = pcall(image.loadJPG, p, n, t)
         elseif h and h:startswith(string.char(0x89, 0x50, 0x4e, 0x47)) then
            ok, img = pcall(image.loadPNG, p, n, t)
         end
      end
      if ok and img:dim() == 3 then
         return img
      else
         -- log the unreadable path and return random noise so loading can continue
         os.execute('echo "' .. p .. '" >> err.img')
         return torch.FloatTensor(loadSize[1], loadSize[2], loadSize[3]):uniform()
      end
   end
end
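-- The magic bytes above are the standard file signatures: JPEG files start with
-- 0xFF 0xD8 0xFF and PNG files with 0x89 'P' 'N' 'G', so files with a mislabelled
-- extension (e.g. a PNG saved as .jpg) are still decoded with the right loader.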
local function loadImage(path)
   local input = image.load(path, 3, 'float')
   -- find the smaller dimension, and resize it to loadSize (while keeping the aspect ratio)
   if input:size(3) < input:size(2) then
      input = image.scale(input, loadSize[2], loadSize[3] * input:size(2) / input:size(3))
   else
      input = image.scale(input, loadSize[2] * input:size(3) / input:size(2), loadSize[3])
   end
   return input
end
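-- For example, with opt.imageSize = 256 a 640x480 (WxH) image is rescaled to roughly
-- 341x256 and a 480x640 image to 256x341; the crop down to sampleSize happens later
-- in the hooks.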
-- channel-wise mean and std; they are calculated (or loaded from disk) later in this script
local mean,std
--------------------------------------------------------------------------------
--[[
   Section 1: Create a train data loader (trainLoader),
   which does class-balanced sampling from the dataset and does a random crop
--]]
-- function to load the image, jitter it appropriately (random crops etc.)
local trainHook = function(self, path)
   collectgarbage()
   local input = loadImage(path)
   local iW = input:size(3)
   local iH = input:size(2)
   -- do random crop
   local oW = sampleSize[3]
   local oH = sampleSize[2]
   local h1 = math.ceil(torch.uniform(1e-2, iH-oH))
   local w1 = math.ceil(torch.uniform(1e-2, iW-oW))
   local out = image.crop(input, w1, h1, w1 + oW, h1 + oH)
   assert(out:size(3) == oW)
   assert(out:size(2) == oH)
   -- do hflip with probability 0.5
   if torch.uniform() > 0.5 then out = image.hflip(out) end
   -- mean/std normalization (skipped until mean/std have been estimated below)
   for i=1,3 do -- channels
      if mean then out[{{i},{},{}}]:add(-mean[i]) end
      if std then out[{{i},{},{}}]:div(std[i]) end
   end
   return out
end
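-- trainHook is installed as trainLoader.sampleHookTrain below, so every call to
-- trainLoader:sample() returns opt.cropSize x opt.cropSize patches with a random crop
-- position and a random horizontal flip (standard ImageNet-style augmentation).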
if paths.filep(trainCache) then
   print('Loading train metadata from cache')
   trainLoader = torch.load(trainCache)
   trainLoader.sampleHookTrain = trainHook
   assert(trainLoader.paths[1] == paths.concat(opt.data, 'train'),
          "cached files don't have the same path as opt.data. Remove your cached files at: "
          .. trainCache .. ' and rerun the program')
else
   print('Creating train metadata')
   trainLoader = dataLoader{
      paths = {paths.concat(opt.data, 'train')},
      loadSize = loadSize,
      sampleSize = sampleSize,
      split = 100,
      verbose = true
   }
   torch.save(trainCache, trainLoader)
   trainLoader.sampleHookTrain = trainHook
end
collectgarbage()
-- do some sanity checks on trainLoader
do
   local class = trainLoader.imageClass
   local nClasses = #trainLoader.classes
   assert(class:max() <= nClasses, "class logic has error")
   assert(class:min() >= 1, "class logic has error")
end
-- End of train loader section
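-- A quick way to sanity-check the loader from an interactive session (a sketch only;
-- assumes dataset.lua provides sample() as in the upstream repo):
--
--    local inputs, labels = trainLoader:sample(8)  -- 8 random, augmented crops
--    print(inputs:size())                          -- e.g. 8 x 3 x cropSize x cropSize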
--------------------------------------------------------------------------------
--[[
   Section 2: Create a test data loader (testLoader),
   which can iterate over the test set and returns an image's center crop
--]]
-- function to load the image and take the center crop
testHook = function(self, path)
   collectgarbage()
   local input = loadImage(path)
   local oH = sampleSize[2]
   local oW = sampleSize[3]
   local iW = input:size(3)
   local iH = input:size(2)
   local w1 = math.ceil((iW-oW)/2)
   local h1 = math.ceil((iH-oH)/2)
   local out = image.crop(input, w1, h1, w1+oW, h1+oH) -- center patch
   -- mean/std normalization (skipped until mean/std have been estimated below)
   for i=1,3 do -- channels
      if mean then out[{{i},{},{}}]:add(-mean[i]) end
      if std then out[{{i},{},{}}]:div(std[i]) end
   end
   return out
end
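-- testHook takes the deterministic center patch: e.g. for a 341x256 rescaled image and
-- cropSize 224, the crop starts at w1 = ceil((341-224)/2) = 59 and h1 = ceil((256-224)/2) = 16.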
if paths.filep(testCache) then
   print('Loading test metadata from cache')
   testLoader = torch.load(testCache)
   testLoader.sampleHookTest = testHook
   assert(testLoader.paths[1] == paths.concat(opt.data, 'val'),
          "cached files don't have the same path as opt.data. Remove your cached files at: "
          .. testCache .. ' and rerun the program')
else
   print('Creating test metadata')
   testLoader = dataLoader{
      paths = {paths.concat(opt.data, 'val')},
      loadSize = loadSize,
      sampleSize = sampleSize,
      split = 0,
      verbose = true,
      forceClasses = trainLoader.classes -- force consistent class indices between trainLoader and testLoader
   }
   torch.save(testCache, testLoader)
   testLoader.sampleHookTest = testHook
end
collectgarbage()
-- End of test loader section
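-- The test loader is typically walked sequentially rather than sampled; a sketch
-- (assumes dataset.lua provides get() and size() as in the upstream repo):
--
--    for i = 1, testLoader:size(), opt.batchSize do
--       local inputs, labels = testLoader:get(i, math.min(i + opt.batchSize - 1, testLoader:size()))
--       -- evaluate the model on inputs/labels here
--    end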
-- Estimate the per-channel mean/std (so that the loaders can normalize appropriately)
if paths.filep(meanstdCache) then
   local meanstd = torch.load(meanstdCache)
   mean = meanstd.mean
   std = meanstd.std
   print('Loaded mean and std from cache.')
elseif not opt.normalize then
   -- normalization disabled: cache empty mean/std so the hooks leave images unnormalized
   local cache = {}
   cache.mean = mean
   cache.std = std
   torch.save(meanstdCache, cache)
else
   local tm = torch.Timer()
   local nSamples = 10000
   print('Estimating the mean (per-channel, shared for all pixels) over ' .. nSamples .. ' randomly sampled training images')
   local meanEstimate = {0,0,0}
   for i=1,nSamples do
      local img = trainLoader:sample(1)[1]
      for j=1,3 do
         meanEstimate[j] = meanEstimate[j] + img[j]:mean()
      end
   end
   for j=1,3 do
      meanEstimate[j] = meanEstimate[j] / nSamples
   end
   mean = meanEstimate
   print('Estimating the std (per-channel, shared for all pixels) over ' .. nSamples .. ' randomly sampled training images')
   local stdEstimate = {0,0,0}
   for i=1,nSamples do
      local img = trainLoader:sample(1)[1]
      for j=1,3 do
         stdEstimate[j] = stdEstimate[j] + img[j]:std()
      end
   end
   for j=1,3 do
      stdEstimate[j] = stdEstimate[j] / nSamples
   end
   std = stdEstimate
   local cache = {}
   cache.mean = mean
   cache.std = std
   torch.save(meanstdCache, cache)
   print('Time to estimate:', tm:time().real)
end
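-- Note: mean/std are upvalues captured by trainHook/testHook above, so the images drawn
-- while estimating them here are not yet normalized; every sample loaded after this point is.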