Skip to content

Commit

Permalink
Rectified an issue which remained from a mistaken commit (ac61810). F…
Browse files Browse the repository at this point in the history
…ixes weighting error (closing #5) and accuracy (closes #11) [had to fix votes calculations due to an accidental `≠` in defining `votes`, and tentatively but incorrectly implemented zerosarray---kept the function just in case].  Woohoo!  That's the algorithm.
  • Loading branch information
jakewilliami committed Aug 27, 2020
1 parent ab7ced1 commit 65c9bd6
Show file tree
Hide file tree
Showing 3 changed files with 74 additions and 16 deletions.
2 changes: 1 addition & 1 deletion example.jl
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ function main(alt::Bool=false)
# println(typeof(c))
# end

println("Loading test faces...")
println("\nLoading test faces...")
facesTesting = loadImages(posTestingPath)
facesIITesting = map(toIntegralImage, facesTesting) # list(map(...))
println("...done. ", length(facesTesting), " faces loaded.\n\nLoading test non-faces..")
Expand Down
60 changes: 46 additions & 14 deletions src/AdaBoost.jl
Original file line number Diff line number Diff line change
Expand Up @@ -96,40 +96,48 @@ function learn(positiveIIs::AbstractArray, negativeIIs::AbstractArray, numClassi

# println(typeof(numImgs));println(typeof(numFeatures))
# create an empty array (of zeroes) with dimensions (numImgs, numFeautures)
global votes = zeros((numImgs, numFeatures)) # necessarily different from `zero.((numImgs, numFeatures))`
global votes = zeros((numImgs, numFeatures)) # necessarily different from `zero.((numImgs, numFeatures))`; previously zerosarray

# bar = progressbar.ProgressBar()
# @everywhere numImgs begin
# println(size(votes))
# println(votes)

# displaymatrix(votes)
# println(length(features))
# displaymatrix(features)
# show progress bar
# displaymatrix(images)
# println(size(images))
@everywhere begin
n = numImgs
processes = length(numImgs) # i.e., hypotheses
p = Progress(n, 1) # minimum update interval: 1 second
for t in 1:processes # bar(range(num_imgs)):
processes = numImgs # i.e., hypotheses
# println(processes)
# p = Progress(n, 1) # minimum update interval: 1 second
@showprogress for t in 1:processes # bar(range(num_imgs)):
# println(t)
# votes[i, :] = np.array(list(Pool(processes=None).map(partial(_get_feature_vote, image=images[i]), features)))
# votes[i, :] = Array(map(partial(getVote, images[i]), features))
votes[t, :] = Array(map(feature -> getVote(feature, images[t]), features))
# votes[t, :] = Array(map(partial(_get_feature_vote, images[t]), features))
votes[t, :] = Array(map(f -> _get_feature_vote(f, images[t]), features))
# votes[i, :] = [map(feature -> getVote(feature, images[i]), features)]
next!(p)
# next!(p)
end
end # end everywhere (end parallel processing)
# displaymatrix(votes)

# select classifiers
# classifiers = Array()
classifiers = []

println("Selecting classifiers...")
println("\nSelecting classifiers...")

n = numClassifiers
p = Progress(n, 1) # minimum update interval: 1 second
for t in 1:numClassifiers
# p = Progress(n, 1) # minimum update interval: 1 second
@showprogress for t in 1:numClassifiers
# for t in processes
# println(typeof(length(featureIndices)))
# print_matrix(stdout, weights)
# println(weights)
classificationErrors = zeros(length(featureIndices))
classificationErrors = zeros(length(featureIndices)) # previously, zerosarray

# normalize the weights $w_{t,i}\gets \frac{w_{t,i}}{\sum_{j=1}^n w_{t,j}}$
# weights *= 1. / np.sum(weights)
Expand Down Expand Up @@ -164,6 +172,7 @@ function learn(positiveIIs::AbstractArray, negativeIIs::AbstractArray, numClassi

classificationErrors[j] = ε
end
# print_matrix(stdout, weights)

# choose the classifier $h_t$ with the lowest error $\varepsilon_t$
minErrorIDX = argmin(classificationErrors) # returns the index of the minimum in the array
Expand All @@ -179,7 +188,9 @@ function learn(positiveIIs::AbstractArray, negativeIIs::AbstractArray, numClassi
# featureWeight = (1 - bestError) / bestError # β
# println(typeof(featureWeight))
# [println(f.weight) for f in features]
# print_matrix(stdout, weights)
bestFeature.weight = featureWeight
# print_matrix(stdout, weights)

# classifiers = vcat(classifiers, bestFeature)
# println(classifiers)
Expand All @@ -188,8 +199,22 @@ function learn(positiveIIs::AbstractArray, negativeIIs::AbstractArray, numClassi
# update image weights $w_{t+1,i}=w_{t,i}\beta_{t}^{1-e_i}$
# weights = list(map(lambda img_idx: weights[img_idx] * np.sqrt((1-best_error)/best_error) if labels[img_idx] != votes[img_idx, best_feature_idx] else weights[img_idx] * np.sqrt(best_error/(1-best_error)), range(num_imgs)))
# weights = (imgIDX -> (labels[imgIDX] ≠ votes[imgIDX, bestFeatureIDX]) ? weights[imgIDX]*sqrt((1-bestError)/bestError) : weights[imgIDX]*sqrt(bestError/(1-bestError)), 1:numImgs)
# print_matrix(stdout, weights)
# weights = Array(map(imgIDX -> labels[imgIDX] ≠ votes[imgIDX, bestFeatureIDX] ? weights[imgIDX] * sqrt((1 - bestError) / bestError) : weights[imgIDX] * sqrt(bestError / (1 - bestError)), 1:numImgs))
weights = Array(map(i -> labels[i] ≠ votes[i, bestFeatureIDX] ? weights[i] * sqrt((1 - bestError) / bestError) : weights[i] * sqrt(bestError / (1 - bestError)), 1:numImgs))
# println(votes[:,bestFeatureIDX])

# print_matrix(stdout, weights)
# println(typeof(weights))

weights = Array(map(imgIDX -> (labels[imgIDX] ≠ votes[imgIDX, bestFeatureIDX]) ? weights[imgIDX] * featureWeight : weights[imgIDX] * featureWeight, 1:numImgs))
# imgIDX -> labels[imgIDX] ≠ votes[imgIDX, bestFeatureIDX] ? weights[imgIDX] * sqrt((1 - bestError) / bestError) : weights[imgIDX] * sqrt(bestError / (1 - bestError))

# weights = np.array(list(map(
#
# lambda img_idx:
# weights[img_idx] * np.sqrt((1-best_error)/best_error) if labels[img_idx] != votes[img_idx, best_feature_idx] else weights[img_idx] * np.sqrt(best_error/(1-best_error)),
#
# range(num_imgs))))

# β = ε / (1 - ε)
#
Expand All @@ -207,15 +232,22 @@ function learn(positiveIIs::AbstractArray, negativeIIs::AbstractArray, numClassi
featureIndices = filter!(e -> e ∉ bestFeatureIDX, featureIndices) # note: without unicode operators, `e ∉ [a, b]` is `!(e in [a, b])`
# println(bestFeature)

next!(p)
# next!(p)
end
# println(weights)

# println(typeof(classifiers[1]))
# println(votes)
return classifiers

end


# Thin adapter used by the parallel vote loop in `learn`: forwards a
# feature/image pair to `getVote` (defined with `HaarLikeFeature`).
_get_feature_vote(feature::HaarLikeFeature, image::AbstractArray) = getVote(feature, image)


# function _get_feature_vote(feature::HaarLikeFeature, image::Int64)
# # return getVote(image)
# # return partial(getVote)(image)
Expand Down
28 changes: 27 additions & 1 deletion src/Utils.jl
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,32 @@ deepfloat(a::Number) = a * 1.0
deepfloat(a) = deepfloat.(a)


"""
    displaymatrix(M::AbstractArray)

Pretty-print the array `M` to `stdout` in truncated (`:limit`) and compact
form, as the REPL would display it.  Returns `nothing` (the return value of
`show`).
"""
function displaymatrix(M::AbstractArray)
    ctx = IOContext(stdout, :limit => true, :compact => true, :short => true)
    return show(ctx, "text/plain", M)
end


"""
    zerosarray(a::Integer, b::Integer=1)

Replicate `numpy.zeros(...)`.

# Arguments
- `a`: length of the (first dimension of the) array to construct.
- `b`: optional second dimension (defaults to `1`).

# Returns
- For `b == 1`: a flat `Vector` of `a` zeros (i.e. `[0.0, 0.0, 0.0]`),
  matching `numpy.zeros(a)`.
- For `b > 1`: a `Vector` of `a` rows, each a length-`b` array of zeros,
  matching `numpy.zeros((a, b))` viewed as a list of rows.
"""
function zerosarray(a::Integer, b::Integer=1)
    if isone(b)
        # numpy.zeros(a) is one-dimensional; previously this branch wrapped
        # the zeros in an extra outer vector (the "incorrectly implemented"
        # behaviour the commit message mentions).
        return zeros(a)
    end
    # One row per `a`, each of length `b`.
    return collect(eachrow(zeros(a, b)))
end


# for adaboost
function partial(f,a...)
#=
Expand Down Expand Up @@ -218,7 +244,7 @@ function reconstruct(classifiers::AbstractArray, imgSize::Tuple)
sign = mod((sign + 1), 2)
end
for y in 1:c.height
image[c.topLeft[0] + x, c.topLeft[1] + y] += 1 * sign * c.weight
image[c.topLeft[1] + x, c.topLeft[2] + y] += 1 * sign * c.weight
end
end
elseif c.featureType == FeatureTypes[3] # three horizontal
Expand Down

0 comments on commit 65c9bd6

Please sign in to comment.