Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
  • Loading branch information
longemen3000 committed Oct 29, 2020
2 parents 9e1952c + 8ded3d8 commit fd3c20d
Show file tree
Hide file tree
Showing 7 changed files with 145 additions and 195 deletions.
18 changes: 12 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -30,21 +30,21 @@ For a better explanation, read [the paper from 2001](http://citeseerx.ist.psu.ed
```julia
using FaceDetection, Serialization # Serialization is so that you can save your results

pos_training_path = "..." # positive images are, for example, faces
num_classifiers = 10 # this is the number of Haar-like features you want to select

pos_training_path = "..." # positive images are, for example, faces
neg_training_path = "..." # negative images are, for example, non-faces. However, the Viola-Jones algorithm is for object detection, not just for face detection

max_feature_width, max_feature_height, min_feature_height, min_feature_width, min_size_img = (1, 2, 3, 4, 5) # or use the function to select reasonable sized feature parameters given your maximum image size (see below)

max_feature_width, max_feature_height, min_feature_height, min_feature_width, min_size_img = determine_feature_size(pos_training_path, neg_training_path)

classifiers = learn(pos_training_path, neg_training_path, num_classifiers, min_feature_height, max_feature_height, min_feature_width, max_feature_width) # get classifiers/features from your training data
votes, features = get_feature_votes(pos_training_path, neg_training_path, num_classifiers, min_feature_height, max_feature_height, min_feature_width, max_feature_width)

data_file = "..." # this is where you want to save your data
serialize(data_file, (votes, features)); # write classifiers to file

serialize(data_file, classifiers); # write classifiers to file

classifiers = deserialize(data_file); # read from saved data
votes, all_features = deserialize(data_file); # read from saved data
classifiers = learn(pos_training_path, neg_training_path, all_features, votes, num_classifiers)

# obtain results
num_faces, num_non_faces = length(filtered_ls(pos_testing_path)), length(filtered_ls(neg_testing_path));
Expand All @@ -58,6 +58,11 @@ println("$(string(correct_faces, "/", num_faces)) ($(correct_faces_percent) %) o
println("$(string(correct_non_faces, "/", num_non_faces)) ($(correct_non_faces_percent) %) of negative images were correctly identified.")
```

Alternatively, you can skip the process of writing `votes` and `features` to a file, and get classifiers by running
```julia
classifiers = learn(pos_training_path, neg_training_path, num_classifiers, min_feature_height, max_feature_height, min_feature_width, max_feature_width) # get classifiers/features from your training data
```

## Caveats

In the current implementation of the Viola-Jones algorithm, we have not implemented scaling features. This means that you should ideally have your training set the same size as your test set. To make this easier while we work on scaling features, we have implemented keyword arguments to the functions `determine_feature_size` and `learn`. E.g.,
Expand Down Expand Up @@ -102,6 +107,7 @@ https://github.com/polarisZhao/awesome-face#-datasets
- [6b35f6d5](https://github.com/jakewilliami/FaceDetection.jl/commit/6b35f6d5) — Finally, the algorithm works as it should. Just enhancements from here on out.
- [854bba32](https://github.com/jakewilliami/FaceDetection.jl/commit/854bba32) and [655e0e14](https://github.com/jakewilliami/FaceDetection.jl/commit/655e0e14) — Implemented facelike scoring and wrote score data to CSV (see [#7](https://github.com/jakewilliami/FaceDetection.jl/issues/7)).
- [e7295f8d](https://github.com/jakewilliami/FaceDetection.jl/commit/e7295f8d) — Implemented writing training data to file and reading from that data to save computation time.
- [750aa22d](https://github.com/jakewilliami/FaceDetection.jl/commit/750aa22d)–[41a1a0bb](https://github.com/jakewilliami/FaceDetection.jl/commit/41a1a0bb) — Optimised performance.

### Acknowledgements

Expand Down
1 change: 0 additions & 1 deletion _git2_a31664

This file was deleted.

101 changes: 39 additions & 62 deletions src/AdaBoost.jl
Original file line number Diff line number Diff line change
Expand Up @@ -18,27 +18,31 @@ using ProgressMeter: @showprogress, Progress, next!
function get_feature_votes(
positive_path::AbstractString,
negative_path::AbstractString,
num_classifiers::Integer=Int32(-1),
min_feature_width::Integer=Int32(1),
max_feature_width::Integer=Int32(-1),
min_feature_height::Integer=Int32(1),
max_feature_height::Integer=Int32(-1);
num_classifiers::Integer=-one(Int32),
min_feature_width::Integer=one(Int32),
max_feature_width::Integer=-one(Int32),
min_feature_height::Integer=one(Int32),
max_feature_height::Integer=-one(Int32);
scale::Bool = false,
scale_to::Tuple = (Int32(200), Int32(200))
)

#this transforms everything to maintain type stability
s1 ,s2 = scale_to
s₁, s₂ = scale_to
min_feature_width,
max_feature_width,
min_feature_height,
max_feature_height,s1,s2 = promote(min_feature_width,
max_feature_width,
min_feature_height,
max_feature_height,s1,s2)
scale_to = (s1,s2)
max_feature_height, s₁, s₂ = promote(
min_feature_width,
max_feature_width,
min_feature_height,
max_feature_height,
s₁,
s₂)
scale_to = (s₁, s₂)

_Int = typeof(max_feature_width)

# get number of positive and negative images (and create a global variable of the total number of images——global for the @everywhere scope)
positive_files = filtered_ls(positive_path)
negative_files = filtered_ls(negative_path)
Expand Down Expand Up @@ -73,8 +77,8 @@ function get_feature_votes(
map(partition(image_files, batch_size)) do batch
ii_imgs = load_image.(batch; scale=scale, scale_to=scale_to)
@threads for t in 1:length(batch)
# votes[:, num_processed+t] .= get_vote.(features, Ref(ii_imgs[t]))
map!(f -> get_vote(f, ii_imgs[t]), view(votes, :, num_processed + t), features)
votes[:, num_processed + t] .= get_vote.(features, Ref(ii_imgs[t]))
# map!(f -> get_vote(f, ii_imgs[t]), view(votes, :, num_processed + t), features)
next!(p) # increment progress bar
end
num_processed += length(batch)
Expand All @@ -84,40 +88,12 @@ function get_feature_votes(
return votes, features
end

"""
learn(
positive_iis::AbstractArray,
negative_iis::AbstractArray,
num_classifiers::Int64=-1,
min_feature_width::Int64=1,
max_feature_width::Int64=-1,
min_feature_height::Int64=1,
max_feature_height::Int64=-1
) ->::Array{HaarLikeObject,1}
The boosting algorithm for learning a query online. T hypotheses are constructed, each using a single feature.
The final hypothesis is a weighted linear combination of the T hypotheses, where the weights are inversely proportional to the training errors.
This function selects a set of classifiers. Iteratively takes the best classifiers based on a weighted error.
# Arguments
- `positive_iis::AbstractArray`: List of positive integral image examples
- `negative_iis::AbstractArray`: List of negative integral image examples
- `num_classifiers::Integer`: Number of classifiers to select. -1 will use all classifiers
- `min_feature_width::Integer`: the minimum width of the feature
- `max_feature_width::Integer`: the maximum width of the feature
- `min_feature_height::Integer`: the minimum height of the feature
- `max_feature_width::Integer`: the maximum height of the feature
# Returns `classifiers::Array{HaarLikeObject, 1}`: List of selected features
"""
function learn(
positive_path::AbstractString,
negative_path::AbstractString,
features::AbstractArray,
votes::AbstractArray,
num_classifiers::Integer=-1

features::Array{HaarLikeObject, 1},
votes::Matrix{Int8},
num_classifiers::Integer=-one(Int32)
)
# get number of positive and negative images (and create a global variable of the total number of images——global for the @everywhere scope)
num_pos = length(filtered_ls(positive_path))
Expand All @@ -139,7 +115,7 @@ function learn(

notify_user("Selecting classifiers...")
# select classifiers
classifiers = []
classifiers = HaarLikeObject[]
p = Progress(num_classifiers, 1) # minimum update interval: 1 second
classification_errors = Vector{Float64}(undef, length(feature_indices))

Expand All @@ -161,6 +137,7 @@ function learn(
# choose the classifier $h_t$ with the lowest error $\varepsilon_t$
best_error, min_error_idx = findmin(classification_errors)
best_feature_idx = feature_indices[min_error_idx]
best_feature = features[best_feature_idx]

# set feature weight
best_feature = features[best_feature_idx]
Expand Down Expand Up @@ -202,14 +179,14 @@ end
function learn(
positive_path::AbstractString,
negative_path::AbstractString,
num_classifiers::Integer=-1,
min_feature_width::Integer=1,
max_feature_width::Integer=-1,
min_feature_height::Integer=1,
max_feature_height::Integer=-1;
num_classifiers::Int64=-1,
min_feature_width::Int64=1,
max_feature_width::Int64=-1,
min_feature_height::Int64=1,
max_feature_height::Int64=-1;
scale::Bool = false,
scale_to::Tuple = (200, 200)
)::Array{HaarLikeObject,1}
)

votes, features = get_feature_votes(
positive_path,
Expand Down Expand Up @@ -252,15 +229,15 @@ Iteratively creates the Haar-like feautures
- `features::AbstractArray`: an array of Haar-like features found for an image
"""
function create_features(
img_height::Integer,
img_width::Integer,
min_feature_width::Integer,
max_feature_width::Integer,
min_feature_height::Integer,
max_feature_height::Integer
img_height::Int64,
img_width::Int64,
min_feature_width::Int64,
max_feature_width::Int64,
min_feature_height::Int64,
max_feature_height::Int64
)
notify_user("Creating Haar-like features...")
features = []
features = HaarLikeObject[]

if img_width < max_feature_width || img_height < max_feature_height
error("""
Expand All @@ -269,10 +246,10 @@ function create_features(
end

for feature in values(feature_types) # (feature_types are just tuples)
feature_start_width = max(min_feature_width, feature[1])
for feature_width in range(feature_start_width, stop=max_feature_width, step=feature[1])
feature_start_height = max(min_feature_height, feature[2])
for feature_height in range(feature_start_height, stop=max_feature_height, step=feature[2])
feature_start_width = max(min_feature_width, first(feature))
for feature_width in range(feature_start_width, stop=max_feature_width, step=first(feature))
feature_start_height = max(min_feature_height, last(feature))
for feature_height in range(feature_start_height, stop=max_feature_height, step=last(feature))
for x in 1:(img_width - feature_width)
for y in 1:(img_height - feature_height)
push!(features, HaarLikeObject(feature, (x, y), feature_width, feature_height, 0, 1))
Expand Down
Loading

0 comments on commit fd3c20d

Please sign in to comment.