Skip to content

Commit

Permalink
Merge pull request #277 from GatorEducator/enhance/random-conflicts
Browse files Browse the repository at this point in the history
Add conflict handling to random algorithm
  • Loading branch information
Jacob Sutter authored Mar 22, 2019
2 parents 9108bb2 + 197ccbf commit c026465
Show file tree
Hide file tree
Showing 3 changed files with 172 additions and 34 deletions.
14 changes: 9 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -161,11 +161,15 @@ pipenv run python3 gatorgrouper_cli.py --file filepath --method=random
```

This will randomly group the list of students you have provided, and is the
default grouping method used when none is provided. This method of grouping is
appropriate for cases where the assignment does not require that groups have a
minimum number of members that have responded as having a skill related to the
assignment. Consider using this method for assignments like in class exercises,
small discussion groups, or peer editing.
default grouping method used when none is provided. Additionally, the random
grouping method can recognize student conflicts via a numerical severity
weight (e.g., a level 1 conflict is for students who simply don't like one
another, while a level 5 conflict is for students who are legally mandated to
be separated). The random method takes each conflict, applies it to both
students, and then groups the students, attempting to minimize the overall
level of conflict within each group by generating candidate groupings an
arbitrary number of times and keeping the grouping with the least conflict.

### Round-robin Grouping Method

Expand Down
112 changes: 83 additions & 29 deletions gatorgrouper/utils/group_creation.py
Original file line number Diff line number Diff line change
@@ -1,43 +1,97 @@
"""Contains all of the group creation algorithms"""

import logging
import itertools
import random
import itertools
from typing import List, Union
from gatorgrouper.utils import group_scoring


# group_random.py
def group_random_group_size(
    responses: List[List[str]], grpsize: int, conflicts=None
) -> List[List[str]]:
    """
    Calculate the number of groups from the desired students per group
    and delegate the actual grouping to group_random_num_group.

    conflicts is an optional list of 3-tuples with conflict relations
    between two students in the format (str1, str2, int), where str1 and
    str2 are student names and int is the corresponding conflict weight.

    Raises ValueError when grpsize is less than 1.
    """
    if grpsize < 1:
        raise ValueError("grpsize must be at least 1")
    # number of groups = number of students // minimum students per group;
    # clamp to one group when there are fewer students than grpsize so the
    # delegate never receives numgrp == 0
    numgrp = max(1, len(responses) // grpsize)
    # normalize the default here so the delegate never receives None
    return group_random_num_group(
        responses, numgrp, [] if conflicts is None else conflicts
    )


def group_random_num_group(
    responses: List[List[str]], numgrp: int, conflicts=None
) -> List[List[str]]:
    """
    Group responses into numgrp groups using a randomization approach.

    Repeatedly shuffles the responses, partitions them into numgrp
    groups, and keeps the partition whose score (group score minus the
    average weight of conflicts that landed inside a group) is highest.

    conflicts is an optional list of 3-tuples with conflict relations
    between two students in the format (str1, str2, int), where str1 and
    str2 are student names and int is the corresponding conflict weight
    (higher weights mean the pair should be kept apart).

    Raises ValueError when numgrp is less than 1.
    """
    if numgrp < 1:
        raise ValueError("numgrp must be at least 1")
    conflicts = [] if conflicts is None else conflicts
    # Number of random partitions attempted; more attempts give a better
    # chance of finding a low-conflict grouping.
    attempts = 100
    best_groups: List[List[str]] = []
    best_score = float("-inf")
    # number of students in each group (overflow is distributed afterwards)
    grpsize = len(responses) // numgrp
    for _ in range(attempts):
        # shuffle a copy so the caller's list is left untouched
        candidates = list(responses)
        random.shuffle(candidates)
        groups = _partition(candidates, numgrp, grpsize)
        # subtract the conflict penalty from the general group score
        score = group_scoring.score_group(groups) - _conflict_penalty(
            groups, conflicts
        )
        logging.info("score: %s", score)
        if score > best_score:
            best_score = score
            best_groups = groups
    return best_groups


def _partition(students: list, numgrp: int, grpsize: int) -> List[list]:
    """Split an ordered student list into numgrp groups of grpsize each,
    distributing any overflow students one per group from the front."""
    groups = [students[i * grpsize:(i + 1) * grpsize] for i in range(numgrp)]
    overflow = students[numgrp * grpsize:]
    if overflow:
        logging.info("Overflow students identified; distributing into groups.")
        for idx, student in enumerate(overflow):
            groups[idx % numgrp].append(student)
    return groups


def _conflict_penalty(groups: List[list], conflicts: list) -> float:
    """Return the average weight of the conflicts that ended up inside a
    single group, or 0 when every conflict was avoided."""
    hits = []
    for grp in groups:
        # a group member may be a plain name or a [name, answers...] list
        names = {m[0] if isinstance(m, list) else m for m in grp}
        for first, second, weight in conflicts:
            if first in names and second in names:
                hits.append(weight)
    return sum(hits) / len(hits) if hits else 0


# pylint: disable=bad-continuation
def shuffle_students(
responses: Union[str, List[List[Union[str, bool]]]]
) -> List[List[Union[str, bool]]]:
Expand Down
80 changes: 80 additions & 0 deletions tests/test_group_method.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,86 @@ def test_group_random_extra():
# assert len(returned_groups[0]) == size_count


def test_group_random_conflict_numgrp():
    """
    Test that random grouping steers away from a declared conflict pair.
    Note that there is no guarantee that a conflict will be avoided on any
    single run (guaranteed conflict avoidance is an NP-hard problem), only
    that it is avoided most of the time. Groups are therefore generated
    many times; each run is recorded as 0 (conflict avoided) or 1
    (conflict still present), and the test passes when at least 90% of the
    runs avoided the conflict.
    """
    responses = [
        ["Nick", True, False, True, False],
        ["Marvin", False, False, True, True],
        ["Evin", True, True, True, False],
        ["Nikki", True, True, False, False],
        ["Dan", False, True, False, True],
        ["Michael", True, True, False, False],
    ]
    num_group = 3
    # The pair that should be kept apart, with conflict weight 5.
    conflict = ("Nick", "Marvin", 5)
    results = []
    # Run repeated trials for statistical significance.
    for _ in range(100):
        returned_groups = group_creation.group_random_num_group(
            responses, num_group, [conflict]
        )
        # One result per run: 1 when any group contains both students
        # (avoidance failed), 0 when the pair was kept apart.
        failed = 0
        for grp in returned_groups:
            names = [m[0] if isinstance(m, list) else m for m in grp]
            if (conflict[0] in names) and (conflict[1] in names):
                failed = 1
        results.append(failed)
    # calculate the failure rate of the trials
    results_avg = sum(results) / len(results)
    # assert that the success rate of conflict avoidance is 90% minimum
    assert results_avg < 0.1


def test_group_random_conflict_grpsize():
    """
    Test that size-based random grouping steers away from a declared
    conflict pair. There is no guarantee that a conflict is avoided on any
    single run, only that it is avoided most of the time. See the
    docstring of test_group_random_conflict_numgrp for details of this
    testing method, as the process is similar.
    """
    responses = [
        ["Nick", True, False, True, False],
        ["Marvin", False, False, True, True],
        ["Evin", True, True, True, False],
        ["Nikki", True, True, False, False],
        ["Dan", False, True, False, True],
        ["Michael", True, True, False, False],
    ]
    group_size = 2
    # The pair that should be kept apart, with conflict weight 5.
    conflict = ("Nick", "Marvin", 5)
    results = []
    # Run repeated trials for statistical significance.
    for _ in range(100):
        returned_groups = group_creation.group_random_group_size(
            responses, group_size, [conflict]
        )
        # One result per run: 1 when any group contains both students
        # (avoidance failed), 0 when the pair was kept apart.
        failed = 0
        for grp in returned_groups:
            names = [m[0] if isinstance(m, list) else m for m in grp]
            if (conflict[0] in names) and (conflict[1] in names):
                failed = 1
        results.append(failed)
    # calculate the failure rate of the trials
    results_avg = sum(results) / len(results)
    # assert that the success rate of conflict avoidance is 90% minimum
    assert results_avg < 0.1


def test_group_random():
"""Testing the random type of grouping with everyone in an assigned group"""
responses = [
Expand Down

0 comments on commit c026465

Please sign in to comment.