diff --git a/docs/api.rst b/docs/api.rst index bd2f5baef9..642682cfb3 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -7,7 +7,7 @@ User class ============ .. autoclass:: locust.User - :members: wait_time, tasks, weight, abstract, on_start, on_stop, wait, context, environment + :members: wait_time, tasks, weight, fixed_count, abstract, on_start, on_stop, wait, context, environment HttpUser class ================ diff --git a/docs/writing-a-locustfile.rst b/docs/writing-a-locustfile.rst index 3e43b76030..205a6f1c56 100644 --- a/docs/writing-a-locustfile.rst +++ b/docs/writing-a-locustfile.rst @@ -180,7 +180,7 @@ For example, the following User class would sleep for one second, then two, then ... -weight attribute +weight and fixed_count attributes ---------------- If more than one user class exists in the file, and no user classes are specified on the command line, @@ -204,6 +204,25 @@ classes. Say for example, web users are three times more likely than mobile user weight = 1 ... +You can also set the :py:attr:`fixed_count <locust.User.fixed_count>` attribute. +In this case the weight property will be ignored and the exact count of users will be spawned. +These users are spawned first. In the example below, exactly one instance of AdminUser +will be spawned, to do some specific work with more accurate control +of the request count, independently of the total user count. + +.. code-block:: python + + class AdminUser(User): + wait_time = constant(600) + fixed_count = 1 + + @task + def restart_app(self): + ... + + class WebUser(User): + ... 
+ host attribute -------------- diff --git a/locust/argument_parser.py b/locust/argument_parser.py index 4677e51e8b..797276432f 100644 --- a/locust/argument_parser.py +++ b/locust/argument_parser.py @@ -431,10 +431,14 @@ def setup_parser_arguments(parser): other_group = parser.add_argument_group("Other options") other_group.add_argument( - "--show-task-ratio", action="store_true", help="Print table of the User classes' task execution ratio" + "--show-task-ratio", + action="store_true", + help="Print table of the User classes' task execution ratio. Use this with non-zero --user option if some classes define non-zero fixed_count property.", ) other_group.add_argument( - "--show-task-ratio-json", action="store_true", help="Print json data of the User classes' task execution ratio" + "--show-task-ratio-json", + action="store_true", + help="Print json data of the User classes' task execution ratio. Use this with non-zero --user option if some classes define non-zero fixed_count property.", ) # optparse gives you --version but we have to do it ourselves to get -V too other_group.add_argument( diff --git a/locust/dispatch.py b/locust/dispatch.py index dbd6aaa0ea..6f72bce6ac 100644 --- a/locust/dispatch.py +++ b/locust/dispatch.py @@ -4,7 +4,7 @@ import time from collections.abc import Iterator from operator import attrgetter -from typing import Dict, Generator, List, TYPE_CHECKING, Tuple, Type +from typing import Dict, Generator, List, TYPE_CHECKING, Optional, Tuple, Type import gevent import typing @@ -98,6 +98,10 @@ def __init__(self, worker_nodes: "List[WorkerNode]", user_classes: List[Type[Use self._rebalance = False + self._try_dispatch_fixed = True + + self._no_user_to_spawn = False + @property def dispatch_in_progress(self): return self._dispatch_in_progress @@ -132,6 +136,9 @@ def _dispatcher(self) -> Generator[Dict[str, Dict[str, int]], None, None]: if self._rebalance: self._rebalance = False yield self._users_on_workers + if self._no_user_to_spawn: + 
self._no_user_to_spawn = False + break while self._current_user_count > self._target_user_count: with self._wait_between_dispatch_iteration_context(): @@ -241,13 +248,19 @@ def _add_users_on_workers(self) -> Dict[str, Dict[str, int]]: current_user_count_target = min( self._current_user_count + self._user_count_per_dispatch_iteration, self._target_user_count ) + for user in self._user_generator: + if not user: + self._no_user_to_spawn = True + break worker_node = next(self._worker_node_generator) self._users_on_workers[worker_node.id][user] += 1 self._current_user_count += 1 self._active_users.append((worker_node, user)) if self._current_user_count >= current_user_count_target: - return self._users_on_workers + break + + return self._users_on_workers def _remove_users_from_workers(self) -> Dict[str, Dict[str, int]]: """Remove users from the workers until the target number of users is reached for the current dispatch iteration @@ -264,9 +277,17 @@ def _remove_users_from_workers(self) -> Dict[str, Dict[str, int]]: return self._users_on_workers self._users_on_workers[worker_node.id][user] -= 1 self._current_user_count -= 1 + self._try_dispatch_fixed = True if self._current_user_count == 0 or self._current_user_count <= current_user_count_target: return self._users_on_workers + def _get_user_current_count(self, user: str) -> int: + count = 0 + for users_on_node in self._users_on_workers.values(): + count += users_on_node.get(user, 0) + + return count + def _distribute_users( self, target_user_count: int ) -> Tuple[dict, Generator[str, None, None], typing.Iterator["WorkerNode"], List[Tuple["WorkerNode", str]]]: @@ -289,6 +310,8 @@ def _distribute_users( user_count = 0 while user_count < target_user_count: user = next(user_gen) + if not user: + break worker_node = next(worker_gen) users_on_workers[worker_node.id][user] += 1 user_count += 1 @@ -307,26 +330,66 @@ def _user_gen(self) -> Generator[str, None, None]: weighted round-robin algorithm, we'd get AAAAABAAAAAB which 
would make the distribution less accurate during ramp-up/down. """ - # Normalize the weights so that the smallest weight will be equal to "target_min_weight". - # The value "2" was experimentally determined because it gave a better distribution especially - # when dealing with weights which are close to each others, e.g. 1.5, 2, 2.4, etc. - target_min_weight = 2 - min_weight = min(u.weight for u in self._user_classes) - normalized_weights = [ - (user_class.__name__, round(target_min_weight * user_class.weight / min_weight)) - for user_class in self._user_classes - ] - gen = smooth(normalized_weights) - # Instead of calling `gen()` for each user, we cycle through a generator of fixed-length - # `generation_length_to_get_proper_distribution`. Doing so greatly improves performance because - # we only ever need to call `gen()` a relatively small number of times. The length of this generator - # is chosen as the sum of the normalized weights. So, for users A, B, C of weights 2, 5, 6, the length is - # 2 + 5 + 6 = 13 which would yield the distribution `CBACBCBCBCABC` that gets repeated over and over - # until the target user count is reached. - generation_length_to_get_proper_distribution = sum( - normalized_weight[1] for normalized_weight in normalized_weights - ) - yield from itertools.cycle(gen() for _ in range(generation_length_to_get_proper_distribution)) + + def infinite_cycle_gen(users: List[Tuple[User, int]]) -> Generator[Optional[str], None, None]: + if not users: + return itertools.cycle([None]) + + # Normalize the weights so that the smallest weight will be equal to "target_min_weight". + # The value "2" was experimentally determined because it gave a better distribution especially + # when dealing with weights which are close to each others, e.g. 1.5, 2, 2.4, etc. 
+ target_min_weight = 2 + + # 'Value' here means weight or fixed count + normalized_values = [ + ( + user.__name__, + round(target_min_weight * value / min([u[1] for u in users])), + ) + for user, value in users + ] + generation_length_to_get_proper_distribution = sum( + normalized_val[1] for normalized_val in normalized_values + ) + gen = smooth(normalized_values) + + # Instead of calling `gen()` for each user, we cycle through a generator of fixed-length + # `generation_length_to_get_proper_distribution`. Doing so greatly improves performance because + # we only ever need to call `gen()` a relatively small number of times. The length of this generator + # is chosen as the sum of the normalized weights. So, for users A, B, C of weights 2, 5, 6, the length is + # 2 + 5 + 6 = 13 which would yield the distribution `CBACBCBCBCABC` that gets repeated over and over + # until the target user count is reached. + return itertools.cycle(gen() for _ in range(generation_length_to_get_proper_distribution)) + + fixed_users = {u.__name__: u for u in self._user_classes if u.fixed_count} + + cycle_fixed_gen = infinite_cycle_gen([(u, u.fixed_count) for u in fixed_users.values()]) + cycle_weighted_gen = infinite_cycle_gen([(u, u.weight) for u in self._user_classes if not u.fixed_count]) + + # Spawn users + while True: + if self._try_dispatch_fixed: + self._try_dispatch_fixed = False + current_fixed_users_count = {u: self._get_user_current_count(u) for u in fixed_users} + spawned_classes = set() + while len(spawned_classes) != len(fixed_users): + user_name = next(cycle_fixed_gen) + if not user_name: + break + + if current_fixed_users_count[user_name] < fixed_users[user_name].fixed_count: + current_fixed_users_count[user_name] += 1 + if current_fixed_users_count[user_name] == fixed_users[user_name].fixed_count: + spawned_classes.add(user_name) + yield user_name + + # 'self._try_dispatch_fixed' may have been set again elsewhere (during ramp-down), + # so we have to recalculate the current counts + if self._try_dispatch_fixed: + 
current_fixed_users_count = {u: self._get_user_current_count(u) for u in fixed_users} + spawned_classes.clear() + self._try_dispatch_fixed = False + + yield next(cycle_weighted_gen) @staticmethod def _fast_users_on_workers_copy(users_on_workers: Dict[str, Dict[str, int]]) -> Dict[str, Dict[str, int]]: diff --git a/locust/html.py b/locust/html.py index a3735bff65..e1a9c81e06 100644 --- a/locust/html.py +++ b/locust/html.py @@ -4,9 +4,10 @@ import datetime from itertools import chain from .stats import sort_stats -from .user.inspectuser import get_task_ratio_dict +from .user.inspectuser import get_ratio from html import escape from json import dumps +from .runners import MasterRunner def render_template(file, **kwargs): @@ -62,9 +63,14 @@ def get_html_report(environment, show_download_link=True): static_css.append(f.read()) static_css.extend(["", ""]) + is_distributed = isinstance(environment.runner, MasterRunner) + user_spawned = ( + environment.runner.reported_user_classes_count if is_distributed else environment.runner.user_classes_count + ) + task_data = { - "per_class": get_task_ratio_dict(environment.user_classes), - "total": get_task_ratio_dict(environment.user_classes, total=True), + "per_class": get_ratio(environment.user_classes, user_spawned, False), + "total": get_ratio(environment.user_classes, user_spawned, True), } res = render_template( diff --git a/locust/main.py b/locust/main.py index 8ad6840656..45bdf0b455 100644 --- a/locust/main.py +++ b/locust/main.py @@ -20,7 +20,7 @@ from .stats import print_error_report, print_percentile_stats, print_stats, stats_printer, stats_history from .stats import StatsCSV, StatsCSVFileWriter from .user import User -from .user.inspectuser import get_task_ratio_dict, print_task_ratio +from .user.inspectuser import print_task_ratio, print_task_ratio_json from .util.timespan import parse_timespan from .exception import AuthCredentialsError from .shape import LoadTestShape @@ -218,18 +218,13 @@ def main(): if 
options.show_task_ratio: print("\n Task ratio per User class") print("-" * 80) - print_task_ratio(user_classes) + print_task_ratio(user_classes, options.num_users, False) print("\n Total task ratio") print("-" * 80) - print_task_ratio(user_classes, total=True) + print_task_ratio(user_classes, options.num_users, True) sys.exit(0) if options.show_task_ratio_json: - - task_data = { - "per_class": get_task_ratio_dict(user_classes), - "total": get_task_ratio_dict(user_classes, total=True), - } - print(dumps(task_data)) + print_task_ratio_json(user_classes, options.num_users) sys.exit(0) if options.master: diff --git a/locust/static/tasks.js b/locust/static/tasks.js index 4fc8a63cff..11b9e68e82 100644 --- a/locust/static/tasks.js +++ b/locust/static/tasks.js @@ -29,11 +29,12 @@ function _getTasks_div(root, title) { } -function initTasks() { - var tasks = $('#tasks .tasks') - var tasksData = tasks.data('tasks'); - console.log(tasksData); - tasks.append(_getTasks_div(tasksData.per_class, 'Ratio per User class')); - tasks.append(_getTasks_div(tasksData.total, 'Total ratio')); +function updateTasks() { + $.get('/tasks', function (data) { + var tasks = $('#tasks .tasks'); + tasks.empty(); + tasks.append(_getTasks_div(data.per_class, 'Ratio per User class')); + tasks.append(_getTasks_div(data.total, 'Total ratio')); + }); } -initTasks(); \ No newline at end of file +updateTasks(); diff --git a/locust/templates/index.html b/locust/templates/index.html index 6f47260315..10aeaf15aa 100644 --- a/locust/templates/index.html +++ b/locust/templates/index.html @@ -333,6 +333,13 @@

Version + {% block extended_script %} {% endblock extended_script %} diff --git a/locust/test/test_dispatch.py b/locust/test/test_dispatch.py index f648132a05..5dbb5c1a00 100644 --- a/locust/test/test_dispatch.py +++ b/locust/test/test_dispatch.py @@ -1,7 +1,7 @@ import time import unittest from operator import attrgetter -from typing import Dict +from typing import Dict, List, Tuple from locust import User from locust.dispatch import UsersDispatcher @@ -1932,337 +1932,168 @@ class User2(User): class TestLargeScale(unittest.TestCase): - class User01(User): - weight = 5 - - class User02(User): - weight = 55 - - class User03(User): - weight = 37 - - class User04(User): - weight = 2 - - class User05(User): - weight = 97 - - class User06(User): - weight = 41 - - class User07(User): - weight = 33 - - class User08(User): - weight = 19 - - class User09(User): - weight = 19 - - class User10(User): - weight = 34 - - class User11(User): - weight = 78 - - class User12(User): - weight = 76 - - class User13(User): - weight = 28 - - class User14(User): - weight = 62 - - class User15(User): - weight = 69 - - class User16(User): - weight = 5 - - class User17(User): - weight = 55 - - class User18(User): - weight = 37 - - class User19(User): - weight = 2 - - class User20(User): - weight = 97 - - class User21(User): - weight = 41 - - class User22(User): - weight = 33 - - class User23(User): - weight = 19 - - class User24(User): - weight = 19 - - class User25(User): - weight = 34 - - class User26(User): - weight = 78 - - class User27(User): - weight = 76 - - class User28(User): - weight = 28 - - class User29(User): - weight = 62 - - class User30(User): - weight = 69 - - class User31(User): - weight = 41 - - class User32(User): - weight = 33 - - class User33(User): - weight = 19 - - class User34(User): - weight = 19 - - class User35(User): - weight = 34 - - class User36(User): - weight = 78 - - class User37(User): - weight = 76 - - class User38(User): - weight = 28 - - class 
User39(User): - weight = 62 - - class User40(User): - weight = 69 - - class User41(User): - weight = 41 - - class User42(User): - weight = 33 - - class User43(User): - weight = 19 - - class User44(User): - weight = 19 - - class User45(User): - weight = 34 - - class User46(User): - weight = 78 - - class User47(User): - weight = 76 - - class User48(User): - weight = 28 - - class User49(User): - weight = 62 - - class User50(User): - weight = 69 - - user_classes = [ - User01, - User02, - User03, - User04, - User05, - User06, - User07, - User08, - User09, - User10, - User11, - User12, - User13, - User14, - User15, - User16, - User17, - User18, - User19, - User20, - User21, - User22, - User23, - User24, - User25, - User26, - User27, - User28, - User29, - User30, - User31, - User32, - User33, - User34, - User35, - User36, - User37, - User38, - User39, - User40, - User41, - User42, - User43, - User44, - User45, - User46, - User47, - User48, - User49, - User50, + # fmt: off + weights = [ + 5, 55, 37, 2, 97, 41, 33, 19, 19, 34, 78, 76, 28, 62, 69, 5, 55, 37, 2, 97, 41, 33, 19, 19, 34, + 78, 76, 28, 62, 69, 41, 33, 19, 19, 34, 78, 76, 28, 62, 69, 41, 33, 19, 19, 34, 78, 76, 28, 62, 69 ] + # fmt: on + numerated_weights = dict(zip(range(len(weights)), weights)) + + weighted_user_classes = [type(f"User{i}", (User,), {"weight": w}) for i, w in numerated_weights.items()] + fixed_user_classes_10k = [type(f"FixedUser10k{i}", (User,), {"fixed_count": 2000}) for i in range(50)] + fixed_user_classes_1M = [type(f"FixedUser1M{i}", (User,), {"fixed_count": 20000}) for i in range(50)] + mixed_users = weighted_user_classes[:25] + fixed_user_classes_10k[25:] def test_distribute_users(self): - workers = [WorkerNode(str(i)) for i in range(10_000)] + for user_classes in [self.weighted_user_classes, self.fixed_user_classes_1M, self.mixed_users]: + workers = [WorkerNode(str(i)) for i in range(10_000)] - target_user_count = 1_000_000 + target_user_count = 1_000_000 - users_dispatcher = 
UsersDispatcher(worker_nodes=workers, user_classes=self.user_classes) + users_dispatcher = UsersDispatcher(worker_nodes=workers, user_classes=user_classes) - ts = time.perf_counter() - users_on_workers, user_gen, worker_gen, active_users = users_dispatcher._distribute_users( - target_user_count=target_user_count - ) - delta = time.perf_counter() - ts + ts = time.perf_counter() + users_on_workers, user_gen, worker_gen, active_users = users_dispatcher._distribute_users( + target_user_count=target_user_count + ) + delta = time.perf_counter() - ts - # Because tests are run with coverage, the code will be slower. - # We set the pass criterion to 5000ms, but in real life, the - # `_distribute_users` method runs faster than this. - self.assertLessEqual(1000 * delta, 5000) + # Because tests are run with coverage, the code will be slower. + # We set the pass criterion to 7000ms, but in real life, the + # `_distribute_users` method runs faster than this. + self.assertLessEqual(1000 * delta, 7000) - self.assertEqual(_user_count(users_on_workers), target_user_count) + self.assertEqual(_user_count(users_on_workers), target_user_count) def test_ramp_up_from_0_to_100_000_users_with_50_user_classes_and_1000_workers_and_5000_spawn_rate(self): - workers = [WorkerNode(str(i)) for i in range(1000)] + for user_classes in [ + self.weighted_user_classes, + self.fixed_user_classes_1M, + self.fixed_user_classes_10k, + self.mixed_users, + ]: + workers = [WorkerNode(str(i)) for i in range(1000)] - target_user_count = 100_000 + target_user_count = 100_000 - users_dispatcher = UsersDispatcher(worker_nodes=workers, user_classes=self.user_classes) - users_dispatcher.new_dispatch(target_user_count=target_user_count, spawn_rate=5_000) - users_dispatcher._wait_between_dispatch = 0 + users_dispatcher = UsersDispatcher(worker_nodes=workers, user_classes=user_classes) + users_dispatcher.new_dispatch(target_user_count=target_user_count, spawn_rate=5_000) + users_dispatcher._wait_between_dispatch = 0 - 
all_dispatched_users = list(users_dispatcher) + all_dispatched_users = list(users_dispatcher) - tol = 0.2 - self.assertTrue( - all( - dispatch_iteration_duration <= tol - for dispatch_iteration_duration in users_dispatcher.dispatch_iteration_durations - ), - "One or more dispatch took more than {:.0f}s to compute (max = {}ms)".format( - tol * 1000, 1000 * max(users_dispatcher.dispatch_iteration_durations) - ), - ) - - self.assertEqual(_user_count(all_dispatched_users[-1]), target_user_count) - - for dispatch_users in all_dispatched_users: - user_count_on_workers = [sum(user_classes_count.values()) for user_classes_count in dispatch_users.values()] - self.assertLessEqual( - max(user_count_on_workers) - min(user_count_on_workers), - 1, - "One or more workers have too much users compared to the other workers when user count is {}".format( - _user_count(dispatch_users) + tol = 0.2 + self.assertTrue( + all( + dispatch_iteration_duration <= tol + for dispatch_iteration_duration in users_dispatcher.dispatch_iteration_durations + ), + "One or more dispatch took more than {:.0f}s to compute (max = {}ms)".format( + tol * 1000, 1000 * max(users_dispatcher.dispatch_iteration_durations) ), ) - for i, dispatch_users in enumerate(all_dispatched_users): - aggregated_dispatched_users = _aggregate_dispatched_users(dispatch_users) - for user_class in self.user_classes: - target_relative_weight = user_class.weight / sum(map(attrgetter("weight"), self.user_classes)) - relative_weight = aggregated_dispatched_users[user_class.__name__] / _user_count(dispatch_users) - error_percent = 100 * (relative_weight - target_relative_weight) / target_relative_weight - if i == len(all_dispatched_users) - 1: - # We want the distribution to be as good as possible at the end of the ramp-up - tol = 0.5 - else: - tol = 15 + self.assertEqual(_user_count(all_dispatched_users[-1]), target_user_count) + + for dispatch_users in all_dispatched_users: + user_count_on_workers = [ + 
sum(user_classes_count.values()) for user_classes_count in dispatch_users.values() + ] self.assertLessEqual( - error_percent, - tol, - "Distribution for user class {} is off by more than {}% when user count is {}".format( - user_class, tol, _user_count(dispatch_users) + max(user_count_on_workers) - min(user_count_on_workers), + 1, + "One or more workers have too much users compared to the other workers when user count is {}".format( + _user_count(dispatch_users) ), ) + for i, dispatch_users in enumerate(all_dispatched_users): + aggregated_dispatched_users = _aggregate_dispatched_users(dispatch_users) + for user_class in [u for u in user_classes if not u.fixed_count]: + target_relative_weight = user_class.weight / sum( + map(attrgetter("weight"), [u for u in user_classes if not u.fixed_count]) + ) + relative_weight = aggregated_dispatched_users[user_class.__name__] / _user_count(dispatch_users) + error_percent = 100 * (relative_weight - target_relative_weight) / target_relative_weight + if i == len(all_dispatched_users) - 1: + # We want the distribution to be as good as possible at the end of the ramp-up + tol = 0.5 + else: + tol = 15 + self.assertLessEqual( + error_percent, + tol, + "Distribution for user class {} is off by more than {}% when user count is {}".format( + user_class, tol, _user_count(dispatch_users) + ), + ) + def test_ramp_down_from_100_000_to_0_users_with_50_user_classes_and_1000_workers_and_5000_spawn_rate(self): - initial_user_count = 100_000 + for user_classes in [ + self.weighted_user_classes, + self.fixed_user_classes_1M, + self.fixed_user_classes_10k, + self.mixed_users, + ]: + initial_user_count = 100_000 - workers = [WorkerNode(str(i)) for i in range(1000)] + workers = [WorkerNode(str(i)) for i in range(1000)] - # Ramp-up - users_dispatcher = UsersDispatcher(worker_nodes=workers, user_classes=self.user_classes) - users_dispatcher.new_dispatch(target_user_count=initial_user_count, spawn_rate=initial_user_count) - 
users_dispatcher._wait_between_dispatch = 0 - list(users_dispatcher) + # Ramp-up + users_dispatcher = UsersDispatcher(worker_nodes=workers, user_classes=user_classes) + users_dispatcher.new_dispatch(target_user_count=initial_user_count, spawn_rate=initial_user_count) + users_dispatcher._wait_between_dispatch = 0 + list(users_dispatcher) - # Ramp-down - users_dispatcher.new_dispatch(target_user_count=0, spawn_rate=5000) - users_dispatcher._wait_between_dispatch = 0 + # Ramp-down + users_dispatcher.new_dispatch(target_user_count=0, spawn_rate=5000) + users_dispatcher._wait_between_dispatch = 0 - all_dispatched_users = list(users_dispatcher) + all_dispatched_users = list(users_dispatcher) - tol = 0.2 - self.assertTrue( - all( - dispatch_iteration_duration <= tol - for dispatch_iteration_duration in users_dispatcher.dispatch_iteration_durations - ), - "One or more dispatch took more than {:.0f}ms to compute (max = {}ms)".format( - tol * 1000, 1000 * max(users_dispatcher.dispatch_iteration_durations) - ), - ) - - self.assertEqual(_user_count(all_dispatched_users[-1]), 0) - - for dispatch_users in all_dispatched_users[:-1]: - user_count_on_workers = [sum(user_classes_count.values()) for user_classes_count in dispatch_users.values()] - self.assertLessEqual( - max(user_count_on_workers) - min(user_count_on_workers), - 1, - "One or more workers have too much users compared to the other workers when user count is {}".format( - _user_count(dispatch_users) + tol = 0.2 + self.assertTrue( + all( + dispatch_iteration_duration <= tol + for dispatch_iteration_duration in users_dispatcher.dispatch_iteration_durations + ), + "One or more dispatch took more than {:.0f}ms to compute (max = {}ms)".format( + tol * 1000, 1000 * max(users_dispatcher.dispatch_iteration_durations) ), ) - for dispatch_users in all_dispatched_users[:-1]: - aggregated_dispatched_users = _aggregate_dispatched_users(dispatch_users) - for user_class in self.user_classes: - target_relative_weight = 
user_class.weight / sum(map(attrgetter("weight"), self.user_classes)) - relative_weight = aggregated_dispatched_users[user_class.__name__] / _user_count(dispatch_users) - error_percent = 100 * (relative_weight - target_relative_weight) / target_relative_weight - tol = 15 + self.assertEqual(_user_count(all_dispatched_users[-1]), 0) + + for dispatch_users in all_dispatched_users[:-1]: + user_count_on_workers = [ + sum(user_classes_count.values()) for user_classes_count in dispatch_users.values() + ] self.assertLessEqual( - error_percent, - tol, - "Distribution for user class {} is off by more than {}% when user count is {}".format( - user_class, tol, _user_count(dispatch_users) + max(user_count_on_workers) - min(user_count_on_workers), + 1, + "One or more workers have too much users compared to the other workers when user count is {}".format( + _user_count(dispatch_users) ), ) + for dispatch_users in all_dispatched_users[:-1]: + aggregated_dispatched_users = _aggregate_dispatched_users(dispatch_users) + for user_class in [u for u in user_classes if not u.fixed_count]: + target_relative_weight = user_class.weight / sum( + map(attrgetter("weight"), [u for u in user_classes if not u.fixed_count]) + ) + relative_weight = aggregated_dispatched_users[user_class.__name__] / _user_count(dispatch_users) + error_percent = 100 * (relative_weight - target_relative_weight) / target_relative_weight + tol = 15 + self.assertLessEqual( + error_percent, + tol, + "Distribution for user class {} is off by more than {}% when user count is {}".format( + user_class, tol, _user_count(dispatch_users) + ), + ) + class TestSmallConsecutiveRamping(unittest.TestCase): def test_consecutive_ramp_up_and_ramp_down(self): @@ -3291,6 +3122,271 @@ class User3(User): self.assertEqual(_user_count_on_worker(dispatched_users, worker_nodes[2].id), 3) +class TestRampUpUsersFromZeroWithFixed(unittest.TestCase): + class RampUpCase: + def __init__(self, fixed_counts: Tuple[int], weights: Tuple[int], 
target_user_count: int): + self.fixed_counts = fixed_counts + self.weights = weights + self.target_user_count = target_user_count + + def __str__(self): + return "<RampUpCase fixed_counts={} weights={} target_user_count={}>".format( + self.fixed_counts, self.weights, self.target_user_count + ) + + def case_handler(self, cases: List[RampUpCase], expected: Dict[str, int], user_classes: List[User]): + self.assertEqual(len(cases), len(expected)) + + for case_num in range(len(cases)): + # Reset to default values + for user_class in user_classes: + user_class.weight, user_class.fixed_count = 1, 0 + + case = cases[case_num] + self.assertEqual( + len(case.fixed_counts) + len(case.weights), + len(user_classes), + msg="Invalid test case or user list.", + ) + + fixed_users = user_classes[: len(case.fixed_counts)] + weighted_users_list = user_classes[len(case.fixed_counts) :] + + for user, fixed_count in zip(fixed_users, case.fixed_counts): + user.fixed_count = fixed_count + + for user, weight in zip(weighted_users_list, case.weights): + user.weight = weight + + worker_node1 = WorkerNode("1") + + users_dispatcher = UsersDispatcher(worker_nodes=[worker_node1], user_classes=user_classes) + users_dispatcher.new_dispatch(target_user_count=case.target_user_count, spawn_rate=0.5) + users_dispatcher._wait_between_dispatch = 0 + + iterations = list(users_dispatcher) + self.assertDictEqual(iterations[-1]["1"], expected[case_num], msg=f"Wrong case {case}") + + def test_ramp_up_2_weighted_user_with_1_fixed_user(self): + class User1(User): + ... + + class User2(User): + ... + + class User3(User): + ... 
+ + self.case_handler( + cases=[ + self.RampUpCase(fixed_counts=(1,), weights=(1, 1), target_user_count=3), + self.RampUpCase(fixed_counts=(1,), weights=(1, 1), target_user_count=9), + self.RampUpCase(fixed_counts=(8,), weights=(1, 1), target_user_count=10), + self.RampUpCase(fixed_counts=(2,), weights=(1, 1), target_user_count=1000), + self.RampUpCase(fixed_counts=(100,), weights=(1, 1), target_user_count=1000), + self.RampUpCase(fixed_counts=(960,), weights=(1, 1), target_user_count=1000), + self.RampUpCase(fixed_counts=(9990,), weights=(1, 1), target_user_count=10000), + self.RampUpCase(fixed_counts=(100,), weights=(1, 1), target_user_count=100), + ], + expected=[ + {"User1": 1, "User2": 1, "User3": 1}, + {"User1": 1, "User2": 4, "User3": 4}, + {"User1": 8, "User2": 1, "User3": 1}, + {"User1": 2, "User2": 499, "User3": 499}, + {"User1": 100, "User2": 450, "User3": 450}, + {"User1": 960, "User2": 20, "User3": 20}, + {"User1": 9990, "User2": 5, "User3": 5}, + {"User1": 100, "User2": 0, "User3": 0}, + ], + user_classes=[User1, User2, User3], + ) + + def test_ramp_up_various_count_weighted_and_fixed_users(self): + class User1(User): + ... + + class User2(User): + ... + + class User3(User): + ... + + class User4(User): + ... + + class User5(User): + ... 
+ + self.case_handler( + cases=[ + self.RampUpCase(fixed_counts=(), weights=(1, 1, 1, 1, 1), target_user_count=5), + self.RampUpCase(fixed_counts=(1, 1), weights=(1, 1, 1), target_user_count=5), + self.RampUpCase(fixed_counts=(5, 2), weights=(1, 1, 1), target_user_count=10), + self.RampUpCase(fixed_counts=(9, 1), weights=(5, 3, 2), target_user_count=20), + self.RampUpCase(fixed_counts=(996,), weights=(1, 1, 1, 1), target_user_count=1000), + self.RampUpCase(fixed_counts=(500,), weights=(2, 1, 1, 1), target_user_count=1000), + self.RampUpCase(fixed_counts=(250, 250), weights=(3, 1, 1), target_user_count=1000), + self.RampUpCase(fixed_counts=(1, 1, 1, 1), weights=(100,), target_user_count=1000), + ], + expected=[ + {"User1": 1, "User2": 1, "User3": 1, "User4": 1, "User5": 1}, + {"User1": 1, "User2": 1, "User3": 1, "User4": 1, "User5": 1}, + {"User1": 5, "User2": 2, "User3": 1, "User4": 1, "User5": 1}, + {"User1": 9, "User2": 1, "User3": 5, "User4": 3, "User5": 2}, + {"User1": 996, "User2": 1, "User3": 1, "User4": 1, "User5": 1}, + {"User1": 500, "User2": 200, "User3": 100, "User4": 100, "User5": 100}, + {"User1": 250, "User2": 250, "User3": 300, "User4": 100, "User5": 100}, + {"User1": 1, "User2": 1, "User3": 1, "User4": 1, "User5": 996}, + ], + user_classes=[User1, User2, User3, User4, User5], + ) + + def test_ramp_up_only_fixed_users(self): + class User1(User): + ... + + class User2(User): + ... + + class User3(User): + ... + + class User4(User): + ... + + class User5(User): + ... 
+ + self.case_handler( + cases=[ + self.RampUpCase(fixed_counts=(1, 1, 1, 1, 1), weights=(), target_user_count=5), + self.RampUpCase(fixed_counts=(13, 26, 39, 52, 1), weights=(), target_user_count=131), + self.RampUpCase(fixed_counts=(10, 10, 10, 10, 10), weights=(), target_user_count=100), + self.RampUpCase(fixed_counts=(10, 10, 10, 10, 10), weights=(), target_user_count=50), + ], + expected=[ + {"User1": 1, "User2": 1, "User3": 1, "User4": 1, "User5": 1}, + {"User1": 13, "User2": 26, "User3": 39, "User4": 52, "User5": 1}, + {"User1": 10, "User2": 10, "User3": 10, "User4": 10, "User5": 10}, + {"User1": 10, "User2": 10, "User3": 10, "User4": 10, "User5": 10}, + ], + user_classes=[User1, User2, User3, User4, User5], + ) + + def test_ramp_up_partially_ramp_down_and_ramp_up_to_target(self): + class User1(User): + fixed_count = 50 + + class User2(User): + fixed_count = 50 + + target_count = User1.fixed_count + User2.fixed_count + + users_dispatcher = UsersDispatcher(worker_nodes=[WorkerNode("1")], user_classes=[User1, User2]) + users_dispatcher.new_dispatch(target_user_count=30, spawn_rate=0.5) + users_dispatcher._wait_between_dispatch = 0 + iterations = list(users_dispatcher) + self.assertDictEqual(iterations[-1]["1"], {"User1": 15, "User2": 15}) + + users_dispatcher.new_dispatch(target_user_count=20, spawn_rate=0.5) + users_dispatcher._wait_between_dispatch = 0 + iterations = list(users_dispatcher) + self.assertDictEqual(iterations[-1]["1"], {"User1": 10, "User2": 10}) + + users_dispatcher.new_dispatch(target_user_count=target_count, spawn_rate=0.5) + users_dispatcher._wait_between_dispatch = 0 + iterations = list(users_dispatcher) + self.assertDictEqual(iterations[-1]["1"], {"User1": 50, "User2": 50}) + + def test_ramp_up_ramp_down_and_ramp_up_again(self): + for weights, fixed_counts in [ + [(1, 1, 1, 1, 1), (100, 100, 50, 50, 200)], + [(1, 1, 1, 1, 1), (100, 150, 50, 50, 0)], + [(1, 1, 1, 1, 1), (200, 100, 50, 0, 0)], + [(1, 1, 1, 1, 1), (200, 100, 0, 0, 0)], + 
[(1, 1, 1, 1, 1), (200, 0, 0, 0, 0)], + [(1, 1, 1, 1, 1), (0, 0, 0, 0, 0)], + ]: + + u1_weight, u2_weight, u3_weight, u4_weight, u5_weight = weights + u1_fixed_count, u2_fixed_count, u3_fixed_count, u4_fixed_count, u5_fixed_count = fixed_counts + + class User1(User): + weight = u1_weight + fixed_count = u1_fixed_count + + class User2(User): + weight = u2_weight + fixed_count = u2_fixed_count + + class User3(User): + weight = u3_weight + fixed_count = u3_fixed_count + + class User4(User): + weight = u4_weight + fixed_count = u4_fixed_count + + class User5(User): + weight = u5_weight + fixed_count = u5_fixed_count + + target_user_counts = [sum(fixed_counts), sum(fixed_counts) + 100] + down_counts = [0, max(min(fixed_counts) - 1, 0)] + user_classes = [User1, User2, User3, User4, User5] + + for worker_count in [3, 5, 9]: + workers = [WorkerNode(str(i + 1)) for i in range(worker_count)] + users_dispatcher = UsersDispatcher(worker_nodes=workers, user_classes=user_classes) + + for down_to_count in down_counts: + for target_user_count in target_user_counts: + + # Ramp-up to go to `target_user_count` ######### + + users_dispatcher.new_dispatch(target_user_count=target_user_count, spawn_rate=1) + users_dispatcher._wait_between_dispatch = 0 + + list(users_dispatcher) + + for user_class in user_classes: + if user_class.fixed_count: + self.assertEqual( + users_dispatcher._get_user_current_count(user_class.__name__), + user_class.fixed_count, + ) + + # Ramp-down to go to `down_to_count` + # and ensure the fixed users was decreased too + + users_dispatcher.new_dispatch(target_user_count=down_to_count, spawn_rate=1) + users_dispatcher._wait_between_dispatch = 0 + + list(users_dispatcher) + + for user_class in user_classes: + if user_class.fixed_count: + self.assertNotEqual( + users_dispatcher._get_user_current_count(user_class.__name__), + user_class.fixed_count, + ) + + # Ramp-up go back to `target_user_count` and ensure + # the fixed users return to their counts + + 
users_dispatcher.new_dispatch(target_user_count=target_user_count, spawn_rate=1) + users_dispatcher._wait_between_dispatch = 0 + + list(users_dispatcher) + + for user_class in user_classes: + if user_class.fixed_count: + self.assertEqual( + users_dispatcher._get_user_current_count(user_class.__name__), + user_class.fixed_count, + ) + + def _aggregate_dispatched_users(d: Dict[str, Dict[str, int]]) -> Dict[str, int]: user_classes = list(next(iter(d.values())).keys()) return {u: sum(d[u] for d in d.values()) for u in user_classes} diff --git a/locust/test/test_main.py b/locust/test/test_main.py index 3bdab6099a..55c891d408 100644 --- a/locust/test/test_main.py +++ b/locust/test/test_main.py @@ -621,6 +621,64 @@ def t(self): self.assertIn("Shutting down (exit code 0), bye.", output) self.assertEqual(0, proc.returncode) + def test_spawning_with_fixed(self): + LOCUSTFILE_CONTENT = textwrap.dedent( + """ + from locust import User, task, constant + + class User1(User): + fixed_count = 2 + wait_time = constant(1) + + @task + def t(self): + print("Test task is running") + + class User2(User): + wait_time = constant(1) + @task + def t(self): + print("Test task is running") + + class User3(User): + wait_time = constant(1) + @task + def t(self): + print("Test task is running") + """ + ) + with mock_locustfile(content=LOCUSTFILE_CONTENT) as mocked: + proc = subprocess.Popen( + " ".join( + [ + "locust", + "-f", + mocked.file_path, + "--headless", + "--run-time", + "5s", + "-u", + "10", + "-r", + "10", + "--loglevel", + "INFO", + ] + ), + stderr=STDOUT, + stdout=PIPE, + shell=True, + ) + + output = proc.communicate()[0].decode("utf-8") + self.assertIn("Ramping to 10 users at a rate of 10.00 per second", output) + self.assertIn('All users spawned: {"User1": 2, "User2": 4, "User3": 4} (10 total users)', output) + self.assertIn("Test task is running", output) + # ensure stats printer printed at least one report before shutting down and that there was a final report printed as well + 
self.assertRegex(output, r".*Aggregated[\S\s]*Shutting down[\S\s]*Aggregated.*") + self.assertIn("Shutting down (exit code 0)", output) + self.assertEqual(0, proc.returncode) + def test_html_report_option(self): with mock_locustfile() as mocked: with temporary_file("", suffix=".html") as html_report_file_path: diff --git a/locust/test/test_runners.py b/locust/test/test_runners.py index d0550bb9b9..483437251d 100644 --- a/locust/test/test_runners.py +++ b/locust/test/test_runners.py @@ -1033,6 +1033,102 @@ def tick(self): self.assertEqual("stopped", master.state) + def test_distributed_shape_with_fixed_users(self): + """ + Full integration test that starts both a MasterRunner and three WorkerRunner instances + and tests a basic LoadTestShape with scaling up and down users with 'fixed count' users + """ + + class TestUser(User): + @task + def my_task(self): + pass + + class FixedUser1(User): + fixed_count = 1 + + @task + def my_task(self): + pass + + class FixedUser2(User): + fixed_count = 11 + + @task + def my_task(self): + pass + + class TestShape(LoadTestShape): + def tick(self): + run_time = self.get_run_time() + if run_time < 1: + return 12, 12 + elif run_time < 2: + return 36, 24 + elif run_time < 3: + return 12, 24 + else: + return None + + with mock.patch("locust.runners.WORKER_REPORT_INTERVAL", new=0.3): + test_shape = TestShape() + master_env = Environment(user_classes=[TestUser, FixedUser1, FixedUser2], shape_class=test_shape) + master_env.shape_class.reset_time() + master = master_env.create_master_runner("*", 0) + + workers = [] + for _ in range(3): + worker_env = Environment(user_classes=[TestUser, FixedUser1, FixedUser2]) + worker = worker_env.create_worker_runner("127.0.0.1", master.server.port) + workers.append(worker) + + # Give workers time to connect + sleep(0.1) + + # Start a shape test + master.start_shape() + sleep(1) + + # Ensure workers have connected and started the correct amount of users (fixed is spawn first) + for worker in workers: + 
self.assertEqual(4, worker.user_count, "Shape test has not reached stage 1") + self.assertEqual( + 12, test_shape.get_current_user_count(), "Shape is not seeing stage 1 runner user count correctly" + ) + self.assertDictEqual(master.reported_user_classes_count, {"FixedUser1": 1, "FixedUser2": 11, "TestUser": 0}) + + # Ensure new stage with more users has been reached + sleep(1) + for worker in workers: + self.assertEqual(12, worker.user_count, "Shape test has not reached stage 2") + self.assertEqual( + 36, test_shape.get_current_user_count(), "Shape is not seeing stage 2 runner user count correctly" + ) + self.assertDictEqual( + master.reported_user_classes_count, {"FixedUser1": 1, "FixedUser2": 11, "TestUser": 24} + ) + + # Ensure new stage with less users has been reached + # and expected count of the fixed users is present + sleep(1) + for worker in workers: + self.assertEqual(4, worker.user_count, "Shape test has not reached stage 3") + self.assertEqual( + 12, test_shape.get_current_user_count(), "Shape is not seeing stage 3 runner user count correctly" + ) + self.assertDictEqual(master.reported_user_classes_count, {"FixedUser1": 1, "FixedUser2": 11, "TestUser": 0}) + + # Ensure test stops at the end + sleep(0.5) + for worker in workers: + self.assertEqual(0, worker.user_count, "Shape test has not stopped") + self.assertEqual( + 0, test_shape.get_current_user_count(), "Shape is not seeing stopped runner user count correctly" + ) + self.assertDictEqual(master.reported_user_classes_count, {"FixedUser1": 0, "FixedUser2": 0, "TestUser": 0}) + + self.assertEqual(STATE_STOPPED, master.state) + def test_distributed_shape_with_stop_timeout(self): """ Full integration test that starts both a MasterRunner and five WorkerRunner instances diff --git a/locust/test/test_stats.py b/locust/test/test_stats.py index eeac90493e..94b9b05674 100644 --- a/locust/test/test_stats.py +++ b/locust/test/test_stats.py @@ -15,10 +15,10 @@ from locust.stats import StatsCSVFileWriter from 
locust.stats import stats_history from locust.test.testcases import LocustTestCase -from locust.user.inspectuser import get_task_ratio_dict +from locust.user.inspectuser import _get_task_ratio -from .testcases import WebserverTestCase -from .test_runners import mocked_rpc +from locust.test.testcases import WebserverTestCase +from locust.test.test_runners import mocked_rpc _TEST_CSV_STATS_INTERVAL_SEC = 0.2 @@ -794,16 +794,16 @@ def task2(self): class TestInspectUser(unittest.TestCase): - def test_get_task_ratio_dict_relative(self): - ratio = get_task_ratio_dict([MyTaskSet]) + def test_get_task_ratio_relative(self): + ratio = _get_task_ratio([MyTaskSet], False, 1.0) self.assertEqual(1.0, ratio["MyTaskSet"]["ratio"]) self.assertEqual(0.75, ratio["MyTaskSet"]["tasks"]["root_task"]["ratio"]) self.assertEqual(0.25, ratio["MyTaskSet"]["tasks"]["MySubTaskSet"]["ratio"]) self.assertEqual(0.5, ratio["MyTaskSet"]["tasks"]["MySubTaskSet"]["tasks"]["task1"]["ratio"]) self.assertEqual(0.5, ratio["MyTaskSet"]["tasks"]["MySubTaskSet"]["tasks"]["task2"]["ratio"]) - def test_get_task_ratio_dict_total(self): - ratio = get_task_ratio_dict([MyTaskSet], total=True) + def test_get_task_ratio_total(self): + ratio = _get_task_ratio([MyTaskSet], True, 1.0) self.assertEqual(1.0, ratio["MyTaskSet"]["ratio"]) self.assertEqual(0.75, ratio["MyTaskSet"]["tasks"]["root_task"]["ratio"]) self.assertEqual(0.25, ratio["MyTaskSet"]["tasks"]["MySubTaskSet"]["ratio"]) diff --git a/locust/test/test_taskratio.py b/locust/test/test_taskratio.py index 09ab864542..05125b34fe 100644 --- a/locust/test/test_taskratio.py +++ b/locust/test/test_taskratio.py @@ -1,7 +1,7 @@ import unittest from locust.user import User, TaskSet, task -from locust.user.inspectuser import get_task_ratio_dict +from locust.user.inspectuser import get_ratio, _get_task_ratio class TestTaskRatio(unittest.TestCase): @@ -24,7 +24,7 @@ def task2(self): class MyUser(User): tasks = [Tasks] - ratio_dict = get_task_ratio_dict(Tasks.tasks, 
total=True) + ratio_dict = _get_task_ratio(Tasks.tasks, True, 1.0) self.assertEqual( { @@ -52,7 +52,7 @@ class MoreLikelyUser(User): weight = 3 tasks = [Tasks] - ratio_dict = get_task_ratio_dict([UnlikelyUser, MoreLikelyUser], total=True) + ratio_dict = get_ratio([UnlikelyUser, MoreLikelyUser], {"UnlikelyUser": 1, "MoreLikelyUser": 3}, True) self.assertDictEqual( { diff --git a/locust/user/inspectuser.py b/locust/user/inspectuser.py index e70af1670b..748b798a31 100644 --- a/locust/user/inspectuser.py +++ b/locust/user/inspectuser.py @@ -1,14 +1,43 @@ +from collections import defaultdict import inspect +from json import dumps from .task import TaskSet -from .users import User -def print_task_ratio(user_classes, total=False, level=0, parent_ratio=1.0): - d = get_task_ratio_dict(user_classes, total=total, parent_ratio=parent_ratio) +def print_task_ratio(user_classes, num_users, total): + """ + This function calculates the task ratio of users based on the user total count. + """ + d = get_ratio(user_classes, _calc_distribution(user_classes, num_users), total) _print_task_ratio(d) +def print_task_ratio_json(user_classes, num_users): + d = _calc_distribution(user_classes, num_users) + task_data = { + "per_class": get_ratio(user_classes, d, False), + "total": get_ratio(user_classes, d, True), + } + + print(dumps(task_data, indent=4)) + + +def _calc_distribution(user_classes, num_users): + fixed_count = sum([u.fixed_count for u in user_classes if u.fixed_count]) + total_weight = sum([u.weight for u in user_classes if not u.fixed_count]) + num_users = num_users or (total_weight if not fixed_count else 1) + weighted_count = num_users - fixed_count + weighted_count = weighted_count if weighted_count > 0 else 0 + user_classes_count = {} + + for u in user_classes: + count = u.fixed_count if u.fixed_count else (u.weight / total_weight) * weighted_count + user_classes_count[u.__name__] = round(count) + + return user_classes_count + + def _print_task_ratio(x, level=0): for k, v in 
x.items(): padding = 2 * " " * level @@ -18,33 +47,32 @@ def _print_task_ratio(x, level=0): _print_task_ratio(v["tasks"], level + 1) -def get_task_ratio_dict(tasks, total=False, parent_ratio=1.0): - """ - Return a dict containing task execution ratio info - """ - if len(tasks) > 0 and hasattr(tasks[0], "weight"): - divisor = sum(t.weight for t in tasks) - else: - divisor = len(tasks) / parent_ratio - ratio = {} +def get_ratio(user_classes, user_spawned, total): + user_count = sum(user_spawned.values()) or 1 + ratio_percent = {u: user_spawned.get(u.__name__, 0) / user_count for u in user_classes} + + task_dict = {} + for u, r in ratio_percent.items(): + d = {"ratio": r} + d["tasks"] = _get_task_ratio(u.tasks, total, r) + task_dict[u.__name__] = d + + return task_dict + + +def _get_task_ratio(tasks, total, parent_ratio): + parent_ratio = parent_ratio if total else 1.0 + ratio = defaultdict(int) for task in tasks: - ratio.setdefault(task, 0) - ratio[task] += task.weight if hasattr(task, "weight") else 1 + ratio[task] += 1 - # get percentage - ratio_percent = dict((k, float(v) / divisor) for k, v in ratio.items()) + ratio_percent = {t: r * parent_ratio / len(tasks) for t, r in ratio.items()} task_dict = {} - for locust, ratio in ratio_percent.items(): - d = {"ratio": ratio} - if inspect.isclass(locust): - if issubclass(locust, (User, TaskSet)): - T = locust.tasks - if total: - d["tasks"] = get_task_ratio_dict(T, total, ratio) - else: - d["tasks"] = get_task_ratio_dict(T, total) - - task_dict[locust.__name__] = d + for t, r in ratio_percent.items(): + d = {"ratio": r} + if inspect.isclass(t) and issubclass(t, TaskSet): + d["tasks"] = _get_task_ratio(t.tasks, total, r) + task_dict[t.__name__] = d return task_dict diff --git a/locust/user/users.py b/locust/user/users.py index db924faf92..1852c91ef5 100644 --- a/locust/user/users.py +++ b/locust/user/users.py @@ -97,6 +97,13 @@ class ForumPage(TaskSet): weight = 1 """Probability of user class being chosen. 
The higher the weight, the greater the chance of it being chosen.""" + fixed_count = 0 + """ + If the value > 0, the weight property will be ignored and the 'fixed_count'-instances will be spawned. + These Users are spawned first. If the total target count (specified by the --users arg) is not enougth + to spawn all instances of each User class with the defined property, the final count of each User is undefined. + """ + abstract = True """If abstract is True, the class is meant to be subclassed, and locust will not spawn users of this class during a test.""" diff --git a/locust/web.py b/locust/web.py index bed99ed31f..5274453614 100644 --- a/locust/web.py +++ b/locust/web.py @@ -21,7 +21,7 @@ from .stats import sort_stats from . import stats as stats_module, __version__ as version, argument_parser from .stats import StatsCSV -from .user.inspectuser import get_task_ratio_dict +from .user.inspectuser import get_ratio from .util.cache import memoize from .util.rounding import proper_round from .util.timespan import parse_timespan @@ -344,6 +344,19 @@ def exceptions_csv(): self.stats_csv_writer.exceptions_csv(writer) return _download_csv_response(data.getvalue(), "exceptions") + @app.route("/tasks") + @self.auth_required_if_enabled + def tasks(): + is_distributed = isinstance(self.environment.runner, MasterRunner) + runner = self.environment.runner + user_spawned = runner.reported_user_classes_count if is_distributed else runner.user_classes_count + + task_data = { + "per_class": get_ratio(self.environment.user_classes, user_spawned, False), + "total": get_ratio(self.environment.user_classes, user_spawned, True), + } + return task_data + def start(self): self.greenlet = gevent.spawn(self.start_server) self.greenlet.link_exception(greenlet_exception_handler) @@ -411,12 +424,6 @@ def update_template_args(self): worker_count = 0 stats = self.environment.runner.stats - - task_data = { - "per_class": get_task_ratio_dict(self.environment.user_classes), - "total": 
get_task_ratio_dict(self.environment.user_classes, total=True), - } - extra_options = argument_parser.ui_extra_args_dict() self.template_args = { @@ -433,6 +440,6 @@ def update_template_args(self): "worker_count": worker_count, "is_shape": self.environment.shape_class, "stats_history_enabled": options and options.stats_history_enabled, - "tasks": dumps(task_data), + "tasks": dumps({}), "extra_options": extra_options, }