Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: add multi label selection question to the API #3010

Merged
merged 25 commits into from
May 31, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
323abdf
feat: add `LabelSelectionQuestionSettings` class
gabrielmbmb May 26, 2023
5b1c736
feat: update to use `QuestionSettings`
gabrielmbmb May 26, 2023
b30f683
feat: enable markdown configuration for text fields and questions (#3…
frascuchon May 26, 2023
0022ef1
feat: `options` should contain at least 2 items
gabrielmbmb May 29, 2023
c9bc944
feat: set min and max labels
gabrielmbmb May 29, 2023
9d9023f
feat: rename from `tooltip` to `description`
gabrielmbmb May 29, 2023
649a700
feat: `visible_options` has to be positive
gabrielmbmb May 29, 2023
7d033e2
feat: include `LabelSelectionQuestionSettings` in `Question` and
gabrielmbmb May 29, 2023
e4f61c7
feat: add unit tests for single label selection question
gabrielmbmb May 29, 2023
ae1c789
Merge branch 'develop' into feat/single-label-selection-question-api
gabrielmbmb May 29, 2023
96bedd1
fix: method resolution order
gabrielmbmb May 29, 2023
06a5066
feat: add unit tests for all question types
gabrielmbmb May 29, 2023
2c3bdee
Update src/argilla/server/search_engine.py
gabrielmbmb May 29, 2023
5eb05ea
feat: add single label selection validation schema
gabrielmbmb May 29, 2023
acd62a3
feat: add min and max length to label selection question fields
gabrielmbmb May 29, 2023
38d58a1
feat: remove unneeded `PydanticField`
gabrielmbmb May 29, 2023
a1e0895
feat: add unit tests for validating label selection attributes str le…
gabrielmbmb May 29, 2023
877c7cf
Merge branch 'develop' into feat/single-label-selection-question-api
gabrielmbmb May 29, 2023
5f9b839
Merge branch 'develop' into feat/multi-label-selection-question-api
gabrielmbmb May 29, 2023
a42bb3f
feat: add multi label selection question
gabrielmbmb May 29, 2023
5b152df
feat: make `invalid_options` deterministic based on values
gabrielmbmb May 29, 2023
6f0d51a
feat: add unit tests for multi label selection question
gabrielmbmb May 29, 2023
2e94d85
docs: add label selection & multi label selection API
gabrielmbmb May 29, 2023
39b1368
feat: add checking list length
gabrielmbmb May 30, 2023
65c22ae
feat: add unit test for testing empty list
gabrielmbmb May 30, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ These are the section headers that we use:

- Added boolean `use_markdown` property to `TextFieldSettings` model.
- Added boolean `use_markdown` property to `TextQuestionSettings` model.
- Added `LabelSelectionQuestionSettings` class that allows creating label selection (single-choice) questions in the API.
- Added `MultiLabelSelectionQuestionSettings` class that allows creating multi-label selection (multi-choice) questions in the API.

## [1.8.0-dev](https://github.com/argilla-io/argilla/compare/v1.7.0...v1.8.0)

Expand Down
21 changes: 20 additions & 1 deletion src/argilla/server/models/questions.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ class QuestionType(str, Enum):
text = "text"
rating = "rating"
label_selection = "label_selection"
multi_label_selection = "multi_label_selection"


class ResponseValue(BaseModel):
Expand Down Expand Up @@ -81,7 +82,25 @@ class LabelSelectionQuestionSettings(ValidOptionCheckerMixin[str], BaseQuestionS
visible_options: Optional[int] = None


class MultiLabelSelectionQuestionSettings(LabelSelectionQuestionSettings):
    """Settings for a multi-label selection (multi-choice) question.

    Inherits the option definitions from `LabelSelectionQuestionSettings`;
    only the discriminator `type` and the response validation differ: the
    response value must be a non-empty list of valid option values.
    """

    type: Literal[QuestionType.multi_label_selection]

    def check_response(self, response: ResponseValue) -> None:
        """Validate `response.value` against this question's options.

        Raises:
            ValueError: if the value is not a list, is an empty list, or
                contains values that are not among `self.option_values`.
        """
        if not isinstance(response.value, list):
            raise ValueError(f"Expected list of values, found {type(response.value)}")
        if len(response.value) == 0:
            raise ValueError("Expected list of values, found empty list")
        # `sorted` accepts any iterable, so the intermediate `list()` is redundant;
        # sorting keeps the error message deterministic regardless of set iteration order.
        invalid_options = sorted(set(response.value) - set(self.option_values))
        if invalid_options:
            raise ValueError(f"{invalid_options!r} are not valid options.\nValid options are: {self.option_values!r}")


QuestionSettings = Annotated[
Union[TextQuestionSettings, RatingQuestionSettings, LabelSelectionQuestionSettings],
Union[
TextQuestionSettings,
RatingQuestionSettings,
LabelSelectionQuestionSettings,
MultiLabelSelectionQuestionSettings,
],
Field(..., discriminator="type"),
]
11 changes: 10 additions & 1 deletion src/argilla/server/schemas/v1/datasets.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,8 +185,17 @@ class LabelSelectionQuestionSettings(BaseModel):
visible_options: Optional[PositiveInt] = None


class MultiLabelSelectionQuestionSettings(LabelSelectionQuestionSettings):
    # API schema for multi-label selection questions. Reuses every field from the
    # single-label schema; only the discriminator `type` differs, which is what
    # routes payloads to this model in the `QuestionSettings` discriminated union.
    type: Literal[QuestionType.multi_label_selection]


QuestionSettings = Annotated[
Union[TextQuestionSettings, RatingQuestionSettings, LabelSelectionQuestionSettings],
Union[
TextQuestionSettings,
RatingQuestionSettings,
LabelSelectionQuestionSettings,
MultiLabelSelectionQuestionSettings,
],
PydanticField(discriminator="type"),
]

Expand Down
2 changes: 1 addition & 1 deletion src/argilla/server/search_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ def _field_mapping_for_question(self, question: Question):
if settings.type == QuestionType.rating:
# See https://www.elastic.co/guide/en/elasticsearch/reference/current/number.html
return {"type": "integer"}
elif settings.type in [QuestionType.text, QuestionType.label_selection]:
elif settings.type in [QuestionType.text, QuestionType.label_selection, QuestionType.multi_label_selection]:
# TODO: Review mapping for label selection. Could make sense to use `keyword` mapping instead. See https://www.elastic.co/guide/en/elasticsearch/reference/current/keyword.html
# See https://www.elastic.co/guide/en/elasticsearch/reference/current/text.html
return {"type": "text", "index": False}
Expand Down
11 changes: 11 additions & 0 deletions tests/factories.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,3 +159,14 @@ class LabelSelectionQuestionFactory(QuestionFactory):
{"value": "option3", "text": "Option 3"},
],
}


class MultiLabelSelectionQuestionFactory(QuestionFactory):
    # Test factory for a multi-label selection question with three default options
    # (option1..option3), mirroring `LabelSelectionQuestionFactory` above so tests
    # for both question types can share the same option values.
    settings = {
        "type": QuestionType.multi_label_selection.value,
        "options": [
            {"value": "option1", "text": "Option 1"},
            {"value": "option2", "text": "Option 2"},
            {"value": "option3", "text": "Option 3"},
        ],
    }
3 changes: 0 additions & 3 deletions tests/server/api/v1/test_datasets.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,11 +35,8 @@
FIELD_CREATE_NAME_MAX_LENGTH,
FIELD_CREATE_TITLE_MAX_LENGTH,
LABEL_SELECTION_DESCRIPTION_MAX_LENGTH,
LABEL_SELECTION_DESCRIPTION_MIN_LENGTH,
LABEL_SELECTION_TEXT_MAX_LENGTH,
LABEL_SELECTION_TEXT_MIN_LENGTH,
LABEL_SELECTION_VALUE_MAX_LENGHT,
LABEL_SELECTION_VALUE_MIN_LENGHT,
QUESTION_CREATE_DESCRIPTION_MAX_LENGTH,
QUESTION_CREATE_NAME_MAX_LENGTH,
QUESTION_CREATE_TITLE_MAX_LENGTH,
Expand Down
98 changes: 55 additions & 43 deletions tests/server/api/v1/test_records.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
AnnotatorFactory,
DatasetFactory,
LabelSelectionQuestionFactory,
MultiLabelSelectionQuestionFactory,
RatingQuestionFactory,
RecordFactory,
ResponseFactory,
Expand All @@ -43,11 +44,15 @@ def create_text_questions(dataset: "Dataset") -> None:


def create_rating_questions(dataset: "Dataset") -> None:
RatingQuestionFactory.create(name="rating_question", dataset=dataset, required=False)
RatingQuestionFactory.create(name="rating_question", dataset=dataset)


def create_label_selection_questions(dataset: "Dataset") -> None:
LabelSelectionQuestionFactory.create(name="label_selection_question", dataset=dataset, required=False)
LabelSelectionQuestionFactory.create(name="label_selection_question", dataset=dataset)


def create_multi_label_selection_questions(dataset: "Dataset") -> None:
MultiLabelSelectionQuestionFactory.create(name="multi_label_selection_question", dataset=dataset)


@pytest.mark.parametrize(
Expand Down Expand Up @@ -78,6 +83,22 @@ def create_label_selection_questions(dataset: "Dataset") -> None:
},
},
),
(
create_multi_label_selection_questions,
{
"values": {
"multi_label_selection_question": {"value": ["option1"]},
},
},
),
(
create_multi_label_selection_questions,
{
"values": {
"multi_label_selection_question": {"value": ["option1", "option2"]},
},
},
),
],
)
def test_create_record_response(
Expand Down Expand Up @@ -109,50 +130,18 @@ def test_create_record_response(
}


@pytest.mark.parametrize(
"create_questions_func, responses",
[
(
create_text_questions,
{
"values": {
"input_ok": {"value": "yes"},
"unknown_question": {"value": "Test"},
},
},
),
(
create_rating_questions,
{
"values": {
"rating_question": {"value": 5},
"unknown_question": {"value": "Test"},
},
},
),
(
create_label_selection_questions,
{
"values": {
"label_selection_question": {"value": "option1"},
"unknown_question": {"value": "Test"},
},
},
),
],
)
def test_create_record_response_with_extra_question_responses(
client: TestClient,
db: Session,
admin_auth_header: dict,
create_questions_func: Callable[["Dataset"], None],
responses: dict,
):
def test_create_record_response_with_extra_question_responses(client: TestClient, db: Session, admin_auth_header: dict):
dataset = DatasetFactory.create()
create_questions_func(dataset)
create_text_questions(dataset)
record = RecordFactory.create(dataset=dataset)

response_json = {**responses, "status": "submitted"}
response_json = {
"values": {
"input_ok": {"value": "yes"},
"unknown_question": {"value": "Test"},
},
"status": "submitted",
}
response = client.post(f"/api/v1/records/{record.id}/responses", headers=admin_auth_header, json=response_json)

assert response.status_code == 422
Expand Down Expand Up @@ -190,6 +179,29 @@ def test_create_record_response_with_extra_question_responses(
},
"False is not a valid option.\nValid options are: ['option1', 'option2', 'option3']",
),
(
gabrielmbmb marked this conversation as resolved.
Show resolved Hide resolved
create_multi_label_selection_questions,
{
"values": {
"multi_label_selection_question": {"value": "wrong-type"},
},
},
"Expected list of values, found <class 'str'>",
),
(
create_multi_label_selection_questions,
{
"values": {
"multi_label_selection_question": {"value": ["option4", "option5"]},
},
},
"['option4', 'option5'] are not valid options.\nValid options are: ['option1', 'option2', 'option3']",
),
(
create_multi_label_selection_questions,
{"values": {"multi_label_selection_question": {"value": []}}},
"Expected list of values, found empty list",
),
],
)
def test_create_record_response_with_wrong_response_value(
Expand Down