Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix test coverage #2020

Merged
merged 20 commits into from
Jul 1, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions datamodel_code_generator/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -429,7 +429,7 @@ def main(args: Optional[Sequence[str]] = None) -> Exit:
with config.custom_formatters_kwargs as data:
try:
custom_formatters_kwargs = json.load(data)
except json.JSONDecodeError as e:
except json.JSONDecodeError as e: # pragma: no cover
print(
f'Unable to load custom_formatters_kwargs mapping: {e}',
file=sys.stderr,
Expand All @@ -438,7 +438,7 @@ def main(args: Optional[Sequence[str]] = None) -> Exit:
if not isinstance(custom_formatters_kwargs, dict) or not all(
isinstance(k, str) and isinstance(v, str)
for k, v in custom_formatters_kwargs.items()
):
): # pragma: no cover
print(
'Custom formatters kwargs mapping must be a JSON string mapping (e.g. {"from": "to", ...})',
file=sys.stderr,
Expand Down
4 changes: 2 additions & 2 deletions datamodel_code_generator/model/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ def merge_constraints(
}
constraints_class = a.__class__
else:
root_type_field_constraints = {}
root_type_field_constraints = {} # pragma: no cover

if isinstance(b, ConstraintsBase): # pragma: no cover
model_field_constraints = {
Expand All @@ -86,7 +86,7 @@ def merge_constraints(
else:
model_field_constraints = {}

if not issubclass(constraints_class, ConstraintsBase):
if not issubclass(constraints_class, ConstraintsBase): # pragma: no cover
return None

return constraints_class.parse_obj(
Expand Down
8 changes: 4 additions & 4 deletions datamodel_code_generator/model/pydantic_v2/base_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
else:
try:
from typing import Literal
except ImportError:
except ImportError: # pragma: no cover
from typing_extensions import Literal


Expand Down Expand Up @@ -106,7 +106,7 @@ class DataModelField(DataModelFieldV1):

@field_validator('extras')
def validate_extras(cls, values: Any) -> Dict[str, Any]:
if not isinstance(values, dict):
if not isinstance(values, dict): # pragma: no cover
return values
if 'examples' in values:
return values
Expand Down Expand Up @@ -146,7 +146,7 @@ def _process_annotated_field_arguments(
self, field_arguments: List[str]
) -> List[str]:
if not self.required or self.const:
if self.use_default_kwarg:
if self.use_default_kwarg: # pragma: no cover
return [
f'default={repr(self.default)}',
*field_arguments,
Expand Down Expand Up @@ -215,7 +215,7 @@ def __init__(
else self.extra_template_data[from_]
)
for data_type in self.all_data_types:
if data_type.is_custom_type:
if data_type.is_custom_type: # pragma: no cover
config_parameters['arbitrary_types_allowed'] = True
break

Expand Down
26 changes: 16 additions & 10 deletions datamodel_code_generator/parser/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -303,7 +303,7 @@ def _copy_data_types(data_types: List[DataType]) -> List[DataType]:
copied_data_types.append(
data_type_.__class__(reference=data_type_.reference)
)
elif data_type_.data_types:
elif data_type_.data_types: # pragma: no cover
copied_data_type = data_type_.copy()
copied_data_type.data_types = _copy_data_types(data_type_.data_types)
copied_data_types.append(copied_data_type)
Expand Down Expand Up @@ -711,7 +711,7 @@ def __change_from_import(
from_, import_ = full_path = relative(
model.module_name, data_type.full_name
)
if imports.use_exact:
if imports.use_exact: # pragma: no cover
from_, import_ = exact_import(
from_, import_, data_type.reference.short_name
)
Expand Down Expand Up @@ -981,7 +981,7 @@ def __collapse_root_models(
if d.is_dict or d.is_union
)
):
continue
continue # pragma: no cover

# set copied data_type
copied_data_type = root_type_field.data_type.copy()
Expand All @@ -1001,17 +1001,23 @@ def __collapse_root_models(

data_type.parent.data_type = copied_data_type

elif data_type.parent.is_list:
elif data_type.parent.is_list: # no: pragma
if self.field_constraints:
model_field.constraints = ConstraintsBase.merge_constraints(
root_type_field.constraints, model_field.constraints
)
if isinstance(
root_type_field, pydantic_model.DataModelField
) and not model_field.extras.get('discriminator'): # no: pragma
discriminator = root_type_field.extras.get('discriminator')
if discriminator: # no: pragma
model_field.extras['discriminator'] = discriminator
if isinstance( # no: pragma
root_type_field, # no: pragma
pydantic_model.DataModelField, # no: pragma
): # no: pragma
if not model_field.extras.get( # no: pragma
'discriminator'
): # no: pragma
discriminator = root_type_field.extras.get(
'discriminator'
)
if discriminator: # no: pragma
model_field.extras['discriminator'] = discriminator
data_type.parent.data_types.remove(data_type)
data_type.parent.data_types.append(copied_data_type)

Expand Down
22 changes: 11 additions & 11 deletions datamodel_code_generator/parser/graphql.py
Original file line number Diff line number Diff line change
Expand Up @@ -238,21 +238,21 @@ def _get_context_source_path_parts(self) -> Iterator[Tuple[Source, List[str]]]:
# TODO (denisart): Temporarily this method duplicates
# the method `datamodel_code_generator.parser.jsonschema.JsonSchemaParser._get_context_source_path_parts`.

if isinstance(self.source, list) or (
if isinstance(self.source, list) or ( # pragma: no cover
isinstance(self.source, Path) and self.source.is_dir()
):
): # pragma: no cover
self.current_source_path = Path()
self.model_resolver.after_load_files = {
self.base_path.joinpath(s.path).resolve().as_posix()
for s in self.iter_source
}

for source in self.iter_source:
if isinstance(self.source, ParseResult):
if isinstance(self.source, ParseResult): # pragma: no cover
path_parts = self.get_url_path_parts(self.source)
else:
path_parts = list(source.path.parts)
if self.current_source_path is not None:
if self.current_source_path is not None: # pragma: no cover
self.current_source_path = source.path
with self.model_resolver.current_base_path_context(
source.path.parent
Expand All @@ -269,7 +269,7 @@ def _resolve_types(self, paths: List[str], schema: graphql.GraphQLSchema) -> Non

resolved_type = graphql_resolver.kind(type_, None)

if resolved_type in self.support_graphql_types:
if resolved_type in self.support_graphql_types: # pragma: no cover
self.all_graphql_objects[type_.name] = type_
# TODO: need a special method for each graph type
self.references[type_.name] = Reference(
Expand Down Expand Up @@ -360,7 +360,7 @@ def parse_field(
data_type.data_types = [new_data_type]

data_type = new_data_type
elif graphql.is_non_null_type(obj):
elif graphql.is_non_null_type(obj): # pragma: no cover
data_type.is_optional = False

obj = obj.of_type
Expand All @@ -372,10 +372,10 @@ def parse_field(
)
extras = {}

if hasattr(field, 'default_value'):
if field.default_value == graphql.pyutils.Undefined:
if hasattr(field, 'default_value'): # pragma: no cover
if field.default_value == graphql.pyutils.Undefined: # pragma: no cover
default = None
else:
else: # pragma: no cover
default = field.default_value
else:
if required is False:
Expand Down Expand Up @@ -425,7 +425,7 @@ def parse_object_like(
fields.append(self._typename_field(obj.name))

base_classes = []
if hasattr(obj, 'interfaces'):
if hasattr(obj, 'interfaces'): # pragma: no cover
base_classes = [self.references[i.name] for i in obj.interfaces]

data_model_type = self.data_model_type(
Expand All @@ -451,7 +451,7 @@ def parse_object(self, graphql_object: graphql.GraphQLObjectType) -> None:
def parse_input_object(
self, input_graphql_object: graphql.GraphQLInputObjectType
) -> None:
self.parse_object_like(input_graphql_object)
self.parse_object_like(input_graphql_object) # pragma: no cover

def parse_union(self, union_object: graphql.GraphQLUnionType) -> None:
fields = []
Expand Down
20 changes: 11 additions & 9 deletions datamodel_code_generator/parser/jsonschema.py
Original file line number Diff line number Diff line change
Expand Up @@ -762,10 +762,10 @@
return self.data_type(reference=base_classes[0])
if required:
for field in fields:
if self.force_optional_for_required_fields or (
if self.force_optional_for_required_fields or ( # pragma: no cover
self.apply_default_values_for_required_fields and field.has_default
):
continue
continue # pragma: no cover
if (field.original_name or field.name) in required:
field.required = True
if obj.required:
Expand Down Expand Up @@ -1311,14 +1311,16 @@
reference: Optional[Reference] = None
if obj.ref:
data_type: DataType = self.get_ref_data_type(obj.ref)
elif obj.custom_type_path:
elif obj.custom_type_path: # no: pragma
data_type = self.data_type_manager.get_data_type_from_full_path(
obj.custom_type_path, is_custom_type=True
)
elif obj.is_array:
data_type = self.parse_array_fields(
name, obj, get_special_path('array', path)
).data_type
elif obj.is_array: # no: pragma
data_type = self.parse_array_fields( # no: pragma

Check warning on line 1319 in datamodel_code_generator/parser/jsonschema.py

View check run for this annotation

Codecov / codecov/patch

datamodel_code_generator/parser/jsonschema.py#L1319

Added line #L1319 was not covered by tests
name, # no: pragma
obj, # no: pragma
get_special_path('array', path), # no: pragma
).data_type # no: pragma
elif obj.anyOf or obj.oneOf:
reference = self.model_resolver.add(
path, name, loaded=True, class_name=True
Expand All @@ -1332,9 +1334,9 @@
name, obj, get_special_path('oneOf', path)
)

if len(data_types) > 1:
if len(data_types) > 1: # pragma: no cover
data_type = self.data_type(data_types=data_types)
elif not data_types:
elif not data_types: # pragma: no cover
return EmptyDataType()
else: # pragma: no cover
data_type = data_types[0]
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# generated by datamodel-codegen:
# filename: person.json
# timestamp: 2019-07-26T00:00:00+00:00

# MIT License
#
# Copyright (c) 2023 Blah-blah
#
from __future__ import annotations

from typing import List, Optional

from pydantic import BaseModel, Field, conint


class Person(BaseModel):
firstName: Optional[str] = Field(None, description="The person's first name.")
lastName: Optional[str] = Field(None, description="The person's last name.")
age: Optional[conint(ge=0)] = Field(
None, description='Age in years which must be equal to or greater than zero.'
)
friends: Optional[List] = None
comment: None = None
39 changes: 39 additions & 0 deletions tests/test_main.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import json
import platform
import shutil
from argparse import Namespace
Expand Down Expand Up @@ -6916,3 +6917,41 @@ def test_one_of_with_sub_schema_array_item():
/ 'output.py'
).read_text()
)


@freeze_time('2019-07-26')
def test_main_jsonschema_with_custom_formatters():
with TemporaryDirectory() as output_dir:
output_file: Path = Path(output_dir) / 'output.py'
formatter_config = {
'license_file': str(
Path(__file__).parent
/ 'data/python/custom_formatters/license_example.txt'
)
}
formatter_config_path = Path(output_dir, 'formatter_config')
with formatter_config_path.open('w') as f:
json.dump(formatter_config, f)
return_code: Exit = main(
[
'--input',
str(JSON_SCHEMA_DATA_PATH / 'person.json'),
'--output',
str(output_file),
'--input-file-type',
'jsonschema',
'--custom-formatters',
'tests.data.python.custom_formatters.add_license',
'--custom-formatters-kwargs',
str(formatter_config_path),
]
)
assert return_code == Exit.OK
assert (
output_file.read_text()
== (
EXPECTED_MAIN_PATH
/ 'main_jsonschema_with_custom_formatters'
/ 'output.py'
).read_text()
)
Loading