Add vit_little in12k + in12k-ft-in1k weights
rwightman committed May 27, 2024
1 parent 3c0283f commit 5dce710
Showing 1 changed file with 18 additions and 0 deletions.
timm/models/vision_transformer.py: 18 additions & 0 deletions
@@ -1872,6 +1872,13 @@ def _cfg(url: str = '', **kwargs) -> Dict[str, Any]:
    'vit_pwee_patch16_reg1_gap_256.sbb_in1k': _cfg(
        hf_hub_id='timm/',
        input_size=(3, 256, 256), crop_pct=0.95),
    'vit_little_patch16_reg1_gap_256.sbb_in12k_ft_in1k': _cfg(
        hf_hub_id='timm/',
        input_size=(3, 256, 256), crop_pct=0.95),
    'vit_little_patch16_reg1_gap_256.sbb_in12k': _cfg(
        hf_hub_id='timm/',
        num_classes=11821,
        input_size=(3, 256, 256), crop_pct=0.95),
    'vit_little_patch16_reg4_gap_256.sbb_in1k': _cfg(
        hf_hub_id='timm/',
        input_size=(3, 256, 256), crop_pct=0.95),
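
As a usage sketch (not part of the commit): the two new pretrained tags resolve through timm.create_model like any other weight tag. The .sbb_in12k checkpoint keeps its 11821-class ImageNet-12k head, while .sbb_in12k_ft_in1k carries the 1000-class ImageNet-1k fine-tuned head.

import timm

# ImageNet-12k pretrain followed by ImageNet-1k fine-tune (1000-class head).
model_ft = timm.create_model(
    'vit_little_patch16_reg1_gap_256.sbb_in12k_ft_in1k', pretrained=True)

# Raw ImageNet-12k pretrain checkpoint (11821-class head, per num_classes above).
model_12k = timm.create_model(
    'vit_little_patch16_reg1_gap_256.sbb_in12k', pretrained=True)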
@@ -2975,6 +2982,17 @@ def vit_pwee_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    return model


@register_model
def vit_little_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
        patch_size=16, embed_dim=320, depth=14, num_heads=5, init_values=1e-5, mlp_ratio=5.6,
        class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_little_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_little_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    model_args = dict(
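For reference, a minimal standalone sketch (illustrative random input and batch size) showing that the newly registered vit_little_patch16_reg1_gap_256 builds without weights and accepts the 256x256 inputs the configs declare:

import timm
import torch

# Architecture only; pretrained=True would pull the sbb weights instead.
model = timm.create_model('vit_little_patch16_reg1_gap_256', pretrained=False)
model.eval()

x = torch.randn(1, 3, 256, 256)  # matches input_size=(3, 256, 256)
with torch.inference_mode():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000]) with the default 1000-class head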
