[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
pre-commit-ci[bot] 2025-03-11 06:31:34 +00:00
parent a13072f72f
commit ed30c7d0a5
2 changed files with 5 additions and 4 deletions

View File

@@ -13,6 +13,7 @@ from timm.models.vision_transformer import Mlp, use_fused_attn
from torch.jit import Final
from transformers import AutoModel
from transformers.modeling_utils import PreTrainedModel
from .configuration_scaledp import ScaleDPPolicyConfig
_logger = logging.getLogger(__name__)
@@ -193,8 +194,6 @@ class FinalLayer(nn.Module):
return x
class ScaleDP(PreTrainedModel):
"""
Diffusion models with a Transformer backbone.

View File

@@ -48,7 +48,7 @@ from transformers.utils import (
replace_return_docstrings,
)
-from lerobot.common.policies.dexvla.fusion_modules import ActionProjector,FiLM
+from lerobot.common.policies.dexvla.fusion_modules import ActionProjector, FiLM
from .configuration_qwen2_vla import Qwen2VLAConfig, Qwen2VLVisionConfig
@@ -1376,7 +1376,9 @@ class Qwen2VLModel(Qwen2VLPreTrainedModel):
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
)
diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
-if config.sliding_window is not None and (not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length):
+if config.sliding_window is not None and (
+    not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length
+):
# if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
# the check is needed to verify is current checkpoint was trained with sliding window or not
sliding_attend_mask = torch.arange(target_length, device=device) <= (
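The condition reformatted in this hunk guards the sliding-window branch of the causal-mask construction: future positions are always masked, and, when a sliding window is configured, positions further than `sliding_window` tokens behind each query are masked as well. The following is a minimal, self-contained sketch of that masking pattern for illustration only; it is not the verbatim transformers implementation, the `SlidingWindowCache` type check is omitted, and the helper name `build_causal_mask` is made up.

```python
import torch

def build_causal_mask(sequence_length, target_length, cache_position, sliding_window=None, dtype=torch.float32):
    """Illustrative sketch of a causal mask with an optional sliding-window cutoff."""
    device = cache_position.device
    min_dtype = torch.finfo(dtype).min
    # Start fully masked; attendable positions are zeroed out below.
    causal_mask = torch.full(
        (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
    )
    # Mask key positions strictly after each query's cache position (causality).
    diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
    if sliding_window is not None:
        # Additionally mask key positions more than `sliding_window` tokens behind the query.
        sliding_attend_mask = torch.arange(target_length, device=device) <= (
            cache_position.reshape(-1, 1) - sliding_window
        )
        diagonal_attend_mask |= sliding_attend_mask
    # Keep min_dtype where masked, 0 where attendable.
    causal_mask *= diagonal_attend_mask
    return causal_mask

# Example: 4 new tokens appended after 4 cached ones, with a window of 3.
mask = build_causal_mask(4, 8, torch.arange(4, 8), sliding_window=3)
```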