Merge branch '2025_02_20_add_dexvla' of https://github.com/JayceWen/lerobot into 2025_02_20_add_dexvla
commit 463add8fc8
@@ -13,6 +13,7 @@ from timm.models.vision_transformer import Mlp, use_fused_attn
from torch.jit import Final
from transformers import AutoModel
from transformers.modeling_utils import PreTrainedModel

from .configuration_scaledp import ScaleDPPolicyConfig

_logger = logging.getLogger(__name__)
@@ -193,8 +194,6 @@ class FinalLayer(nn.Module):
        return x


class ScaleDP(PreTrainedModel):
    """
    Diffusion models with a Transformer backbone.
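The ScaleDP class in this hunk subclasses Hugging Face's PreTrainedModel, and the first hunk imports ScaleDPPolicyConfig alongside it. A minimal sketch of that config/model pairing follows; the attribute names and the placeholder body are illustrative assumptions, not DexVLA's actual fields:

```python
# Sketch of pairing a PretrainedConfig subclass with a PreTrainedModel subclass,
# as the diff above does for ScaleDPPolicyConfig / ScaleDP.
# Field names and the linear "backbone" are illustrative assumptions only.
import torch.nn as nn
from transformers import PretrainedConfig, PreTrainedModel


class ScaleDPPolicyConfig(PretrainedConfig):
    model_type = "scaledp"

    def __init__(self, n_obs_steps: int = 2, hidden_dim: int = 256, **kwargs):
        self.n_obs_steps = n_obs_steps
        self.hidden_dim = hidden_dim
        super().__init__(**kwargs)


class ScaleDP(PreTrainedModel):
    """Diffusion models with a Transformer backbone (sketch only)."""

    config_class = ScaleDPPolicyConfig

    def __init__(self, config: ScaleDPPolicyConfig):
        super().__init__(config)
        # Stand-in for the real transformer backbone.
        self.backbone = nn.Linear(config.hidden_dim, config.hidden_dim)

    def forward(self, x):
        return self.backbone(x)
```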
@@ -1377,7 +1377,9 @@ class Qwen2VLModel(Qwen2VLPreTrainedModel):
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
-           if config.sliding_window is not None and (not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length):
+           if config.sliding_window is not None and (
+               not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length
+           ):
                # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
                # the check is needed to verify is current checkpoint was trained with sliding window or not
                sliding_attend_mask = torch.arange(target_length, device=device) <= (
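This hunk only reflows a long `if` condition onto multiple lines; the surrounding code builds an additive attention mask in which disallowed positions are set to the dtype minimum, and, when `config.sliding_window` is set, keys older than the window are masked out as well. A standalone sketch of that masking pattern; the helper name and signature are assumptions for illustration, not the Qwen2VL method itself:

```python
# Standalone sketch of the causal + sliding-window masking pattern shown above.
# build_causal_mask is an illustrative helper, not part of transformers.
import torch


def build_causal_mask(sequence_length, target_length, cache_position, sliding_window=None, dtype=torch.float32):
    device = cache_position.device
    min_dtype = torch.finfo(dtype).min
    # Start from a fully masked (additive, -inf-like) queries x keys matrix.
    causal_mask = torch.full(
        (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
    )
    # Block keys that lie in the future of each query's cache position.
    blocked = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
    if sliding_window is not None:
        # Additionally block keys that fall outside the sliding window behind each query.
        blocked |= torch.arange(target_length, device=device) <= (
            cache_position.reshape(-1, 1) - sliding_window
        )
    # Keep min_dtype only where attention is blocked; allowed positions become 0.
    return causal_mask * blocked
```

For instance, `build_causal_mask(4, 8, torch.arange(4, 8), sliding_window=3)` leaves each of the four query rows with exactly three zero (attendable) entries, ending at that query's own cache position.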