ModernBERT bug fixes #35404
base: main
Changes from 2 commits
@@ -14,6 +14,7 @@
 # limitations under the License.

 import math
+from contextlib import nullcontext
 from typing import Dict, Literal, Optional, Tuple, Union

 import torch
@@ -141,6 +142,9 @@ class ModernBertConfig(PretrainedConfig):
             the model will be compiled if 1) `triton` is installed, 2) the model is not on MPS, 3) the model is not
             shared between devices, and 4) the model is not resized after initialization. If `True`, then the model may
             be faster in some scenarios.
+        repad_logits_with_grad (`bool`, *optional*, defaults to `False`):
+            When True, ModernBertForMaskedLM keeps track of the logits' gradient when repadding for output. This only
+            applies when using Flash Attention 2 with passed labels. Otherwise output logits always have a gradient.

     Examples:
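For illustration only (not part of the diff): a minimal sketch of how the new flag could be enabled when building a config, assuming the option lands under the name shown above.

from transformers import ModernBertConfig, ModernBertForMaskedLM

# Sketch: keep the autograd graph on the re-padded logits. The default (False)
# wraps the re-padding in torch.no_grad() to save memory when labels are passed.
config = ModernBertConfig(repad_logits_with_grad=True)
model = ModernBertForMaskedLM(config)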
@@ -196,6 +200,7 @@ def __init__(
         sparse_prediction=False,
         sparse_pred_ignore_index=-100,
         reference_compile=None,
+        repad_logits_with_grad=False,
         **kwargs,
     ):
         super().__init__(
@@ -235,6 +240,7 @@ def __init__(
         self.sparse_prediction = sparse_prediction
         self.sparse_pred_ignore_index = sparse_pred_ignore_index
         self.reference_compile = reference_compile
+        self.repad_logits_with_grad = repad_logits_with_grad

         if self.classifier_pooling not in ["cls", "mean"]:
             raise ValueError(
@@ -852,12 +858,14 @@ def _autoset_attn_implementation(
     ):
         # If the user didn't specify anything, try to use flash_attention_2 if available.
         # Otherwise we fall back to the default SDPA -> Eager from the super() method.
+        # ModernBert's FA2 implementation correctly handles non-fp16/bf16 dtypes, we don't
+        # need the FA2 warning for non-fp16/bf16 dtypes so we set fp16 for the FA2 check.
         if config._attn_implementation_internal is None:
             config._attn_implementation_internal = "flash_attention_2"
             try:
                 return cls._check_and_enable_flash_attn_2(
                     config,
-                    torch_dtype=torch_dtype,
+                    torch_dtype=torch.float16,
                     device_map=device_map,
                     hard_check_only=False,
                     check_device_map=check_device_map,

Review comment (on the `torch_dtype=torch.float16` line): I think this is a cleaner solution to avoid the unnecessary FP32 warning than I figured was possible, nice.
@@ -867,7 +875,7 @@ def _autoset_attn_implementation(
         return super()._autoset_attn_implementation(
             config,
             use_flash_attention_2=use_flash_attention_2,
-            torch_dtype=torch_dtype,
+            torch_dtype=torch.float16,
             device_map=device_map,
             check_device_map=check_device_map,
         )
@@ -892,6 +900,13 @@ def _maybe_set_compile(self):
             )
             self.config.reference_compile = False

+        if self.device.type == "cpu":
+            logger.warning_once(
+                "Compiling the model with `torch.compile` and using a `torch.cpu` device is not supported. "
+                "Falling back to non-compiled mode."
+            )
+            self.config.reference_compile = False
+
         if self.config.reference_compile is None:
             self.config.reference_compile = is_triton_available()

Review comment (on the new CPU check): I think we should wrap this in […]. Unless you'd rather always give this warning so people aren't confused why there's a small precision-esque difference between GPU setups?
Reply: That was my intention, I forgot the if statement. Fixed in ab11657.
@@ -911,8 +926,8 @@ def resize_token_embeddings(self, *args, **kwargs):
 MODERNBERT_INPUTS_DOCSTRING = r"""
     Args:
         input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
-            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
-            it.
+            Indices of input sequence tokens in the vocabulary. With Flash Attention 2.0, padding will be ignored
+            by default should you provide it.

             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
             [`PreTrainedTokenizer.__call__`] for details.
@@ -941,7 +956,7 @@ def resize_token_embeddings(self, *args, **kwargs):
         sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
             Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
             perform global attention, while the rest perform local attention. This mask is used to avoid attending to
-            far-away tokens in the local attention layers.
+            far-away tokens in the local attention layers when not using Flash Attention.
         position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
             config.n_positions - 1]`.
@@ -952,11 +967,11 @@ def resize_token_embeddings(self, *args, **kwargs):
         cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
             Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
         max_seqlen (`int`, *optional*):
-            Maximum sequence length in the batch. Used to pad the output tensors.
+            Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
         batch_size (`int`, *optional*):
             Batch size of the input sequences. Used to pad the output tensors.
         seq_len (`int`, *optional*):
-            Sequence length of the input sequences. Used to pad the output tensors.
+            Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
         output_attentions (`bool`, *optional*):
             Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
             tensors for more detail.
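A small worked example (not from the PR) of how these unpadding arguments relate for a right-padded batch; the tensors below are only illustrative.

import torch

# Two sequences of lengths 3 and 5, right-padded to a common length of 6.
attention_mask = torch.tensor([[1, 1, 1, 0, 0, 0],
                               [1, 1, 1, 1, 1, 0]])
seqlens = attention_mask.sum(dim=-1)                             # tensor([3, 5])
cu_seqlens = torch.nn.functional.pad(seqlens.cumsum(0), (1, 0))  # tensor([0, 3, 8])
max_seqlen = int(seqlens.max())                                  # 5 (excludes padding)
batch_size, seq_len = attention_mask.shape                       # 2 and 6 (includes padding)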
@@ -1246,8 +1261,9 @@ def forward(
             loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size)

         if self.config._attn_implementation == "flash_attention_2":
-            with torch.no_grad():
+            with nullcontext() if self.config.repad_logits_with_grad or labels is None else torch.no_grad():
                 logits = _pad_modernbert_output(inputs=logits, indices=indices, batch=batch_size, seqlen=seq_len)

         if not return_dict:
             output = (logits,)
             return ((loss,) + output) if loss is not None else output
Review comment: Good call - I got carried away with the Python class naming
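For reference, a standalone sketch of the conditional context-manager pattern used in the hunk above; the repad function and the clone() call are stand-ins for _pad_modernbert_output, not the real implementation.

from contextlib import nullcontext

import torch

def repad(logits: torch.Tensor, keep_grad: bool) -> torch.Tensor:
    # nullcontext() keeps the autograd graph; torch.no_grad() drops it to save
    # memory, which is fine when the caller only needs the loss for backward.
    with nullcontext() if keep_grad else torch.no_grad():
        return logits.clone()  # stand-in for _pad_modernbert_output(...)

With the change in the diff, re-padding keeps the gradient whenever labels is None (the logits are then the only useful output) or when repad_logits_with_grad=True; otherwise it runs under torch.no_grad() as before.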