From e743cb04d09e6cdeaa1835fe8d8cf917f3efb946 Mon Sep 17 00:00:00 2001
From: Balaram Sarkar
Date: Thu, 18 Jan 2024 10:58:20 +0530
Subject: [PATCH 1/2] Update huffman_coder.py

bitarray doesn't support util anymore: util.rindex(a, 1) -> a.index(1, right=True)
---
 fairseq/data/huffman/huffman_coder.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fairseq/data/huffman/huffman_coder.py b/fairseq/data/huffman/huffman_coder.py
index c04f84564e..287f4b3010 100644
--- a/fairseq/data/huffman/huffman_coder.py
+++ b/fairseq/data/huffman/huffman_coder.py
@@ -8,7 +8,7 @@ from collections import Counter, deque
 from dataclasses import dataclass
 
-from bitarray import bitarray, util
+from bitarray import bitarray
 
 from fairseq.data import Dictionary
 
 # basically we have to write to addressable bytes for the memory mapped
@@ -43,7 +43,7 @@ def _unpad(self, a: bitarray) -> bitarray:
         """
         # count the 0 padding at the end until we find the first 1
         # we want to remove the one too
-        remove_cnt = util.rindex(a, 1)
+        remove_cnt = a.index(1, right=True)
         return a[:remove_cnt]
 
     def encode(self, iter: tp.List[str]) -> bytes:

From 0df4794b6192b9b844604c31edcd54cd1f36c1e2 Mon Sep 17 00:00:00 2001
From: Balaram Sarkar
Date: Thu, 18 Jan 2024 11:00:26 +0530
Subject: [PATCH 2/2] Update wav2vec2_classification.py

---
 fairseq/models/wav2vec/wav2vec2_classification.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fairseq/models/wav2vec/wav2vec2_classification.py b/fairseq/models/wav2vec/wav2vec2_classification.py
index c9bbaab28e..e976b07966 100644
--- a/fairseq/models/wav2vec/wav2vec2_classification.py
+++ b/fairseq/models/wav2vec/wav2vec2_classification.py
@@ -206,7 +206,7 @@ def __init__(
             if cfg.latent_embed_dim is not None
             else encoder_embed_dim
         )
-        logging.debug(f"| {self.latent_embed_dim=}")
+        logging.debug(f"| {self.latent_embed_dim}")
         self.linear = Linear(encoder_embed_dim, self.latent_embed_dim)
         self.projection = Linear(self.latent_embed_dim, num_targets)
 
@@ -345,4 +345,4 @@ def forward(self, last_layer_feats, padding_mask, all_layer_feats):
         weighted_avg_features = weighted_avg_features.view(*original_feat_shape)
 
         # Mean Pooling on weighted average features.
-        return super().forward(weighted_avg_features, padding_mask)
\ No newline at end of file
+        return super().forward(weighted_avg_features, padding_mask)