diff --git a/.github/workflows/fbgemm_gpu_lint.yml b/.github/workflows/fbgemm_gpu_lint.yml
index 3dccceacc..75e06355b 100644
--- a/.github/workflows/fbgemm_gpu_lint.yml
+++ b/.github/workflows/fbgemm_gpu_lint.yml
@@ -35,7 +35,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: [ "3.11" ]
+        python-version: [ "3.12" ]
 
     steps:
     - name: Checkout the Repository
diff --git a/fbgemm_gpu/bench/split_table_batched_embeddings_benchmark.py b/fbgemm_gpu/bench/split_table_batched_embeddings_benchmark.py
index 177c79508..207fa350b 100644
--- a/fbgemm_gpu/bench/split_table_batched_embeddings_benchmark.py
+++ b/fbgemm_gpu/bench/split_table_batched_embeddings_benchmark.py
@@ -835,7 +835,7 @@ def cache(  # noqa C901
     param_size_multiplier = weights_precision.bit_rate() / 8.0
     logging.info(
         f"Embedding tables: {E * T} rows, {nparams / 1.0e9: .2f} GParam, "
-        f"{nparams * param_size_multiplier/1.0e9: .2f} GB"
+        f"{nparams * param_size_multiplier / 1.0e9: .2f} GB"
     )
     logging.info(
         f"Accessed weights per batch: {B * T * L} rows, "
@@ -889,11 +889,11 @@ def cache(  # noqa C901
         cache_misses.append((emb.lxu_cache_locations_list[0] == NOT_FOUND).sum().item())
         emb.forward(indices.long(), offsets.long())
     logging.info(
-        f"Exchanged cache lines -- mean: {sum(exchanged_cache_lines)/len(requests): .2f}, "
+        f"Exchanged cache lines -- mean: {sum(exchanged_cache_lines) / len(requests): .2f}, "
         f"max: {max(exchanged_cache_lines)}, min: {min(exchanged_cache_lines)}"
     )
     logging.info(
-        f"Cache miss -- mean: {sum(cache_misses)/len(requests)}, "
+        f"Cache miss -- mean: {sum(cache_misses) / len(requests)}, "
         f"max: {max(cache_misses)}, min: {min(cache_misses)}"
     )
 
@@ -2386,24 +2386,24 @@ def nbit_cache(  # noqa C901
         input_indices.append(len(indices))
 
     logging.info(
-        f"Exchanged cache lines -- mean: {sum(exchanged_cache_lines)/len(requests): .2f}, "
+        f"Exchanged cache lines -- mean: {sum(exchanged_cache_lines) / len(requests): .2f}, "
         f"max: {max(exchanged_cache_lines)}, min: {min(exchanged_cache_lines)}"
     )
     logging.info(
-        f"Cache miss -- mean: {sum(cache_misses)/len(requests)}, "
+        f"Cache miss -- mean: {sum(cache_misses) / len(requests)}, "
         f"max: {max(cache_misses)}, min: {min(cache_misses)}"
     )
     logging.info(
-        f"input_indices -- mean: {sum(input_indices)/len(requests)}, "
+        f"input_indices -- mean: {sum(input_indices) / len(requests)}, "
         f"max: {max(input_indices)}, min: {min(input_indices)}"
     )
     logging.info(
-        f"unique_indices -- mean: {sum(unique_indices)/len(requests)}, "
+        f"unique_indices -- mean: {sum(unique_indices) / len(requests)}, "
         f"max: {max(unique_indices)}, min: {min(unique_indices)}"
     )
     unique_miss_rate = [a / b for (a, b) in zip(exchanged_cache_lines, unique_indices)]
     logging.info(
-        f"unique_miss_rate -- mean: {sum(unique_miss_rate)/len(requests)}, "
+        f"unique_miss_rate -- mean: {sum(unique_miss_rate) / len(requests)}, "
         f"max: {max(unique_miss_rate)}, min: {min(unique_miss_rate)}"
     )
     if record_cache_miss_counter or record_tablewise_cache_miss:
diff --git a/fbgemm_gpu/bench/ssd_table_batched_embeddings_benchmark.py b/fbgemm_gpu/bench/ssd_table_batched_embeddings_benchmark.py
index 25540c190..a35fcdddf 100644
--- a/fbgemm_gpu/bench/ssd_table_batched_embeddings_benchmark.py
+++ b/fbgemm_gpu/bench/ssd_table_batched_embeddings_benchmark.py
@@ -149,7 +149,7 @@ def benchmark_read_write(
     gibps_wr = byte_seconds_per_ns / (write_lat_ns * 2**30)
     gibps_tot = 2 * byte_seconds_per_ns / ((read_lat_ns + write_lat_ns) * 2**30)
     logging.info(
-        f"Total bytes: {total_bytes/1e9:0.2f} GB, "
+        f"Total bytes: {total_bytes / 1e9:0.2f} GB, "
         f"Read_us: {read_lat_ns / 1000:8.0f}, "
         f"Write_us: {write_lat_ns / 1000:8.0f}, "
         f"Total_us: {(read_lat_ns + write_lat_ns) / 1000:8.0f}, "
@@ -389,7 +389,7 @@ def gen_split_tbe_generator(
         + param_size_multiplier * B * sum(Ds) * L
     )
 
-    logging.info(f"Batch read write bytes: {read_write_bytes/1.0e9: .2f} GB")
+    logging.info(f"Batch read write bytes: {read_write_bytes / 1.0e9: .2f} GB")
 
     # Compute width of test name and bandwidth widths to improve report
     # readability
diff --git a/fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops_inference.py b/fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops_inference.py
index d988563ae..f1671d29d 100644
--- a/fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops_inference.py
+++ b/fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops_inference.py
@@ -531,10 +531,10 @@ def print_cache_miss_counter(self) -> None:
             f"Miss counter value [3] - # of total requested indices : {self.cache_miss_counter[3]}, "
         )
         logging.info(
-            f"unique_miss_rate using counter : {self.cache_miss_counter[1]/self.cache_miss_counter[2]}, \n"
+            f"unique_miss_rate using counter : {self.cache_miss_counter[1] / self.cache_miss_counter[2]}, \n"
         )
         logging.info(
-            f"total_miss_rate using counter : {self.cache_miss_counter[1]/self.cache_miss_counter[3]}, \n"
+            f"total_miss_rate using counter : {self.cache_miss_counter[1] / self.cache_miss_counter[3]}, \n"
         )
 
     def get_uvm_cache_stats(self) -> Tensor:
@@ -558,8 +558,8 @@ def print_uvm_cache_stats(self) -> None:
         )
         if uvm_cache_stats[1]:
             logging.info(
-                f"unique indices / requested indices: {uvm_cache_stats[2]/uvm_cache_stats[1]}\n"
-                f"unique misses / requested indices: {uvm_cache_stats[3]/uvm_cache_stats[1]}\n"
+                f"unique indices / requested indices: {uvm_cache_stats[2] / uvm_cache_stats[1]}\n"
+                f"unique misses / requested indices: {uvm_cache_stats[3] / uvm_cache_stats[1]}\n"
             )
 
     @torch.jit.export
diff --git a/fbgemm_gpu/test/quantize/fp8_rowwise_test.py b/fbgemm_gpu/test/quantize/fp8_rowwise_test.py
index a5989d9b1..b3cc06a13 100644
--- a/fbgemm_gpu/test/quantize/fp8_rowwise_test.py
+++ b/fbgemm_gpu/test/quantize/fp8_rowwise_test.py
@@ -225,9 +225,9 @@ def test_quantize_and_dequantize_op_padded_fp8_rowwise(
             logging.info(f"qref {torch.gather(qref, dim=1, index=idx)}")
             logging.info(f"dqcat {torch.gather(dqcat, dim=1, index=idx)}")
             logging.info(
-                f"relative error: max: {errors.abs().max()*100:.1f}%, "
-                f"median: {errors.abs().median()*100:.1f}%, "
-                f"mean: {errors.abs().mean()*100:.1f}%"
+                f"relative error: max: {errors.abs().max() * 100:.1f}%, "
+                f"median: {errors.abs().median() * 100:.1f}%, "
+                f"mean: {errors.abs().mean() * 100:.1f}%"
            )
         torch.testing.assert_allclose(dqcat, qref, rtol=0.1, atol=0.05)
 
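
Note on the pattern: every Python hunk above is the same mechanical fix, adding PEP 8 whitespace around binary operators inside f-string replacement fields. This pairs with the lint workflow moving to Python 3.12, where PEP 701 makes the expressions inside f-strings ordinary tokens, so tokenizer-based lint tooling presumably starts checking them. A minimal standalone sketch of the two spellings (the variable and value here are made up for illustration; they are not from the patch):

# Hypothetical illustration -- not part of the diff above.
total_bytes = 3_141_592_653

old_style = f"Total bytes: {total_bytes/1e9:0.2f} GB"    # pre-patch spelling
new_style = f"Total bytes: {total_bytes / 1e9:0.2f} GB"  # post-patch spelling

# The change is purely cosmetic: both strings render identically.
assert old_style == new_style
print(new_style)  # Total bytes: 3.14 GB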