diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8b101207d5ff..e6b1b0442c04 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -16,7 +16,7 @@ repos:
       - id: auto-walrus

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.3.4
+    rev: v0.3.5
     hooks:
       - id: ruff
       - id: ruff-format
diff --git a/audio_filters/butterworth_filter.py b/audio_filters/butterworth_filter.py
index 6449bc3f3dce..4e6ea1b18fb4 100644
--- a/audio_filters/butterworth_filter.py
+++ b/audio_filters/butterworth_filter.py
@@ -13,7 +13,7 @@
 def make_lowpass(
     frequency: int,
     samplerate: int,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a low-pass filter
@@ -43,7 +43,7 @@ def make_lowpass(
 def make_highpass(
     frequency: int,
     samplerate: int,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a high-pass filter
@@ -73,7 +73,7 @@ def make_highpass(
 def make_bandpass(
     frequency: int,
     samplerate: int,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a band-pass filter
@@ -104,7 +104,7 @@ def make_bandpass(
 def make_allpass(
     frequency: int,
     samplerate: int,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates an all-pass filter
@@ -132,7 +132,7 @@ def make_peak(
     frequency: int,
     samplerate: int,
     gain_db: float,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a peak filter
@@ -164,7 +164,7 @@ def make_lowshelf(
     frequency: int,
     samplerate: int,
     gain_db: float,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a low-shelf filter
@@ -201,7 +201,7 @@ def make_highshelf(
     frequency: int,
     samplerate: int,
     gain_db: float,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a high-shelf filter
diff --git a/data_structures/binary_tree/basic_binary_tree.py b/data_structures/binary_tree/basic_binary_tree.py
index 0439413d95b5..9d4c1bdbb57a 100644
--- a/data_structures/binary_tree/basic_binary_tree.py
+++ b/data_structures/binary_tree/basic_binary_tree.py
@@ -85,7 +85,7 @@ def depth(self) -> int:
         """
         return self._depth(self.root)

-    def _depth(self, node: Node | None) -> int:  # noqa: UP007
+    def _depth(self, node: Node | None) -> int:
         if not node:
             return 0
         return 1 + max(self._depth(node.left), self._depth(node.right))
diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py
index 42c78a3a1be0..45c476701d79 100644
--- a/data_structures/binary_tree/non_recursive_segment_tree.py
+++ b/data_structures/binary_tree/non_recursive_segment_tree.py
@@ -87,7 +87,7 @@ def update(self, p: int, v: T) -> None:
             p = p // 2
             self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

-    def query(self, l: int, r: int) -> T | None:  # noqa: E741
+    def query(self, l: int, r: int) -> T | None:
         """
         Get range query value in log(N) time
         :param l: left element index
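
The `# noqa: B008` removals in the audio-filter hunks are safe because ruff's B008 (function call in default argument) is about defaults whose call could produce shared mutable state, while `1 / sqrt(2)` is an immutable float evaluated exactly once, when the `def` statement runs. A minimal, self-contained sketch of that behaviour; `make_filter_stub` is an illustrative name, not a function from the repository:

from math import sqrt


def make_filter_stub(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> float:
    # The default was computed once at definition time; every call that omits
    # q_factor sees the same immutable float (about 0.7071), so nothing is shared.
    return frequency / (samplerate * q_factor)


print(make_filter_stub(1_000, 48_000))       # uses the 1/sqrt(2) default
print(make_filter_stub(1_000, 48_000, 0.5))  # an explicit Q overrides it
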
diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py
index bdd808c828e0..e68d8d1e3735 100644
--- a/data_structures/binary_tree/red_black_tree.py
+++ b/data_structures/binary_tree/red_black_tree.py
@@ -152,7 +152,7 @@ def _insert_repair(self) -> None:
                 self.grandparent.color = 1
                 self.grandparent._insert_repair()

-    def remove(self, label: int) -> RedBlackTree:  # noqa: PLR0912
+    def remove(self, label: int) -> RedBlackTree:
         """Remove label from this tree."""
         if self.label == label:
             if self.left and self.right:
diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py
index 3b0b32946f6e..bb9c1ae2268b 100644
--- a/data_structures/binary_tree/segment_tree.py
+++ b/data_structures/binary_tree/segment_tree.py
@@ -35,7 +35,7 @@ def right(self, idx):
         """
         return idx * 2 + 1

-    def build(self, idx, l, r):  # noqa: E741
+    def build(self, idx, l, r):
         if l == r:
             self.st[idx] = self.A[l]
         else:
@@ -56,7 +56,7 @@ def update(self, a, b, val):
         """
         return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)

-    def update_recursive(self, idx, l, r, a, b, val):  # noqa: E741
+    def update_recursive(self, idx, l, r, a, b, val):
         """
         update(1, 1, N, a, b, v) for update val v to [a,b]
         """
@@ -83,7 +83,7 @@ def query(self, a, b):
         """
         return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)

-    def query_recursive(self, idx, l, r, a, b):  # noqa: E741
+    def query_recursive(self, idx, l, r, a, b):
         """
         query(1, 1, N, a, b) for query max of [a,b]
         """
diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py
index ecb1876493b0..39f6d99e8a4c 100644
--- a/data_structures/heap/min_heap.py
+++ b/data_structures/heap/min_heap.py
@@ -66,7 +66,7 @@ def build_heap(self, array):
     # this is min-heapify method
     def sift_down(self, idx, array):
         while True:
-            l = self.get_left_child_idx(idx)  # noqa: E741
+            l = self.get_left_child_idx(idx)
             r = self.get_right_child_idx(idx)

             smallest = idx
diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py
index 178b4169b213..22f50a166ae4 100644
--- a/dynamic_programming/longest_common_subsequence.py
+++ b/dynamic_programming/longest_common_subsequence.py
@@ -38,7 +38,7 @@ def longest_common_subsequence(x: str, y: str):
     n = len(y)

     # declaring the array for storing the dp values
-    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
+    l = [[0] * (n + 1) for _ in range(m + 1)]

     for i in range(1, m + 1):
         for j in range(1, n + 1):
diff --git a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py
index 5e11d729f395..44e333e97779 100644
--- a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py
+++ b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py
@@ -7,13 +7,13 @@
 from __future__ import annotations


-def ceil_index(v, l, r, key):  # noqa: E741
+def ceil_index(v, l, r, key):
     while r - l > 1:
         m = (l + r) // 2
         if v[m] >= key:
             r = m
         else:
-            l = m  # noqa: E741
+            l = m

     return r
diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py
index d28045282425..3fcaffd73725 100644
--- a/graphs/articulation_points.py
+++ b/graphs/articulation_points.py
@@ -1,5 +1,5 @@
 # Finding Articulation Points in Undirected Graph
-def compute_ap(l):  # noqa: E741
+def compute_ap(l):
     n = len(l)
     out_edge_count = 0
     low = [0] * n
diff --git a/graphs/dinic.py b/graphs/dinic.py
index aaf3a119525c..4f5e81236984 100644
--- a/graphs/dinic.py
+++ b/graphs/dinic.py
@@ -37,7 +37,7 @@ def depth_first_search(self, vertex, sink, flow):
     # Here we calculate the flow that reaches the sink
     def max_flow(self, source, sink):
         flow, self.q[0] = 0, source
-        for l in range(31):  # noqa: E741  l = 30 maybe faster for random data
+        for l in range(31):  # l = 30 maybe faster for random data
             while True:
                 self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q)
                 qi, qe, self.lvl[source] = 0, 1, 1
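
Every `# noqa: E741` dropped in this group sits on the single-letter name `l`, which the rule flags as easily confused with `1`; the code keeps the name and only sheds directives that no longer suppress anything. As a reading aid for the `l`/`r` bound parameters that the segment-tree hunks pass around, here is a small self-contained sketch of the same max-query recursion with spelled-out names; it is a stand-in, not the repository's `SegmentTree` class:

def build(tree, data, idx, left, right):
    # tree[idx] ends up holding max(data[left..right]); the tree root is index 1.
    if left == right:
        tree[idx] = data[left]
        return
    mid = (left + right) // 2
    build(tree, data, 2 * idx, left, mid)
    build(tree, data, 2 * idx + 1, mid + 1, right)
    tree[idx] = max(tree[2 * idx], tree[2 * idx + 1])


def query(tree, idx, left, right, a, b):
    # Max of data[a..b]; segments that miss [a, b] entirely contribute -inf.
    if b < left or right < a:
        return float("-inf")
    if a <= left and right <= b:
        return tree[idx]
    mid = (left + right) // 2
    return max(query(tree, 2 * idx, left, mid, a, b),
               query(tree, 2 * idx + 1, mid + 1, right, a, b))


data = [2, 7, 1, 8, 2, 8]
tree = [0] * (4 * len(data))
build(tree, data, 1, 0, len(data) - 1)
print(query(tree, 1, 0, len(data) - 1, 1, 4))  # max of data[1..4] -> 8
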
diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py
index 4c0b58deb978..45da35813978 100644
--- a/linear_algebra/src/conjugate_gradient.py
+++ b/linear_algebra/src/conjugate_gradient.py
@@ -61,7 +61,8 @@ def _create_spd_matrix(dimension: int) -> Any:
     >>> _is_matrix_spd(spd_matrix)
     True
     """
-    random_matrix = np.random.randn(dimension, dimension)
+    rng = np.random.default_rng()
+    random_matrix = rng.normal(size=(dimension, dimension))
     spd_matrix = np.dot(random_matrix, random_matrix.T)
     assert _is_matrix_spd(spd_matrix)
     return spd_matrix
@@ -157,7 +158,8 @@ def test_conjugate_gradient() -> None:
     # Create linear system with SPD matrix and known solution x_true.
     dimension = 3
     spd_matrix = _create_spd_matrix(dimension)
-    x_true = np.random.randn(dimension, 1)
+    rng = np.random.default_rng()
+    x_true = rng.normal(size=(dimension, 1))
     b = np.dot(spd_matrix, x_true)

     # Numpy solution.
diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py
index 7f129919a3ce..e48905eeac6a 100644
--- a/machine_learning/decision_tree.py
+++ b/machine_learning/decision_tree.py
@@ -187,7 +187,8 @@ def main():
     tree = DecisionTree(depth=10, min_leaf_size=10)
     tree.train(x, y)

-    test_cases = (np.random.rand(10) * 2) - 1
+    rng = np.random.default_rng()
+    test_cases = (rng.random(10) * 2) - 1
     predictions = np.array([tree.predict(x) for x in test_cases])
     avg_error = np.mean((predictions - test_cases) ** 2)

diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py
index 9f6646944458..a926362fc18b 100644
--- a/machine_learning/k_means_clust.py
+++ b/machine_learning/k_means_clust.py
@@ -55,12 +55,12 @@ def get_initial_centroids(data, k, seed=None):
     """Randomly choose k data points as initial centroids"""
-    if seed is not None:  # useful for obtaining consistent results
-        np.random.seed(seed)
+    # useful for obtaining consistent results
+    rng = np.random.default_rng(seed)
     n = data.shape[0]  # number of data points

     # Pick K indices from range [0, N).
-    rand_indices = np.random.randint(0, n, k)
+    rand_indices = rng.integers(0, n, k)

     # Keep centroids as dense format, as many entries will be nonzero due to averaging.
     # As long as at least one document in a cluster contains a word,
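
The three NumPy hunks above (and those that follow) are all the same NPY002 migration: module-level legacy calls are replaced with an explicit `numpy.random.Generator`. A short side-by-side sketch of the call mappings this patch relies on; the seed 42 is only for illustration:

import numpy as np

rng = np.random.default_rng(42)        # replaces np.random.seed(42) plus the global state

normal_mat = rng.normal(size=(3, 3))   # was np.random.randn(3, 3)
uniform_vec = rng.random(10)           # was np.random.rand(10)
indices = rng.integers(0, 100, 5)      # was np.random.randint(0, 100, 5)
pick = rng.choice(7)                   # was np.random.choice(7): one draw from range(7)
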
diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py
index be16baca1a4c..408d59ab5d29 100644
--- a/machine_learning/sequential_minimum_optimization.py
+++ b/machine_learning/sequential_minimum_optimization.py
@@ -289,12 +289,13 @@ def _choose_a2(self, i1):
             if cmd is None:
                 return

-        for i2 in np.roll(self.unbound, np.random.choice(self.length)):
+        rng = np.random.default_rng()
+        for i2 in np.roll(self.unbound, rng.choice(self.length)):
             cmd = yield i1, i2
             if cmd is None:
                 return

-        for i2 in np.roll(self._all_samples, np.random.choice(self.length)):
+        for i2 in np.roll(self._all_samples, rng.choice(self.length)):
             cmd = yield i1, i2
             if cmd is None:
                 return
diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py
index 7e0bdbbe2857..6131a13e945e 100644
--- a/neural_network/back_propagation_neural_network.py
+++ b/neural_network/back_propagation_neural_network.py
@@ -51,8 +51,9 @@ def __init__(
         self.is_input_layer = is_input_layer

     def initializer(self, back_units):
-        self.weight = np.asmatrix(np.random.normal(0, 0.5, (self.units, back_units)))
-        self.bias = np.asmatrix(np.random.normal(0, 0.5, self.units)).T
+        rng = np.random.default_rng()
+        self.weight = np.asmatrix(rng.normal(0, 0.5, (self.units, back_units)))
+        self.bias = np.asmatrix(rng.normal(0, 0.5, self.units)).T
         if self.activation is None:
             self.activation = sigmoid

@@ -174,7 +175,8 @@ def plot_loss(self):


 def example():
-    x = np.random.randn(10, 10)
+    rng = np.random.default_rng()
+    x = rng.normal(size=(10, 10))
     y = np.asarray(
         [
             [0.8, 0.4],
diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py
index 07cc456b7466..3c551924442d 100644
--- a/neural_network/convolution_neural_network.py
+++ b/neural_network/convolution_neural_network.py
@@ -41,15 +41,16 @@ def __init__(
         self.size_pooling1 = size_p1
         self.rate_weight = rate_w
         self.rate_thre = rate_t
+        rng = np.random.default_rng()
         self.w_conv1 = [
-            np.asmatrix(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
+            np.asmatrix(-1 * rng.random((self.conv1[0], self.conv1[0])) + 0.5)
             for i in range(self.conv1[1])
         ]
-        self.wkj = np.asmatrix(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
-        self.vji = np.asmatrix(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
-        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
-        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
-        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
+        self.wkj = np.asmatrix(-1 * rng.random((self.num_bp3, self.num_bp2)) + 0.5)
+        self.vji = np.asmatrix(-1 * rng.random((self.num_bp2, self.num_bp1)) + 0.5)
+        self.thre_conv1 = -2 * rng.random(self.conv1[1]) + 1
+        self.thre_bp2 = -2 * rng.random(self.num_bp2) + 1
+        self.thre_bp3 = -2 * rng.random(self.num_bp3) + 1

     def save_model(self, save_path):
         # save model dict with pickle
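
A behavioural note on the three refactors above: each call site now builds its own unseeded `default_rng()`, so runs stay non-deterministic, just as the legacy global state was when nobody called `np.random.seed`. Where repeatable initialisation matters, a seed or an injected Generator can be threaded through; `init_weights` and its `rng` parameter below are illustrative, not part of the patch:

import numpy as np


def init_weights(units, back_units, rng=None):
    # Default to a fresh, unseeded generator, mirroring the patched code;
    # passing rng=np.random.default_rng(0) makes the initialisation repeatable.
    if rng is None:
        rng = np.random.default_rng()
    weight = rng.normal(0, 0.5, (units, back_units))
    bias = rng.normal(0, 0.5, units)
    return weight, bias


w1, b1 = init_weights(4, 3, rng=np.random.default_rng(0))
w2, b2 = init_weights(4, 3, rng=np.random.default_rng(0))
assert np.array_equal(w1, w2)  # same seed, same weights
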
TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype) @@ -211,7 +211,7 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: perm0 = np.arange(self._num_examples) - np.random.shuffle(perm0) + self._rng.shuffle(perm0) self._images = self.images[perm0] self._labels = self.labels[perm0] # Go to the next epoch @@ -225,7 +225,7 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): # Shuffle the data if shuffle: perm = np.arange(self._num_examples) - np.random.shuffle(perm) + self._rng.shuffle(perm) self._images = self.images[perm] self._labels = self.labels[perm] # Start next epoch diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index dea7e2342d9f..d488de590cc2 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -28,19 +28,20 @@ def __init__(self, input_array: np.ndarray, output_array: np.ndarray) -> None: # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. - self.input_layer_and_first_hidden_layer_weights = np.random.rand( - self.input_array.shape[1], 4 + rng = np.random.default_rng() + self.input_layer_and_first_hidden_layer_weights = rng.random( + (self.input_array.shape[1], 4) ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. - self.first_hidden_layer_and_second_hidden_layer_weights = np.random.rand(4, 3) + self.first_hidden_layer_and_second_hidden_layer_weights = rng.random((4, 3)) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. - self.second_hidden_layer_and_output_layer_weights = np.random.rand(3, 1) + self.second_hidden_layer_and_output_layer_weights = rng.random((3, 1)) # Real output values provided. self.output_array = output_array diff --git a/other/sdes.py b/other/sdes.py index 31105984b9bb..a69add3430c3 100644 --- a/other/sdes.py +++ b/other/sdes.py @@ -44,9 +44,9 @@ def function(expansion, s0, s1, key, message): right = message[4:] temp = apply_table(right, expansion) temp = xor(temp, key) - l = apply_sbox(s0, temp[:4]) # noqa: E741 + l = apply_sbox(s0, temp[:4]) r = apply_sbox(s1, temp[4:]) - l = "0" * (2 - len(l)) + l # noqa: E741 + l = "0" * (2 - len(l)) + l r = "0" * (2 - len(r)) + r temp = apply_table(l + r, p4_table) temp = xor(left, temp) diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 9ea0db991aaf..2958305331a9 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -35,7 +35,7 @@ def solution(): 70600674 """ with open(os.path.dirname(__file__) + "/grid.txt") as f: - l = [] # noqa: E741 + l = [] for _ in range(20): l.append([int(x) for x in f.readline().split()]) diff --git a/pyproject.toml b/pyproject.toml index 22da7cb777b5..50cd38005f09 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
diff --git a/pyproject.toml b/pyproject.toml
index 22da7cb777b5..50cd38005f09 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,6 @@ lint.ignore = [  # `ruff rule S101` for a description of that rule
   "EXE001",  # Shebang is present but file is not executable" -- FIX ME
   "G004",  # Logging statement uses f-string
   "INP001",  # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. -- FIX ME
-  "NPY002",  # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME
   "PGH003",  # Use specific rule codes when ignoring type issues -- FIX ME
   "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey
   "PLW060",  # Using global for `{name}` but no assignment is done -- DO NOT FIX
@@ -15,7 +14,6 @@ lint.ignore = [  # `ruff rule S101` for a description of that rule
   "PT011",   # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception
   "PT018",   # Assertion should be broken down into multiple parts
   "RUF00",   # Ambiguous unicode character and other rules
-  "RUF100",  # Unused `noqa` directive -- FIX ME
   "S101",    # Use of `assert` detected -- DO NOT FIX
   "S105",    # Possible hardcoded password: 'password'
   "S113",    # Probable use of requests call without timeout -- FIX ME
diff --git a/strings/manacher.py b/strings/manacher.py
index c58c7c19ec44..ca546e533acd 100644
--- a/strings/manacher.py
+++ b/strings/manacher.py
@@ -50,7 +50,7 @@ def palindromic_string(input_string: str) -> str:
         # does this string is ending after the previously explored end (that is r) ?
         # if yes the update the new r to the last index of this
         if j + k - 1 > r:
-            l = j - k + 1  # noqa: E741
+            l = j - k + 1
             r = j + k - 1

         # update max_length and start position
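
With "NPY002" and "RUF100" removed from `lint.ignore`, ruff now reports legacy `np.random` calls and `# noqa` directives that suppress nothing, which is what makes every deletion in this patch necessary rather than cosmetic. A tiny illustration of what each re-enabled rule would flag; it assumes E741 itself stays ignored project-wide, as the unchanged `l` variables in the hunks above suggest:

import numpy as np

# NPY002 now fires on the legacy module-level API; the fix used throughout this
# patch is an explicit generator, e.g. rng = np.random.default_rng(); rng.random(3).
values = np.random.rand(3)

# RUF100 now fires on directives that suppress nothing, such as a noqa for a rule
# that is already ignored everywhere, which is exactly the kind removed above.
l = [1, 2, 3]  # noqa: E741
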