[pull] master from TheAlgorithms:master #22

Merged
merged 3 commits into master from TheAlgorithms:master on Apr 1, 2024
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -16,7 +16,7 @@ repos:
       - id: auto-walrus
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.3.4
+    rev: v0.3.5
     hooks:
       - id: ruff
       - id: ruff-format
14 changes: 7 additions & 7 deletions audio_filters/butterworth_filter.py
@@ -13,7 +13,7 @@
 def make_lowpass(
     frequency: int,
     samplerate: int,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a low-pass filter
@@ -43,7 +43,7 @@ def make_lowpass(
 def make_highpass(
     frequency: int,
     samplerate: int,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a high-pass filter
@@ -73,7 +73,7 @@ def make_highpass(
 def make_bandpass(
     frequency: int,
     samplerate: int,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a band-pass filter
@@ -104,7 +104,7 @@ def make_bandpass(
 def make_allpass(
     frequency: int,
     samplerate: int,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates an all-pass filter
@@ -132,7 +132,7 @@ def make_peak(
     frequency: int,
     samplerate: int,
     gain_db: float,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a peak filter
@@ -164,7 +164,7 @@ def make_lowshelf(
     frequency: int,
     samplerate: int,
     gain_db: float,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a low-shelf filter
@@ -201,7 +201,7 @@ def make_highshelf(
     frequency: int,
     samplerate: int,
     gain_db: float,
-    q_factor: float = 1 / sqrt(2),  # noqa: B008
+    q_factor: float = 1 / sqrt(2),
 ) -> IIRFilter:
     """
     Creates a high-shelf filter
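
A note on the `# noqa: B008` removals above: ruff's B008 rule warns when a function call appears in a parameter default, because the call runs once at definition time rather than on every invocation. For a pure constant expression like 1 / sqrt(2), that once-only evaluation is harmless, so the updated toolchain no longer needs a suppression here. A minimal Python sketch contrasting the safe pattern with the one B008 exists to catch (function names are illustrative, not from this PR):

    from math import sqrt
    from time import time

    def make_filter(q_factor: float = 1 / sqrt(2)) -> float:
        # Safe: sqrt(2) is pure, so evaluating the default once at
        # definition time yields the same constant for every call.
        return q_factor

    def log_event(stamp: float = time()) -> float:
        # Unsafe: time() also runs only once, so every call silently
        # reuses the definition-time timestamp.
        return stamp

    assert make_filter() == 1 / sqrt(2)
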
2 changes: 1 addition & 1 deletion data_structures/binary_tree/basic_binary_tree.py
@@ -85,7 +85,7 @@ def depth(self) -> int:
         """
         return self._depth(self.root)
 
-    def _depth(self, node: Node | None) -> int:  # noqa: UP007
+    def _depth(self, node: Node | None) -> int:
         if not node:
             return 0
         return 1 + max(self._depth(node.left), self._depth(node.right))
2 changes: 1 addition & 1 deletion data_structures/binary_tree/non_recursive_segment_tree.py
@@ -87,7 +87,7 @@ def update(self, p: int, v: T) -> None:
             p = p // 2
             self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
 
-    def query(self, l: int, r: int) -> T | None:  # noqa: E741
+    def query(self, l: int, r: int) -> T | None:
         """
         Get range query value in log(N) time
         :param l: left element index
2 changes: 1 addition & 1 deletion data_structures/binary_tree/red_black_tree.py
@@ -152,7 +152,7 @@ def _insert_repair(self) -> None:
             self.grandparent.color = 1
             self.grandparent._insert_repair()
 
-    def remove(self, label: int) -> RedBlackTree:  # noqa: PLR0912
+    def remove(self, label: int) -> RedBlackTree:
         """Remove label from this tree."""
         if self.label == label:
             if self.left and self.right:
6 changes: 3 additions & 3 deletions data_structures/binary_tree/segment_tree.py
@@ -35,7 +35,7 @@ def right(self, idx):
         """
         return idx * 2 + 1
 
-    def build(self, idx, l, r):  # noqa: E741
+    def build(self, idx, l, r):
         if l == r:
             self.st[idx] = self.A[l]
         else:
@@ -56,7 +56,7 @@ def update(self, a, b, val):
         """
         return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)
 
-    def update_recursive(self, idx, l, r, a, b, val):  # noqa: E741
+    def update_recursive(self, idx, l, r, a, b, val):
         """
         update(1, 1, N, a, b, v) for update val v to [a,b]
         """
@@ -83,7 +83,7 @@ def query(self, a, b):
         """
         return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)
 
-    def query_recursive(self, idx, l, r, a, b):  # noqa: E741
+    def query_recursive(self, idx, l, r, a, b):
        """
         query(1, 1, N, a, b) for query max of [a,b]
         """
2 changes: 1 addition & 1 deletion data_structures/heap/min_heap.py
@@ -66,7 +66,7 @@ def build_heap(self, array):
     # this is min-heapify method
     def sift_down(self, idx, array):
         while True:
-            l = self.get_left_child_idx(idx)  # noqa: E741
+            l = self.get_left_child_idx(idx)
             r = self.get_right_child_idx(idx)
 
             smallest = idx
2 changes: 1 addition & 1 deletion dynamic_programming/longest_common_subsequence.py
@@ -38,7 +38,7 @@ def longest_common_subsequence(x: str, y: str):
     n = len(y)
 
     # declaring the array for storing the dp values
-    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
+    l = [[0] * (n + 1) for _ in range(m + 1)]
 
     for i in range(1, m + 1):
         for j in range(1, n + 1):
4 changes: 2 additions & 2 deletions dynamic_programming/longest_increasing_subsequence_o_nlogn.py
@@ -7,13 +7,13 @@
 from __future__ import annotations
 
 
-def ceil_index(v, l, r, key):  # noqa: E741
+def ceil_index(v, l, r, key):
     while r - l > 1:
         m = (l + r) // 2
         if v[m] >= key:
             r = m
         else:
-            l = m  # noqa: E741
+            l = m
     return r
 
 
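
These `# noqa: E741` deletions (here and in several files below) pair with the pyproject.toml change at the end of this diff: once RUF100 (unused `noqa` directive) stops being ignored, ruff flags suppressions that no longer match an emitted error, and deleting them is the cheapest fix. E741 itself discourages the single-letter names l, I, and O because they are easily misread as digits; a hedged sketch of the rename that would satisfy the rule outright, based on this file's ceil_index (the left/right names are a suggestion, not part of the PR):

    def ceil_index(v: list[int], left: int, right: int, key: int) -> int:
        # Same bisection as above, with E741-safe names instead of l/r.
        while right - left > 1:
            middle = (left + right) // 2
            if v[middle] >= key:
                right = middle
            else:
                left = middle
        return right

    assert ceil_index([2, 5, 8, 12], -1, 3, 6) == 2  # v[2] == 8 is first >= 6
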
2 changes: 1 addition & 1 deletion graphs/articulation_points.py
@@ -1,5 +1,5 @@
 # Finding Articulation Points in Undirected Graph
-def compute_ap(l):  # noqa: E741
+def compute_ap(l):
     n = len(l)
     out_edge_count = 0
     low = [0] * n
2 changes: 1 addition & 1 deletion graphs/dinic.py
@@ -37,7 +37,7 @@ def depth_first_search(self, vertex, sink, flow):
     # Here we calculate the flow that reaches the sink
     def max_flow(self, source, sink):
         flow, self.q[0] = 0, source
-        for l in range(31):  # noqa: E741  l = 30 maybe faster for random data
+        for l in range(31):  # l = 30 maybe faster for random data
             while True:
                 self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q)
                 qi, qe, self.lvl[source] = 0, 1, 1
6 changes: 4 additions & 2 deletions linear_algebra/src/conjugate_gradient.py
@@ -61,7 +61,8 @@ def _create_spd_matrix(dimension: int) -> Any:
     >>> _is_matrix_spd(spd_matrix)
     True
     """
-    random_matrix = np.random.randn(dimension, dimension)
+    rng = np.random.default_rng()
+    random_matrix = rng.normal(size=(dimension, dimension))
     spd_matrix = np.dot(random_matrix, random_matrix.T)
     assert _is_matrix_spd(spd_matrix)
     return spd_matrix
@@ -157,7 +158,8 @@ def test_conjugate_gradient() -> None:
     # Create linear system with SPD matrix and known solution x_true.
     dimension = 3
     spd_matrix = _create_spd_matrix(dimension)
-    x_true = np.random.randn(dimension, 1)
+    rng = np.random.default_rng()
+    x_true = rng.normal(size=(dimension, 1))
     b = np.dot(spd_matrix, x_true)
 
     # Numpy solution.
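
This file starts the PR's main theme: replacing NumPy's legacy global-state random API with the np.random.Generator API, which is what ruff's NPY002 rule asks for (the pyproject.toml change near the end of this diff removes NPY002 from the ignore list). A minimal sketch of the correspondence; note that the two APIs draw from different streams, so even identical seeds do not reproduce the same numbers across the two styles:

    import numpy as np

    # Legacy API (flagged by NPY002): seeds and draws share hidden global state.
    np.random.seed(42)
    legacy = np.random.randn(3, 2)  # standard-normal draws

    # Generator API used throughout this PR: an explicit, local stream.
    rng = np.random.default_rng(42)
    modern = rng.normal(size=(3, 2))  # standard normal, shape via size=

    assert legacy.shape == modern.shape == (3, 2)
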
3 changes: 2 additions & 1 deletion machine_learning/decision_tree.py
@@ -187,7 +187,8 @@ def main():
     tree = DecisionTree(depth=10, min_leaf_size=10)
     tree.train(x, y)
 
-    test_cases = (np.random.rand(10) * 2) - 1
+    rng = np.random.default_rng()
+    test_cases = (rng.random(10) * 2) - 1
     predictions = np.array([tree.predict(x) for x in test_cases])
     avg_error = np.mean((predictions - test_cases) ** 2)
 
6 changes: 3 additions & 3 deletions machine_learning/k_means_clust.py
@@ -55,12 +55,12 @@
 
 def get_initial_centroids(data, k, seed=None):
     """Randomly choose k data points as initial centroids"""
-    if seed is not None:  # useful for obtaining consistent results
-        np.random.seed(seed)
+    # useful for obtaining consistent results
+    rng = np.random.default_rng(seed)
     n = data.shape[0]  # number of data points
 
     # Pick K indices from range [0, N).
-    rand_indices = np.random.randint(0, n, k)
+    rand_indices = rng.integers(0, n, k)
 
     # Keep centroids as dense format, as many entries will be nonzero due to averaging.
     # As long as at least one document in a cluster contains a word,
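
A detail worth noting in the get_initial_centroids rewrite: np.random.default_rng(seed) accepts None, so the old `if seed is not None: np.random.seed(seed)` guard collapses to a single line; unseeded calls still get fresh entropy while seeded calls stay reproducible. A small self-contained sketch (the data and the pick_rows helper are illustrative):

    import numpy as np

    def pick_rows(data: np.ndarray, k: int, seed=None) -> np.ndarray:
        # seed=None -> OS entropy; an int -> a reproducible stream.
        rng = np.random.default_rng(seed)
        return data[rng.integers(0, data.shape[0], k)]

    data = np.arange(50).reshape(10, 5)  # 10 points, 5 features
    assert np.array_equal(pick_rows(data, 3, seed=7), pick_rows(data, 3, seed=7))
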
5 changes: 3 additions & 2 deletions machine_learning/sequential_minimum_optimization.py
@@ -289,12 +289,13 @@ def _choose_a2(self, i1):
         if cmd is None:
             return
 
-        for i2 in np.roll(self.unbound, np.random.choice(self.length)):
+        rng = np.random.default_rng()
+        for i2 in np.roll(self.unbound, rng.choice(self.length)):
             cmd = yield i1, i2
             if cmd is None:
                 return
 
-        for i2 in np.roll(self._all_samples, np.random.choice(self.length)):
+        for i2 in np.roll(self._all_samples, rng.choice(self.length)):
             cmd = yield i1, i2
             if cmd is None:
                 return
8 changes: 5 additions & 3 deletions neural_network/back_propagation_neural_network.py
@@ -51,8 +51,9 @@ def __init__(
         self.is_input_layer = is_input_layer
 
     def initializer(self, back_units):
-        self.weight = np.asmatrix(np.random.normal(0, 0.5, (self.units, back_units)))
-        self.bias = np.asmatrix(np.random.normal(0, 0.5, self.units)).T
+        rng = np.random.default_rng()
+        self.weight = np.asmatrix(rng.normal(0, 0.5, (self.units, back_units)))
+        self.bias = np.asmatrix(rng.normal(0, 0.5, self.units)).T
         if self.activation is None:
             self.activation = sigmoid
 
@@ -174,7 +175,8 @@ def plot_loss(self):
 
 
 def example():
-    x = np.random.randn(10, 10)
+    rng = np.random.default_rng()
+    x = rng.normal(size=(10, 10))
     y = np.asarray(
         [
             [0.8, 0.4],
13 changes: 7 additions & 6 deletions neural_network/convolution_neural_network.py
@@ -41,15 +41,16 @@ def __init__(
         self.size_pooling1 = size_p1
         self.rate_weight = rate_w
         self.rate_thre = rate_t
+        rng = np.random.default_rng()
         self.w_conv1 = [
-            np.asmatrix(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
+            np.asmatrix(-1 * rng.random((self.conv1[0], self.conv1[0])) + 0.5)
             for i in range(self.conv1[1])
         ]
-        self.wkj = np.asmatrix(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
-        self.vji = np.asmatrix(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
-        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
-        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
-        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
+        self.wkj = np.asmatrix(-1 * rng.random((self.num_bp3, self.num_bp2)) + 0.5)
+        self.vji = np.asmatrix(-1 * rng.random((self.num_bp2, self.num_bp1)) + 0.5)
+        self.thre_conv1 = -2 * rng.random(self.conv1[1]) + 1
+        self.thre_bp2 = -2 * rng.random(self.num_bp2) + 1
+        self.thre_bp3 = -2 * rng.random(self.num_bp3) + 1
 
     def save_model(self, save_path):
         # save model dict with pickle
6 changes: 3 additions & 3 deletions neural_network/input_data.py
@@ -153,7 +153,7 @@ def __init__(
         """
         seed1, seed2 = random_seed.get_seed(seed)
         # If op level seed is not set, use whatever graph level seed is returned
-        np.random.seed(seed1 if seed is None else seed2)
+        self._rng = np.random.default_rng(seed1 if seed is None else seed2)
         dtype = dtypes.as_dtype(dtype).base_dtype
         if dtype not in (dtypes.uint8, dtypes.float32):
             raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
@@ -211,7 +211,7 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True):
         # Shuffle for the first epoch
         if self._epochs_completed == 0 and start == 0 and shuffle:
             perm0 = np.arange(self._num_examples)
-            np.random.shuffle(perm0)
+            self._rng.shuffle(perm0)
             self._images = self.images[perm0]
             self._labels = self.labels[perm0]
         # Go to the next epoch
@@ -225,7 +225,7 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True):
         # Shuffle the data
         if shuffle:
             perm = np.arange(self._num_examples)
-            np.random.shuffle(perm)
+            self._rng.shuffle(perm)
             self._images = self.images[perm]
             self._labels = self.labels[perm]
         # Start next epoch
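
Unlike the per-call generators elsewhere in this PR, input_data.py stores the generator on the instance (self._rng), so a seeded dataset shuffles deterministically across epochs without mutating NumPy's global state. A stripped-down sketch of that idea (MiniDataSet is illustrative, not the PR's actual _DataSet class):

    import numpy as np

    class MiniDataSet:
        def __init__(self, images: np.ndarray, seed: int | None = None) -> None:
            self._images = images
            self._rng = np.random.default_rng(seed)  # one stream per instance

        def shuffled_epoch(self) -> np.ndarray:
            perm = np.arange(len(self._images))
            self._rng.shuffle(perm)  # advances only this instance's stream
            return self._images[perm]

    a = MiniDataSet(np.arange(6), seed=0)
    b = MiniDataSet(np.arange(6), seed=0)
    assert np.array_equal(a.shuffled_epoch(), b.shuffled_epoch())
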
9 changes: 5 additions & 4 deletions neural_network/two_hidden_layers_neural_network.py
@@ -28,19 +28,20 @@ def __init__(self, input_array: np.ndarray, output_array: np.ndarray) -> None:
         # Random initial weights are assigned.
         # self.input_array.shape[1] is used to represent number of nodes in input layer.
         # First hidden layer consists of 4 nodes.
-        self.input_layer_and_first_hidden_layer_weights = np.random.rand(
-            self.input_array.shape[1], 4
+        rng = np.random.default_rng()
+        self.input_layer_and_first_hidden_layer_weights = rng.random(
+            (self.input_array.shape[1], 4)
         )
 
         # Random initial values for the first hidden layer.
         # First hidden layer has 4 nodes.
         # Second hidden layer has 3 nodes.
-        self.first_hidden_layer_and_second_hidden_layer_weights = np.random.rand(4, 3)
+        self.first_hidden_layer_and_second_hidden_layer_weights = rng.random((4, 3))
 
         # Random initial values for the second hidden layer.
         # Second hidden layer has 3 nodes.
         # Output layer has 1 node.
-        self.second_hidden_layer_and_output_layer_weights = np.random.rand(3, 1)
+        self.second_hidden_layer_and_output_layer_weights = rng.random((3, 1))
 
         # Real output values provided.
         self.output_array = output_array
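
The extra parentheses introduced in this file are not noise: legacy np.random.rand takes each dimension as a separate positional argument, while Generator.random takes a single shape tuple, so every converted call wraps its dimensions in a tuple. A quick sketch of the signature difference:

    import numpy as np

    rng = np.random.default_rng()

    legacy = np.random.rand(3, 4)  # dimensions as separate arguments
    modern = rng.random((3, 4))    # a single shape tuple
    assert legacy.shape == modern.shape == (3, 4)

    # rng.random(3, 4) would raise a TypeError: Generator.random's second
    # positional argument is the dtype, not another dimension.
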
4 changes: 2 additions & 2 deletions other/sdes.py
@@ -44,9 +44,9 @@ def function(expansion, s0, s1, key, message):
     right = message[4:]
     temp = apply_table(right, expansion)
     temp = xor(temp, key)
-    l = apply_sbox(s0, temp[:4])  # noqa: E741
+    l = apply_sbox(s0, temp[:4])
     r = apply_sbox(s1, temp[4:])
-    l = "0" * (2 - len(l)) + l  # noqa: E741
+    l = "0" * (2 - len(l)) + l
     r = "0" * (2 - len(r)) + r
     temp = apply_table(l + r, p4_table)
     temp = xor(left, temp)
2 changes: 1 addition & 1 deletion project_euler/problem_011/sol2.py
@@ -35,7 +35,7 @@ def solution():
     70600674
     """
     with open(os.path.dirname(__file__) + "/grid.txt") as f:
-        l = []  # noqa: E741
+        l = []
         for _ in range(20):
             l.append([int(x) for x in f.readline().split()])
 
2 changes: 0 additions & 2 deletions pyproject.toml
@@ -7,15 +7,13 @@ lint.ignore = [  # `ruff rule S101` for a description of that rule
   "EXE001",  # Shebang is present but file is not executable -- FIX ME
   "G004",  # Logging statement uses f-string
   "INP001",  # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. -- FIX ME
-  "NPY002",  # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME
   "PGH003",  # Use specific rule codes when ignoring type issues -- FIX ME
   "PLC1901",  # `{}` can be simplified to `{}` as an empty string is falsey
   "PLW060",  # Using global for `{name}` but no assignment is done -- DO NOT FIX
   "PLW2901",  # PLW2901: Redefined loop variable -- FIX ME
   "PT011",  # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception
   "PT018",  # Assertion should be broken down into multiple parts
   "RUF00",  # Ambiguous unicode character and other rules
-  "RUF100",  # Unused `noqa` directive -- FIX ME
   "S101",  # Use of `assert` detected -- DO NOT FIX
   "S105",  # Possible hardcoded password: 'password'
   "S113",  # Probable use of requests call without timeout -- FIX ME
2 changes: 1 addition & 1 deletion strings/manacher.py
@@ -50,7 +50,7 @@ def palindromic_string(input_string: str) -> str:
         # does this string is ending after the previously explored end (that is r) ?
         # if yes the update the new r to the last index of this
         if j + k - 1 > r:
-            l = j - k + 1  # noqa: E741
+            l = j - k + 1
             r = j + k - 1
 
         # update max_length and start position