
Commit c1e4a74

Add leaky_relu and softmax functions (TheAlgorithms#558)
1 parent 767e34c commit c1e4a74

3 files changed: +107 −0 lines

src/math/leaky_relu.rs

Lines changed: 47 additions & 0 deletions

@@ -0,0 +1,47 @@
+//! # Leaky ReLU Function
+//!
+//! The `leaky_relu` function computes the Leaky Rectified Linear Unit (ReLU) values of a given vector
+//! of f64 numbers with a specified alpha parameter.
+//!
+//! The Leaky ReLU activation function is commonly used in neural networks to introduce a small negative
+//! slope (controlled by the alpha parameter) for the negative input values, preventing neurons from dying
+//! during training.
+//!
+//! ## Formula
+//!
+//! For a given input vector `x` and an alpha parameter `alpha`, the Leaky ReLU function computes the output
+//! `y` as follows:
+//!
+//! `y_i = { x_i if x_i >= 0, alpha * x_i if x_i < 0 }`
+//!
+//! ## Leaky ReLU Function Implementation
+//!
+//! This implementation takes a reference to a vector of f64 values and an alpha parameter, and returns a new
+//! vector with the Leaky ReLU transformation applied to each element. The input vector is not altered.
+//!
+pub fn leaky_relu(vector: &Vec<f64>, alpha: f64) -> Vec<f64> {
+    let mut _vector = vector.to_owned();
+
+    for value in &mut _vector {
+        if value < &mut 0. {
+            *value *= alpha;
+        }
+    }
+
+    _vector
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_leaky_relu() {
+        let test_vector = vec![-10., 2., -3., 4., -5., 10., 0.05];
+        let alpha = 0.01;
+        assert_eq!(
+            leaky_relu(&test_vector, alpha),
+            vec![-0.1, 2.0, -0.03, 4.0, -0.05, 10.0, 0.05]
+        );
+    }
+}
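
Not part of this commit, but a minimal usage sketch of the piecewise rule documented above, written in the same test-module style. It assumes it is compiled inside this crate so that the `crate::math::leaky_relu` re-export (added in src/math/mod.rs below) resolves; the module and test names are hypothetical.

#[cfg(test)]
mod leaky_relu_usage_sketch {
    use crate::math::leaky_relu;

    #[test]
    fn scales_negatives_and_passes_positives_through() {
        let input = vec![-2.0, 3.0];
        let alpha = 0.5;
        // Piecewise rule: y_i = x_i for x_i >= 0, y_i = alpha * x_i for x_i < 0.
        assert_eq!(leaky_relu(&input, alpha), vec![-1.0, 3.0]);
        // The input vector is only borrowed and stays unchanged.
        assert_eq!(input, vec![-2.0, 3.0]);
    }
}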

src/math/mod.rs

Lines changed: 4 additions & 0 deletions

@@ -24,6 +24,7 @@ mod interest;
 mod interpolation;
 mod karatsuba_multiplication;
 mod lcm_of_n_numbers;
+mod leaky_relu;
 mod linear_sieve;
 mod matrix_ops;
 mod mersenne_primes;
@@ -44,6 +45,7 @@ mod sigmoid;
 mod signum;
 mod simpson_integration;
 mod sine;
+mod softmax;
 mod square_pyramidal_numbers;
 mod square_root;
 mod sum_of_digits;
@@ -83,6 +85,7 @@ pub use self::interest::{compound_interest, simple_interest};
 pub use self::interpolation::{lagrange_polynomial_interpolation, linear_interpolation};
 pub use self::karatsuba_multiplication::multiply;
 pub use self::lcm_of_n_numbers::lcm;
+pub use self::leaky_relu::leaky_relu;
 pub use self::linear_sieve::LinearSieve;
 pub use self::matrix_ops::Matrix;
 pub use self::mersenne_primes::{get_mersenne_primes, is_mersenne_prime};
@@ -103,6 +106,7 @@ pub use self::sigmoid::sigmoid;
 pub use self::signum::signum;
 pub use self::simpson_integration::simpson_integration;
 pub use self::sine::sine;
+pub use self::softmax::softmax;
 pub use self::square_pyramidal_numbers::square_pyramidal_number;
 pub use self::square_root::{fast_inv_sqrt, square_root};
 pub use self::sum_of_digits::{sum_digits_iterative, sum_digits_recursive};

src/math/softmax.rs

Lines changed: 56 additions & 0 deletions

@@ -0,0 +1,56 @@
+//! # Softmax Function
+//!
+//! The `softmax` function computes the softmax values of a given array of f32 numbers.
+//!
+//! The softmax operation is often used in machine learning for converting a vector of real numbers into a
+//! probability distribution. It exponentiates each element in the input array, and then normalizes the
+//! results so that they sum to 1.
+//!
+//! ## Formula
+//!
+//! For a given input array `x`, the softmax function computes the output `y` as follows:
+//!
+//! `y_i = e^(x_i) / sum(e^(x_j) for all j)`
+//!
+//! ## Softmax Function Implementation
+//!
+//! This implementation uses the `std::f32::consts::E` constant as the base of the exponential function and
+//! f32 vectors to compute the values. The function returns a new vector and does not alter the input vector.
+//!
+use std::f32::consts::E;
+
+pub fn softmax(array: Vec<f32>) -> Vec<f32> {
+    let mut softmax_array = array.clone();
+
+    for value in &mut softmax_array {
+        *value = E.powf(*value);
+    }
+
+    let sum: f32 = softmax_array.iter().sum();
+
+    for value in &mut softmax_array {
+        *value /= sum;
+    }
+
+    softmax_array
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_softmax() {
+        let test = vec![9.0, 0.5, -3.0, 0.0, 3.0];
+        assert_eq!(
            softmax(test),
+            vec![
+                0.9971961,
+                0.00020289792,
+                6.126987e-6,
+                0.00012306382,
+                0.0024718025
+            ]
+        );
+    }
+}
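
Not part of this commit: a minimal usage sketch in the same test-module style, illustrating the normalization described in the doc comment. It assumes it is compiled inside this crate so the `crate::math::softmax` re-export resolves; the module and test names are hypothetical.

#[cfg(test)]
mod softmax_usage_sketch {
    use crate::math::softmax;

    #[test]
    fn outputs_form_a_probability_distribution() {
        // y_i = e^(x_i) / sum(e^(x_j)): every output is positive and the
        // outputs sum to 1 (up to f32 rounding error).
        let probs = softmax(vec![1.0_f32, 2.0, 3.0]);
        assert!(probs.iter().all(|&p| p > 0.0));
        let total: f32 = probs.iter().sum();
        assert!((total - 1.0).abs() < 1e-6);
        // Note that `softmax` takes its input by value, so the original
        // vector is moved into the call.
    }
}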
