Write Gradient Descent / Activation Function from scratch

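Gradient descent minimizes a differentiable function by repeatedly stepping against the gradient: `x = x - step_size * f'(x)`. The snippet below runs it on the one-dimensional objective f(x) = x^2, whose derivative is 2x, starting from a random point inside the given bounds.
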
```python
# example of gradient descent for a one-dimensional function
from numpy import asarray
from numpy.random import rand
 
# objective function
def objective(x):
	return x**2.0
 
# derivative of objective function
def derivative(x):
	return x * 2.0
 
# gradient descent algorithm
def gradient_descent(objective, derivative, bounds, n_iter, step_size):
	# generate an initial point
	solution = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])
	
	# run the gradient descent
	for i in range(n_iter):
		# calculate gradient
		gradient = derivative(solution)
		# take a step
		solution = solution - step_size * gradient
		# evaluate candidate point
		solution_eval = objective(solution)
		# report progress
		print('>%d f(%s) = %.5f' % (i, solution, solution_eval))
	return [solution, solution_eval]
 
# define range for input
bounds = asarray([[-1.0, 1.0]])
# define the total iterations
n_iter = 30
# define the step size
step_size = 0.1
# perform the gradient descent search
best, score = gradient_descent(objective, derivative, bounds, n_iter, step_size)
print('Done!')
print('f(%s) = %f' % (best, score))
```
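
A quick sanity check for a hand-written derivative is to compare it against a central finite difference. This is a minimal sketch; the helper name `check_gradient` and the `eps` value are illustrative, not part of the original snippet:

```python
# central difference: f'(x) ≈ (f(x + eps) - f(x - eps)) / (2 * eps)
def check_gradient(objective, derivative, x, eps=1e-6):
    numeric = (objective(x + eps) - objective(x - eps)) / (2.0 * eps)
    return abs(numeric - derivative(x))

# should print a value close to zero for f(x) = x**2
print(check_gradient(objective, derivative, 0.5))
```

The second half of the exercise is the activation functions. The method below belongs to a feed-forward layer class (`FFL`) whose `activation` attribute names the function to apply; a minimal class wrapper and the numpy import are added so the snippet runs on its own: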

```python
import numpy as np

class FFL:
    def __init__(self, activation=None):
        self.activation = activation

    def activation_fn(self, x):
        """
        A method of FFL which contains the operation and definition of the
        given activation function.
        """
        if self.activation == 'relu':
            # np.maximum avoids mutating the caller's array in place
            return np.maximum(0, x)
        if self.activation is None or self.activation == 'linear':
            return x
        if self.activation == 'tanh':
            return np.tanh(x)
        if self.activation == 'sigmoid':
            return 1 / (1 + np.exp(-x))
        if self.activation == 'softmax':
            # subtract the max before exponentiating for numerical stability
            x = x - np.max(x)
            s = np.exp(x)
            return s / np.sum(s)
        raise ValueError('unknown activation: %s' % self.activation)
```
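
For backpropagation each activation also needs its derivative. Below is a minimal standalone sketch of those derivatives; the function name `activation_dfn` is an assumption (not part of the note above), and softmax is omitted because its Jacobian is usually folded into the cross-entropy gradient:

```python
import numpy as np

def activation_dfn(activation, x):
    # element-wise derivative of each activation, evaluated at x
    if activation == 'relu':
        return (x > 0).astype(float)
    if activation is None or activation == 'linear':
        return np.ones_like(x, dtype=float)
    if activation == 'tanh':
        return 1 - np.tanh(x) ** 2
    if activation == 'sigmoid':
        s = 1 / (1 + np.exp(-x))
        return s * (1 - s)
    raise ValueError('unknown activation: %s' % activation)

# sigmoid'(0) = 0.25
print(activation_dfn('sigmoid', np.array([0.0])))
```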