You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

105 lines
3.0 KiB
Python

import numpy as np
from csv import reader
from random import seed
from random import randrange
def load_csv(filename, skip=False):
    """Read a numeric CSV file into a list of rows of floats.

    Blank rows are dropped. When ``skip`` is True the first (header) row
    is discarded before parsing. Every remaining cell must parse as float.
    """
    with open(filename, 'r', newline='') as handle:
        rows = reader(handle)
        if skip:
            next(rows)  # discard the header line
        return [[float(cell) for cell in row] for row in rows if row]
# Ordered (non-shuffled) train/test split of a labeled dataset.
def train_test_split(dataset, split):
    """Split ``dataset`` into (X_train, y_train, X_test, y_test).

    The first ``int(split * len(dataset))`` rows become the training set,
    the remainder the test set — rows are taken in order, never shuffled.
    The last column of each row is the label; the rest are features.
    """
    cut = int(split * len(dataset))
    features = [row[:-1] for row in dataset]
    labels = [row[-1] for row in dataset]
    return features[:cut], labels[:cut], features[cut:], labels[cut:]
# Parameter container for a single-layer perceptron.
class Perceptron:
    """Holds the perceptron's weight vector, bias, learning rate and epoch count."""

    def __init__(self, input_size, bias, learning_rate, epochs):
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.bias = bias
        # One weight per input feature, all initialised to zero.
        self.weights = np.zeros(input_size)
def activation_function(x):
    """Heaviside step: 1 for non-negative input, 0 otherwise."""
    return int(x >= 0)
def predict(inputs, weights, bias):
    """Classify one sample: step activation over the affine score w·x + b."""
    score = np.dot(inputs, weights) + bias
    # Step activation inlined: fire (1) when the score is non-negative.
    return 1 if score >= 0 else 0
def train(X_train, y_train, learning_rate, epochs, weights, bias):
    """Train a perceptron with the classic learning rule.

    Args:
        X_train: list of feature vectors.
        y_train: list of 0/1 targets, aligned with X_train.
        learning_rate: step size applied to each weight/bias update.
        epochs: number of full passes over the training data.
        weights: initial weight vector (not modified in place).
        bias: initial bias (scalar).

    Returns:
        (weights, bias) — the trained weight array and bias.
    """
    # Copy to a fresh float array: the original `weights +=` mutated the
    # caller's array in place (e.g. perceptron.weights was clobbered).
    weights = np.array(weights, dtype=float)
    for _ in range(epochs):
        for inputs, target in zip(X_train, y_train):
            # Forward pass: step activation over the weighted sum.
            weighted_sum = np.dot(inputs, weights) + bias
            prediction = 1 if weighted_sum >= 0 else 0
            # Perceptron update rule: move weights toward the target.
            error = target - prediction
            weights += learning_rate * error * np.array(inputs)
            bias += learning_rate * error
    return weights, bias
def perceptron_accuracy(y, y_hat):
    """Return the percentage (0-100) of predictions matching the true labels.

    An empty label list yields 0.0 instead of raising ZeroDivisionError.
    """
    if not y:
        return 0.0
    correct = sum(1 for true, pred in zip(y, y_hat) if true == pred)
    return correct / len(y) * 100
# Implement the neural network
# Set the seed for reproducibility
seed(1)

# Load the csv file (header row skipped)
filename = 'moons.csv'
dataset = load_csv(filename, skip=True)

# Configure the perceptron with the bias, learning rate and epochs
custom_split = 0.8
custom_bias = 0
custom_learning_rate = 0.1
custom_epochs = 10000

# Split the dataset for both training and testing
X_train, y_train, X_test, y_test = train_test_split(dataset, split=custom_split)
perceptron = Perceptron(input_size=2, bias=custom_bias, learning_rate=custom_learning_rate, epochs=custom_epochs)

# Training
weights, bias = train(X_train, y_train, perceptron.learning_rate, perceptron.epochs, perceptron.weights, perceptron.bias)

# Testing: predict each held-out sample and record it for scoring
y_hat = []
for i in range(len(X_test)):
    prediction = predict(X_test[i], weights, bias)
    y_hat.append(prediction)
    print(f"Input: {X_test[i]}, Predicted: {prediction}, Actual: {y_test[i]}")

# Test for Accuracy — the original computed this but discarded the result
accuracy = perceptron_accuracy(y_test, y_hat)
print(f"Accuracy: {accuracy:.2f}%")