Commit

Merge pull request lcompilers#1751 from Shaikh-Ubaid/regression_pkg
PKG: Add linear regression package
certik committed May 4, 2023
2 parents 269345b + 6111d46 commit d2bf0f1
Showing 11 changed files with 252 additions and 46 deletions.
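
This merge moves the min-max normalization helpers out of lnn.perceptron into a new lnn.utils module (adding a normalize_output_vector helper), introduces an lnn.regression package that trains a single-neuron linear model with an identity activation, replaces the test_pkg_lnn test target with test_pkg_lnn_01 plus a new test_pkg_lnn_02, and reworks the lpdraw Line routine (with a matching update to the test_pkg_lpdraw checksum).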
integration_tests/CMakeLists.txt (2 additions, 1 deletion)
@@ -491,7 +491,8 @@ RUN(NAME str_to_list_cast LABELS cpython llvm c)

RUN(NAME test_package_01 LABELS cpython llvm)
RUN(NAME test_pkg_lpdraw LABELS cpython llvm wasm)
RUN(NAME test_pkg_lnn LABELS cpython llvm)
RUN(NAME test_pkg_lnn_01 LABELS cpython llvm)
RUN(NAME test_pkg_lnn_02 LABELS cpython llvm)

RUN(NAME generics_01 LABELS cpython llvm c)
RUN(NAME generics_02 LABELS cpython llvm c)
integration_tests/lnn/perceptron/__init__.py (1 addition, 1 deletion)
@@ -1 +1 @@
from .perceptron_main import init_perceptron, train_dataset, test_perceptron, normalize_input_vectors, print_perceptron, Perceptron
from .perceptron_main import init_perceptron, train_dataset, test_perceptron, print_perceptron, Perceptron
integration_tests/lnn/perceptron/perceptron_main.py (31 deletions)
@@ -11,37 +11,6 @@ class Perceptron:
    cur_accuracy: f64
    epochs_cnt: i32

def normalize(value: f64, leftMin: f64, leftMax: f64, rightMin: f64, rightMax: f64) -> f64:
    # Figure out how 'wide' each range is
    leftSpan: f64 = leftMax - leftMin
    rightSpan: f64 = rightMax - rightMin

    # Convert the left range into a 0-1 range (float)
    valueScaled: f64 = (value - leftMin) / leftSpan

    # Convert the 0-1 range into a value in the right range.
    return rightMin + (valueScaled * rightSpan)

def normalize_input_vectors(input_vectors: list[list[f64]]):
    rows: i32 = len(input_vectors)
    cols: i32 = len(input_vectors[0])

    j: i32
    for j in range(cols):
        colMinVal: f64 = input_vectors[0][j]
        colMaxVal: f64 = input_vectors[0][j]
        i: i32
        for i in range(rows):
            if input_vectors[i][j] > colMaxVal:
                colMaxVal = input_vectors[i][j]
            if input_vectors[i][j] < colMinVal:
                colMinVal = input_vectors[i][j]

        for i in range(rows):
            input_vectors[i][j] = normalize(input_vectors[i][j], colMinVal, colMaxVal, -1.0, 1.0)



def get_inp_vec_with_bias(a: list[f64]) -> list[f64]:
    b: list[f64] = []
    i: i32
integration_tests/lnn/regression/__init__.py (1 addition)
@@ -0,0 +1 @@
from .regression_main import init_perceptron, train_dataset, test_perceptron, print_perceptron, Perceptron
integration_tests/lnn/regression/regression_main.py (92 additions)
@@ -0,0 +1,92 @@
from lpython import dataclass, i32, f64
from sys import exit

@dataclass
class Perceptron:
    no_of_inputs: i32
    weights: list[f64]
    learn_rate: f64
    iterations_limit: i32
    err_limit: f64
    err: f64
    epochs_cnt: i32

def get_inp_vec_with_bias(a: list[f64]) -> list[f64]:
    b: list[f64] = []
    i: i32
    for i in range(len(a)):
        b.append(a[i])
    b.append(1.0)
    return b

def init_weights(size: i32) -> list[f64]:
    weights: list[f64] = []
    i: i32
    for i in range(size):
        weights.append(0.0)
    weights.append(0.0) # append bias
    return weights

def init_perceptron(p: Perceptron, n: i32, rate: f64, iterations_limit: i32, err_limit: f64):
    p.no_of_inputs = n
    p.weights = init_weights(n)
    p.learn_rate = rate
    p.iterations_limit = iterations_limit
    p.err_limit = err_limit
    p.err = 1.0
    p.epochs_cnt = 0

def train_perceptron(p: Perceptron, input_vector: list[f64], actual_output: f64):
    predicted_output: f64 = predict_perceptron(p, input_vector)
    error: f64 = actual_output - predicted_output
    i: i32
    for i in range(len(input_vector)):
        p.weights[i] += p.learn_rate * f64(error) * f64(input_vector[i])

def predict_perceptron(p: Perceptron, input_vector: list[f64]) -> f64:
    weighted_sum: f64 = 0.0
    i: i32 = 0
    for i in range(len(input_vector)):
        weighted_sum = weighted_sum + p.weights[i] * f64(input_vector[i])
    return activation_function(weighted_sum)

def activation_function(value: f64) -> f64:
    return value

def train_epoch(p: Perceptron, input_vectors: list[list[f64]], outputs: list[f64]):
    i: i32
    for i in range(len(input_vectors)):
        input_vector: list[f64] = get_inp_vec_with_bias(input_vectors[i])
        if predict_perceptron(p, input_vector) != outputs[i]:
            train_perceptron(p, input_vector, outputs[i])

def train_dataset(p: Perceptron, input_vectors: list[list[f64]], outputs: list[f64]):
    prev_err: f64 = 0.0
    p.err = 1.0
    p.epochs_cnt = 0
    while abs(p.err - prev_err) >= p.err_limit and p.epochs_cnt < p.iterations_limit:
        p.epochs_cnt += 1
        train_epoch(p, input_vectors, outputs)
        prev_err = p.err
        p.err = test_perceptron(p, input_vectors, outputs)

def test_perceptron(p: Perceptron, input_vectors: list[list[f64]], outputs: list[f64]) -> f64:
    err: f64 = 0.0
    i: i32
    for i in range(len(input_vectors)):
        input_vector: list[f64] = get_inp_vec_with_bias(input_vectors[i])
        err = err + (outputs[i] - predict_perceptron(p, input_vector)) ** 2.0
    return err

def print_perceptron(p: Perceptron):
    print("weights = [", end = "")
    i: i32
    for i in range(p.no_of_inputs):
        print(p.weights[i], end = ", ")
    print(p.weights[p.no_of_inputs], end = "(bias)]\n")
    print("learn_rate = ", end = "")
    print(p.learn_rate)
    print("error = ", end = "")
    print(p.err)
    print("epochs_cnt = ", end = "")
    print(p.epochs_cnt)
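
The trainer above is the classic delta rule with an identity activation: each weight moves by learn_rate * error * input, with a constant 1.0 input standing in for the bias. A minimal plain-Python sketch of the same update loop (illustrative only, with made-up data; it is not part of the package and drops the LPython type annotations):

```python
# Delta-rule update for a one-feature linear model, mirroring
# predict_perceptron/train_perceptron above (plain-Python sketch).
def train_once(weights, rate, x, target):
    x = x + [1.0]                                         # bias input, as in get_inp_vec_with_bias
    predicted = sum(w * xi for w, xi in zip(weights, x))  # identity activation
    error = target - predicted
    return [w + rate * error * xi for w, xi in zip(weights, x)]

weights = [0.0, 0.0]                                      # one feature weight + bias
for _ in range(2000):
    for x, y in [([0.0], 1.0), ([1.0], 3.0), ([2.0], 5.0)]:  # points on y = 2x + 1
        weights = train_once(weights, 0.05, x, y)
print(weights)                                            # approaches [2.0, 1.0]
```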
integration_tests/lnn/utils/__init__.py (1 addition)
@@ -0,0 +1 @@
from .utils_main import normalize, normalize_input_vectors, normalize_output_vector
integration_tests/lnn/utils/utils_main.py (44 additions)
@@ -0,0 +1,44 @@
from lpython import i32, f64

def normalize(value: f64, leftMin: f64, leftMax: f64, rightMin: f64, rightMax: f64) -> f64:
    # Figure out how 'wide' each range is
    leftSpan: f64 = leftMax - leftMin
    rightSpan: f64 = rightMax - rightMin

    # Convert the left range into a 0-1 range (float)
    valueScaled: f64 = (value - leftMin) / leftSpan

    # Convert the 0-1 range into a value in the right range.
    return rightMin + (valueScaled * rightSpan)

def normalize_input_vectors(input_vectors: list[list[f64]]):
    rows: i32 = len(input_vectors)
    cols: i32 = len(input_vectors[0])

    j: i32
    for j in range(cols):
        colMinVal: f64 = input_vectors[0][j]
        colMaxVal: f64 = input_vectors[0][j]
        i: i32
        for i in range(rows):
            if input_vectors[i][j] > colMaxVal:
                colMaxVal = input_vectors[i][j]
            if input_vectors[i][j] < colMinVal:
                colMinVal = input_vectors[i][j]

        for i in range(rows):
            input_vectors[i][j] = normalize(input_vectors[i][j], colMinVal, colMaxVal, -1.0, 1.0)

def normalize_output_vector(output_vector: list[f64]):
    rows: i32 = len(output_vector)
    colMinVal: f64 = output_vector[0]
    colMaxVal: f64 = output_vector[0]
    i: i32
    for i in range(rows):
        if output_vector[i] > colMaxVal:
            colMaxVal = output_vector[i]
        if output_vector[i] < colMinVal:
            colMinVal = output_vector[i]

    for i in range(rows):
        output_vector[i] = normalize(output_vector[i], colMinVal, colMaxVal, -1.0, 1.0)
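
normalize above is a plain min-max rescale: a value v in [leftMin, leftMax] maps to rightMin + (v - leftMin) / (leftMax - leftMin) * (rightMax - rightMin). A quick worked check with illustrative numbers (standard Python, not part of the package):

```python
# Min-max rescale, same formula as lnn.utils.normalize (illustrative copy).
def rescale(value, left_min, left_max, right_min, right_max):
    value_scaled = (value - left_min) / (left_max - left_min)
    return right_min + value_scaled * (right_max - right_min)

assert rescale(0.0, 0.0, 10.0, -1.0, 1.0) == -1.0  # left endpoint maps to right_min
assert rescale(10.0, 0.0, 10.0, -1.0, 1.0) == 1.0  # right endpoint maps to right_max
assert rescale(7.5, 0.0, 10.0, -1.0, 1.0) == 0.5   # 75% of the way across maps to 0.5
```

Note that normalize_input_vectors rescales each column independently, so a column whose minimum equals its maximum would divide by zero.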
integration_tests/lpdraw/draw.py (14 additions, 7 deletions)
@@ -6,7 +6,7 @@

def Pixel(H: i32, W: i32, Screen: i32[H, W], x: i32, y: i32) -> None:
    if x >= 0 and y >= 0 and x < W and y < H:
        Screen[i32(int(H - 1 - y)), i32(int(x))] = 255
        Screen[H - 1 - y, x] = 255

def Clear(H: i32, W: i32, Screen: i32[H, W]):
    i: i32
@@ -53,30 +53,37 @@ def Display(H: i32, W: i32, Screen: i32[H, W]):
def Line(H: i32, W: i32, Screen: i32[H, W], x1: i32, y1: i32, x2: i32, y2: i32) -> None:
    dx: i32 = abs(x2 - x1)
    dy: i32 = abs(y2 - y1)

    sx: i32
    sy: i32

    if x1 > x2:
        sx = -1
    else:
    if x1 < x2:
        sx = 1
    if y1 > y2:
        sy = -1
    else:
        sx = -1

    if y1 < y2:
        sy = 1
    else:
        sy = -1

    err: i32 = dx - dy

    while x1 != x2 or y1 != y2:
        Pixel(H, W, Screen, x1, y1)
        e2: i32 = 2 * err

        if e2 > -dy:
            err -= dy
            x1 += sx

        if x1 == x2 and y1 == y2:
            Pixel(H, W, Screen, x1, y1)
            break

        if e2 < dx:
            err += dx
            y1 += sy
    Pixel(H, W, Screen, x2, y2)

def Circle(H: i32, W: i32, Screen: i32[H, W], x: i32, y: i32, r: f64) -> None:
    x0: i32 = i32(int(r))
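
The reworked Line above is the integer Bresenham error-accumulation scheme: err starts at dx - dy, and each iteration steps x and/or y toward the endpoint depending on how 2 * err compares with -dy and dx, with an early exit once an x step lands exactly on the endpoint. A compact stand-alone sketch of the same idea (illustrative plain Python, not the lpdraw API):

```python
def bresenham(x1, y1, x2, y2):
    # Standard Bresenham line rasterization using the same dx/dy error term
    # as the Line routine above; returns the visited grid points.
    points = []
    dx, dy = abs(x2 - x1), abs(y2 - y1)
    sx = 1 if x1 < x2 else -1
    sy = 1 if y1 < y2 else -1
    err = dx - dy
    while True:
        points.append((x1, y1))
        if x1 == x2 and y1 == y2:
            break
        e2 = 2 * err
        if e2 > -dy:
            err -= dy
            x1 += sx
        if e2 < dx:
            err += dx
            y1 += sy
    return points

print(bresenham(0, 0, 5, 2))  # [(0, 0), (1, 0), (2, 1), (3, 1), (4, 2), (5, 2)]
```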
integration_tests/test_pkg_lnn_01.py
@@ -1,4 +1,5 @@
from lnn.perceptron import init_perceptron, print_perceptron, normalize_input_vectors, Perceptron, train_dataset
from lnn.perceptron import init_perceptron, print_perceptron, Perceptron, train_dataset
from lnn.utils import normalize_input_vectors
from lpdraw import Line, Circle, Display, Clear
from lpython import i32, f64, Const
from numpy import empty, int32
@@ -30,7 +31,7 @@ def plot_graph(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i32]
    y2 *= scale_offset

    # print (x1, y1, x2, y2)
    Line(Height, Width, Screen, i32(x1 + shift_offset), i32(y1 + shift_offset), i32(x2 + shift_offset), i32(y2 + shift_offset))
    Line(Height, Width, Screen, i32(int(x1 + shift_offset)), i32(int(y1 + shift_offset)), i32(int(x2 + shift_offset)), i32(int(y2 + shift_offset)))

    i: i32
    point_size: i32 = 5
@@ -40,12 +41,12 @@ def plot_graph(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i32]
        input_vectors[i][0] += shift_offset
        input_vectors[i][1] += shift_offset
        if outputs[i] == 1:
            x: i32 = i32(input_vectors[i][0])
            y: i32 = i32(input_vectors[i][1])
            x: i32 = i32(int(input_vectors[i][0]))
            y: i32 = i32(int(input_vectors[i][1]))
            Line(Height, Width, Screen, x - point_size, y, x + point_size, y)
            Line(Height, Width, Screen, x, y - point_size, x, y + point_size)
        else:
            Circle(Height, Width, Screen, i32(input_vectors[i][0]), i32(input_vectors[i][1]), f64(point_size))
            Circle(Height, Width, Screen, i32(int(input_vectors[i][0])), i32(int(input_vectors[i][1])), f64(point_size))

    Display(Height, Width, Screen)

integration_tests/test_pkg_lnn_02.py (89 additions)
@@ -0,0 +1,89 @@
from lnn.regression import init_perceptron, print_perceptron, Perceptron, train_dataset
from lnn.utils import normalize_input_vectors, normalize_output_vector
from lpdraw import Line, Circle, Display, Clear
from lpython import i32, f64, Const
from numpy import empty, int32


def compute_decision_boundary(p: Perceptron, x: f64) -> f64:
    bias: f64 = p.weights[1]
    slope: f64 = p.weights[0]
    intercept: f64 = bias
    return slope * x + intercept

def plot_graph(p: Perceptron, input_vectors: list[list[f64]], outputs: list[f64]):
    Width: Const[i32] = 500 # x-axis limits [0, 499]
    Height: Const[i32] = 500 # y-axis limits [0, 499]
    Screen: i32[Height, Width] = empty((Height, Width), dtype=int32)
    Clear(Height, Width, Screen)

    x1: f64 = 1.0
    y1: f64 = compute_decision_boundary(p, x1)
    x2: f64 = -1.0
    y2: f64 = compute_decision_boundary(p, x2)

    # center the graph using the following offset
    scale_offset: f64 = Width / 4
    shift_offset: f64 = Width / 2
    x1 *= scale_offset
    y1 *= scale_offset
    x2 *= scale_offset
    y2 *= scale_offset

    # print (x1, y1, x2, y2)
    Line(Height, Width, Screen, i32(int(x1 + shift_offset)), i32(int(y1 + shift_offset)), i32(int(x2 + shift_offset)), i32(int(y2 + shift_offset)))

    i: i32
    point_size: i32 = 5
    for i in range(len(input_vectors)):
        input_vectors[i][0] *= scale_offset
        input_vectors[i][0] += shift_offset
        outputs[i] *= scale_offset
        outputs[i] += shift_offset

        Circle(Height, Width, Screen, i32(int(input_vectors[i][0])), i32(int(outputs[i])), f64(point_size))

    Display(Height, Width, Screen)

def main0():
    p: Perceptron = Perceptron(0, [0.0], 0.0, 0, 0.0, 0.0, 0)
    init_perceptron(p, 1, 0.0005, 10000, 1e-16)

    input_vectors: list[list[f64]] = [[1.1], [1.3], [1.5], [2.0], [2.2], [2.9], [3.0], [3.2], [3.2], [3.7], [3.9], [4.0], [4.0], [4.1], [4.5], [4.9], [5.1], [5.3], [5.9], [6.0], [6.8], [7.1], [7.9], [8.2], [8.7], [9.0], [9.5], [9.6], [10.3], [10.5], [11.2], [11.5], [12.3], [12.9], [13.5]]
    outputs: list[f64] = [39343.0, 46205.0, 37731.0, 43525.0, 39891.0, 56642.0, 60150.0, 54445.0, 64445.0, 57189.0, 63218.0, 55794.0, 56957.0, 57081.0, 61111.0, 67938.0, 66029.0, 83088.0, 81363.0, 93940.0, 91738.0, 98273.0, 101302.0, 113812.0, 109431.0, 105582.0, 116969.0, 112635.0, 122391.0, 121872.0, 127345.0, 126756.0, 128765.0, 135675.0, 139465.0]

    normalize_input_vectors(input_vectors)
    normalize_output_vector(outputs)

    train_dataset(p, input_vectors, outputs)
    print_perceptron(p)

    assert abs(p.weights[0] - (1.0640975812232145)) <= 1e-12
    assert abs(p.weights[1] - (0.0786977829749839)) <= 1e-12
    assert abs(p.err - (0.4735308448814293)) <= 1e-12
    assert p.epochs_cnt == 4515

    plot_graph(p, input_vectors, outputs)

def main1():
    p: Perceptron = Perceptron(0, [0.0], 0.0, 0, 0.0, 0.0, 0)
    init_perceptron(p, 1, 0.0005, 10000, 1e-16)

    input_vectors: list[list[f64]] = [[1.0], [3.0], [7.0]]
    outputs: list[f64] = [8.0, 4.0, -2.0]

    normalize_input_vectors(input_vectors)
    normalize_output_vector(outputs)

    train_dataset(p, input_vectors, outputs)
    print_perceptron(p)

    assert abs(p.weights[0] - (-0.9856542200697508)) <= 1e-12
    assert abs(p.weights[1] - (-0.0428446744717655)) <= 1e-12
    assert abs(p.err - 0.011428579012311327) <= 1e-12
    assert p.epochs_cnt == 10000

    plot_graph(p, input_vectors, outputs)

main0()
main1()
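
Because the model is a one-feature linear fit with an identity activation, the weights asserted in main1 can be sanity-checked against the closed-form least-squares solution on the normalized data. A rough numpy cross-check (illustrative, not part of the test; the iterative trainer stops at the epoch limit, so only approximate agreement is expected):

```python
# Closed-form check of main1's expected slope/bias (illustrative only).
import numpy as np

def min_max(v):                        # same [-1, 1] rescale as lnn.utils
    v = np.asarray(v, dtype=float)
    return -1.0 + 2.0 * (v - v.min()) / (v.max() - v.min())

x = min_max([1.0, 3.0, 7.0])           # [-1.0, -0.333..., 1.0]
y = min_max([8.0, 4.0, -2.0])          # [ 1.0,  0.2, -1.0]
slope, bias = np.polyfit(x, y, 1)      # ordinary least squares, degree 1
print(slope, bias)                     # about -0.98571 and -0.04286, close to the
                                       # asserted -0.98565... and -0.04284... above
```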
integration_tests/test_pkg_lpdraw.py (2 additions, 1 deletion)
@@ -14,7 +14,8 @@ def test_screen(H: i32, W: i32, Screen: i32[H, W]):
        for j in range(W):
            cnt += (Screen[i, j] - 256)

    assert cnt == -979375
    assert cnt == -979630


def main():
    Width: i32 = 100 # x-axis limits [0, 99]
