Commit eec2d671 authored by Chetan Sharma's avatar Chetan Sharma
Browse files

Updating codebase and whatnot

parent c8c4d501
......@@ -4,6 +4,7 @@
private/
.vscode/
saved_cuts/
generated_assets/
*~$*
# Byte-compiled / optimized / DLL files
......
......@@ -2,6 +2,8 @@
A CNC controller that learns material characteristics and optimizes its own feeds and speeds.
# Project Status
Nearing completion, collecting test data
## Current Goals
The current goal of this project is to make a system that faces a material using an endmill while simultaneously performing regression on sensor data to complete its model. This model is used to optimize subsequent passes (feedrate and WOC) by means of an objective function that weighs MRR (material removal rate) against the chance of failure (deflection, breakage, spindle overload).
......@@ -10,7 +12,7 @@ Models for forces experienced during the cutting process and models for tool / m
The linear model converges well when given test data sweeps.
![](assets/model_converge.png)
![](assets/6061-sweep-wide_forces.png)
## Hardware
The hardware setup is finished. The machine is a Taig Micro Mill (kindly donated by Ted Hall).
......
"""
Intended to be the brain box of this project; for now, it just runs a test sweep to collect initial data.
Intended to be the brain box of this project.
"""
FAKE = False
import numpy as np
import time
import shelve
import logging
import os
# hacky fix for issues with serial library
if not FAKE:
from cut import Cut
from objects import EndMill, Conditions, MachineChar
from cut import Cut
from ml import LinearModel
from optimize import Optimizer
from fake_cut import ReplayCut
from models import T_lin, F_lin, T_lin_full, F_lin_full
from ml import UnifiedLinearModel, UnifiedLinearModelFull
from optimize import Optimizer #, OptimizerFull, OptimizerPSO, OptimizerPSOFull
import logging
logging.basicConfig(level="INFO")
MACHINE_PORT = '/dev/ttyS25'
SPINDLE_PORT = '/dev/ttyS33'
TFD_PORT = '/dev/ttyS35'
TFD_PORT = '/dev/ttyS36'
# input variables
D = 1e-3 # depth of cut (always unchanging...)
W = 1e-3 # initial width of cut for bootstrap
f_r = 0.001 # initial feedrate for bootstrap
f_r_clearing = 0.003 # feedrate for facing and cutting start groove
w = 200 # spindle speed
START_DEPTH = 0.0e-3
START_FACE_D = 0.2e-3
ENDMILL = EndMill(3, 3.175e-3, 3.175e-3, 12e-3, 5e-3)
# ENDMILL = EndMill(3, 9.525e-3/2, 9.525e-3/2, 12.7e-3, 5e-3)
D_A = 0.1e-3 # maximum allowable deflection
N = 50 # total number of cuts to take, including bootstrap cuts
CONFIDENCE_RATE = np.linspace(0.2, 1, 5) # confidence progression during bootstrap cuts
USE_OLD_DATA = False
NAME = "ammp-lcs-1_4" # name to save to / name to draw data from if doing a fake cut
MODEL = UnifiedLinearModel
EQUATIONS = (T_lin, F_lin)
OPTIMIZER = Optimizer # optimizer to use
# taig machine
MACHINE = MachineChar(
r_e = 1,
K_T = 0.10281,
R_w = 0.188,
V_max = 48,
I_max = 10,
T_nom = 0.12,
f_r_max = 0.01,
K_machine = 1.25e6,
D_a = D_A
)
FIXED_CONDITIONS = Conditions(
D = D,
W = W,
f_r = f_r,
w = w,
endmill = ENDMILL
)
logging.info("Initializing all structures")
endmill = EndMill(3, 3.175e-3, 3.175e-3, 19.05e-3, 1e-3)
cut = None
if FAKE:
cut = ReplayCut(NAME, MODEL(), *EQUATIONS, [0,0], [0.1,2])
else:
cut = Cut(MACHINE_PORT, SPINDLE_PORT, TFD_PORT, ENDMILL, 80e-3, 50.8e-3, f_r_clearing, w, START_DEPTH, NAME)
model = MODEL()
optimizer = OPTIMIZER(model, MACHINE, D_A, FIXED_CONDITIONS)
cut = Cut(MACHINE_PORT, SPINDLE_PORT, TFD_PORT, endmill, 80e-3, 50.8e-3, 5e-3, 300, save_as = "6061-sweep")
logging.info("Beginning facing operation, creating starting groove")
if START_FACE_D:
cut.face_layer(START_FACE_D)
cut.begin_layer(D)
f_r_range = np.linspace(2e-3, 5e-3, 9)
W_range = np.linspace(1e-3, 3.175e-3, 10)
logging.info("First bootstrap cut to obtain a basic characterization")
cut.face_layer(D = 0.3e-3)
conditions_conservative = Conditions(D, W, f_r, w, ENDMILL)
datum = cut.cut(conditions_conservative, save=True, auto_layer=True)
model.ingest_datum(datum)
cut.begin_layer(D = 1e-3)
logging.info("After bootstrap cut, model params are actually at: " + ", ".join(["{:.5e}".format(p) for p in model.params]))
if USE_OLD_DATA and not FAKE:
with shelve.open(os.path.join("saved_cuts", "db")) as db:
model.ingest_data(db[USE_OLD_DATA])
logging.info("Partially optimized bootstrap cuts starting now")
for f_r in f_r_range:
for W in W_range:
conditions = Conditions(1e-3, W, f_r, 300, endmill)
cut.cut(conditions, save = True, auto_layer=True)
# start optimizing, but only slowly start accepting new datums
confidences = list(CONFIDENCE_RATE) + [1] * (N - len(CONFIDENCE_RATE))
for confidence in confidences:
logging.info("Confidence at : " + str(confidence))
conditions_optimized = optimizer.optimize(verbose = True)
logging.info("Optimized : " + str(conditions_optimized))
conditions_compromise = conditions_conservative.compromise(conditions_optimized, confidence)
logging.info("Compromised : " + str(conditions_compromise))
logging.info("Model guesses : " + str(model.predict_one(conditions_compromise)))
datum = cut.cut(conditions_compromise, save = True, auto_layer=True)
logging.info("Datum obtained : " + str(datum))
model.ingest_datum(datum)
logging.info("Params updated to: " + ", ".join(["{:.5e}".format(p) for p in model.params]))
cut.close()
\ No newline at end of file
if FAKE:
logging.info("Actual cut params: " + ", ".join(["{:.5e}".format(p) for p in cut.params]))
\ No newline at end of file
"""
Intended to be the brain box of this project; for now, it just runs a test sweep to collect initial data.
"""
import numpy as np
import time
from objects import EndMill, Conditions, MachineChar
from cut import Cut
from ml import LinearModel
from optimize import Optimizer
import logging
logging.basicConfig(level="INFO")
MACHINE_PORT = '/dev/ttyS25'
SPINDLE_PORT = '/dev/ttyS33'
TFD_PORT = '/dev/ttyS36'
IGNORE_FIRST = 0
machine = MachineChar(
r_e = 1,
K_T = 0.10281,
R_w = 0.188,
V_max = 48,
I_max = 10,
T_nom = 0.12,
f_r_max = 0.017,
K_machine = 5e6,
D_a = 1
)
endmill = EndMill(3, 3.175e-3, 3.175e-3, 9.5e-3, 3e-3)
fixed_conditions = Conditions(
D = 1e-3,
W = 1e-3,
f_r = 0.001,
w = 300,
endmill = endmill
)
cut = Cut(MACHINE_PORT, SPINDLE_PORT, TFD_PORT, endmill, 80e-3, 50.8e-3, 5e-3, 300, save_as = "6061-sweep-speed")
f_r_range = np.linspace(2e-3, 0.011, 4)
W_range = np.linspace(1e-3, 3.175e-3 * 1.8, 5)
w_range = np.linspace(100, 300, 5)
cut.face_layer(D = 0.3e-3)
cut.begin_layer(D = 1e-3)
for f_r in f_r_range:
for W in W_range:
for w in w_range:
conditions = Conditions(1e-3, W, f_r, w, endmill)
cut.cut(conditions, save = True, auto_layer=True)
cut.close()
\ No newline at end of file
......@@ -16,8 +16,8 @@ class MachineCrash(Exception):
class Cut:
def __init__(self, machine_port, spindle_port, tfd_port, endmill, x_max, y_max, f_r_clearing, w_clearing, initial_z=0, save_as=None):
self.machine = Machine(machine_port)
def __init__(self, machine_port, spindle_port, tfd_port, endmill, x_max, y_max, f_r_clearing, w_clearing, initial_z=0, save_as=None, graceful_shutdown = False):
self.machine = Machine(machine_port, graceful_shutdown = graceful_shutdown)
self.machine.unlock()
self.machine.zero()
self.spindle = Spindle_Applied(spindle_port)
......@@ -111,10 +111,10 @@ class Cut:
self.spindle.set_w(self.w_clearing)
self.machine.rapid({'x': self.X_START, 'y': self.Y_START})
self.machine.rapid({'z': self.cut_z})
self.machine.rapid({'z': self.cut_z + D / 2})
self.machine.cut({'y': self.Y_END}, self.f_r_clearing)
self.machine.rapid({'z': self.cut_z + D + 1e-3})
self.machine.rapid({'x': self.X_START, 'y': self.Y_START})
self.machine.rapid({'z': self.cut_z})
self.machine.cut({'y': self.Y_START}, self.f_r_clearing)
self.machine.rapid({'z': self.cut_z})
self.machine.hold_until_still()
......
This diff is collapsed.
from models import T_lin, F_lin
from models import T_lin, F_lin, T_lin_full, F_lin_full
from objects import Conditions, Data
import numpy as np
import shelve
import os
from ml import UnifiedLinearModel
import logging
log = logging.getLogger(__name__)
class Fake_Cut:
"""
Fake cutting process. Returns results using prebaked parameters and specified noise levels.
Args:
params: list of format [K_tc, K_te, K_rc, K_re]
params: list of params
T_func: A function to use for torques
F_func: A function to use for forces
error: list of standard deviations of format [o_T, o_Fy]. Simulates the sensor being "wrong" sometimes.
error: list of standard deviations of format [o_T, o_Fy]. Simulates the sensor being noisy.
"""
def __init__(self, params, error, noise):
def __init__(self, params, T_func, F_func, error, noise):
self.params = params
self.T_func = T_func
self.F_func = F_func
self.error = error
self.noise = noise
log.info("Initialized fake cut with params: " + str(self.params))
def begin_cut(self, *args, **kwargs):
def face_layer(self, *args, **kwargs):
log.info("Face cut called")
pass
def cut(self, conditions: Conditions):
def begin_layer(self, *args, **kwargs):
log.info("Begin cut called")
pass
def cut(self, conditions: Conditions, *args, **kwargs):
# use prediction as output
T = T_lin(conditions, *self.params[:2])
_, Fy = F_lin(conditions, *self.params)
T = self.T_func(conditions, *self.params)
_, Fy = self.F_func(conditions, *self.params)
# add sensor error
T_error = np.random.normal(T, self.error[0])
Fy_error = np.random.normal(Fy, self.error[1])
......@@ -36,18 +49,17 @@ class Fake_Cut:
# generate fake times
t = np.linspace(0, 1, 100)
# return fake reading
return Data(*conditions.unpack(), np.array([t, T_noisy]), np.array([t, Fy_noisy]))
data = Data(*conditions.unpack(), np.array([t, T_noisy]).T, np.array([t, Fy_noisy]).T)
return data
def scale_coefs(self, scale):
self.params = [scale * p for p in self.params]
class ReplayCut:
def __init__(self, replay_data):
self.data = None
with shelve.open(os.path.join("saved_data", "db")) as db:
self.data = db[replay_data]
def begin_cut(self, *args, **kwargs):
pass
def cut(self, *args, **kwargs):
for datum in self.data:
yield datum
class ReplayCut(Fake_Cut):
    """A fake cut that replays previously recorded cut data.

    Fits *model* to a dataset saved in the shelve db under key
    *replay_data*, then acts as a Fake_Cut whose parameters are the
    fitted model parameters.
    """
    def __init__(self, replay_data, model, T_func, F_func, error, noise):
        # model is fit to the recorded data; its params drive the fake cut
        self.model = model
        # load the recorded data saved under key `replay_data`
        with shelve.open(os.path.join("saved_cuts", "db")) as db:
            data = db[replay_data]
        self.model.ingest_data(data)
        # delegate to Fake_Cut with the learned parameters
        super().__init__(self.model.params, T_func, F_func, error, noise)
\ No newline at end of file
......@@ -2,11 +2,18 @@ import abc
import numpy as np
from sklearn import linear_model
from scipy import stats
from matplotlib import pyplot as plt
from models import T_lin, F_lin, T_x_vector, T_x_vector_padded, Fy_x_vector
from models import T_lin, F_lin, T_lin_full, F_lin_full, T_x_vector, T_x_vector_padded, Fy_x_vector, T_x_vector_full, Fy_x_vector_full
from objects import Data, Conditions, EndMill, Prediction
# https://stackoverflow.com/questions/11686720
# https://stackoverflow.com/questions/11686720
def mean_no_outliers(data, m=2):
    """Return the mean of *data* with outliers rejected.

    A point is kept when its absolute deviation from the median is less
    than m times the median absolute deviation (MAD).

    Args:
        data: 1-D numpy array of samples.
        m: rejection threshold in units of MAD.

    Returns:
        Mean of the inlier samples.
    """
    deviations = np.abs(data - np.median(data))
    mad = np.median(deviations)
    # if MAD is zero (e.g. mostly-constant data), avoid dividing by zero;
    # raw deviations are then compared against m directly
    scaled = deviations / (mad if mad else 1.)
    inliers = data[scaled < m]
    return np.mean(inliers)
class Model(abc.ABC):
"""
......@@ -37,7 +44,7 @@ class Model(abc.ABC):
class LinearModel(Model):
def __init__(self):
def __init__(self, initial_params = [0, 0, 0, 0]):
self.training_T_x = list()
self.training_T_y = list()
self.training_Fy_x = list()
......@@ -45,12 +52,12 @@ class LinearModel(Model):
self.regressor_T = linear_model.LinearRegression(fit_intercept=False)
self.regressor_Fy = linear_model.LinearRegression(fit_intercept=False)
self.params = np.array([0, 0, 0, 0], dtype='float64')
self.params = initial_params
def ingest_datum(self, datum):
# decompose
_, _, _, _, _, Ts, Fys = datum.unpack()
T, Fy = np.median(Ts[:, 1]), np.median(Fys[:, 1])
T, Fy = mean_no_outliers(Ts[:, 1]), mean_no_outliers(Fys[:, 1])
# get linear coefficients
T_x = T_x_vector(datum.conditions())
Fy_x = Fy_x_vector(datum.conditions())
......@@ -83,35 +90,39 @@ class LinearModel(Model):
def predict_one(self, conditions):
# evaluate
T = T_lin(conditions, *self.params[:2])
T = T_lin(conditions, *self.params)
F = F_lin(conditions, *self.params)
# repack and return
return Prediction(*conditions.unpack(), T, F)
class UnifiedLinearModel(Model):
def __init__(self):
def __init__(self, initial_params = [0,0,0,0]):
self.training_Tx = list()
self.training_Ty = list()
self.training_Fyx = list()
self.training_Fyy = list()
self.regressor = linear_model.LinearRegression(fit_intercept=False)
self.params = np.array([0, 0, 0, 0], dtype='float64')
self.params = initial_params
def ingest_datum(self, datum):
# decompose
_, _, _, _, _, Ts, Fys = datum.unpack()
T, Fy = np.median(Ts[:, 1]), np.median(Fys[:, 1])
# mean filter + rejection of outliers
# T, Fy = np.median(Ts[:, 1]), np.median(Fys[:, 1])
T, Fy = mean_no_outliers(Ts[:, 1]), mean_no_outliers(Fys[:, 1])
# get linear coefficients
T_x = np.array(T_x_vector_padded(datum.conditions()))
Fy_x = np.array(Fy_x_vector(datum.conditions()))
# we want to artificially inflate T to be as big as F
# this is a little arbitrary, might not be the best idea lol
ratio = Fy_x[:2].mean() / T_x[:2].mean()
T_x *= ratio
T *= ratio
# normalizing independently while preserving ratios
norm_T = np.linalg.norm(T_x)
T_x /= norm_T
T /= norm_T
norm_Fy = np.linalg.norm(Fy_x)
Fy_x /= norm_Fy
Fy /= norm_Fy
# add T to training set
self.training_Tx.append(T_x)
......@@ -130,14 +141,70 @@ class UnifiedLinearModel(Model):
def predict_one(self, conditions):
# evaluate
T = T_lin(conditions, *self.params[:2])
T = T_lin(conditions, *self.params)
F = F_lin(conditions, *self.params)
# repack and return
return Prediction(*conditions.unpack(), T, F)
def score(self):
return self.regressor.score(self.training_Tx + self.training_Fyx, self.training_Ty + self.training_Fyy)
class UnifiedLinearModelFull(Model):
    """Linear model fitting the full (6-parameter) torque/force equations.

    Torque and Fy observations are stacked into a single regression so
    one coefficient vector explains both measurements, mirroring
    UnifiedLinearModel but using the *_full equation forms.
    """

    def __init__(self, initial_params=None):
        """
        Args:
            initial_params: starting 6-entry parameter vector; used until
                the first datum is ingested. Defaults to all zeros.
        """
        self.training_Tx = list()    # torque design-matrix rows
        self.training_Ty = list()    # torque observations
        self.training_Fyx = list()   # Fy design-matrix rows
        self.training_Fyy = list()   # Fy observations
        self.regressor = linear_model.LinearRegression(fit_intercept=False)
        # avoid a shared mutable default argument
        self.params = [0, 0, 0, 0, 0, 0] if initial_params is None else initial_params

    def ingest_datum(self, datum):
        """Add one measured datum to the training set and refit the model."""
        # decompose
        _, _, _, _, _, Ts, Fys = datum.unpack()
        # mean filter + rejection of outliers
        # (was `reject_outliers`, which is undefined; mean_no_outliers
        # already returns the mean of the inlier samples, matching
        # UnifiedLinearModel)
        T, Fy = mean_no_outliers(Ts[:, 1]), mean_no_outliers(Fys[:, 1])
        # get linear coefficients
        T_x = np.array(T_x_vector_full(datum.conditions()))
        Fy_x = np.array(Fy_x_vector_full(datum.conditions()))
        # normalizing each observation independently while preserving ratios
        norm_T = np.linalg.norm(T_x)
        T_x /= norm_T
        T /= norm_T
        norm_Fy = np.linalg.norm(Fy_x)
        Fy_x /= norm_Fy
        Fy /= norm_Fy
        # add T to training set
        self.training_Tx.append(T_x)
        self.training_Ty.append(T)
        # add Fy to training set
        self.training_Fyx.append(Fy_x)
        self.training_Fyy.append(Fy)
        self.update()

    def update(self):
        # refit best-fit params on the stacked torque + force observations
        self.regressor.fit(self.training_Tx + self.training_Fyx,
                           self.training_Ty + self.training_Fyy)
        self.params = np.array(self.regressor.coef_)

    def predict_one(self, conditions):
        """Predict torque and force for a single set of cutting conditions."""
        # evaluate
        T = T_lin_full(conditions, *self.params)
        F = F_lin_full(conditions, *self.params)
        # repack and return
        return Prediction(*conditions.unpack(), T, F)
class RANSACLinearModel(Model):
def __init__(self):
def __init__(self, initial_params = [0,0,0,0]):
self.training_Tx = list()
self.training_Ty = list()
self.training_Fyx = list()
......@@ -145,7 +212,7 @@ class RANSACLinearModel(Model):
base_regressor = linear_model.LinearRegression(fit_intercept=False)
self.regressor = linear_model.RANSACRegressor(base_regressor, min_samples=5)
self.params = np.array([0, 0, 0, 0], dtype='float64')
self.params = initial_params
def ingest_datum(self, datum):
# decompose
......@@ -179,7 +246,70 @@ class RANSACLinearModel(Model):
def predict_one(self, conditions):
# evaluate
T = T_lin(conditions, *self.params[:2])
T = T_lin(conditions, *self.params)
F = F_lin(conditions, *self.params)
# repack and return
return Prediction(*conditions.unpack(), T, F)
class BayesianLinearModel(Model):
"""
Utilizes a Bayesian approach to pick "safe" values for the coefficients.
That is, we have a confidence interval for our parameters. We pick the upper end for
each coefficient so we don't make too aggressive of a cut
"""
def __init__(self, initial_params = [0,0,0,0], percentile = .90):
self.training_Tx = list()
self.training_Ty = list()
self.training_Fyx = list()
self.training_Fyy = list()
self.regressor = linear_model.ARDRegression(fit_intercept=False, tol = 1e-6, alpha_1 = 1e-9, alpha_2 = 1e-9, lambda_1 = 1e-9, lambda_2 = 1e-9)
self.params = initial_params
self.zscore = stats.norm.ppf(percentile)
def ingest_datum(self, datum):
# decompose
_, _, _, _, _, Ts, Fys = datum.unpack()
T, Fy = np.median(Ts[:, 1]), np.median(Fys[:, 1])
# get linear coefficients
T_x = np.array(T_x_vector_padded(datum.conditions()))
Fy_x = np.array(Fy_x_vector(datum.conditions()))
# normalize while preserving ratio between x and y
norm_T = np.linalg.norm(T_x)
T_x /= norm_T
T /= norm_T
norm_Fy = np.linalg.norm(Fy_x)
Fy_x /= norm_Fy
Fy /= norm_Fy
# add T to training set
self.training_Tx.append(T_x)
self.training_Ty.append(T)
# add Fy to training set
self.training_Fyx.append(Fy_x)
self.training_Fyy.append(Fy)
self.update()
def update(self):
# calculate best fit from data
self.regressor.fit(self.training_Tx + self.training_Fyx, self.training_Ty + self.training_Fyy)
# get params and variance matrix, convert to std deviation
param_mean = np.array(self.regressor.coef_)
param_stdv = np.sqrt(np.diag(self.regressor.sigma_))
print("Param mean: ", param_mean)
print("param stdv: ", param_stdv)
# set params to the lower end of our confidence interval, but make sure they're above 0
self.params = param_mean
def predict_one(self, conditions):
# evaluate
T = T_lin(conditions, *self.params)
F = F_lin(conditions, *self.params)
# repack and return
......
......@@ -4,13 +4,13 @@ from objects import Conditions, Data, Prediction, MachineChar
def calc_f_t(conditions: Conditions):
    """Calculate feed per tooth from cutting conditions.

    Only the feedrate f_r, spindle speed w, and the endmill's flute
    count N are used; depth and width of cut are ignored.
    (Removed a dead duplicate unpack line whose result was immediately
    overwritten.)
    """
    _, _, f_r, w, endmill = conditions.unpack()
    N = endmill.N
    # feed per spindle revolution is 2*pi*f_r/w; divide across N flutes
    return (2 * np.pi * f_r) / (N * w)
def calc_h_a(conditions: Conditions):
D, W, f_r, w, endmill = conditions.unpack()
_, W, _, _, endmill = conditions.unpack()
R = endmill.r_c
f_t = calc_f_t(conditions)
phi_st = np.pi - np.arccos(1 - W / R)
......@@ -20,7 +20,7 @@ def calc_h_a(conditions: Conditions):
# exponential models, account for strange decrease in cutting forces as chip thickness increases.
def T_exp_lin(conditions: Conditions, K_TC, K_te, p):
D, W, f_r, w, endmill = conditions.unpack()
</