Commit eec2d671 authored by Chetan Sharma's avatar Chetan Sharma
Browse files

Updating codebase and whatnot

parent c8c4d501
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
private/ private/
.vscode/ .vscode/
saved_cuts/ saved_cuts/
generated_assets/
*~$* *~$*
# Byte-compiled / optimized / DLL files # Byte-compiled / optimized / DLL files
......
...@@ -2,15 +2,17 @@ ...@@ -2,15 +2,17 @@
A CNC controller that learns material characteristics and optimizes its own feeds and speeds. A CNC controller that learns material characteristics and optimizes its own feeds and speeds.
# Project Status # Project Status
Nearing completion, collecting test data
## Current Goals ## Current Goals
The current goal of this project is to make a system that faces a material using an endmill while simultaneously performing regression on sensor data to complete its model. This model is used to optimize subsequent passes (feedrate and WOC) by means of an objective function that weighs MRR against the chance of failure (deflection, breakage, spindle overload). The current goal of this project is to make a system that faces a material using an endmill while simultaneously performing regression on sensor data to complete its model. This model is used to optimize subsequent passes (feedrate and WOC) by means of an objective function that weighs MRR against the chance of failure (deflection, breakage, spindle overload).
## Modeling ## Modeling
Models for forces experienced during the cutting process and models for tool / machine failure are in [software/models.py](software/models.py). Models for forces experienced during the cutting process and models for tool / machine failure are in [software/models.py](software/models.py).
The linear model converges well when given test data sweeps. The linear model converges well when given test data sweeps.
![](assets/model_converge.png) ![](assets/6061-sweep-wide_forces.png)
## Hardware ## Hardware
The hardware setup is finished. The machine is a Taig Micro Mill (kindly donated by Ted Hall). The hardware setup is finished. The machine is a Taig Micro Mill (kindly donated by Ted Hall).
......
""" """
Intended to be the brain box of this project; for now, it just runs a test sweep to collect initial data. Intended to be the brain box of this project.
""" """
FAKE = False
import numpy as np import numpy as np
import time import time
import shelve
import logging
import os
# hacky fix for issues with serial library
if not FAKE:
from cut import Cut
from objects import EndMill, Conditions, MachineChar from objects import EndMill, Conditions, MachineChar
from cut import Cut from fake_cut import ReplayCut
from ml import LinearModel from models import T_lin, F_lin, T_lin_full, F_lin_full
from optimize import Optimizer from ml import UnifiedLinearModel, UnifiedLinearModelFull
from optimize import Optimizer #, OptimizerFull, OptimizerPSO, OptimizerPSOFull
import logging
logging.basicConfig(level="INFO") logging.basicConfig(level="INFO")
MACHINE_PORT = '/dev/ttyS25' MACHINE_PORT = '/dev/ttyS25'
SPINDLE_PORT = '/dev/ttyS33' SPINDLE_PORT = '/dev/ttyS33'
TFD_PORT = '/dev/ttyS35' TFD_PORT = '/dev/ttyS36'
# input variables (SI units: meters, m/s, rad/s unless noted otherwise)
D = 1e-3 # depth of cut (always unchanging...)
W = 1e-3 # initial width of cut for bootstrap
f_r = 0.001 # initial feedrate for bootstrap
f_r_clearing = 0.003 # feedrate for facing and cutting start groove
w = 200 # spindle speed
START_DEPTH = 0.0e-3 # initial Z passed to Cut (stock surface offset)
START_FACE_D = 0.2e-3 # depth of the initial facing pass; 0 / falsy skips facing
ENDMILL = EndMill(3, 3.175e-3, 3.175e-3, 12e-3, 5e-3)
# ENDMILL = EndMill(3, 9.525e-3/2, 9.525e-3/2, 12.7e-3, 5e-3)
D_A = 0.1e-3 # maximum allowable deflection
N = 50 # total number of cuts to take, including bootstrap cuts
CONFIDENCE_RATE = np.linspace(0.2, 1, 5) # confidence progression during bootstrap cuts
USE_OLD_DATA = False # shelve key of a previous run to pre-train from, or False to start fresh
NAME = "ammp-lcs-1_4" # name to save to / name to draw data from if doing a fake cut
MODEL = UnifiedLinearModel # regression model class used to fit cutting coefficients
EQUATIONS = (T_lin, F_lin) # torque / force equations matching MODEL
OPTIMIZER = Optimizer # optimizer to use
# taig machine characterization (motor + structure constants)
MACHINE = MachineChar(
    r_e = 1,
    K_T = 0.10281,
    R_w = 0.188,
    V_max = 48,
    I_max = 10,
    T_nom = 0.12,
    f_r_max = 0.01,
    K_machine = 1.25e6,
    D_a = D_A
)
# baseline conditions handed to the optimizer as its starting point
FIXED_CONDITIONS = Conditions(
    D = D,
    W = W,
    f_r = f_r,
    w = w,
    endmill = ENDMILL
)
logging.info("Initializing all structures")
cut = None
if FAKE:
    # replay a previously recorded cut instead of driving real hardware
    cut = ReplayCut(NAME, MODEL(), *EQUATIONS, [0,0], [0.1,2])
else:
    # real cut: serial-attached machine, spindle, and force dynamometer
    cut = Cut(MACHINE_PORT, SPINDLE_PORT, TFD_PORT, ENDMILL, 80e-3, 50.8e-3, f_r_clearing, w, START_DEPTH, NAME)
model = MODEL()
optimizer = OPTIMIZER(model, MACHINE, D_A, FIXED_CONDITIONS)
endmill = EndMill(3, 3.175e-3, 3.175e-3, 19.05e-3, 1e-3) logging.info("Beginning facing operation, creating starting groove")
if START_FACE_D:
cut.face_layer(START_FACE_D)
cut.begin_layer(D)
cut = Cut(MACHINE_PORT, SPINDLE_PORT, TFD_PORT, endmill, 80e-3, 50.8e-3, 5e-3, 300, save_as = "6061-sweep") logging.info("First bootstrap cut to obtain a basic characterization")
f_r_range = np.linspace(2e-3, 5e-3, 9) conditions_conservative = Conditions(D, W, f_r, w, ENDMILL)
W_range = np.linspace(1e-3, 3.175e-3, 10) datum = cut.cut(conditions_conservative, save=True, auto_layer=True)
model.ingest_datum(datum)
cut.face_layer(D = 0.3e-3) logging.info("After bootstrap cut, model params are actually at: " + ", ".join(["{:.5e}".format(p) for p in model.params]))
if USE_OLD_DATA and not FAKE:
    # pre-train the model from a previously recorded run;
    # USE_OLD_DATA holds the shelve key of that saved cut series
    with shelve.open(os.path.join("saved_cuts", "db")) as db:
        model.ingest_data(db[USE_OLD_DATA])
logging.info("Partially optimized bootstrap cuts starting now")
cut.begin_layer(D = 1e-3) # start optimizing, but only slowly start accepting new datums
confidences = list(CONFIDENCE_RATE) + [1] * (N - len(CONFIDENCE_RATE))
for confidence in confidences:
logging.info("Confidence at : " + str(confidence))
conditions_optimized = optimizer.optimize(verbose = True)
logging.info("Optimized : " + str(conditions_optimized))
conditions_compromise = conditions_conservative.compromise(conditions_optimized, confidence)
logging.info("Compromised : " + str(conditions_compromise))
logging.info("Model guesses : " + str(model.predict_one(conditions_compromise)))
datum = cut.cut(conditions_compromise, save = True, auto_layer=True)
logging.info("Datum obtained : " + str(datum))
model.ingest_datum(datum)
logging.info("Params updated to: " + ", ".join(["{:.5e}".format(p) for p in model.params]))
for f_r in f_r_range: if FAKE:
for W in W_range: logging.info("Actual cut params: " + ", ".join(["{:.5e}".format(p) for p in cut.params]))
conditions = Conditions(1e-3, W, f_r, 300, endmill) \ No newline at end of file
cut.cut(conditions, save = True, auto_layer=True)
cut.close()
\ No newline at end of file
"""
Intended to be the brain box of this project; for now, it just runs a test sweep to collect initial data.
"""
import numpy as np
import time
from objects import EndMill, Conditions, MachineChar
from cut import Cut
from ml import LinearModel
from optimize import Optimizer
import logging
logging.basicConfig(level="INFO")
MACHINE_PORT = '/dev/ttyS25'
SPINDLE_PORT = '/dev/ttyS33'
TFD_PORT = '/dev/ttyS36'
IGNORE_FIRST = 0
machine = MachineChar(
r_e = 1,
K_T = 0.10281,
R_w = 0.188,
V_max = 48,
I_max = 10,
T_nom = 0.12,
f_r_max = 0.017,
K_machine = 5e6,
D_a = 1
)
endmill = EndMill(3, 3.175e-3, 3.175e-3, 9.5e-3, 3e-3)
fixed_conditions = Conditions(
D = 1e-3,
W = 1e-3,
f_r = 0.001,
w = 300,
endmill = endmill
)
cut = Cut(MACHINE_PORT, SPINDLE_PORT, TFD_PORT, endmill, 80e-3, 50.8e-3, 5e-3, 300, save_as = "6061-sweep-speed")
f_r_range = np.linspace(2e-3, 0.011, 4)
W_range = np.linspace(1e-3, 3.175e-3 * 1.8, 5)
w_range = np.linspace(100, 300, 5)
cut.face_layer(D = 0.3e-3)
cut.begin_layer(D = 1e-3)
for f_r in f_r_range:
for W in W_range:
for w in w_range:
conditions = Conditions(1e-3, W, f_r, w, endmill)
cut.cut(conditions, save = True, auto_layer=True)
cut.close()
\ No newline at end of file
...@@ -16,8 +16,8 @@ class MachineCrash(Exception): ...@@ -16,8 +16,8 @@ class MachineCrash(Exception):
class Cut: class Cut:
def __init__(self, machine_port, spindle_port, tfd_port, endmill, x_max, y_max, f_r_clearing, w_clearing, initial_z=0, save_as=None): def __init__(self, machine_port, spindle_port, tfd_port, endmill, x_max, y_max, f_r_clearing, w_clearing, initial_z=0, save_as=None, graceful_shutdown = False):
self.machine = Machine(machine_port) self.machine = Machine(machine_port, graceful_shutdown = graceful_shutdown)
self.machine.unlock() self.machine.unlock()
self.machine.zero() self.machine.zero()
self.spindle = Spindle_Applied(spindle_port) self.spindle = Spindle_Applied(spindle_port)
...@@ -111,10 +111,10 @@ class Cut: ...@@ -111,10 +111,10 @@ class Cut:
self.spindle.set_w(self.w_clearing) self.spindle.set_w(self.w_clearing)
self.machine.rapid({'x': self.X_START, 'y': self.Y_START}) self.machine.rapid({'x': self.X_START, 'y': self.Y_START})
self.machine.rapid({'z': self.cut_z}) self.machine.rapid({'z': self.cut_z + D / 2})
self.machine.cut({'y': self.Y_END}, self.f_r_clearing) self.machine.cut({'y': self.Y_END}, self.f_r_clearing)
self.machine.rapid({'z': self.cut_z + D + 1e-3}) self.machine.rapid({'z': self.cut_z})
self.machine.rapid({'x': self.X_START, 'y': self.Y_START}) self.machine.cut({'y': self.Y_START}, self.f_r_clearing)
self.machine.rapid({'z': self.cut_z}) self.machine.rapid({'z': self.cut_z})
self.machine.hold_until_still() self.machine.hold_until_still()
......
This diff is collapsed.
from models import T_lin, F_lin from models import T_lin, F_lin, T_lin_full, F_lin_full
from objects import Conditions, Data from objects import Conditions, Data
import numpy as np import numpy as np
import shelve import shelve
import os import os
from ml import UnifiedLinearModel
import logging
log = logging.getLogger(__name__)
class Fake_Cut: class Fake_Cut:
""" """
Fake cutting process. Returns results using prebaked parameters and specified noise levels. Fake cutting process. Returns results using prebaked parameters and specified noise levels.
Args: Args:
params: list of format [K_tc, K_te, K_rc, K_re] params: list of params
T_func: A function to use for torques
F_func: A function to use for forces
error: list of standard deviations of format [o_T, o_Fy]. Simulates the sensor being "wrong" sometimes. error: list of standard deviations of format [o_T, o_Fy]. Simulates the sensor being "wrong" sometimes.
error: list of standard deviations of format [o_T, o_Fy]. Simulates the sensor being noisy. error: list of standard deviations of format [o_T, o_Fy]. Simulates the sensor being noisy.
""" """
def __init__(self, params, error, noise): def __init__(self, params, T_func, F_func, error, noise):
self.params = params self.params = params
self.T_func = T_func
self.F_func = F_func
self.error = error self.error = error
self.noise = noise self.noise = noise
log.info("Initialized fake cut with params: " + str(self.params))
def begin_cut(self, *args, **kwargs): def face_layer(self, *args, **kwargs):
log.info("Face cut called")
pass pass
def cut(self, conditions: Conditions): def begin_layer(self, *args, **kwargs):
log.info("Begin cut called")
pass
def cut(self, conditions: Conditions, *args, **kwargs):
# use prediction as output # use prediction as output
T = T_lin(conditions, *self.params[:2]) T = self.T_func(conditions, *self.params)
_, Fy = F_lin(conditions, *self.params) _, Fy = self.F_func(conditions, *self.params)
# add sensor error # add sensor error
T_error = np.random.normal(T, self.error[0]) T_error = np.random.normal(T, self.error[0])
Fy_error = np.random.normal(Fy, self.error[1]) Fy_error = np.random.normal(Fy, self.error[1])
...@@ -36,18 +49,17 @@ class Fake_Cut: ...@@ -36,18 +49,17 @@ class Fake_Cut:
# generate fake times # generate fake times
t = np.linspace(0, 1, 100) t = np.linspace(0, 1, 100)
# return fake reading # return fake reading
return Data(*conditions.unpack(), np.array([t, T_noisy]), np.array([t, Fy_noisy])) data = Data(*conditions.unpack(), np.array([t, T_noisy]).T, np.array([t, Fy_noisy]).T)
return data
def scale_coefs(self, scale):
    # Multiply every fake-cut model parameter by *scale* (uniform rescaling
    # of the prebaked coefficients used to synthesize readings).
    self.params = [scale * p for p in self.params]
class ReplayCut:
def __init__(self, replay_data):
self.data = None
with shelve.open(os.path.join("saved_data", "db")) as db:
self.data = db[replay_data]
def begin_cut(self, *args, **kwargs):
pass
def cut(self, *args, **kwargs): class ReplayCut(Fake_Cut):
for datum in self.data: def __init__(self, replay_data, model, T_func, F_func, error, noise):
yield datum self.model = model
with shelve.open(os.path.join("saved_cuts", "db")) as db:
data = db[replay_data]
self.model.ingest_data(data)
super().__init__(self.model.params, T_func, F_func, error, noise)
\ No newline at end of file
...@@ -2,11 +2,18 @@ import abc ...@@ -2,11 +2,18 @@ import abc
import numpy as np import numpy as np
from sklearn import linear_model from sklearn import linear_model
from scipy import stats
from matplotlib import pyplot as plt from matplotlib import pyplot as plt
from models import T_lin, F_lin, T_x_vector, T_x_vector_padded, Fy_x_vector from models import T_lin, F_lin, T_lin_full, F_lin_full, T_x_vector, T_x_vector_padded, Fy_x_vector, T_x_vector_full, Fy_x_vector_full
from objects import Data, Conditions, EndMill, Prediction from objects import Data, Conditions, EndMill, Prediction
# https://stackoverflow.com/questions/11686720
def mean_no_outliers(data, m=2):
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d / (mdev if mdev else 1.)
return np.mean(data[s < m])
class Model(abc.ABC): class Model(abc.ABC):
""" """
...@@ -37,7 +44,7 @@ class Model(abc.ABC): ...@@ -37,7 +44,7 @@ class Model(abc.ABC):
class LinearModel(Model): class LinearModel(Model):
def __init__(self): def __init__(self, initial_params = [0, 0, 0, 0]):
self.training_T_x = list() self.training_T_x = list()
self.training_T_y = list() self.training_T_y = list()
self.training_Fy_x = list() self.training_Fy_x = list()
...@@ -45,12 +52,12 @@ class LinearModel(Model): ...@@ -45,12 +52,12 @@ class LinearModel(Model):
self.regressor_T = linear_model.LinearRegression(fit_intercept=False) self.regressor_T = linear_model.LinearRegression(fit_intercept=False)
self.regressor_Fy = linear_model.LinearRegression(fit_intercept=False) self.regressor_Fy = linear_model.LinearRegression(fit_intercept=False)
self.params = np.array([0, 0, 0, 0], dtype='float64') self.params = initial_params
def ingest_datum(self, datum): def ingest_datum(self, datum):
# decompose # decompose
_, _, _, _, _, Ts, Fys = datum.unpack() _, _, _, _, _, Ts, Fys = datum.unpack()
T, Fy = np.median(Ts[:, 1]), np.median(Fys[:, 1]) T, Fy = mean_no_outliers(Ts[:, 1]), mean_no_outliers(Fys[:, 1])
# get linear coefficients # get linear coefficients
T_x = T_x_vector(datum.conditions()) T_x = T_x_vector(datum.conditions())
Fy_x = Fy_x_vector(datum.conditions()) Fy_x = Fy_x_vector(datum.conditions())
...@@ -83,35 +90,39 @@ class LinearModel(Model): ...@@ -83,35 +90,39 @@ class LinearModel(Model):
def predict_one(self, conditions): def predict_one(self, conditions):
# evaluate # evaluate
T = T_lin(conditions, *self.params[:2]) T = T_lin(conditions, *self.params)
F = F_lin(conditions, *self.params) F = F_lin(conditions, *self.params)
# repack and return # repack and return
return Prediction(*conditions.unpack(), T, F) return Prediction(*conditions.unpack(), T, F)
class UnifiedLinearModel(Model): class UnifiedLinearModel(Model):
def __init__(self): def __init__(self, initial_params = [0,0,0,0]):
self.training_Tx = list() self.training_Tx = list()
self.training_Ty = list() self.training_Ty = list()
self.training_Fyx = list() self.training_Fyx = list()
self.training_Fyy = list() self.training_Fyy = list()
self.regressor = linear_model.LinearRegression(fit_intercept=False) self.regressor = linear_model.LinearRegression(fit_intercept=False)
self.params = np.array([0, 0, 0, 0], dtype='float64') self.params = initial_params
def ingest_datum(self, datum): def ingest_datum(self, datum):
# decompose # decompose
_, _, _, _, _, Ts, Fys = datum.unpack() _, _, _, _, _, Ts, Fys = datum.unpack()
T, Fy = np.median(Ts[:, 1]), np.median(Fys[:, 1]) # mean filter + rejection of outliers
# T, Fy = np.median(Ts[:, 1]), np.median(Fys[:, 1])
T, Fy = mean_no_outliers(Ts[:, 1]), mean_no_outliers(Fys[:, 1])
# get linear coefficients # get linear coefficients
T_x = np.array(T_x_vector_padded(datum.conditions())) T_x = np.array(T_x_vector_padded(datum.conditions()))
Fy_x = np.array(Fy_x_vector(datum.conditions())) Fy_x = np.array(Fy_x_vector(datum.conditions()))
# we want to artificially inflate T to be as big as F # normalizing independently while preserving ratios
# this is a little arbitrary, might not be the best idea lol norm_T = np.linalg.norm(T_x)
ratio = Fy_x[:2].mean() / T_x[:2].mean() T_x /= norm_T
T_x *= ratio T /= norm_T
T *= ratio norm_Fy = np.linalg.norm(Fy_x)
Fy_x /= norm_Fy
Fy /= norm_Fy
# add T to training set # add T to training set
self.training_Tx.append(T_x) self.training_Tx.append(T_x)
...@@ -130,14 +141,70 @@ class UnifiedLinearModel(Model): ...@@ -130,14 +141,70 @@ class UnifiedLinearModel(Model):
def predict_one(self, conditions): def predict_one(self, conditions):
# evaluate # evaluate
T = T_lin(conditions, *self.params[:2]) T = T_lin(conditions, *self.params)
F = F_lin(conditions, *self.params) F = F_lin(conditions, *self.params)
# repack and return # repack and return
return Prediction(*conditions.unpack(), T, F) return Prediction(*conditions.unpack(), T, F)
def score(self):
    # R^2 of the shared regressor over the combined torque + force training
    # rows (the + here concatenates the Python lists of rows, it is not
    # elementwise addition).
    return self.regressor.score(self.training_Tx + self.training_Fyx, self.training_Ty + self.training_Fyy)
class UnifiedLinearModelFull(Model):
    """Unified linear cutting model using the six-parameter "full" equations.

    Same scheme as UnifiedLinearModel, but fit against T_lin_full /
    F_lin_full: torque and Y-force observations are normalized
    independently (preserving the ratios within each row) and stacked into
    a single least-squares system so one regressor recovers all
    coefficients at once.
    """

    def __init__(self, initial_params=None):
        """Args:
            initial_params: optional starting coefficient list (6 terms);
                replaced on the first call to update().
        """
        # training rows: x is a coefficient vector, y the scalar observation
        self.training_Tx = list()
        self.training_Ty = list()
        self.training_Fyx = list()
        self.training_Fyy = list()
        # no intercept: the physical model passes through the origin
        self.regressor = linear_model.LinearRegression(fit_intercept=False)
        # avoid a shared mutable default argument
        self.params = [0, 0, 0, 0, 0, 0] if initial_params is None else initial_params

    def ingest_datum(self, datum):
        """Fold one measured cut into the training set and refit."""
        # decompose
        _, _, _, _, _, Ts, Fys = datum.unpack()
        # mean filter + rejection of outliers.
        # BUGFIX: the original called reject_outliers(), which is not
        # defined anywhere in this module (NameError at runtime); the
        # module-level helper mean_no_outliers() already performs
        # outlier rejection and returns the mean.
        T, Fy = mean_no_outliers(Ts[:, 1]), mean_no_outliers(Fys[:, 1])
        # get linear coefficients
        T_x = np.array(T_x_vector_full(datum.conditions()))
        Fy_x = np.array(Fy_x_vector_full(datum.conditions()))
        # normalizing independently while preserving ratios
        norm_T = np.linalg.norm(T_x)
        T_x /= norm_T
        T /= norm_T
        norm_Fy = np.linalg.norm(Fy_x)
        Fy_x /= norm_Fy
        Fy /= norm_Fy
        # add T to training set
        self.training_Tx.append(T_x)
        self.training_Ty.append(T)
        # add Fy to training set
        self.training_Fyx.append(Fy_x)
        self.training_Fyy.append(Fy)
        self.update()

    def update(self):
        # refit against all rows so far; torque and force rows share one
        # coefficient vector (list + concatenates the row lists)
        self.regressor.fit(self.training_Tx + self.training_Fyx, self.training_Ty + self.training_Fyy)
        self.params = np.array(self.regressor.coef_)

    def predict_one(self, conditions):
        """Predict torque and force for one set of cutting conditions."""
        # evaluate
        T = T_lin_full(conditions, *self.params)
        F = F_lin_full(conditions, *self.params)
        # repack and return
        return Prediction(*conditions.unpack(), T, F)
class RANSACLinearModel(Model): class RANSACLinearModel(Model):
def __init__(self): def __init__(self, initial_params = [0,0,0,0]):