Working implementation of Python server with static GP
parent 7f9719cc64
commit 32873283c9
5 changed files with 881 additions and 0 deletions
71
server/callbacks.py
Normal file
@@ -0,0 +1,71 @@
import casadi as cs
import numpy as np
import tensorflow as tf


# Package the resulting regression model in a CasADi callback
class GPR(cs.Callback):
    def __init__(self, name, model, opts={}):
        cs.Callback.__init__(self)

        self.model = model
        self.n_in = model.data[0].shape[1]
        # Create a variable to keep all the gradient callback references
        self.refs = []

        self.construct(name, opts)

    # Number of inputs/outputs
    def get_n_in(self): return 1
    def get_n_out(self): return 1

    # Sparsity of the input/output
    def get_sparsity_in(self, i):
        return cs.Sparsity.dense(1, self.n_in)

    def get_sparsity_out(self, i):
        return cs.Sparsity.dense(1, 1)

    def eval(self, arg):
        inp = np.array(arg[0])
        inp = tf.Variable(inp, dtype=tf.float64)
        [mean, _] = self.model.predict_f(inp)
        return [mean.numpy()]

    def has_reverse(self, nadj): return nadj == 1

    def get_reverse(self, nadj, name, inames, onames, opts):
        grad_callback = GPR_grad(name, self.model)
        self.refs.append(grad_callback)

        nominal_in = self.mx_in()
        nominal_out = self.mx_out()
        adj_seed = self.mx_out()
        # The adjoint sensitivity is the gradient of the (scalar) output
        # scaled by the scalar adjoint seed
        adj_sens = [adj_seed[0] * grad_callback.call(nominal_in)[0]]
        return cs.Function(name, nominal_in + nominal_out + adj_seed, adj_sens, inames, onames)


class GPR_grad(cs.Callback):
    def __init__(self, name, model, opts={}):
        cs.Callback.__init__(self)
        self.model = model
        self.n_in = model.data[0].shape[1]

        self.construct(name, opts)

    def get_n_in(self): return 1
    def get_n_out(self): return 1

    def get_sparsity_in(self, i):
        return cs.Sparsity.dense(1, self.n_in)

    def get_sparsity_out(self, i):
        return cs.Sparsity.dense(1, self.n_in)

    def eval(self, arg):
        inp = np.array(arg[0])
        inp = tf.Variable(inp, dtype=tf.float64)

        with tf.GradientTape() as tape:
            # Differentiate the predictive mean only, since the forward
            # callback outputs the mean; predict_f also returns the variance
            mean, _ = self.model.predict_f(inp)

        grads = tape.gradient(mean, inp)
        return [grads.numpy()]
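Since GPR subclasses cs.Callback, a constructed instance can be called directly on MX symbols and embedded in a larger CasADi expression graph, which is how controllers.py uses it inside the MPC constraints. A minimal usage sketch, assuming a small GPflow model trained on hypothetical random data:

import casadi as cs
import gpflow
import numpy as np

import callbacks

# Hypothetical training set: 50 points with 3 input dimensions
X_train = np.random.rand(50, 3)
Y_train = np.random.rand(50, 1)
m = gpflow.models.GPR((X_train, Y_train), kernel=gpflow.kernels.SquaredExponential())

gpr = callbacks.GPR("gpr", m)     # the callback defined above
x = cs.MX.sym("x", 1, 3)          # one 1 x n_in row vector, matching get_sparsity_in
y = gpr(x)                        # symbolic predictive mean; reverse mode uses GPR_grad
f = cs.Function("f", [x], [y])
print(f(np.zeros((1, 3))))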
415
server/controllers.py
Normal file
@@ -0,0 +1,415 @@
import pickle
from pathlib import Path

import casadi as cs
import numpy as np
import pandas as pd

import gpflow
import tensorflow as tf

from sklearn.preprocessing import MinMaxScaler

import callbacks
from helpers import *


class PIDcontroller:
    def __init__(self, P, I = 0, D = 0):
        self.P = P
        self.I = I
        self.D = D

        self.ref = 22

        self.err_acc = 0
        self.err_old = 0

    def get_control(self, y):
        """
        y: Measured output
        """

        err = self.ref - y
        self.err_acc += err

        sig_P = self.P * err
        sig_I = self.I * self.err_acc
        sig_D = self.D * (err - self.err_old)

        self.err_old = err

        #print(f"P: {sig_P}, I: {sig_I}, D: {sig_D}")
        return sig_P + sig_I + sig_D


class GP_MPCcontroller:
    def __init__(self, dict_cols, model = None, scaler = None, N_horizon = 10, recover_from_crash = False):

        self.recover_from_crash = recover_from_crash

        if self.recover_from_crash:
            self.model = pickle.load(open("controller_model.pkl", 'rb'))
            self.scaler = pickle.load(open("controller_scaler.pkl", 'rb'))
            self.X_log = pickle.load(open("controller_X_log.pkl", 'rb'))
            df = pd.read_pickle("controller_df.pkl")
            self.recovery_signal = iter(df['SimulatedHeat'])

        if model is not None:
            # Model is already trained. Using as is.
            if scaler is None: raise ValueError("Not allowed to pass a model without a scaler")
            self.model = model
            self.cs_model = callbacks.GPR("gpr", self.model)
            self.scaler = scaler
            self.scaler_helper = ScalerHelper(self.scaler)
        else:
            # No model has been passed. Setting up model identification
            self.model = None
            self.nb_data = 500 + 1
            self.ident_signal = get_random_signal(self.nb_data, signal_type = 'analog')

            self.Pel = 2 * 6300
            self.COP = 5.0

            self.ident_signal = iter(self.Pel * self.COP * self.ident_signal)

        self.dict_cols = dict_cols
        self.max_lag = max([lag for lag, _ in self.dict_cols.values()])
        self.N_horizon = N_horizon
        self.X_log = []

        # Complete measurement history
        # Columns are: [SolRad, OutsideTemp] (Disturbance), Heat (Input), Temperature (Output)
        self.data_cols = []
        for lags, cols in self.dict_cols.values():
            self.data_cols += cols
        self.data = np.empty((0, len(self.data_cols)))

        # The current weather forecast
        self.weather_forecast = None

        # Current measurements
        self.w, self.u, self.y = None, None, None

    ###
    # GPflow model training and update
    ###
    def _train_model(self):
        """ Identify model from gathered data """

        nb_train_pts = self.nb_data - 1
        ###
        # Dataset
        ###
        df = pd.DataFrame(self.data[:nb_train_pts], columns = self.data_cols)
        self.scaler = MinMaxScaler(feature_range = (-1, 1))
        self.scaler_helper = ScalerHelper(self.scaler)
        df_sc = get_scaled_df(df, self.dict_cols, self.scaler)
        df_gpr_train = data_to_gpr(df_sc, self.dict_cols)

        df_input_train = df_gpr_train.drop(columns = self.dict_cols['w'][1] + self.dict_cols['u'][1] + self.dict_cols['y'][1])
        df_output_train = df_gpr_train[self.dict_cols['y'][1]]

        np_input_train = df_input_train.to_numpy()
        np_output_train = df_output_train.to_numpy().reshape(-1, 1)

        data_train = (np_input_train, np_output_train)

        df_test = pd.DataFrame(self.data[nb_train_pts:], columns = self.data_cols)
        df_test_sc = get_scaled_df(df_test, self.dict_cols, self.scaler)
        df_gpr_test = data_to_gpr(df_test_sc, self.dict_cols)
        df_input_test = df_gpr_test.drop(columns = self.dict_cols['w'][1] + self.dict_cols['u'][1] + self.dict_cols['y'][1])
        df_output_test = df_gpr_test[self.dict_cols['y'][1]]
        np_input_test = df_input_test.to_numpy()
        np_output_test = df_output_test.to_numpy()

        ###
        # Kernel
        ###

        nb_dims = np_input_train.shape[1]
        rational_dims = np.arange(0, (self.dict_cols['t'][0] + 1) * len(self.dict_cols['t'][1]), 1)
        nb_rational_dims = len(rational_dims)
        squared_dims = np.arange(nb_rational_dims, nb_dims, 1)
        nb_squared_dims = len(squared_dims)

        default_lscale = 75
        while True:
            try:
                squared_l = np.linspace(default_lscale, 2*default_lscale, nb_squared_dims)
                rational_l = np.linspace(default_lscale, 2*default_lscale, nb_rational_dims)

                variance = tf.math.reduce_variance(np_input_train)

                k0 = gpflow.kernels.SquaredExponential(lengthscales = squared_l, active_dims = squared_dims, variance = variance)
                k1 = gpflow.kernels.Constant(variance = variance)
                k2 = gpflow.kernels.RationalQuadratic(lengthscales = rational_l, active_dims = rational_dims, variance = variance)
                k3 = gpflow.kernels.Periodic(k2)

                k = (k0 + k1) * k2
                # The composite kernel above is immediately overridden:
                # only the squared-exponential kernel is actually used
                k = k0

                ###
                # Model
                ###

                m = gpflow.models.GPR(
                    data = data_train,
                    kernel = k,
                    mean_function = None,
                )

                ###
                # Training
                ###
                print(f"Training a model with lscale: {default_lscale}")
                opt = gpflow.optimizers.Scipy()
                opt.minimize(m.training_loss, m.trainable_variables)
                break
            except Exception:
                # Retry with a longer initial lengthscale if training fails
                print("Failed. Increasing lengthscale")
                default_lscale += 5

        ###
        # Save model
        ###
        self.model = m
        self.n_states = self.model.data[0].shape[1]

        # # Manual model validation
        # import matplotlib.pyplot as plt
        #
        # plt.figure()
        # plt.plot(data_train[1], label = 'real')
        # mean, var = self.model.predict_f(data_train[0])
        # plt.plot(mean, label = 'model')
        # plt.legend()
        # plt.show()
        #
        # plt.figure()
        # plt.plot(np_output_test, label = 'real')
        # mean, var = self.model.predict_f(np_input_test)
        # plt.plot(mean, label = 'model')
        # plt.legend()
        # plt.show()
        #
        # import pdb; pdb.set_trace()
        pass
    ###
    # Update measurements
    ###

    def add_disturbance_measurement(self, w):
        self.w = np.array(w).reshape(1, -1)

    def add_output_measurement(self, y):
        self.y = np.array(y).reshape(1, -1)

    def _add_input_measurement(self, u):
        self.u = np.array(u).reshape(1, -1)

    def set_weather_forecast(self, W):
        assert (W.shape[0] == self.N_horizon)
        self.weather_forecast = W

    def update_model(self):
        new_data = np.hstack([self.w, self.u, self.y])
        print(f"Adding new data: {new_data}")
        self.data = np.vstack([self.data, new_data])
        print(f"Data size: {self.data.shape[0]}")
        self.w, self.u, self.y = None, None, None
        print("---")

    ###
    # Set up the optimal control problem solver
    ###
    def _setup_solver(self):

        ###
        # Initialization
        ###
        self.cs_model = callbacks.GPR("gpr", self.model)

        T_set = 21
        T_set_sc = self.scaler_helper.scale_output(T_set)

        X = cs.MX.sym("X", self.N_horizon + 1, self.n_states)
        x0 = cs.MX.sym("x0", 1, self.n_states)
        W = cs.MX.sym("W", self.N_horizon, 2)

        g = []
        lbg = []
        ubg = []

        lbx = -np.inf*np.ones(X.shape)
        ubx = np.inf*np.ones(X.shape)

        u_idx = self.dict_cols['w'][0] * len(self.dict_cols['w'][1])
        y_idx = u_idx + self.dict_cols['u'][0] * len(self.dict_cols['u'][1])
        # Stage cost
        u_cost = 0.05 * cs.dot(X[:, u_idx], X[:, u_idx])
        # The input cost above is immediately zeroed out:
        # only the output tracking cost is active
        u_cost = 0
        y_cost = cs.dot(X[:, y_idx] - T_set_sc, X[:, y_idx] - T_set_sc)
        J = u_cost + y_cost

        # Set up equality constraints for the first step
        for idx in range(self.n_states):
            g.append(X[0, idx] - x0[0, idx])
            lbg.append(0)
            ubg.append(0)

        # Set up equality constraints for the following steps
        for idx in range(1, self.N_horizon + 1):
            base_col = 0
            # w
            nb_cols = self.dict_cols['w'][0]
            for w_idx in range(W.shape[1]):
                w_base_col = w_idx * nb_cols
                g.append(X[idx, base_col + w_base_col] - W[idx - 1, w_idx])
                lbg.append(0)
                ubg.append(0)
                for w_lag_idx in range(1, nb_cols):
                    g.append(X[idx, base_col + w_base_col + w_lag_idx] - X[idx - 1, base_col + w_base_col + w_lag_idx - 1])
                    lbg.append(0)
                    ubg.append(0)

            base_col += nb_cols * W.shape[1]
            # u
            nb_cols = self.dict_cols['u'][0]

            lbx[idx, base_col] = -1  # lower bound on input
            ubx[idx, base_col] = 1   # upper bound on input
            for u_lag_idx in range(1, nb_cols):
                g.append(X[idx, base_col + u_lag_idx] - X[idx - 1, base_col + u_lag_idx - 1])
                lbg.append(0)
                ubg.append(0)

            base_col += nb_cols
            # y
            nb_cols = self.dict_cols['y'][0]
            g.append(X[idx, base_col] - self.cs_model(X[idx - 1, :]))
            lbg.append(0)
            ubg.append(0)
            for y_lag_idx in range(1, nb_cols):
                g.append(X[idx, base_col + y_lag_idx] - X[idx - 1, base_col + y_lag_idx - 1])
                lbg.append(0)
                ubg.append(0)

        p = cs.vertcat(cs.vec(W), cs.vec(x0))

        prob = {'f': J, 'x': cs.vec(X), 'g': cs.vertcat(*g), 'p': p}
        options = {"ipopt": {"hessian_approximation": "limited-memory", "max_iter": 100,
                             #"acceptable_tol": 1e-6, "tol": 1e-6,
                             #"linear_solver": "ma57",
                             #"acceptable_obj_change_tol": 1e-5,
                             #"mu_strategy": "adaptive",
                             "print_level": 0
                             }}

        self.lbg = lbg
        self.ubg = ubg
        self.lbx = lbx
        self.ubx = ubx
        self.solver = cs.nlpsol("solver", "ipopt", prob, options)

        print("Initialized casadi solver")

    ###
    # Compute control signal
    ###
    def get_control_input(self):

        # Recovery pre-loop
        if self.recover_from_crash:
            try:
                u = next(self.recovery_signal)
                self._add_input_measurement(u)
                return u
            except StopIteration:
                # Finished passing in the pre-existing control.
                # Switching to normal operation
                self.recover_from_crash = False

        # Training pre-loop
        if self.model is None:
            try:
                # No model yet. Sending next step of identification signal
                u = next(self.ident_signal)
                self._add_input_measurement(u)
                return u
            except StopIteration:
                # No more identification signal. Training a model and proceeding
                self._train_model()
                self._setup_solver()
                # Continue now since model exists

        # Model exists. Compute optimal control input

        data_scaled = self.scaler.transform(self.data)
        df = pd.DataFrame(data_scaled, columns = self.data_cols)

        x0 = data_to_gpr(df, self.dict_cols).drop(
            columns = self.dict_cols['w'][1] + self.dict_cols['u'][1] + self.dict_cols['y'][1]
        ).to_numpy()[-1, :]

        x0 = cs.vec(x0)

        W = self.scaler_helper.scale_weather(self.weather_forecast)
        W = cs.vec(W)

        p = cs.vertcat(W, x0)

        res = self.solver(
            x0 = 1,  # constant initial guess for all decision variables
            lbx = cs.vec(self.lbx),
            ubx = cs.vec(self.ubx),
            p = p,
            lbg = cs.vec(self.lbg),
            ubg = cs.vec(self.ubg)
        )

        X = np.array(res['x'].reshape((self.N_horizon + 1, self.n_states)))
        self.X_log.append(X)
        u_idx = self.dict_cols['w'][0] * len(self.dict_cols['w'][1])
        # Take the first control action and apply it
        u = X[1, u_idx]
        # Unpack u after scaling since ScalerHelper returns a list/array
        [u] = self.scaler_helper.inverse_scale_input(u)
        self._add_input_measurement(u)
        return u

    def save_data(self):
        df = pd.DataFrame(self.data, columns = self.data_cols)
        df.to_pickle("controller_df.pkl")

        pickle.dump(self.scaler, open(Path("controller_scaler.pkl"), 'wb'))
        pickle.dump(self.model, open(Path("controller_model.pkl"), 'wb'))
        pickle.dump(self.X_log, open(Path("controller_X_log.pkl"), 'wb'))

        return


class TestController:
    def __init__(self):
        return

    def get_control_input(self): return 2 * 3600 * 5.0 * np.random.rand()
    def add_disturbance_measurement(self, w): return
    def set_weather_forecast(self, W): return
    def add_output_measurement(self, y): return
    def update_model(self): return
    def save_data(self): return
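The column bookkeeping in _setup_solver is easier to follow against a concrete lag configuration. A worked sketch using the dict_cols values from server.py (w_lags = 1 over two weather columns, u_lags = 2, y_lags = 3, no time columns), which gives n_states = 7:

# Each row of X holds the lagged regressors, grouped per signal:
#   cols 0..1: SolRad_1, OutsideTemp_1                           (w block)
#   cols 2..3: SimulatedHeat_1, SimulatedHeat_2                  (u block)
#   cols 4..6: SimulatedTemp_1, SimulatedTemp_2, SimulatedTemp_3 (y block)
u_idx = 1 * 2          # first input column:  2
y_idx = u_idx + 2 * 1  # first output column: 4
assert (u_idx, y_idx) == (2, 4)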
196
server/helpers.py
Normal file
@@ -0,0 +1,196 @@
import pandas as pd
import numpy as np
import gpflow

from sklearn.exceptions import NotFittedError


def get_random_signal(nstep, a_range = (-1, 1), b_range = (2, 10), signal_type = 'analog'):

    a = np.random.rand(nstep) * (a_range[1] - a_range[0]) + a_range[0]  # range for amplitude
    b = np.random.rand(nstep) * (b_range[1] - b_range[0]) + b_range[0]  # range for frequency
    b = np.round(b)
    b = b.astype(int)

    b[0] = 0

    for i in range(1, np.size(b)):
        b[i] = b[i - 1] + b[i]

    if signal_type == 'analog':
        # Random signal
        i = 0
        random_signal = np.zeros(nstep)
        while b[i] < np.size(random_signal):
            k = b[i]
            random_signal[k:] = a[i]
            i = i + 1
        return random_signal
    elif signal_type == 'prbs':
        # PRBS
        a = np.zeros(nstep)
        j = 0
        while j < nstep - 1:
            a[j] = a_range[1]
            a[j + 1] = a_range[0]
            j = j + 2

        i = 0
        prbs = np.zeros(nstep)
        while b[i] < np.size(prbs):
            k = b[i]
            prbs[k:] = a[i]
            i = i + 1

        return prbs
    else:
        raise ValueError(signal_type)


def get_scaled_df(df, dict_cols, scaler):

    """
    Scale the dataframe with the given scaler. Drops unused columns.
    """

    t_list = dict_cols['t'][1]
    w_list = dict_cols['w'][1]
    u_list = dict_cols['u'][1]
    y_list = dict_cols['y'][1]

    df_local = df[t_list + w_list + u_list + y_list]
    df_scaled = df_local.to_numpy()

    try:
        df_scaled = scaler.transform(df_scaled)
    except NotFittedError:
        df_scaled = scaler.fit_transform(df_scaled)

    df_scaled = pd.DataFrame(
        df_scaled,
        index = df_local.index,
        columns = df_local.columns
    )

    return df_scaled


def data_to_gpr(df, dict_cols):

    t_list = dict_cols['t'][1]
    w_list = dict_cols['w'][1]
    u_list = dict_cols['u'][1]
    y_list = dict_cols['y'][1]

    df_gpr = df[t_list + w_list + u_list + y_list].copy()

    for lags, names in dict_cols.values():
        for name in names:
            col_idx = df_gpr.columns.get_loc(name)
            for lag in range(1, lags + 1):
                df_gpr.insert(
                    col_idx + lag,
                    f"{name}_{lag}",
                    df_gpr.loc[:, name].shift(lag)
                )
    # Drop empty rows (first rows without enough lags)
    df_gpr.dropna(inplace = True)

    return df_gpr


def get_gp_model(data, dict_cols):

    nb_dims = data[0].shape[1]
    rational_dims = np.arange(0, (dict_cols['t'][0] + 1) * len(dict_cols['t'][1]), 1)
    nb_rational_dims = len(rational_dims)
    squared_dims = np.arange(nb_rational_dims, nb_dims, 1)
    nb_squared_dims = len(squared_dims)

    squared_l = np.linspace(10, 10, nb_squared_dims)
    rational_l = np.linspace(10, 10, nb_rational_dims)

    variance = 1
    # Define the kernel to be used
    k0 = gpflow.kernels.SquaredExponential(
        lengthscales = squared_l,
        active_dims = squared_dims,
        variance = variance
    )
    k1 = gpflow.kernels.Constant(
        variance = variance
    )
    k2 = gpflow.kernels.RationalQuadratic(
        lengthscales = rational_l,
        active_dims = rational_dims,
        variance = variance
    )
    #k3 = gpflow.kernels.Periodic(k2)
    k4 = gpflow.kernels.Linear(variance = [1]*nb_dims)

    # Only the linear kernel is used; the others are kept for experimentation
    k = k4

    model = gpflow.models.GPR(
        data = data,
        kernel = k,
        mean_function = None
    )

    return model


class ScalerHelper:

    """
    Column order in scaler should be as follows:
    t_cols + w_cols + u_cols + y_cols

    The methods below assume a fitted 4-column scaler (2 weather columns,
    1 input column, 1 output column): each one pads its argument out to
    4 columns, transforms, and slices its own column(s) back out.
    """

    def __init__(self, scaler):
        self.scaler = scaler

    def scale_time(self, t):
        raise NotImplementedError

    def inverse_scale_time(self, t):
        raise NotImplementedError

    def scale_weather(self, W):
        W_local = np.array(W).reshape(-1, 2)
        W_filled = np.hstack([W_local, np.zeros((W_local.shape[0], 4 - W_local.shape[1]))])
        W_scaled = self.scaler.transform(W_filled)
        W_scaled = W_scaled[:, :2]
        return W_scaled

    def inverse_scale_weather(self, W):
        W_local = np.array(W).reshape(-1, 2)
        W_filled = np.hstack([W_local, np.zeros((W_local.shape[0], 4 - W_local.shape[1]))])
        W_scaled = self.scaler.inverse_transform(W_filled)
        W_scaled = W_scaled[:, :2]
        return W_scaled

    def scale_input(self, U):
        U_local = np.array(U).reshape(-1, 1)
        U_filled = np.hstack([np.zeros((U_local.shape[0], 2)), U_local, np.zeros((U_local.shape[0], 1))])
        U_scaled = self.scaler.transform(U_filled)
        U_scaled = U_scaled[:, 2]
        return U_scaled

    def inverse_scale_input(self, U):
        U_local = np.array(U).reshape(-1, 1)
        U_filled = np.hstack([np.zeros((U_local.shape[0], 2)), U_local, np.zeros((U_local.shape[0], 1))])
        U_scaled = self.scaler.inverse_transform(U_filled)
        U_scaled = U_scaled[:, 2]
        return U_scaled

    def scale_output(self, Y):
        Y_local = np.array(Y).reshape(-1, 1)
        Y_filled = np.hstack([np.zeros((Y_local.shape[0], 3)), Y_local])
        Y_scaled = self.scaler.transform(Y_filled)
        Y_scaled = Y_scaled[:, 3]
        return Y_scaled

    def inverse_scale_output(self, Y):
        Y_local = np.array(Y).reshape(-1, 1)
        Y_filled = np.hstack([np.zeros((Y_local.shape[0], 3)), Y_local])
        Y_scaled = self.scaler.inverse_transform(Y_filled)
        Y_scaled = Y_scaled[:, 3]
        return Y_scaled
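A small sanity-check sketch of the ScalerHelper padding trick, with hypothetical column ranges fitted on the assumed [SolRad, OutsideTemp, SimulatedHeat, SimulatedTemp] layout:

from sklearn.preprocessing import MinMaxScaler
import numpy as np

scaler = MinMaxScaler(feature_range=(-1, 1))
scaler.fit(np.array([[0.0, -10.0, 0.0, 10.0],
                     [1000.0, 35.0, 63000.0, 30.0]]))  # hypothetical min/max rows

sh = ScalerHelper(scaler)
u_sc = sh.scale_input(31500.0)       # midpoint of [0, 63000] -> array([0.])
print(sh.inverse_scale_input(u_sc))  # -> array([31500.])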
130
server/server.py
Normal file
@@ -0,0 +1,130 @@
import socket
import struct
from time import sleep
from pathlib import Path
import pickle

import numpy as np

from controllers import GP_MPCcontroller, TestController

clients = ['measure', 'control', 'weather']

N_horizon = 6

HOST = '127.0.0.1'
PORT = {
    'measure': 10000,
    'control': 10001,
    'weather': 10002
}

print(f"[*] Server IP {HOST}")
print(f"[*] Measuring on port {PORT['measure']}")
print(f"[*] Controlling on port {PORT['control']}")
print(f"[*] Weather on port {PORT['weather']}")

sock = {}

# Create a socket for each of the clients and start listening
for client in clients:
    sock[client] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock[client].bind((HOST, PORT[client]))
    sock[client].listen()

temps_list = []

conn = {}

# Initialize controller

t_cols = []
w_cols = ['SolRad', 'OutsideTemp']
u_cols = ['SimulatedHeat']
y_cols = ['SimulatedTemp']

t_lags = 0
w_lags = 1
u_lags = 2
y_lags = 3

dict_cols = {
    't': (t_lags, t_cols),
    'w': (w_lags, w_cols),
    'u': (u_lags, u_cols),
    'y': (y_lags, y_cols)
}

controller = GP_MPCcontroller(dict_cols = dict_cols, N_horizon = N_horizon, recover_from_crash = False)

# Enter TCP server loop
while True:
    # Connect to the clients
    for client in clients:
        print(f"[+] Waiting for a {client} connection...", end = ' ')
        iter_conn, _ = sock[client].accept()
        conn[client] = iter_conn
        print("Done")
    # Sending first control signal (initialization)
    print("[*] Entering the control loop")

    try:
        while True:

            ###
            # Begin timestep
            ###
            print("---")
            # Send control signal
            print("[*] Sending control signal...", end = ' ')
            u = controller.get_control_input()
            print(f"Applying control signal {u}")
            data = struct.pack(">d", u)
            conn['control'].sendall(data)
            print("Done")

            # Read the inputs and update controller measures

            # Read weather prediction
            weather = []
            print("[*] Reading weather measurement/prediction...", end = ' ')
            for idx in range((N_horizon + 1) * 2):
                weather_item = conn['weather'].recv(8)
                if weather_item:
                    weather_item = struct.unpack(">d", weather_item)[0]
                    weather.append(weather_item)
                else:
                    break
            if len(weather) == ((N_horizon + 1) * 2):
                weather = np.array(weather).reshape((2, N_horizon + 1)).T
            else:
                print("\nDid not get a complete weather prediction. Simulation ended?")
                break
            controller.add_disturbance_measurement(weather[0, :])
            controller.set_weather_forecast(weather[1:, :])
            print("Done")

            # Read temperature measurement
            print("Reading temperature measurement")
            data = conn['measure'].recv(8)
            if data:
                t_iter = struct.unpack(">d", data)[0]
                temps_list.append(t_iter)
            else:
                break
            print(f"Read temperature measurement {t_iter}")
            controller.add_output_measurement(t_iter)

            # Update the model since all data has been read
            controller.update_model()

    finally:
        print("[-] Closing connection to Simulink")
        for client in clients:
            conn[client].close()
        print("[i] Dumping controller data")
        controller.save_data()
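Every value on the wire is a single 8-byte big-endian double (struct format ">d"), so the counterpart of this loop on the simulation side stays small. A partial sketch of one exchange from a hypothetical client, assuming the ports above; the weather channel, omitted here, works the same way with (N_horizon + 1) * 2 doubles per step:

import socket
import struct

# Connect in the order the server accepts: measure first, then control
meas = socket.create_connection(("127.0.0.1", 10000))
ctrl = socket.create_connection(("127.0.0.1", 10001))

u = struct.unpack(">d", ctrl.recv(8))[0]  # receive the control signal
print(f"received u = {u}")
meas.sendall(struct.pack(">d", 21.5))     # report a temperature measurement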
69
server/test.py
Normal file
@@ -0,0 +1,69 @@
from pathlib import Path
from shutil import copyfile
import pickle

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.exceptions import NotFittedError

import gpflow
import tensorflow as tf

import matplotlib.pyplot as plt
from gpflow.utilities import print_summary

import casadi as cs

import callbacks
from helpers import *
from controllers import GP_MPCcontroller

from time import sleep

t_cols = []
w_cols = ['SolRad', 'OutsideTemp']
u_cols = ['SimulatedHeat']
y_cols = ['SimulatedTemp']

t_lags = 0
w_lags = 1
u_lags = 2
y_lags = 3

dict_cols = {
    't': (t_lags, t_cols),
    'w': (w_lags, w_cols),
    'u': (u_lags, u_cols),
    'y': (y_lags, y_cols)
}

N_horizon = 10

controller = GP_MPCcontroller(dict_cols = dict_cols, N_horizon = N_horizon)

idx = 0
while True:
    u = controller.get_control_input()

    # Measure disturbance
    w = np.random.rand(2)
    controller.add_disturbance_measurement(w)
    w_forecast = np.random.rand(N_horizon, 2)
    controller.set_weather_forecast(w_forecast)

    # Measure output
    y = np.random.rand(1)
    controller.add_output_measurement(y)

    # Update model
    controller.update_model()

    idx += 1

    if idx > 502:
        import pdb; pdb.set_trace()
        pass