Updated server code with SIA reference and 96 steps model update
parent dd883f2c11
commit 93301cd195
3 changed files with 163 additions and 54 deletions
@@ -19,26 +19,38 @@ from helpers import *
 class PIDcontroller:
-    def __init__(self, P, I = 0, D = 0):
+    def __init__(self, P, I = 0, D = 0, arw_range = (-np.inf, np.inf)):
         self.P = P
         self.I = I
         self.D = D
 
-        self.ref = 22
+        self.arw_range = arw_range
+
+        self.ref = 25  # reference temperature
+        self.y = 23    # T0
 
         self.err_acc = 0
         self.err_old = 0
 
-    def get_control(self, y):
-        """
-        y: Measured output
-        """
+    def add_output_measurement(self, y):
+        self.y = y
+
-        err = self.ref - y
+    def get_control_input(self):
+
+        err = self.ref - self.y
         self.err_acc += err
 
         # P
         sig_P = self.P * err
 
         # I
         sig_I = self.I * self.err_acc
+        if sig_I < self.arw_range[0]:
+            sig_I = self.arw_range[0]
+        elif sig_I > self.arw_range[1]:
+            sig_I = self.arw_range[1]
 
         # D
         sig_D = self.D * (err - self.err_old)
 
         self.err_old = err
@@ -46,10 +58,39 @@ class PIDcontroller:
         #print(f"P: {sig_P}, I: {sig_I}, D: {sig_D}")
         return sig_P + sig_I + sig_D
+
+class sysIDcontroller(object):
+    def __init__(self, u_range = (-1, 1)):
+        self.u_range = u_range
+        id_P = 10000
+        id_I = 50000/(3 * 3600)
+        id_D = 0
+        self.PIDcontroller = PIDcontroller(P = id_P, I = id_I, D = id_D,
+                                           arw_range = self.u_range)
+
+    def get_control_input(self):
+
+        # Input of PID controller
+        sig_pid = self.PIDcontroller.get_control_input()
+        # Random disturbance
+        sig_w = (self.u_range[1] - self.u_range[0]) * np.random.rand() + self.u_range[0]
+        # Combine and saturate
+        print(f"sig_pid: {sig_pid}; sig_w: {sig_w}")
+        sig = sig_pid + sig_w
+        if sig < self.u_range[0]:
+            sig = self.u_range[0]
+        elif sig > self.u_range[1]:
+            sig = self.u_range[1]
+
+        return sig
+
+    def add_output_measurement(self, y):
+        self.PIDcontroller.add_output_measurement(y)
+
 class Base_MPCcontroller(object):
     def __init__(self, dict_cols, model = None, scaler = None, N_horizon = 10, recover_from_crash = False):
 
         self.T_sample = 15 * 60  # Used for update frequency and reference
                                  # calculation
         self.dict_cols = dict_cols
         self.max_lag = max([lag for lag,_ in self.dict_cols.values()])
         self.N_horizon = N_horizon
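The new sysIDcontroller combines a PID tracking term (using the anti-windup clamp added above) with a uniformly distributed random disturbance and saturates the sum to u_range. A minimal usage sketch, assuming both classes are in scope (e.g. imported from this module); the numeric values are illustrative only:

    u_range = (-63000.0, 63000.0)       # COP * Pel * [-1, 1], as set up in Base_MPCcontroller
    ident = sysIDcontroller(u_range = u_range)

    ident.add_output_measurement(21.5)  # latest room-temperature measurement (illustrative)
    u = ident.get_control_input()       # PID term + uniform random excitation, clipped to u_range
    assert u_range[0] <= u <= u_range[1]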
@@ -61,13 +102,13 @@ class Base_MPCcontroller(object):
         # Complete measurement history
         # Columns are: [SolRad, OutsideTemp] (Disturbance), Heat(Input), Temperature (Output)
         self.data_cols = []
-        for lags, cols in self.dict_cols.values():
+        for _, cols in self.dict_cols.values():
             self.data_cols += cols
         self.data = np.empty((0, len(self.data_cols)))
 
         # Dataset used for training
-        self.dataset_train_minsize = 500
-        self.dataset_train_maxsize = 500
+        self.dataset_train_minsize = 5 * (24*3600)/self.T_sample  # 5 days worth
+        self.dataset_train_maxsize = np.iinfo(np.int32).max  # maximum 32bit int
         self.dataset_train = np.empty((0, self.n_states))
 
         # The current weather forcast
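With self.T_sample = 15 * 60 = 900 s as set above, the new minimum training-set size evaluates to 5 * 24 * 3600 / 900 = 480 samples (five days at a 15-minute sampling period), while the maximum size is effectively unbounded.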
@@ -76,6 +117,13 @@ class Base_MPCcontroller(object):
         # Current measurements
         self.w, self.u, self.y = None, None, None
+
+        # Solver parameters
+        self.lbg = None
+        self.ubg = None
+        self.lbx = None
+        self.ubx = None
+        self.solver = None
 
         # Recover from a previous crash with precomputed values and continue
         self.recover_from_crash = recover_from_crash
 
@@ -91,7 +139,9 @@ class Base_MPCcontroller(object):
         # Pre-existing model passed. Load all the necessary objects
         if model is not None:
             # Model is already trained. Using as is.
-            if scaler is None: raise ValueError("Not allowed to pass a model without a scaler")
+            self.id_mode = False
+            if scaler is None:
+                raise ValueError("Not allowed to pass a model without a scaler")
             self.model = model
             self.cs_model = callbacks.GPR("gpr", self.model, self.n_states)
             self.scaler = scaler
@@ -100,13 +150,15 @@ class Base_MPCcontroller(object):
         else:
             # No model has been passed. Setting up model initialization
             self.model = None
+            self.id_mode = True
 
             # Define an identification signal to be used first
             self.Pel = 2 * 6300
             self.COP = 5.0
 
-            self.ident_signal = get_identification_signal(size = self.dataset_train_minsize)
-            self.ident_signal = iter(self.COP * self.Pel * self.ident_signal)
+            # Set up identification controller
+            u_range = self.COP * self.Pel * np.array([-1, 1])
+            self.id_controller = sysIDcontroller(u_range)
 
         return
 
@@ -119,6 +171,10 @@ class Base_MPCcontroller(object):
 
     def add_output_measurement(self, y):
         self.y = np.array(y).reshape(1, -1)
+        # Also add measurement to ID controller if enabled
+        if self.id_mode:
+            print()
+            self.id_controller.add_output_measurement(y)
 
     def _add_input_measurement(self, u):
         self.u = np.array(u).reshape(1, -1)
@@ -144,14 +200,10 @@ class Base_MPCcontroller(object):
         ###
         self.cs_model = callbacks.GPR("gpr", self.model, self.n_states)
 
-        T_set = 21
-        T_set_sc = self.scaler_helper.scale_output(T_set)
-        self.T_set_sc = T_set_sc
-
-
         X = cs.MX.sym("X", self.N_horizon + 1, self.n_states)
         x0 = cs.MX.sym("x0", 1, self.n_states)
         W = cs.MX.sym("W", self.N_horizon, 2)
+        T_set_sc = cs.MX.sym("T_set_sc")
 
         g = []
         lbg = []
@@ -185,7 +237,8 @@ class Base_MPCcontroller(object):
                 lbg.append(0)
                 ubg.append(0)
                 for w_lag_idx in range(1, nb_cols):
-                    g.append(X[idx, base_col + w_base_col + w_lag_idx] - X[idx - 1, base_col + w_base_col + w_lag_idx - 1])
+                    g.append(X[idx, base_col + w_base_col + w_lag_idx] - \
+                             X[idx - 1, base_col + w_base_col + w_lag_idx - 1])
                     lbg.append(0)
                     ubg.append(0)
 
@@ -211,7 +264,7 @@ class Base_MPCcontroller(object):
                 lbg.append(0)
                 ubg.append(0)
 
-        p = cs.vertcat(cs.vec(W), cs.vec(x0))
+        p = cs.vertcat(cs.vec(W), cs.vec(x0), cs.vec(T_set_sc))
 
         prob = {'f': J, 'x': cs.vec(X), 'g': cs.vertcat(*g), 'p': p}
         options = {"ipopt": {"hessian_approximation": "limited-memory", "max_iter": 100,
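For readers unfamiliar with CasADi's parametric NLPs: the change above moves the scaled set-point T_set_sc out of the problem definition and into the parameter vector p, so the same compiled IPOPT solver can be reused with a different reference at every MPC step. A minimal, self-contained sketch of that pattern (generic variable and parameter names, not the controller's actual problem):

    import casadi as cs

    # Decision variable, parameter, objective and one constraint
    x = cs.MX.sym("x", 2)
    p = cs.MX.sym("p")                      # e.g. a set-point passed at solve time
    f = (x[0] - p)**2 + x[1]**2             # track the parameter with x[0]
    g = x[0] + x[1]

    prob = {"f": f, "x": x, "g": g, "p": p}
    solver = cs.nlpsol("solver", "ipopt", prob)

    # The solver object is built once; only the parameter changes between calls
    res1 = solver(x0=[0, 0], lbg=1, ubg=1, p=20.0)
    res2 = solver(x0=res1["x"], lbg=1, ubg=1, p=25.0)
    print(res1["x"], res2["x"])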
@@ -230,6 +283,12 @@ class Base_MPCcontroller(object):
 
         print("Initialized casadi solver")
 
+    def _train_model(self):
+        """
+        Placeholder function to silence linter warning
+        """
+        raise NotImplementedError
+
     ###
     # Compute control signal
     ###
@@ -249,14 +308,16 @@ class Base_MPCcontroller(object):
             self.recover_from_crash = False
 
         # Training pre-loop
        if self.model is None:
-            try:
-                # No model yet. Sending next step of identification signal
-                u = next(self.ident_signal)
-
+            if self.id_mode:
+                if self.data.shape[0] < self.dataset_train_minsize:
+                    u = self.id_controller.get_control_input()
                     self._add_input_measurement(u)
                     return u
-            except StopIteration:
-                # No more identification signal. Training a model and proceeding
+                else:
+                    # Collected enough data. Turn off identification mode
+                    self.id_mode = False
+                    # Training a model
                     self._train_model()
                     self._setup_solver()
                     # Continue now since model exists
@@ -277,10 +338,18 @@ class Base_MPCcontroller(object):
 
         x0 = cs.vec(x0)
 
+        # Compute mean outside temperature for last 48 hours
+        nb_tref_points = 48 * 3600 // self.T_sample  # // for integer division
+        T_out_avg = np.mean(self.data[-nb_tref_points:, 1])
+
+        T_set = get_tref_mean(T_out_avg)
+        T_set_sc = self.scaler_helper.scale_output(T_set)
+        print(f"T_set: {T_set} T_set_sc: {T_set_sc}")
+
         W = self.scaler_helper.scale_weather(self.weather_forecast)
         W = cs.vec(W)
 
-        p = cs.vertcat(W, x0)
+        p = cs.vertcat(W, x0, T_set_sc)
 
         res = self.solver(
             x0 = 1,
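Since self.T_sample is 900 s, nb_tref_points = 48 * 3600 // 900 = 192, so the set-point fed to the solver is recomputed at every step from the mean of the last 192 outside-temperature samples (48 hours of data) via get_tref_mean.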
@@ -326,11 +395,11 @@ class GP_MPCcontroller(Base_MPCcontroller):
         """ Identify model from gathered data """
 
-        nb_train_pts = self.dataset_train_minsize - 1
+        nb_train_pts = 500
         ###
         # Dataset
         ###
-        df = pd.DataFrame(self.data[:nb_train_pts], columns = self.data_cols)
+        # Train the model on the last nb_train_pts
+        df = pd.DataFrame(self.data[-nb_train_pts:], columns = self.data_cols)
         self.scaler = MinMaxScaler(feature_range = (-1, 1))
         self.scaler_helper = ScalerHelper(self.scaler)
         df_sc = get_scaled_df(df, self.dict_cols, self.scaler)
@@ -474,7 +543,7 @@ class SVGP_MPCcontroller(Base_MPCcontroller):
         super().__init__(dict_cols, model, scaler, N_horizon, recover_from_crash)
 
         # Adaptive models update parameters
-        self.model_update_frequency = 100
+        self.model_update_frequency = (24 * 3600)/self.T_sample  # once per day
         self.pts_since_update = 0
 
         # Model log
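With self.T_sample = 900 s this evaluates to 24 * 3600 / 900 = 96 steps between retrainings, i.e. one model update per simulated day, which is the "96 steps model update" referred to in the commit message.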
@@ -486,12 +555,11 @@ class SVGP_MPCcontroller(Base_MPCcontroller):
     def _train_model(self):
         """ Identify model from gathered data """
 
-        #nb_train_pts = self.dataset_train_minsize - 1
-        nb_train_pts = self.data.shape[0]
+        nb_train_pts = self.dataset_train_maxsize
         ###
         # Dataset
         ###
-        df = pd.DataFrame(self.data[:nb_train_pts], columns = self.data_cols)
+        df = pd.DataFrame(self.data[-nb_train_pts:], columns = self.data_cols)
         self.scaler = MinMaxScaler(feature_range = (-1, 1))
         self.scaler_helper = ScalerHelper(self.scaler)
         df_sc = get_scaled_df(df, self.dict_cols, self.scaler)
@@ -645,11 +713,11 @@ class SVGP_MPCcontroller(Base_MPCcontroller):
 
     def update_model(self):
         self._add_measurement_set()
-        if self.model is not None and not self.recover_from_crash:
-            print(f"Points since last update: {self.pts_since_update}")
-            if self.pts_since_update < self.model_update_frequency:
+
+        if not self.id_mode and not self.recover_from_crash:
             self.pts_since_update += 1
-        else:
+
+        if self.pts_since_update >= self.model_update_frequency:
             print(f"Updating model after {self.pts_since_update} measurements")
             # Append old model to log
             self.model_log.append(self.model)
@@ -32,6 +32,49 @@ def get_combined_evaluator(model):
         return preds, grads
     return combined_evaluator
 
+##################################
+# SIA norm Temperature Reference
+##################################
+
+def _get_tref_min(x):
+    X = [19, 23.5]
+    Y = [20.5, 22]
+    a, b = np.polyfit(X, Y, deg = 1)
+    y = a*x + b
+    if y > Y[1]:
+        y = Y[1]
+    elif y < Y[0]:
+        y = Y[0]
+    return y
+
+def _get_tref_max(x):
+    X = [12, 17.5]
+    Y = [24.5, 26.5]
+    a, b = np.polyfit(X, Y, deg = 1)
+    y = a*x + b
+    if y > Y[1]:
+        y = Y[1]
+    elif y < Y[0]:
+        y = Y[0]
+    return y
+
+def get_tref_range(x):
+    """
+    Returns the allowed range of reference Temperatures for a given 48h mean
+    outside temperature
+    """
+    tref_range = (_get_tref_min(x), _get_tref_max(x))
+    return tref_range
+
+def get_tref_mean(x):
+    """
+    Returns the mean reference temperature for a given 48h mean outside
+    temperature
+    """
+    tref_range = get_tref_range(x)
+    return 1/2*(tref_range[0] + tref_range[1])
+
+
 def get_random_signal(nstep, a_range = (-1, 1), b_range = (2, 10), signal_type = 'analog'):
 
     a = np.random.rand(nstep) * (a_range[1]-a_range[0]) + a_range[0]  # range for amplitude
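A short worked example of the new reference helpers (the input temperatures are illustrative; the outputs follow from the interpolation points and clamps defined above), assuming the functions are imported from this module:

    lo, hi = get_tref_range(10.0)   # cold 48 h mean: both linear fits hit their clamps
    print(lo, hi)                   # -> 20.5 24.5
    print(get_tref_mean(10.0))      # -> 22.5
    lo, hi = get_tref_range(21.0)   # milder: lower bound interpolated (~21.17), upper clamped at 26.5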
@@ -76,7 +119,7 @@ def get_random_signal(nstep, a_range = (-1, 1), b_range = (2, 10), signal_type = 'analog'):
 
 def get_identification_signal(size):
     # Base random signal
-    rand_signal = get_random_signal(size, signal_type = 'analog')
+    rand_signal = get_random_signal(size, signal_type = 'prbs')
     # Integrator (cumulative sum)
     cum_signal = 3/size * np.ones((1, size))
     cum_signal = np.cumsum(cum_signal)
@@ -1,8 +1,5 @@
 import socket
 import struct
-from time import sleep
-from pathlib import Path
-import pickle
 
 import numpy as np
 
@@ -56,7 +53,11 @@ dict_cols = {
     'y': (y_lags, y_cols)
 }
 
-controller = SVGP_MPCcontroller(dict_cols = dict_cols, N_horizon = N_horizon, recover_from_crash = False)
+controller = SVGP_MPCcontroller(
+    dict_cols = dict_cols,
+    N_horizon = N_horizon,
+    recover_from_crash = False
+)
 
 # Enter TCP server loop
 while True:
@@ -99,7 +100,6 @@ while True:
                 break
         if len(weather) == ((N_horizon + 1) * 2):
             weather = np.array(weather).reshape((2, N_horizon + 1)).T
-            pass
         else:
             print("\nDid not get a complete weather prediction. Simulation ended?")
             break
@@ -122,10 +122,8 @@ while True:
             controller.update_model()
 
     finally:
-        print(f"[-] Closing connection to simulink")
+        print("[-] Closing connection to simulink")
         for client in clients:
             conn[client].close()
-        print(f"[i] Dumping controller data")
+        print("[i] Dumping controller data")
         controller.save_data()
-
-