I would like to design an MPC for my system. It is a real system, and I work with Simulink. I have worked through the example on the homepage, but the problem is that the example is not set up so that setpoints can be predefined; it uses a fixed setpoint of zero. In my case I have to specify the setpoints myself. That's why I tried to do it differently for the time being: the reference only enters the problem through the parameter vector P (see the short sketch below), and my full System object follows after it.
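A minimal sketch of that idea, using the same names as in the class below (the numeric values are only an example, taken from setupImpl):

x_meas = [2; 0; 0; 0; 0; 3]; % current measured state
x_ref = [10; 1; 6; 1; 1; 1]; % my predefined setpoint (xs in the code)
args.p = [x_meas; x_ref]; % parameter vector P = [initial state; reference]
sol = solver('x0', args.x0, 'lbx', args.lbx, 'ubx', args.ubx, ...
'lbg', args.lbg, 'ubg', args.ubg, 'p', args.p);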
classdef momul < matlab.System & matlab.system.mixin.Propagates
% momul Nonlinear MPC controller built with CasADi/IPOPT
%
% This template includes the minimum set of functions required
% to define a System object with discrete state.
properties
% Public, tunable properties.
end
properties (DiscreteState)
end
properties (Access = private)
% Pre-computed constants and solver data.
casadi_solver % CasADi NLP solver (IPOPT)
x0 % initial guess for the decision variables
lbx % lower bounds on the decision variables
ubx % upper bounds on the decision variables
lbg % lower bounds on the constraints
ubg % upper bounds on the constraints
p0 % parameter vector [initial state; reference]
N % number of control intervals (needed for indexing in stepImpl)
end
methods (Access = protected)
function num = getNumInputsImpl(~)
num = 2;
end
function num = getNumOutputsImpl(~)
num = 1;
end
function dt1 = getOutputDataTypeImpl(~)
dt1 = 'double';
end
function dt1 = getInputDataTypeImpl(~)
dt1 = 'double';
end
function sz1 = getOutputSizeImpl(~)
sz1 = [2,1];
end
function sz1 = getInputSizeImpl(~)
sz1 = [1,1];
end
function cp1 = isInputComplexImpl(~)
cp1 = false;
end
function cp1 = isOutputComplexImpl(~)
cp1 = false;
end
function fz1 = isInputFixedSizeImpl(~)
fz1 = true;
end
function fz1 = isOutputFixedSizeImpl(~)
fz1 = true;
end
%%
function setupImpl(obj,~,~)
% Implement tasks that need to be performed only once,
% such as pre-computed constants.
import casadi.*
T = 20; % time horizon (each of the N control intervals has length T/N)
N = 100; % number of control intervals
v_f_max = 24;
v_f_min = -24;
v_b_max = 24;
v_b_min = -24;
% Declare model variables
x1 = SX.sym('x1');
x2 = SX.sym('x2');
x3 = SX.sym('x3');
x4 = SX.sym('x4');
x5 = SX.sym('x5');
x6 = SX.sym('x6');
states = [x1;x2;x3;x4;x5;x6];
n_states = length(states);
v_f = SX.sym('v_f');
v_b = SX.sym('v_b');
controls = [v_f;v_b];
n_controls = length(controls);
% Model: xdot1..xdot3 = x4..x6, xdot4 = 0.0858*(v_f+v_b),
% xdot5 = 0.581*(v_f-v_b), xdot6 = -1.230*x2
rhs = [x4;x5;x6;0.0858*(v_f+v_b);0.581*(v_f-v_b);-1.230*x2];
f = Function('f',{states,controls},{rhs}); % nonlinear mapping f(x,u) -> xdot
U = SX.sym('U',n_controls,N); % decision variables (controls)
P = SX.sym('P',n_states + n_states);
% parameters: the current (initial) state and the reference/setpoint state
X = SX.sym('X',n_states,(N+1));
% decision variables representing the state trajectory over the horizon
cost = 0; % objective function ("obj" must not be reused here, it is the System object handle)
g = []; % constraints vector
Q = zeros(6,6); Q(1,1) = 0.4; Q(2,2) = 1; Q(3,3) = 9; Q(4,4) = 0.1; Q(5,5) = 1; Q(6,6) = 10; % weighting matrix (states)
R = zeros(2,2); R(1,1) = 0.5; R(2,2) = 0.5; % weighting matrix (controls)
st = X(:,1); % initial state
g = [g;st-P(1:6)]; % initial condition constraints
for k = 1:N
st = X(:,k); con = U(:,k);
cost = cost + (st-P(7:12))'*Q*(st-P(7:12)) + con'*R*con; % stage cost: tracking error plus control effort
st_next = X(:,k+1);
f_value = f(st,con);
st_next_euler = st + (T/N)*f_value; % forward Euler over one interval of length T/N
g = [g;st_next-st_next_euler]; % compute constraints
end
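% Each pass through the loop adds the stage cost for interval k and an
% equality constraint tying X(:,k+1) to the Euler prediction from X(:,k),
% i.e. a multiple-shooting transcription of the optimal control problem.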
% make the decision variable one column vector
OPT_variables = [reshape(X,6*(N+1),1);reshape(U,2*N,1)];
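% Layout of OPT_variables: the first 6*(N+1) entries are the state
% trajectory X stacked column by column, followed by the 2*N stacked
% controls U; the bound indexing below and the extraction of the first
% control in stepImpl both rely on this ordering.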
args = struct;
args.lbg(1:6*(N+1)) = 0; % equality constraints (initial condition and dynamics)
args.ubg(1:6*(N+1)) = 0; % equality constraints (initial condition and dynamics)
args.lbx(1:6:6*(N+1),1) = -inf; % state x1 lower bound
args.ubx(1:6:6*(N+1),1) = inf; % state x1 upper bound
args.lbx(2:6:6*(N+1),1) = -inf; % state x2 lower bound
args.ubx(2:6:6*(N+1),1) = inf; % state x2 upper bound
args.lbx(3:6:6*(N+1),1) = -inf; % state x3 lower bound
args.ubx(3:6:6*(N+1),1) = 6; % state x3 upper bound
args.lbx(4:6:6*(N+1),1) = -inf; % state x4 lower bound
args.ubx(4:6:6*(N+1),1) = 10; % state x4 upper bound
args.lbx(5:6:6*(N+1),1) = -inf; % state x5 lower bound
args.ubx(5:6:6*(N+1),1) = 10; % state x5 upper bound
args.lbx(6:6:6*(N+1),1) = -inf; % state x6 lower bound
args.ubx(6:6:6*(N+1),1) = 10; % state x6 upper bound
args.lbx(6*(N+1)+1:2:6*(N+1)+2*N,1) = v_f_min; % control v_f lower bound
args.ubx(6*(N+1)+1:2:6*(N+1)+2*N,1) = v_f_max; % control v_f upper bound
args.lbx(6*(N+1)+2:2:6*(N+1)+2*N,1) = v_b_min; % control v_b lower bound
args.ubx(6*(N+1)+2:2:6*(N+1)+2*N,1) = v_b_max; % control v_b upper bound
prob = struct('f', cost, 'x', OPT_variables, 'g', g, 'p', P);
opts = struct;
opts.ipopt.max_iter = 2000;
opts.ipopt.print_level = 0; % 0 = silent (use 3 for iteration output)
opts.print_time = 0;
opts.ipopt.acceptable_tol = 1e-8;
opts.ipopt.acceptable_obj_change_tol = 1e-6;
solver = nlpsol('solver', 'ipopt', prob,opts);
x0 = [2.0; 0.0; 0.0; 0.0; 0.0; 3.0]; % initial condition
xs = [10; 1; 6; 1.0; 1.0; 1.0]; % reference posture (this is where the predefined setpoint goes)
xx(:,1) = x0; % history of states (kept from the example, not used here)
u0 = zeros(N,2); % two control inputs
X0 = repmat(x0,1,N+1)'; % initialization of the states decision variables
args.p = [x0;xs]; % set the values of the parameters vector
args.x0 = [reshape(X0',6*(N+1),1);reshape(u0',2*N,1)]; % initial value of the optimization variables
obj.casadi_solver = solver;
obj.x0 = args.x0;
obj.lbx = args.lbx;
obj.ubx = args.ubx;
obj.lbg = args.lbg;
obj.ubg = args.ubg;
obj.p0 = args.p;
obj.N = N; % store the horizon so stepImpl can extract the first control
end
%%
function controls = stepImpl(obj,states,t)
disp(t)
tic
solver = obj.casadi_solver;
N = obj.N;
args.x0 = obj.x0; % initial guess for the decision variables
args.lbx = obj.lbx;
args.ubx = obj.ubx;
args.lbg = obj.lbg;
args.ubg = obj.ubg;
args.p = obj.p0; % parameter vector [initial state; reference]
args.p(1:6) = states; % overwrite the initial-state part with the measurement
args.lbx(1:6) = states; % also pin X(:,1) in the decision vector to the measurement
args.ubx(1:6) = states;
sol = solver('x0', args.x0, 'lbx', args.lbx, 'ubx', args.ubx,...
'lbg', args.lbg, 'ubg', args.ubg, 'p', args.p);
w_opt = full(sol.x);
u_opt = reshape(w_opt(6*(N+1)+1:end),2,N); % recover the predicted control sequence
controls = u_opt(:,1); % apply only the first control move ([2,1], as declared)
toc
end
%%
function resetImpl(obj)
% Initialize discrete-state properties.
end
end
end
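To check the block outside Simulink (assuming CasADi is on the MATLAB path), I call the object like this; the variable names are only for the test:

ctrl = momul(); % construct the System object
x_meas = [2; 0; 0; 0; 0; 3]; % measured state fed from the plant
u = step(ctrl, x_meas, 0); % one controller step: (state, time)
disp(u) % first [2x1] control move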