Theory

See the detailed mathematical derivation of A and b here: https://mlai.cs.uni-bonn.de/lecturenotes/ml2r-cn-linearprogramming2.pdf
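In brief, LAD regression minimizes the sum of absolute residuals; introducing one slack variable $t_i$ per sample turns it into the linear program that the code below assembles as matA, vecB, and vecC:

$$
\min_{w \in \mathbb{R}^m,\; t \in \mathbb{R}^n} \sum_{i=1}^{n} t_i
\quad \text{s.t.} \quad
\phi(x_i)^\top w - t_i \le y_i, \qquad
-\phi(x_i)^\top w - t_i \le -y_i, \qquad i = 1, \dots, n,
$$

or, stacked over the combined variable $z = (w, t)$:

$$
A = \begin{pmatrix} \Phi & -I_n \\ -\Phi & -I_n \end{pmatrix}, \qquad
b = \begin{pmatrix} y \\ -y \end{pmatrix}, \qquad
c = \begin{pmatrix} 0_m \\ 1_n \end{pmatrix},
$$

where $\Phi$ is the $n \times m$ design matrix whose rows are $\phi(x_i)^\top$.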

Code

from scipy.io import loadmat
from scipy.optimize import linprog
import numpy as np
x_train = loadmat('Xtrain.mat')
y_train = loadmat('Ytrain.mat')
# sort x and y jointly by x, ascending
x_train_inds = x_train['Xtrain'].T.argsort()  # use [::-1] for descending
  # NOTE the .T: argsort() works along the last axis, so on the (n, 1)
  # column vector it would return [0 0 0 ...]
x_train_arr = x_train['Xtrain'][x_train_inds].squeeze()
  # squeeze drops the redundant length-1 axes left over from the fancy
  # indexing, giving flat 1-D arrays; without it the data cannot be plotted
y_train_arr = y_train['Ytrain'][x_train_inds].squeeze()
# generate the degree-d polynomial design matrix
d = 2  # phi(x) should be x**2 + x + 1; note m == d + 1, not d
X_train_poly = np.vstack([np.ones(len(x_train_arr))])  # constant term (also handles d == 0)
for i in range(1, d + 1):  # prepend increasing powers of x
  X_train_poly = np.vstack([x_train_arr**i, X_train_poly])
X_train_poly = X_train_poly.T  # phi(x)
# phi(x) = [x1**d, ..., x1**2, x1, 1]
#          [x2**d, ..., x2**2, x2, 1]
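# (equivalent shortcut: np.vander(x_train_arr, d + 1) builds the same
#  decreasing-power design matrix in one call)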
# fit the weights w by least absolute deviations (LAD), posed as an LP
n, m = X_train_poly.shape
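# LP variables are z = [w, t]: m polynomial weights followed by n slacks
# t_i that bound the absolute residuals |y_i - phi(x_i) w| from above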
matA = np.zeros((2*n, m+n))
matA[:n,:m] = +X_train_poly
matA[n:,:m] = -X_train_poly
matA[:n,m:] = -np.eye(n)
matA[n:,m:] = -np.eye(n)
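# rows of A encode  +phi(x) w - t <= +y  and  -phi(x) w - t <= -y,
# i.e. |y - phi(x) w| <= t element-wise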
vecB = np.zeros(2*n)
vecB[:n] = +y_train_arr
vecB[n:] = -y_train_arr
vecC = np.hstack((np.zeros(m), np.ones(n)))
# NOTE: linprog defaults every variable to bounds (0, None), which would
# wrongly force the weights w to be non-negative; make w free instead
bounds = [(None, None)] * m + [(0, None)] * n
LADres = linprog(vecC, A_ub=matA, b_ub=vecB, bounds=bounds)
matW_LAD = LADres.x[:m]        # fitted weights w
loss_train_LAD = LADres.x[m:]  # per-sample absolute residuals t
# estimate y
y_train_estimate_LAD = X_train_poly @ matW_LAD  # fitted values on the training set
print(str(d) + 'th-degree LAD loss on training set:', LADres.fun)  # == loss_train_LAD.sum()
print(str(d) + 'th-degree LAD weights:', matW_LAD)
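
As a quick sanity check (a sketch; it assumes the variables from the listing above are still in scope), the LP objective should equal the sum of absolute residuals of the fitted polynomial, and an ordinary least-squares fit via np.polyfit should give nearby but generally different weights:

residuals = np.abs(y_train_arr - X_train_poly @ matW_LAD)
assert np.isclose(LADres.fun, residuals.sum())  # LP objective == LAD loss
w_ls = np.polyfit(x_train_arr, y_train_arr, d)  # L2 fit; same decreasing-power order as matW_LAD
print('OLS weights for comparison:', w_ls)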