Skip to content
193 changes: 103 additions & 90 deletions ezyrb/reducedordermodel.py
Original file line number Diff line number Diff line change
Expand Up @@ -213,7 +213,7 @@ def n_reduction(self):
@property
def n_approximation(self):
    """
    The number of approximation objects.

    :return: ``len(self.approximation)`` when a container of several
        approximations is stored, 1 when a single ``Approximation``
        instance is stored.
    :rtype: int
    """
    value_, class_ = self.approximation, Approximation
    # A plain Approximation instance counts as one; any other container
    # (e.g. a dict of approximations) reports its length.
    return len(value_) if not isinstance(value_, class_) else 1

def fit_reduction(self):
"""
Expand Down Expand Up @@ -846,6 +846,10 @@ def approximation(self, value):
else:
self._approximation = value

@approximation.deleter
def approximation(self):
    """Remove the stored approximation object from the instance."""
    del self._approximation

@property
def n_database(self):
value_, class_ = self.database, Database
Expand All @@ -859,7 +863,7 @@ def n_reduction(self):
@property
def n_approximation(self):
    """
    The number of approximation objects.

    :return: ``len(self.approximation)`` when a container of several
        approximations is stored, 1 when a single ``Approximation``
        instance is stored.
    :rtype: int
    """
    value_, class_ = self.approximation, Approximation
    # A plain Approximation instance counts as one; any other container
    # (e.g. a dict of approximations) reports its length.
    return len(value_) if not isinstance(value_, class_) else 1

def fit(self):
r"""
Expand Down Expand Up @@ -983,23 +987,34 @@ def test_error(self, test, norm=np.linalg.norm, relative=True):
test snapshots.
:rtype: numpy.ndarray
"""
predicted_test = self.predict(test.parameters_matrix)
if relative:
return np.mean(
norm(
predicted_test.snapshots_matrix - test.snapshots_matrix,
axis=1,
)
/ norm(test.snapshots_matrix, axis=1)
)

return np.mean(
norm(
predicted_test.snapshots_matrix - test.snapshots_matrix,
axis=1,
)
)

errors = {}
is_dict = isinstance(test,dict)
sample_key = list(test.keys())[0] if is_dict else None
params = test[sample_key].parameters_matrix if is_dict else test.parameters_matrix
predicted_test = self.predict(params)

for key in predicted_test:
if is_dict:
if key in test:
db_key = key
elif isinstance(key, tuple) and key[0] in test:
db_key = key[0]
else:
db_key = sample_key
test_snaps = test[db_key].snapshots_matrix
else:
test_snaps = test.snapshots_matrix

diff = predicted_test[key] - test_snaps

if relative:
errors[key] = np.mean(norm(diff, axis=1)/norm(test_snaps, axis=1))
else:
errors[key] = np.mean(norm(diff, axis=1))


return errors

def kfold_cv_error(
    self, n_splits, *args, norm=np.linalg.norm, relative=True, **kwargs
):
    r"""
    Estimate the accuracy of the ROMs with a k-fold cross-validation
    strategy: the database is split into `n_splits` folds, and for each
    fold a new multi-ROM is trained on the remaining folds and tested on
    the held-out one.

    :param int n_splits: number of folds.
    :param args: additional positional arguments forwarded to ``fit``.
    :param function norm: the function used to assign at the vector of
        errors a float number. Default is ``numpy.linalg.norm``.
    :param bool relative: if True, the relative error is computed.
        Default is True.
    :param kwargs: additional keyword arguments forwarded to ``fit``.
    :return: a dict mapping each ROM key to the vector of errors
        corresponding to each fold.
    :rtype: dict
    """
    errors = {k: [] for k in self.roms.keys()}
    kf = KFold(n_splits=n_splits)
    # All database entries share the same length; use the first one.
    db_len = len(list(self.database.values())[0])

    for train_index, test_index in kf.split(range(db_len)):
        new_db = {k: v[train_index] for k, v in self.database.items()}
        test_db = {k: v[test_index] for k, v in self.database.items()}
        # TODO: Fix plugins handling - should pass:
        # plugins=[copy.deepcopy(p) for p in self.plugins]
        mrom = type(self)(
            new_db,
            copy.deepcopy(self.reduction),
            copy.deepcopy(self.approximation),
        ).fit(*args, **kwargs)

        fold_errors = mrom.test_error(test_db, norm, relative)

        for k in errors:
            errors[k].append(fold_errors[k])

    return {k: np.array(v) for k, v in errors.items()}

def loo_error(self, *args, norm=np.linalg.norm, **kwargs):
r"""
Expand All @@ -1058,26 +1077,28 @@ def loo_error(self, *args, norm=np.linalg.norm, **kwargs):
parametric points.
:rtype: numpy.ndarray
"""
error = np.zeros(len(self.database))
db_range = list(range(len(self.database)))
db_len = len(list(self.database.values())[0])
errors = {k: np.zeros(db_len) for k in self.roms.keys()}

for j in db_range:
indeces = np.array([True] * len(self.database))
for j in range(db_len):
indeces = np.array([True] * db_len)
indeces[j] = False

new_db = self.database[indeces]
test_db = self.database[~indeces]
# TODO: Fix plugins handling - should pass:
# plugins=[copy.deepcopy(p) for p in self.plugins]
rom = type(self)(
new_db = {k: v[indeces] for k, v in self.database.items()}
test_db = {k: v[~indeces] for k, v in self.database.items()}

mrom = type(self)(
new_db,
copy.deepcopy(self.reduction),
copy.deepcopy(self.approximation),
).fit()
).fit(*args, **kwargs)

error[j] = rom.test_error(test_db, norm=norm)
loo_errors = mrom.test_error(test_db, norm=norm, **kwargs)

return error
for k in errors:
errors[k][j] = loo_errors[k]

return errors

def optimal_mu(self, error=None, k=1):
    """
    Return, for each ROM key, the parametric points where new high-fidelity
    solutions have to be computed in order to globally reduce the
    estimated error.

    :param dict error: a dict mapping each ROM key to the vector of
        errors estimated at all parametric points. If ``None``, the
        leave-one-out error is computed. Default is None.
    :param int k: the number of optimal points to return. Default is 1.
    :return: a dict mapping each ROM key to the array of optimal
        parametric points.
    :rtype: dict
    """
    if error is None:
        error = self.loo_error()

    # All database entries share the same parameters; use the first one
    # to build the triangulation of the parameter space.
    first_db = list(self.database.values())[0]
    mu = first_db.parameters_matrix
    tria = Delaunay(mu)

    opt_mu_dict = {}

    for key, err in error.items():
        # Error associated to each simplex: sum of the vertex errors
        # weighted by the simplex volume.
        error_on_simplex = np.array(
            [
                np.sum(err[smpx]) * self._simplex_volume(mu[smpx])
                for smpx in tria.simplices
            ]
        )

        # Barycenter (weighted by vertex error) of the k worst simplices.
        barycentric_point = []
        for index in np.argpartition(error_on_simplex, -k)[-k:]:
            worst_tria_pts = mu[tria.simplices[index]]
            worst_tria_err = err[tria.simplices[index]]

            barycentric_point.append(
                np.average(worst_tria_pts, axis=0, weights=worst_tria_err)
            )

        opt_mu_dict[key] = np.asarray(barycentric_point)

    return opt_mu_dict

def _simplex_volume(self, vertices):
"""
def reduction_error(self, db=None, relative=True, eps=1e-12):
    """
    Compute the reduction (projection/reconstruction) error of each ROM.

    Each sub-ROM's ``reduction_error`` is evaluated on the database entry
    matching its key.

    :param db: the database (or a dict of databases keyed like
        ``self.roms``) on which the error is computed. If ``None``, each
        sub-ROM uses its own training database. Default is None.
    :param bool relative: if True, the relative error is returned.
        Default is True.
    :param float eps: small regularization added to the denominator of
        the relative error. Default is 1e-12.
    :return: a dict mapping each ROM key to its reduction error.
    :rtype: dict

    Example:
    >>> err_test_reduct = rom.reconstruction_error(db_test, relative=True)
    """
    errors = {}
    for key, rom in self.roms.items():
        if db is None:
            db_k = None
        elif isinstance(db, dict):
            # Resolve the database entry for this ROM key: exact match
            # first, then the first element of a tuple key, then fall
            # back to the first database entry.
            if key in db:
                db_key = key
            elif isinstance(key, tuple) and key[0] in db:
                db_key = key[0]
            else:
                db_key = list(db.keys())[0]
            db_k = db[db_key]
        else:
            db_k = db

        errors[key] = rom.reduction_error(db=db_k, relative=relative, eps=eps)

    return errors

def approximation_error(self, db=None, relative=True, eps=1e-12):
    """
    Compute the approximation (regression) error of each ROM.

    Each sub-ROM's ``approximation_error`` is evaluated on the database
    entry matching its key.

    :param db: the database (or a dict of databases keyed like
        ``self.roms``) on which the error is computed. If ``None``, each
        sub-ROM uses its own training database. Default is None.
    :param bool relative: if True, the relative error is returned.
        Default is True.
    :param float eps: small regularization added to the denominator of
        the relative error. Default is 1e-12.
    :return: a dict mapping each ROM key to its approximation error.
    :rtype: dict

    Example:
    >>> err_test_approx = rom.approximation_error(db_test, relative=True)
    """
    errors = {}
    for key, rom in self.roms.items():
        if db is None:
            db_k = None
        elif isinstance(db, dict):
            # Resolve the database entry for this ROM key: exact match
            # first, then the first element of a tuple key, then fall
            # back to the first database entry.
            if key in db:
                db_key = key
            elif isinstance(key, tuple) and key[0] in db:
                db_key = key[0]
            else:
                db_key = list(db.keys())[0]
            db_k = db[db_key]
        else:
            db_k = db

        errors[key] = rom.approximation_error(db=db_k, relative=relative, eps=eps)

    return errors
3 changes: 2 additions & 1 deletion tests/test_parallel/test_reducedordermodel.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,5 @@
from ezyrb.parallel import ReducedOrderModel as ParallelROM

# Patch the base module so the imported tests run against the parallel ROM.
ezyrb.ReducedOrderModel = ParallelROM

# Explicitly import ONLY the original base tests, not the new extended ones
from tests.test_reducedordermodel import TestReducedOrderModel, test_invariant_pod
Loading
Loading