Skip to content

Commit 380f9f6

Browse files
committed
v1.0.0
1 parent 84f7528 commit 380f9f6

File tree

11 files changed

+56
-92
lines changed

11 files changed

+56
-92
lines changed

.coverage

-60 KB
Binary file not shown.

.coveragerc

Lines changed: 0 additions & 34 deletions
This file was deleted.

.gitattributes

Lines changed: 0 additions & 3 deletions
This file was deleted.

README.md

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
21
<p align="center">
32
<img src="docs/source/figures/logo.png" width="70%" align="center">
43
</p>
@@ -10,27 +9,26 @@
109
| [**GitHub**](https://github.com/JiaxiangYi96/mfpml)
1110
| [**Tutorials**](https://github.com/JiaxiangYi96/mfpml/tree/main/tutorials)
1211

13-
1412
## Summary
1513

16-
This repository aims to provide a package for multi-fidelity probabilistic machine learning. The package is developed by Jiaxiang Yi and Ji Cheng based on their learning curve on multi-fidelity probabilistic macgin learning, and multi-fidelity Bayesian optimization, and multi-fidelity reliability analysis.
14+
This repository aims to provide a package for multi-fidelity probabilistic machine learning. The package is developed by Jiaxiang Yi and Ji Cheng based on their learning curve on multi-fidelity probabilistic machine learning, and multi-fidelity Bayesian optimization, and multi-fidelity reliability analysis.
1715

18-
Overall, this mfpml package has two main goals, the first one is to provide basic code on implement typical methods in modeling, optimization, and reliability analysis field. Then, based on the basic code, we also provide some advanced methods based on our publications.
16+
Overall, this `mfpml` package has two main goals, the first one is to provide basic code on implement typical methods in modeling, optimization, and reliability analysis field. Then, based on the basic code, we also provide some advanced methods based on our publications.
1917

2018
---
2119

2220
(1) Basic methods
2321

2422
**Models**
2523

26-
- Kriging model
24+
- Kriging model
2725
- Multi-fidelity Kriging model
2826

2927
**Optimizations**
3028

31-
- Evolutionary algorithms
29+
- Evolutionary algorithms
3230
- Single fidelity Bayesian optimization
33-
- Multi-fidelity Bayesian optimization
31+
- Multi-fidelity Bayesian optimization
3432

3533
**Reliability analysis**
3634

@@ -64,7 +62,7 @@ please check out those papers:
6462
## Community Support
6563

6664
If you find any issues, bugs or problems with this package, you can raise an issue
67-
on the github page, or contact the Authors directly.
65+
on the github page, or contact the authors directly.
6866

6967
## License
7068

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ packages = ["src"]
77

88
[tool.pytest.ini_options]
99
pythonpath = [".", "src"]
10-
addopts = "--cov-report term --cov-report html --cov-config=.coveragerc --cov=mfpml --cov-fail-under=80.00 --ignore=examples --ignore=docs "
10+
addopts = "--cov-report term --cov-report html --cov-config=.coveragerc --cov=mfpml --cov-fail-under=50.00 --ignore=examples --ignore=docs "
1111
minversion = "6.0"
1212
testpaths = [
1313
"tests,"

requirements.txt

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
1-
matplotlib
2-
numpy
3-
scipy
4-
pandas
5-
1+
matplotlib>=3.7.2
2+
numpy>=1.25.1
3+
scipy>=1.11.1
4+
pandas>=2.0.3
5+
scikit-learn>=1.3.0
66

src/mfpml/models/co_kriging.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -147,8 +147,9 @@ def _update_kernel_matrix(self) -> None:
147147
# step 2: get the optimal sigma2
148148
self.gamma = solve(self.L.T, solve(
149149
self.L, (self.sample_y_scaled - np.dot(self.f, self.beta))))
150-
self.sigma2 = np.dot((self.sample_y_scaled - np.dot(self.f, self.beta)).T,
151-
self.gamma) / self.num_samples
150+
self.sigma2 = np.dot((self.sample_y_scaled -
151+
np.dot(self.f, self.beta)).T, self.gamma) \
152+
/ self.num_samples
152153

153154
# step 3: get the optimal log likelihood
154155
self.logp = (-0.5 * self.num_samples * np.log(self.sigma2) -
@@ -390,7 +391,7 @@ def predict(
390391
# transfer to 2d array
391392
Xnew = np.atleast_2d(self.normalize_input(x_predict))
392393
#
393-
oneC = np.ones((self.C.shape[0], 1))
394+
# oneC = np.ones((self.C.shape[0], 1))
394395
# calculate the covariance matrix
395396
c = np.concatenate(
396397
(
@@ -399,7 +400,8 @@ def predict(
399400
* self.lf_model.kernel.get_kernel_matrix(
400401
self.sample_xl_scaled, Xnew),
401402
self.rho**2
402-
* self.lf_model.sigma2*self.lf_model.kernel.get_kernel_matrix(self.sample_xh_scaled, Xnew) + self.sigma2 *
403+
* self.lf_model.sigma2*self.lf_model.kernel.get_kernel_matrix(
404+
self.sample_xh_scaled, Xnew) + self.sigma2 *
403405
self.kernel.get_kernel_matrix(self.sample_xh_scaled, Xnew),
404406
),
405407
axis=0,

src/mfpml/models/gaussian_process.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -246,7 +246,8 @@ def _update_kernel_matrix(self) -> None:
246246
# update parameters with optimized hyper-parameters
247247
self.K = self.kernel.get_kernel_matrix(
248248
self.sample_scaled_x,
249-
self.sample_scaled_x) + np.eye(self.num_samples) * (self.noise/self.y_std)**2
249+
self.sample_scaled_x) + \
250+
np.eye(self.num_samples) * (self.noise/self.y_std)**2
250251
self.L = cholesky(self.K)
251252

252253
# step 1: get the optimal beta
@@ -264,8 +265,9 @@ def _update_kernel_matrix(self) -> None:
264265
# step 2: get the optimal sigma2
265266
self.gamma = solve(self.L.T, solve(
266267
self.L, (self.sample_y_scaled - np.dot(self.f, self.beta))))
267-
self.sigma2 = np.dot((self.sample_y_scaled - np.dot(self.f, self.beta)).T,
268-
self.gamma) / self.num_samples
268+
self.sigma2 = np.dot((self.sample_y_scaled -
269+
np.dot(self.f, self.beta)).T, self.gamma) \
270+
/ self.num_samples
269271

270272
# step 3: get the optimal log likelihood
271273
self.logp = (-0.5 * self.num_samples * self.sigma2 -

src/mfpml/models/hierarchical_kriging.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -172,7 +172,8 @@ def _logLikelihood(self, params):
172172
mu = (np.dot(self.F.T, alpha) / np.dot(self.F.T, beta)).squeeze()
173173
gamma = solve(L.T, solve(L, (self.sample_yh_scaled - mu * self.F)))
174174
sigma2 = (
175-
np.dot((self.sample_yh_scaled - mu * self.F).T, gamma).squeeze()
175+
np.dot((self.sample_yh_scaled -
176+
mu * self.F).T, gamma).squeeze()
176177
/ self._num_xh
177178
).squeeze()
178179
logp = -self._num_xh * np.log(sigma2) - 2 * np.sum(

src/mfpml/models/kriging.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -148,8 +148,9 @@ def _update_kernel_matrix(self) -> None:
148148
# step 2: get the optimal sigma2
149149
self.gamma = solve(self.L.T, solve(
150150
self.L, (self.sample_y_scaled - np.dot(self.f, self.beta))))
151-
self.sigma2 = np.dot((self.sample_y_scaled - np.dot(self.f, self.beta)).T,
152-
self.gamma) / self.num_samples
151+
self.sigma2 = np.dot((self.sample_y_scaled -
152+
np.dot(self.f, self.beta)).T, self.gamma) \
153+
/ self.num_samples
153154

154155
# step 3: get the optimal log likelihood
155156
self.logp = (-0.5 * self.num_samples * np.log(self.sigma2) -

0 commit comments

Comments
 (0)