From 77266050a6062777fc75a0626a05d532e0e8d0d6 Mon Sep 17 00:00:00 2001 From: Kiichiro YUKAWA Date: Fri, 25 Aug 2023 12:16:42 +0900 Subject: [PATCH] Update charts and improvement conbench (#2119) * Implement base of continuous benchmark tool (#1776) * Create Continuous Bench Search Job tool (#1733) * :sparkles: create bench job search tools Signed-off-by: vankichi * :sparkles: add load hdf5 functions Signed-off-by: vankichi * :recycle: fix format Signed-off-by: vankichi * :recycle: fix docker and use hdf5 data Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: refactor benchmark job Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: fix proto Signed-off-by: vankichi * :green_heart: add benchmark job image build ci Signed-off-by: vankichi * :green_heart: invest Signed-off-by: vankichi * Revert ":green_heart: invest" This reverts commit f0f585ccf71b1c95a88559941557a27774096e69. * Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply code review Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Hiroto Funakoshi * :sparkles: apply from feedback Signed-off-by: vankichi * Update internal/config/benchmark.go Co-authored-by: Yusuke Kato * :recycle: change directory path Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi * Add crds for continuous benchmark tools (#1789) * :sparkles: add crds for continuous benchmark operator Signed-off-by: vankichi * :sparkles: add benchmark operator/job scheme Signed-off-by: vankichi * :sparkles: rename package names and add doc.go Signed-off-by: vankichi * :sparkles: create runtime object Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply feedback Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato * Add Job reconciler & Change directory constitution of internal/k8s for benchmark (#1825) * :sparkles: 
:recycle: add Job reconciler & use scenario instead of operator Signed-off-by: vankichi * :recycle: fix format & rename file Signed-off-by: vankichi Signed-off-by: vankichi * Add benchmark operator framework (#1916) * :sparkles: impl benchmark reconciler Signed-off-by: vankichi * :sparkles: create benchmark operator framework Signed-off-by: vankichi * :recycle: remove unness changes Signed-off-by: vankichi Signed-off-by: vankichi * Format code with prettier and gofumpt * impl reconcile logic for create benchmark job (#1923) * :sparkles: impl reconcile logic for create benchmark job Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: fix Signed-off-by: vankichi * :recycle: refactor continuous benchmark's crds Signed-off-by: vankichi * :recycle: resolve error due to update conn bench crds for pkg/tools/benchmark/job Signed-off-by: vankichi * :recycle: refactor continuous benchmark job logic Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: update charts Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: rafactor con bench config and bug fix reconcile logic Signed-off-by: vankichi * :bug: Bugfix: fix typo and recall function logic Signed-off-by: vankichi * :recycle: refactor pkg benchmark job Signed-off-by: vankichi --------- Signed-off-by: vankichi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> --------- Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * :sparkles: impl status handle of continuous benchmark crds (#1955) Signed-off-by: vankichi * Impl benchmark jobs (#1977) * Implement base of continuous benchmark tool (#1776) * Create Continuous Bench Search Job tool (#1733) * :sparkles: create bench job search tools Signed-off-by: vankichi * :sparkles: add load hdf5 functions Signed-off-by: vankichi * 
:recycle: fix format Signed-off-by: vankichi * :recycle: fix docker and use hdf5 data Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: refactor benchmark job Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: fix proto Signed-off-by: vankichi * :green_heart: add benchmark job image build ci Signed-off-by: vankichi * :green_heart: invest Signed-off-by: vankichi * Revert ":green_heart: invest" This reverts commit f0f585ccf71b1c95a88559941557a27774096e69. * Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply code review Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Hiroto Funakoshi * :sparkles: apply from feedback Signed-off-by: vankichi * Update internal/config/benchmark.go Co-authored-by: Yusuke Kato * :recycle: change directory path Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi * Add crds for continuous benchmark tools (#1789) * :sparkles: add crds for continuous benchmark operator Signed-off-by: vankichi * :sparkles: add benchmark operator/job scheme Signed-off-by: vankichi * :sparkles: rename package names and add doc.go Signed-off-by: vankichi * :sparkles: create runtime object Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply feedback Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato * Add Job reconciler & Change directory constitution of internal/k8s for benchmark (#1825) * :sparkles: :recycle: add Job reconciler & use scenario instead of operator Signed-off-by: vankichi * :recycle: fix format & rename file Signed-off-by: vankichi Signed-off-by: vankichi * Add benchmark operator framework (#1916) * :sparkles: impl benchmark reconciler Signed-off-by: vankichi * :sparkles: create benchmark operator framework Signed-off-by: vankichi * :recycle: remove unness changes Signed-off-by: vankichi Signed-off-by: 
vankichi * Format code with prettier and gofumpt * impl reconcile logic for create benchmark job (#1923) * :sparkles: impl reconcile logic for create benchmark job Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: fix Signed-off-by: vankichi * :recycle: refactor continuous benchmark's crds Signed-off-by: vankichi * :recycle: resolve error due to update conn bench crds for pkg/tools/benchmark/job Signed-off-by: vankichi * :recycle: refactor continuous benchmark job logic Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: update charts Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: rafactor con bench config and bug fix reconcile logic Signed-off-by: vankichi * :bug: Bugfix: fix typo and recall function logic Signed-off-by: vankichi * :recycle: refactor pkg benchmark job Signed-off-by: vankichi --------- Signed-off-by: vankichi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> --------- Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * :sparkles: impl benchmark jobs Signed-off-by: vankichi * :recycle: apply feedback Signed-off-by: vankichi --------- Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * create helm template for benchmark operator (#2027) * :sparkles: create helm template for benchmark operator Signed-off-by: vankichi * :sparkles: refactor helm template Signed-off-by: vankichi * style: Format code with gofumpt and prettier --------- Signed-off-by: vankichi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * Refactor helm template and operator logic (#2043) * Implement base of continuous benchmark tool (#1776) * 
Create Continuous Bench Search Job tool (#1733) * :sparkles: create bench job search tools Signed-off-by: vankichi * :sparkles: add load hdf5 functions Signed-off-by: vankichi * :recycle: fix format Signed-off-by: vankichi * :recycle: fix docker and use hdf5 data Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: refactor benchmark job Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: fix proto Signed-off-by: vankichi * :green_heart: add benchmark job image build ci Signed-off-by: vankichi * :green_heart: invest Signed-off-by: vankichi * Revert ":green_heart: invest" This reverts commit f0f585ccf71b1c95a88559941557a27774096e69. * Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply code review Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Hiroto Funakoshi * :sparkles: apply from feedback Signed-off-by: vankichi * Update internal/config/benchmark.go Co-authored-by: Yusuke Kato * :recycle: change directory path Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi * Add crds for continuous benchmark tools (#1789) * :sparkles: add crds for continuous benchmark operator Signed-off-by: vankichi * :sparkles: add benchmark operator/job scheme Signed-off-by: vankichi * :sparkles: rename package names and add doc.go Signed-off-by: vankichi * :sparkles: create runtime object Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply feedback Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato * Add Job reconciler & Change directory constitution of internal/k8s for benchmark (#1825) * :sparkles: :recycle: add Job reconciler & use scenario instead of operator Signed-off-by: vankichi * :recycle: fix format & rename file Signed-off-by: vankichi Signed-off-by: vankichi * Add benchmark operator framework (#1916) * :sparkles: impl benchmark 
reconciler Signed-off-by: vankichi * :sparkles: create benchmark operator framework Signed-off-by: vankichi * :recycle: remove unness changes Signed-off-by: vankichi Signed-off-by: vankichi * Format code with prettier and gofumpt * impl reconcile logic for create benchmark job (#1923) * :sparkles: impl reconcile logic for create benchmark job Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: fix Signed-off-by: vankichi * :recycle: refactor continuous benchmark's crds Signed-off-by: vankichi * :recycle: resolve error due to update conn bench crds for pkg/tools/benchmark/job Signed-off-by: vankichi * :recycle: refactor continuous benchmark job logic Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: update charts Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: rafactor con bench config and bug fix reconcile logic Signed-off-by: vankichi * :bug: Bugfix: fix typo and recall function logic Signed-off-by: vankichi * :recycle: refactor pkg benchmark job Signed-off-by: vankichi --------- Signed-off-by: vankichi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> --------- Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * :sparkles: impl status handle of continuous benchmark crds (#1955) Signed-off-by: vankichi * Impl benchmark jobs (#1977) * Implement base of continuous benchmark tool (#1776) * Create Continuous Bench Search Job tool (#1733) * :sparkles: create bench job search tools Signed-off-by: vankichi * :sparkles: add load hdf5 functions Signed-off-by: vankichi * :recycle: fix format Signed-off-by: vankichi * :recycle: fix docker and use hdf5 data Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: refactor benchmark job Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi 
* :recycle: fix proto Signed-off-by: vankichi * :green_heart: add benchmark job image build ci Signed-off-by: vankichi * :green_heart: invest Signed-off-by: vankichi * Revert ":green_heart: invest" This reverts commit f0f585ccf71b1c95a88559941557a27774096e69. * Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply code review Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Hiroto Funakoshi * :sparkles: apply from feedback Signed-off-by: vankichi * Update internal/config/benchmark.go Co-authored-by: Yusuke Kato * :recycle: change directory path Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi * Add crds for continuous benchmark tools (#1789) * :sparkles: add crds for continuous benchmark operator Signed-off-by: vankichi * :sparkles: add benchmark operator/job scheme Signed-off-by: vankichi * :sparkles: rename package names and add doc.go Signed-off-by: vankichi * :sparkles: create runtime object Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply feedback Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato * Add Job reconciler & Change directory constitution of internal/k8s for benchmark (#1825) * :sparkles: :recycle: add Job reconciler & use scenario instead of operator Signed-off-by: vankichi * :recycle: fix format & rename file Signed-off-by: vankichi Signed-off-by: vankichi * Add benchmark operator framework (#1916) * :sparkles: impl benchmark reconciler Signed-off-by: vankichi * :sparkles: create benchmark operator framework Signed-off-by: vankichi * :recycle: remove unness changes Signed-off-by: vankichi Signed-off-by: vankichi * Format code with prettier and gofumpt * impl reconcile logic for create benchmark job (#1923) * :sparkles: impl reconcile logic for create benchmark job Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: fix 
Signed-off-by: vankichi * :recycle: refactor continuous benchmark's crds Signed-off-by: vankichi * :recycle: resolve error due to update conn bench crds for pkg/tools/benchmark/job Signed-off-by: vankichi * :recycle: refactor continuous benchmark job logic Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: update charts Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: rafactor con bench config and bug fix reconcile logic Signed-off-by: vankichi * :bug: Bugfix: fix typo and recall function logic Signed-off-by: vankichi * :recycle: refactor pkg benchmark job Signed-off-by: vankichi --------- Signed-off-by: vankichi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> --------- Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * :sparkles: impl benchmark jobs Signed-off-by: vankichi * :recycle: apply feedback Signed-off-by: vankichi --------- Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * :recycle: Refactor helm template and operator logic Signed-off-by: vankichi * :recycle: Add download original dataset URL option Signed-off-by: vankichi * :recycle: Set docker image location at the benchmark operator configmap and use it when information is set Signed-off-by: vankichi * add search algorithm benchmark and update search aggregation algo Signed-off-by: kpango * :sparkles: Add search result aggregation option Signed-off-by: vankichi * style: Format code with prettier and gofumpt * Improve job performance (#2061) * :bug: Fix job function to apply rate limiter * :recycle: Add pyroscope setting Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :bug: Fix build error Signed-off-by: vankichi 
--------- Signed-off-by: vankichi * :bug: Fix docker file and add concurrencyLimit for job goroutine Signed-off-by: vankichi * :recycle: Fix job_template.go by feedback Signed-off-by: vankichi * :recycle: Fix job logic by feedback Signed-off-by: vankichi * :recycle: Fix Signed-off-by: vankichi --------- Signed-off-by: vankichi Signed-off-by: kpango Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * Impl benchmark jobs (#1977) * Implement base of continuous benchmark tool (#1776) * Create Continuous Bench Search Job tool (#1733) * :sparkles: create bench job search tools Signed-off-by: vankichi * :sparkles: add load hdf5 functions Signed-off-by: vankichi * :recycle: fix format Signed-off-by: vankichi * :recycle: fix docker and use hdf5 data Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: refactor benchmark job Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: fix proto Signed-off-by: vankichi * :green_heart: add benchmark job image build ci Signed-off-by: vankichi * :green_heart: invest Signed-off-by: vankichi * Revert ":green_heart: invest" This reverts commit f0f585ccf71b1c95a88559941557a27774096e69. 
* Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply code review Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Hiroto Funakoshi * :sparkles: apply from feedback Signed-off-by: vankichi * Update internal/config/benchmark.go Co-authored-by: Yusuke Kato * :recycle: change directory path Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi * Add crds for continuous benchmark tools (#1789) * :sparkles: add crds for continuous benchmark operator Signed-off-by: vankichi * :sparkles: add benchmark operator/job scheme Signed-off-by: vankichi * :sparkles: rename package names and add doc.go Signed-off-by: vankichi * :sparkles: create runtime object Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply feedback Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato * Add Job reconciler & Change directory constitution of internal/k8s for benchmark (#1825) * :sparkles: :recycle: add Job reconciler & use scenario instead of operator Signed-off-by: vankichi * :recycle: fix format & rename file Signed-off-by: vankichi Signed-off-by: vankichi * Add benchmark operator framework (#1916) * :sparkles: impl benchmark reconciler Signed-off-by: vankichi * :sparkles: create benchmark operator framework Signed-off-by: vankichi * :recycle: remove unness changes Signed-off-by: vankichi Signed-off-by: vankichi * Format code with prettier and gofumpt * impl reconcile logic for create benchmark job (#1923) * :sparkles: impl reconcile logic for create benchmark job Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: fix Signed-off-by: vankichi * :recycle: refactor continuous benchmark's crds Signed-off-by: vankichi * :recycle: resolve error due to update conn bench crds for pkg/tools/benchmark/job Signed-off-by: vankichi * :recycle: refactor continuous benchmark job logic 
Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: update charts Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: rafactor con bench config and bug fix reconcile logic Signed-off-by: vankichi * :bug: Bugfix: fix typo and recall function logic Signed-off-by: vankichi * :recycle: refactor pkg benchmark job Signed-off-by: vankichi --------- Signed-off-by: vankichi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> --------- Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * :sparkles: impl benchmark jobs Signed-off-by: vankichi * :recycle: apply feedback Signed-off-by: vankichi --------- Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * Refactor helm template and operator logic (#2043) * Implement base of continuous benchmark tool (#1776) * Create Continuous Bench Search Job tool (#1733) * :sparkles: create bench job search tools Signed-off-by: vankichi * :sparkles: add load hdf5 functions Signed-off-by: vankichi * :recycle: fix format Signed-off-by: vankichi * :recycle: fix docker and use hdf5 data Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: refactor benchmark job Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: fix proto Signed-off-by: vankichi * :green_heart: add benchmark job image build ci Signed-off-by: vankichi * :green_heart: invest Signed-off-by: vankichi * Revert ":green_heart: invest" This reverts commit f0f585ccf71b1c95a88559941557a27774096e69. 
* Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply code review Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Hiroto Funakoshi * :sparkles: apply from feedback Signed-off-by: vankichi * Update internal/config/benchmark.go Co-authored-by: Yusuke Kato * :recycle: change directory path Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi * Add crds for continuous benchmark tools (#1789) * :sparkles: add crds for continuous benchmark operator Signed-off-by: vankichi * :sparkles: add benchmark operator/job scheme Signed-off-by: vankichi * :sparkles: rename package names and add doc.go Signed-off-by: vankichi * :sparkles: create runtime object Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply feedback Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato * Add Job reconciler & Change directory constitution of internal/k8s for benchmark (#1825) * :sparkles: :recycle: add Job reconciler & use scenario instead of operator Signed-off-by: vankichi * :recycle: fix format & rename file Signed-off-by: vankichi Signed-off-by: vankichi * Add benchmark operator framework (#1916) * :sparkles: impl benchmark reconciler Signed-off-by: vankichi * :sparkles: create benchmark operator framework Signed-off-by: vankichi * :recycle: remove unness changes Signed-off-by: vankichi Signed-off-by: vankichi * Format code with prettier and gofumpt * impl reconcile logic for create benchmark job (#1923) * :sparkles: impl reconcile logic for create benchmark job Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: fix Signed-off-by: vankichi * :recycle: refactor continuous benchmark's crds Signed-off-by: vankichi * :recycle: resolve error due to update conn bench crds for pkg/tools/benchmark/job Signed-off-by: vankichi * :recycle: refactor continuous benchmark job logic 
Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: update charts Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: rafactor con bench config and bug fix reconcile logic Signed-off-by: vankichi * :bug: Bugfix: fix typo and recall function logic Signed-off-by: vankichi * :recycle: refactor pkg benchmark job Signed-off-by: vankichi --------- Signed-off-by: vankichi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> --------- Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * :sparkles: impl status handle of continuous benchmark crds (#1955) Signed-off-by: vankichi * Impl benchmark jobs (#1977) * Implement base of continuous benchmark tool (#1776) * Create Continuous Bench Search Job tool (#1733) * :sparkles: create bench job search tools Signed-off-by: vankichi * :sparkles: add load hdf5 functions Signed-off-by: vankichi * :recycle: fix format Signed-off-by: vankichi * :recycle: fix docker and use hdf5 data Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: refactor benchmark job Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :recycle: fix proto Signed-off-by: vankichi * :green_heart: add benchmark job image build ci Signed-off-by: vankichi * :green_heart: invest Signed-off-by: vankichi * Revert ":green_heart: invest" This reverts commit f0f585ccf71b1c95a88559941557a27774096e69. 
* Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply code review Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Hiroto Funakoshi * :sparkles: apply from feedback Signed-off-by: vankichi * Update internal/config/benchmark.go Co-authored-by: Yusuke Kato * :recycle: change directory path Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi * Add crds for continuous benchmark tools (#1789) * :sparkles: add crds for continuous benchmark operator Signed-off-by: vankichi * :sparkles: add benchmark operator/job scheme Signed-off-by: vankichi * :sparkles: rename package names and add doc.go Signed-off-by: vankichi * :sparkles: create runtime object Signed-off-by: vankichi * Apply suggestions from code review Co-authored-by: Yusuke Kato * :recycle: apply feedback Signed-off-by: vankichi Signed-off-by: vankichi Co-authored-by: Yusuke Kato * Add Job reconciler & Change directory constitution of internal/k8s for benchmark (#1825) * :sparkles: :recycle: add Job reconciler & use scenario instead of operator Signed-off-by: vankichi * :recycle: fix format & rename file Signed-off-by: vankichi Signed-off-by: vankichi * Add benchmark operator framework (#1916) * :sparkles: impl benchmark reconciler Signed-off-by: vankichi * :sparkles: create benchmark operator framework Signed-off-by: vankichi * :recycle: remove unness changes Signed-off-by: vankichi Signed-off-by: vankichi * Format code with prettier and gofumpt * impl reconcile logic for create benchmark job (#1923) * :sparkles: impl reconcile logic for create benchmark job Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: fix Signed-off-by: vankichi * :recycle: refactor continuous benchmark's crds Signed-off-by: vankichi * :recycle: resolve error due to update conn bench crds for pkg/tools/benchmark/job Signed-off-by: vankichi * :recycle: refactor continuous benchmark job logic 
Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: update charts Signed-off-by: vankichi * Format code with prettier and gofumpt * :recycle: rafactor con bench config and bug fix reconcile logic Signed-off-by: vankichi * :bug: Bugfix: fix typo and recall function logic Signed-off-by: vankichi * :recycle: refactor pkg benchmark job Signed-off-by: vankichi --------- Signed-off-by: vankichi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> --------- Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * :sparkles: impl benchmark jobs Signed-off-by: vankichi * :recycle: apply feedback Signed-off-by: vankichi --------- Signed-off-by: vankichi Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * :recycle: Refactor helm template and operator logic Signed-off-by: vankichi * :recycle: Add download original dataset URL option Signed-off-by: vankichi * :recycle: Set docker image location at the benchmark operator configmap and use it when information is set Signed-off-by: vankichi * add search algorithm benchmark and update search aggregation algo Signed-off-by: kpango * :sparkles: Add search result aggregation option Signed-off-by: vankichi * style: Format code with prettier and gofumpt * Improve job performance (#2061) * :bug: Fix job function to apply rate limiter * :recycle: Add pyroscope setting Signed-off-by: vankichi * :recycle: fix Signed-off-by: vankichi * :bug: Fix build error Signed-off-by: vankichi --------- Signed-off-by: vankichi * :bug: Fix docker file and add concurrencyLimit for job goroutine Signed-off-by: vankichi * :recycle: Fix job_template.go by feedback Signed-off-by: vankichi * :recycle: Fix job logic by feedback Signed-off-by: vankichi * 
:recycle: Fix Signed-off-by: vankichi --------- Signed-off-by: vankichi Signed-off-by: kpango Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> * :recycle: Add noise to vector when update/upsert and update crd Signed-off-by: vankichi * :recycle: add deepmerge func for override default config by user-defined config Signed-off-by: vankichi * :recycle: refactor deepmerge Signed-off-by: vankichi * style: Format code with prettier and gofumpt * style: Format code with prettier and gofumpt * :recycle: change add noise func Signed-off-by: vankichi * :recycle: use golang/sync/error for prevent OOM KILL Signed-off-by: vankichi * :recycle: fix config adn add test Signed-off-by: vankichi * style: Format code with prettier and gofumpt --------- Signed-off-by: vankichi Signed-off-by: kpango Co-authored-by: Yusuke Kato Co-authored-by: Hiroto Funakoshi Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> --- apis/docs/v1/docs.md | 272 +++++++ .../crds/valdbenchmarkjob.yaml | 438 ++++++++++ .../job-values.schema.json | 748 +++++++++++++++++- .../scenario-values.schema.json | 2 +- .../schemas/job-values.yaml | 476 ++++++++++- .../values.schema.json | 2 +- cmd/tools/benchmark/job/sample.yaml | 296 ++++--- go.mod | 5 +- go.sum | 2 - internal/config/config.go | 130 +++ internal/config/config_test.go | 459 ++++++++++- internal/errors/config.go | 18 + .../k8s/vald/benchmark/api/v1/job_types.go | 36 +- .../operator/crds/valdbenchmarkjob.yaml | 438 ++++++++++ pkg/tools/benchmark/job/config/config.go | 247 ++++-- .../benchmark/job/handler/grpc/handler.go | 6 +- pkg/tools/benchmark/job/service/insert.go | 8 +- pkg/tools/benchmark/job/service/job.go | 38 +- pkg/tools/benchmark/job/service/object.go | 14 +- pkg/tools/benchmark/job/service/remove.go | 8 +- pkg/tools/benchmark/job/service/search.go | 11 +- 
pkg/tools/benchmark/job/service/update.go | 10 +- pkg/tools/benchmark/job/service/upsert.go | 10 +- pkg/tools/benchmark/job/usecase/benchmarkd.go | 33 - .../operator/handler/grpc/handler.go | 4 - 25 files changed, 3416 insertions(+), 295 deletions(-) diff --git a/apis/docs/v1/docs.md b/apis/docs/v1/docs.md index 735bc9351bf..c3ea2eb7728 100644 --- a/apis/docs/v1/docs.md +++ b/apis/docs/v1/docs.md @@ -151,6 +151,7 @@ - [ResourceInfo](#rpc-v1-ResourceInfo) - [RetryInfo](#rpc-v1-RetryInfo) - [apis/proto/v1/benchmark/benchmark.proto](#apis_proto_v1_benchmark_benchmark-proto) + - [Controller](#benchmark-v1-Controller) - [Job](#benchmark-v1-Job) @@ -1526,6 +1527,277 @@ Upsert service provides ways to insert/update vectors. | StreamUpsert | [.payload.v1.Upsert.Request](#payload-v1-Upsert-Request) stream | [.payload.v1.Object.StreamLocation](#payload-v1-Object-StreamLocation) stream | A method to insert/update multiple vectors by bidirectional streaming. | | MultiUpsert | [.payload.v1.Upsert.MultiRequest](#payload-v1-Upsert-MultiRequest) | [.payload.v1.Object.Locations](#payload-v1-Object-Locations) | A method to insert/update multiple vectors in a single request. | + + +

Top

+ +## apis/proto/v1/rpc/error_details.proto + + + +### BadRequest + +Describes violations in a client request. This error type focuses on the +syntactic aspects of the request. + +| Field | Type | Label | Description | +| ---------------- | -------------------------------------------------------------- | -------- | --------------------------------------------- | +| field_violations | [BadRequest.FieldViolation](#rpc-v1-BadRequest-FieldViolation) | repeated | Describes all violations in a client request. | + + + +### BadRequest.FieldViolation + +A message type used to describe a single bad request field. + +| Field | Type | Label | Description | +| ----- | ----------------- | ----- | -------------------------------------------------------------------------------------------------------------------------------------------------- | +| field | [string](#string) | | A path that leads to a field in the request body. The value will be a sequence of dot-separated identifiers that identify a protocol buffer field. | + +Consider the following: + +message CreateContactRequest { message EmailAddress { enum Type { TYPE_UNSPECIFIED = 0; HOME = 1; WORK = 2; } + +optional string email = 1; repeated EmailType type = 2; } + +string full_name = 1; repeated EmailAddress email_addresses = 2; } + +In this example, in proto `field` could take one of the following values: + +- `full_name` for a violation in the `full_name` value _ `email_addresses[1].email` for a violation in the `email` field of the first `email_addresses` message _ `email_addresses[3].type[2]` for a violation in the second `type` value in the third `email_addresses` message. + +In JSON, the same values are represented as: + +- `fullName` for a violation in the `fullName` value _ `emailAddresses[1].email` for a violation in the `email` field of the first `emailAddresses` message _ `emailAddresses[3].type[2]` for a violation in the second `type` value in the third `emailAddresses` message. 
| + | description | [string](#string) | | A description of why the request element is bad. | + + + +### DebugInfo + +Describes additional debugging info. + +| Field | Type | Label | Description | +| ------------- | ----------------- | -------- | ------------------------------------------------------------ | +| stack_entries | [string](#string) | repeated | The stack trace entries indicating where the error occurred. | +| detail | [string](#string) | | Additional debugging information provided by the server. | + + + +### ErrorInfo + +Describes the cause of the error with structured details. + +Example of an error when contacting the "pubsub.googleapis.com" API when it +is not enabled: + + { "reason": "API_DISABLED" + "domain": "googleapis.com" + "metadata": { + "resource": "projects/123", + "service": "pubsub.googleapis.com" + } + } + +This response indicates that the pubsub.googleapis.com API is not enabled. + +Example of an error that is returned when attempting to create a Spanner +instance in a region that is out of stock: + + { "reason": "STOCKOUT" + "domain": "spanner.googleapis.com", + "metadata": { + "availableRegions": "us-central1,us-east2" + } + } + +| Field | Type | Label | Description | +| -------- | ---------------------------------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| reason | [string](#string) | | The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. 
This should be at most 63 characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. | +| domain | [string](#string) | | The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". | +| metadata | [ErrorInfo.MetadataEntry](#rpc-v1-ErrorInfo-MetadataEntry) | repeated | Additional structured details about this error. | + +Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than {"instanceLimit": "100/request"}, should be returned as, {"instanceLimitPerRequest": "100"}, if the client exceeds the number of instances that can be created in a single (batch) request. | + + + +### ErrorInfo.MetadataEntry + +| Field | Type | Label | Description | +| ----- | ----------------- | ----- | ----------- | +| key | [string](#string) | | | +| value | [string](#string) | | | + + + +### Help + +Provides links to documentation or for performing an out of band action. + +For example, if a quota check failed with an error indicating the calling +project hasn't enabled the accessed service, this can contain a URL pointing +directly to the right place in the developer console to flip the bit. + +| Field | Type | Label | Description | +| ----- | ------------------------------ | -------- | ------------------------------------------------------------------------ | +| links | [Help.Link](#rpc-v1-Help-Link) | repeated | URL(s) pointing to additional information on handling the current error. | + + + +### Help.Link + +Describes a URL link. 
+ +| Field | Type | Label | Description | +| ----------- | ----------------- | ----- | ------------------------------- | +| description | [string](#string) | | Describes what the link offers. | +| url | [string](#string) | | The URL of the link. | + + + +### LocalizedMessage + +Provides a localized error message that is safe to return to the user +which can be attached to an RPC error. + +| Field | Type | Label | Description | +| ------- | ----------------- | ----- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| locale | [string](#string) | | The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" | +| message | [string](#string) | | The localized error message in the above locale. | + + + +### PreconditionFailure + +Describes what preconditions have failed. + +For example, if an RPC failed because it required the Terms of Service to be +acknowledged, it could list the terms of service violation in the +PreconditionFailure message. + +| Field | Type | Label | Description | +| ---------- | ---------------------------------------------------------------------- | -------- | -------------------------------------- | +| violations | [PreconditionFailure.Violation](#rpc-v1-PreconditionFailure-Violation) | repeated | Describes all precondition violations. | + + + +### PreconditionFailure.Violation + +A message type used to describe a single precondition failure. + +| Field | Type | Label | Description | +| ----------- | ----------------- | ----- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| type | [string](#string) | | The type of PreconditionFailure. 
We recommend using a service-specific enum type to define the supported precondition violation subjects. For example, "TOS" for "Terms of Service violation". | +| subject | [string](#string) | | The subject, relative to the type, that failed. For example, "google.com/cloud" relative to the "TOS" type would indicate which terms of service is being referenced. | +| description | [string](#string) | | A description of how the precondition failed. Developers can use this description to understand how to fix the failure. | + +For example: "Terms of service not accepted". | + + + +### QuotaFailure + +Describes how a quota check failed. + +For example if a daily limit was exceeded for the calling project, +a service could respond with a QuotaFailure detail containing the project +id and the description of the quota limit that was exceeded. If the +calling project hasn't enabled the service in the developer console, then +a service could respond with the project id and set `service_disabled` +to true. + +Also see RetryInfo and Help types for other details about handling a +quota failure. + +| Field | Type | Label | Description | +| ---------- | -------------------------------------------------------- | -------- | ------------------------------- | +| violations | [QuotaFailure.Violation](#rpc-v1-QuotaFailure-Violation) | repeated | Describes all quota violations. | + + + +### QuotaFailure.Violation + +A message type used to describe a single quota violation. For example, a +daily quota or a custom quota that was exceeded. + +| Field | Type | Label | Description | +| ----------- | ----------------- | ----- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| subject | [string](#string) | | The subject on which the quota check failed. 
For example, "clientip:<ip address of client>" or "project:<Google developer project id>". | +| description | [string](#string) | | A description of how the quota check failed. Clients can use this description to find more about the quota configuration in the service's public documentation, or find the relevant quota limit to adjust through developer console. | + +For example: "Service disabled" or "Daily Limit for read operations exceeded". | + + + +### RequestInfo + +Contains metadata about the request that clients can attach when filing a bug +or providing other forms of feedback. + +| Field | Type | Label | Description | +| ------------ | ----------------- | ----- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| request_id | [string](#string) | | An opaque string that should only be interpreted by the service generating it. For example, it can be used to identify requests in the service's logs. | +| serving_data | [string](#string) | | Any data that was used to serve this request. For example, an encrypted stack trace that can be sent back to the service provider for debugging. | + + + +### ResourceInfo + +Describes the resource that is being accessed. + +| Field | Type | Label | Description | +| ------------- | ----------------- | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| resource_type | [string](#string) | | A name for the type of resource being accessed, e.g. "sql table", "cloud storage bucket", "file", "Google calendar"; or the type URL of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". | +| resource_name | [string](#string) | | The name of the resource being accessed. 
For example, a shared calendar name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. | +| owner | [string](#string) | | The owner of the resource (optional). For example, "user:<owner email>" or "project:<Google developer project id>". | +| description | [string](#string) | | Describes what error is encountered when accessing this resource. For example, updating a cloud project may require the `writer` permission on the developer console project. | + + + +### RetryInfo + +Describes when the clients can retry a failed request. Clients could ignore +the recommendation here or retry when this information is missing from error +responses. + +It's always recommended that clients should use exponential backoff when +retrying. + +Clients should wait until `retry_delay` amount of time has passed since +receiving the error response before retrying. If retrying requests also +fail, clients should use an exponential backoff scheme to gradually increase +the delay between retries based on `retry_delay`, until either a maximum +number of retries have been reached or a maximum retry delay cap has been +reached. + +| Field | Type | Label | Description | +| ----------- | ----------------------------------------------------- | ----- | ------------------------------------------------------------------------- | +| retry_delay | [google.protobuf.Duration](#google-protobuf-Duration) | | Clients should wait at least this long between retrying the same request. | + + + +

Top

+ +## apis/proto/v1/benchmark/benchmark.proto + + + +### Controller + +TODO define API spec here + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ----------- | + + + +### Job + +TODO define API spec here + +| Method Name | Request Type | Response Type | Description | +| ----------- | ------------ | ------------- | ----------- | + ## Scalar Value Types | .proto Type | Notes | C++ | Java | Python | Go | C# | PHP | Ruby | diff --git a/charts/vald-benchmark-operator/crds/valdbenchmarkjob.yaml b/charts/vald-benchmark-operator/crds/valdbenchmarkjob.yaml index d85ea0c12b7..f5669958ee6 100644 --- a/charts/vald-benchmark-operator/crds/valdbenchmarkjob.yaml +++ b/charts/vald-benchmark-operator/crds/valdbenchmarkjob.yaml @@ -280,6 +280,34 @@ spec: dimension: type: integer minimum: 1 + global_config: + type: object + properties: + logging: + type: object + properties: + format: + type: string + enum: + - raw + - json + level: + type: string + enum: + - debug + - info + - warn + - error + - fatal + logger: + type: string + enum: + - glg + - zap + time_zone: + type: string + version: + type: string insert_config: type: object properties: @@ -349,6 +377,416 @@ spec: type: number timeout: type: string + server_config: + type: object + properties: + healths: + type: object + properties: + liveness: + type: object + properties: + enabled: + type: boolean + host: + type: string + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + httpGet: + type: object + properties: + path: + type: string + port: + type: string + scheme: + type: string + initialDelaySeconds: + type: integer + periodSeconds: + type: integer + successThreshold: + type: integer + timeoutSeconds: + type: integer + port: + type: integer + maximum: 65535 + minimum: 0 + server: + type: object + properties: + http: + type: object + properties: + handler_timeout: + type: string + idle_timeout: + type: string + read_header_timeout: + 
type: string + read_timeout: + type: string + shutdown_duration: + type: string + write_timeout: + type: string + mode: + type: string + network: + type: string + enum: + - tcp + - tcp4 + - tcp6 + - udp + - udp4 + - udp6 + - unix + - unixgram + - unixpacket + probe_wait_time: + type: string + socket_option: + type: object + properties: + ip_recover_destination_addr: + type: boolean + ip_transparent: + type: boolean + reuse_addr: + type: boolean + reuse_port: + type: boolean + tcp_cork: + type: boolean + tcp_defer_accept: + type: boolean + tcp_fast_open: + type: boolean + tcp_no_delay: + type: boolean + tcp_quick_ack: + type: boolean + socket_path: + type: string + servicePort: + type: integer + maximum: 65535 + minimum: 0 + readiness: + type: object + properties: + enabled: + type: boolean + host: + type: string + port: + type: integer + maximum: 65535 + minimum: 0 + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + httpGet: + type: object + properties: + path: + type: string + port: + type: string + scheme: + type: string + initialDelaySeconds: + type: integer + periodSeconds: + type: integer + successThreshold: + type: integer + timeoutSeconds: + type: integer + server: + type: object + properties: + http: + type: object + properties: + handler_timeout: + type: string + idle_timeout: + type: string + read_header_timeout: + type: string + read_timeout: + type: string + shutdown_duration: + type: string + write_timeout: + type: string + mode: + type: string + network: + type: string + enum: + - tcp + - tcp4 + - tcp6 + - udp + - udp4 + - udp6 + - unix + - unixgram + - unixpacket + probe_wait_time: + type: string + socket_option: + type: object + properties: + ip_recover_destination_addr: + type: boolean + ip_transparent: + type: boolean + reuse_addr: + type: boolean + reuse_port: + type: boolean + tcp_cork: + type: boolean + tcp_defer_accept: + type: boolean + tcp_fast_open: + type: boolean + tcp_no_delay: + type: boolean + 
tcp_quick_ack: + type: boolean + socket_path: + type: string + servicePort: + type: integer + maximum: 65535 + minimum: 0 + startup: + type: object + properties: + enabled: + type: boolean + port: + type: integer + maximum: 65535 + minimum: 0 + startupProbe: + type: object + properties: + failureThreshold: + type: integer + httpGet: + type: object + properties: + path: + type: string + port: + type: string + scheme: + type: string + initialDelaySeconds: + type: integer + periodSeconds: + type: integer + successThreshold: + type: integer + timeoutSeconds: + type: integer + servers: + type: object + properties: + grpc: + type: object + properties: + enabled: + type: boolean + host: + type: string + port: + type: integer + maximum: 65535 + minimum: 0 + server: + type: object + properties: + grpc: + type: object + properties: + bidirectional_stream_concurrency: + type: integer + connection_timeout: + type: string + enable_reflection: + type: boolean + header_table_size: + type: integer + initial_conn_window_size: + type: integer + initial_window_size: + type: integer + interceptors: + type: array + items: + type: string + enum: + - RecoverInterceptor + - AccessLogInterceptor + - TraceInterceptor + - MetricInterceptor + keepalive: + type: object + properties: + max_conn_age: + type: string + max_conn_age_grace: + type: string + max_conn_idle: + type: string + min_time: + type: string + permit_without_stream: + type: boolean + time: + type: string + timeout: + type: string + max_header_list_size: + type: integer + max_receive_message_size: + type: integer + max_send_message_size: + type: integer + read_buffer_size: + type: integer + write_buffer_size: + type: integer + mode: + type: string + network: + type: string + enum: + - tcp + - tcp4 + - tcp6 + - udp + - udp4 + - udp6 + - unix + - unixgram + - unixpacket + probe_wait_time: + type: string + restart: + type: boolean + socket_option: + type: object + properties: + ip_recover_destination_addr: + type: boolean + 
ip_transparent: + type: boolean + reuse_addr: + type: boolean + reuse_port: + type: boolean + tcp_cork: + type: boolean + tcp_defer_accept: + type: boolean + tcp_fast_open: + type: boolean + tcp_no_delay: + type: boolean + tcp_quick_ack: + type: boolean + socket_path: + type: string + servicePort: + type: integer + maximum: 65535 + minimum: 0 + rest: + type: object + properties: + enabled: + type: boolean + host: + type: string + port: + type: integer + maximum: 65535 + minimum: 0 + server: + type: object + properties: + http: + type: object + properties: + handler_timeout: + type: string + idle_timeout: + type: string + read_header_timeout: + type: string + read_timeout: + type: string + shutdown_duration: + type: string + write_timeout: + type: string + mode: + type: string + network: + type: string + enum: + - tcp + - tcp4 + - tcp6 + - udp + - udp4 + - udp6 + - unix + - unixgram + - unixpacket + probe_wait_time: + type: string + socket_option: + type: object + properties: + ip_recover_destination_addr: + type: boolean + ip_transparent: + type: boolean + reuse_addr: + type: boolean + reuse_port: + type: boolean + tcp_cork: + type: boolean + tcp_defer_accept: + type: boolean + tcp_fast_open: + type: boolean + tcp_no_delay: + type: boolean + tcp_quick_ack: + type: boolean + socket_path: + type: string + servicePort: + type: integer + maximum: 65535 + minimum: 0 target: type: object properties: diff --git a/charts/vald-benchmark-operator/job-values.schema.json b/charts/vald-benchmark-operator/job-values.schema.json index 40759dd01bc..582302dfdfc 100644 --- a/charts/vald-benchmark-operator/job-values.schema.json +++ b/charts/vald-benchmark-operator/job-values.schema.json @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", + "$schema": "https://json-schema.org/draft-07/schema#", "title": "Values", "type": "object", "properties": { @@ -342,6 +342,24 @@ "description": "vector dimension", "minimum": 1 }, + "global_config": { + "type": "object", + 
"properties": { + "logging": { + "type": "object", + "properties": { + "format": { "type": "string", "enum": ["raw", "json"] }, + "level": { + "type": "string", + "enum": ["debug", "info", "warn", "error", "fatal"] + }, + "logger": { "type": "string", "enum": ["glg", "zap"] } + } + }, + "time_zone": { "type": "string" }, + "version": { "type": "string" } + } + }, "insert_config": { "type": "object", "description": "insert config", @@ -443,6 +461,734 @@ } } }, + "server_config": { + "type": "object", + "properties": { + "healths": { + "type": "object", + "properties": { + "liveness": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "liveness server enabled" + }, + "host": { + "type": "string", + "description": "liveness server host" + }, + "livenessProbe": { + "type": "object", + "properties": { + "failureThreshold": { + "type": "integer", + "description": "liveness probe failure threshold" + }, + "httpGet": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "liveness probe path" + }, + "port": { + "type": "string", + "description": "liveness probe port" + }, + "scheme": { + "type": "string", + "description": "liveness probe scheme" + } + } + }, + "initialDelaySeconds": { + "type": "integer", + "description": "liveness probe initial delay seconds" + }, + "periodSeconds": { + "type": "integer", + "description": "liveness probe period seconds" + }, + "successThreshold": { + "type": "integer", + "description": "liveness probe success threshold" + }, + "timeoutSeconds": { + "type": "integer", + "description": "liveness probe timeout seconds" + } + } + }, + "port": { + "type": "integer", + "description": "liveness server port", + "maximum": 65535, + "minimum": 0 + }, + "server": { + "type": "object", + "properties": { + "http": { + "type": "object", + "properties": { + "handler_timeout": { + "type": "string", + "description": "liveness server handler timeout" + }, + "idle_timeout": { + 
"type": "string", + "description": "liveness server idle timeout" + }, + "read_header_timeout": { + "type": "string", + "description": "liveness server read header timeout" + }, + "read_timeout": { + "type": "string", + "description": "liveness server read timeout" + }, + "shutdown_duration": { + "type": "string", + "description": "liveness server shutdown duration" + }, + "write_timeout": { + "type": "string", + "description": "liveness server write timeout" + } + } + }, + "mode": { + "type": "string", + "description": "liveness server mode" + }, + "network": { + "type": "string", + "description": "mysql network", + "enum": [ + "tcp", + "tcp4", + "tcp6", + "udp", + "udp4", + "udp6", + "unix", + "unixgram", + "unixpacket" + ] + }, + "probe_wait_time": { + "type": "string", + "description": "liveness server probe wait time" + }, + "socket_option": { + "type": "object", + "properties": { + "ip_recover_destination_addr": { + "type": "boolean", + "description": "server listen socket option for ip_recover_destination_addr functionality" + }, + "ip_transparent": { + "type": "boolean", + "description": "server listen socket option for ip_transparent functionality" + }, + "reuse_addr": { + "type": "boolean", + "description": "server listen socket option for reuse_addr functionality" + }, + "reuse_port": { + "type": "boolean", + "description": "server listen socket option for reuse_port functionality" + }, + "tcp_cork": { + "type": "boolean", + "description": "server listen socket option for tcp_cork functionality" + }, + "tcp_defer_accept": { + "type": "boolean", + "description": "server listen socket option for tcp_defer_accept functionality" + }, + "tcp_fast_open": { + "type": "boolean", + "description": "server listen socket option for tcp_fast_open functionality" + }, + "tcp_no_delay": { + "type": "boolean", + "description": "server listen socket option for tcp_no_delay functionality" + }, + "tcp_quick_ack": { + "type": "boolean", + "description": "server listen socket 
option for tcp_quick_ack functionality" + } + } + }, + "socket_path": { + "type": "string", + "description": "mysql socket_path" + } + } + }, + "servicePort": { + "type": "integer", + "description": "liveness server service port", + "maximum": 65535, + "minimum": 0 + } + } + }, + "readiness": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "readiness server enabled" + }, + "host": { + "type": "string", + "description": "readiness server host" + }, + "port": { + "type": "integer", + "description": "readiness server port", + "maximum": 65535, + "minimum": 0 + }, + "readinessProbe": { + "type": "object", + "properties": { + "failureThreshold": { + "type": "integer", + "description": "readiness probe failure threshold" + }, + "httpGet": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "readiness probe path" + }, + "port": { + "type": "string", + "description": "readiness probe port" + }, + "scheme": { + "type": "string", + "description": "readiness probe scheme" + } + } + }, + "initialDelaySeconds": { + "type": "integer", + "description": "readiness probe initial delay seconds" + }, + "periodSeconds": { + "type": "integer", + "description": "readiness probe period seconds" + }, + "successThreshold": { + "type": "integer", + "description": "readiness probe success threshold" + }, + "timeoutSeconds": { + "type": "integer", + "description": "readiness probe timeout seconds" + } + } + }, + "server": { + "type": "object", + "properties": { + "http": { + "type": "object", + "properties": { + "handler_timeout": { + "type": "string", + "description": "readiness server handler timeout" + }, + "idle_timeout": { + "type": "string", + "description": "readiness server idle timeout" + }, + "read_header_timeout": { + "type": "string", + "description": "readiness server read header timeout" + }, + "read_timeout": { + "type": "string", + "description": "readiness server read timeout" + }, + 
"shutdown_duration": { + "type": "string", + "description": "readiness server shutdown duration" + }, + "write_timeout": { + "type": "string", + "description": "readiness server write timeout" + } + } + }, + "mode": { + "type": "string", + "description": "readiness server mode" + }, + "network": { + "type": "string", + "description": "mysql network", + "enum": [ + "tcp", + "tcp4", + "tcp6", + "udp", + "udp4", + "udp6", + "unix", + "unixgram", + "unixpacket" + ] + }, + "probe_wait_time": { + "type": "string", + "description": "readiness server probe wait time" + }, + "socket_option": { + "type": "object", + "properties": { + "ip_recover_destination_addr": { + "type": "boolean", + "description": "server listen socket option for ip_recover_destination_addr functionality" + }, + "ip_transparent": { + "type": "boolean", + "description": "server listen socket option for ip_transparent functionality" + }, + "reuse_addr": { + "type": "boolean", + "description": "server listen socket option for reuse_addr functionality" + }, + "reuse_port": { + "type": "boolean", + "description": "server listen socket option for reuse_port functionality" + }, + "tcp_cork": { + "type": "boolean", + "description": "server listen socket option for tcp_cork functionality" + }, + "tcp_defer_accept": { + "type": "boolean", + "description": "server listen socket option for tcp_defer_accept functionality" + }, + "tcp_fast_open": { + "type": "boolean", + "description": "server listen socket option for tcp_fast_open functionality" + }, + "tcp_no_delay": { + "type": "boolean", + "description": "server listen socket option for tcp_no_delay functionality" + }, + "tcp_quick_ack": { + "type": "boolean", + "description": "server listen socket option for tcp_quick_ack functionality" + } + } + }, + "socket_path": { + "type": "string", + "description": "mysql socket_path" + } + } + }, + "servicePort": { + "type": "integer", + "description": "readiness server service port", + "maximum": 65535, + "minimum": 0 + 
} + } + }, + "startup": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "startup server enabled" + }, + "port": { + "type": "integer", + "description": "startup server port", + "maximum": 65535, + "minimum": 0 + }, + "startupProbe": { + "type": "object", + "properties": { + "failureThreshold": { + "type": "integer", + "description": "startup probe failure threshold" + }, + "httpGet": { + "type": "object", + "properties": { + "path": { + "type": "string", + "description": "startup probe path" + }, + "port": { + "type": "string", + "description": "startup probe port" + }, + "scheme": { + "type": "string", + "description": "startup probe scheme" + } + } + }, + "initialDelaySeconds": { + "type": "integer", + "description": "startup probe initial delay seconds" + }, + "periodSeconds": { + "type": "integer", + "description": "startup probe period seconds" + }, + "successThreshold": { + "type": "integer", + "description": "startup probe success threshold" + }, + "timeoutSeconds": { + "type": "integer", + "description": "startup probe timeout seconds" + } + } + } + } + } + } + }, + "servers": { + "type": "object", + "properties": { + "grpc": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "gRPC server enabled" + }, + "host": { "type": "string", "description": "gRPC server host" }, + "port": { + "type": "integer", + "description": "gRPC server port", + "maximum": 65535, + "minimum": 0 + }, + "server": { + "type": "object", + "properties": { + "grpc": { + "type": "object", + "properties": { + "bidirectional_stream_concurrency": { + "type": "integer", + "description": "gRPC server bidirectional stream concurrency" + }, + "connection_timeout": { + "type": "string", + "description": "gRPC server connection timeout" + }, + "enable_reflection": { + "type": "boolean", + "description": "gRPC server reflection option" + }, + "header_table_size": { + "type": "integer", + "description": "gRPC 
server header table size" + }, + "initial_conn_window_size": { + "type": "integer", + "description": "gRPC server initial connection window size" + }, + "initial_window_size": { + "type": "integer", + "description": "gRPC server initial window size" + }, + "interceptors": { + "type": "array", + "description": "gRPC server interceptors", + "items": { + "type": "string", + "enum": [ + "RecoverInterceptor", + "AccessLogInterceptor", + "TraceInterceptor", + "MetricInterceptor" + ] + } + }, + "keepalive": { + "type": "object", + "properties": { + "max_conn_age": { + "type": "string", + "description": "gRPC server keep alive max connection age" + }, + "max_conn_age_grace": { + "type": "string", + "description": "gRPC server keep alive max connection age grace" + }, + "max_conn_idle": { + "type": "string", + "description": "gRPC server keep alive max connection idle" + }, + "min_time": { + "type": "string", + "description": "gRPC server keep alive min_time" + }, + "permit_without_stream": { + "type": "boolean", + "description": "gRPC server keep alive permit_without_stream" + }, + "time": { + "type": "string", + "description": "gRPC server keep alive time" + }, + "timeout": { + "type": "string", + "description": "gRPC server keep alive timeout" + } + } + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC server max header list size" + }, + "max_receive_message_size": { + "type": "integer", + "description": "gRPC server max receive message size" + }, + "max_send_message_size": { + "type": "integer", + "description": "gRPC server max send message size" + }, + "read_buffer_size": { + "type": "integer", + "description": "gRPC server read buffer size" + }, + "write_buffer_size": { + "type": "integer", + "description": "gRPC server write buffer size" + } + } + }, + "mode": { + "type": "string", + "description": "gRPC server server mode" + }, + "network": { + "type": "string", + "description": "mysql network", + "enum": [ + "tcp", + "tcp4", + "tcp6", + 
"udp", + "udp4", + "udp6", + "unix", + "unixgram", + "unixpacket" + ] + }, + "probe_wait_time": { + "type": "string", + "description": "gRPC server probe wait time" + }, + "restart": { + "type": "boolean", + "description": "gRPC server restart" + }, + "socket_option": { + "type": "object", + "properties": { + "ip_recover_destination_addr": { + "type": "boolean", + "description": "server listen socket option for ip_recover_destination_addr functionality" + }, + "ip_transparent": { + "type": "boolean", + "description": "server listen socket option for ip_transparent functionality" + }, + "reuse_addr": { + "type": "boolean", + "description": "server listen socket option for reuse_addr functionality" + }, + "reuse_port": { + "type": "boolean", + "description": "server listen socket option for reuse_port functionality" + }, + "tcp_cork": { + "type": "boolean", + "description": "server listen socket option for tcp_cork functionality" + }, + "tcp_defer_accept": { + "type": "boolean", + "description": "server listen socket option for tcp_defer_accept functionality" + }, + "tcp_fast_open": { + "type": "boolean", + "description": "server listen socket option for tcp_fast_open functionality" + }, + "tcp_no_delay": { + "type": "boolean", + "description": "server listen socket option for tcp_no_delay functionality" + }, + "tcp_quick_ack": { + "type": "boolean", + "description": "server listen socket option for tcp_quick_ack functionality" + } + } + }, + "socket_path": { + "type": "string", + "description": "mysql socket_path" + } + } + }, + "servicePort": { + "type": "integer", + "description": "gRPC server service port", + "maximum": 65535, + "minimum": 0 + } + } + }, + "rest": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "REST server enabled" + }, + "host": { "type": "string", "description": "REST server host" }, + "port": { + "type": "integer", + "description": "REST server port", + "maximum": 65535, + "minimum": 0 + }, + 
"server": { + "type": "object", + "properties": { + "http": { + "type": "object", + "properties": { + "handler_timeout": { + "type": "string", + "description": "REST server handler timeout" + }, + "idle_timeout": { + "type": "string", + "description": "REST server idle timeout" + }, + "read_header_timeout": { + "type": "string", + "description": "REST server read header timeout" + }, + "read_timeout": { + "type": "string", + "description": "REST server read timeout" + }, + "shutdown_duration": { + "type": "string", + "description": "REST server shutdown duration" + }, + "write_timeout": { + "type": "string", + "description": "REST server write timeout" + } + } + }, + "mode": { + "type": "string", + "description": "REST server server mode" + }, + "network": { + "type": "string", + "description": "mysql network", + "enum": [ + "tcp", + "tcp4", + "tcp6", + "udp", + "udp4", + "udp6", + "unix", + "unixgram", + "unixpacket" + ] + }, + "probe_wait_time": { + "type": "string", + "description": "REST server probe wait time" + }, + "socket_option": { + "type": "object", + "properties": { + "ip_recover_destination_addr": { + "type": "boolean", + "description": "server listen socket option for ip_recover_destination_addr functionality" + }, + "ip_transparent": { + "type": "boolean", + "description": "server listen socket option for ip_transparent functionality" + }, + "reuse_addr": { + "type": "boolean", + "description": "server listen socket option for reuse_addr functionality" + }, + "reuse_port": { + "type": "boolean", + "description": "server listen socket option for reuse_port functionality" + }, + "tcp_cork": { + "type": "boolean", + "description": "server listen socket option for tcp_cork functionality" + }, + "tcp_defer_accept": { + "type": "boolean", + "description": "server listen socket option for tcp_defer_accept functionality" + }, + "tcp_fast_open": { + "type": "boolean", + "description": "server listen socket option for tcp_fast_open functionality" + }, + 
"tcp_no_delay": { + "type": "boolean", + "description": "server listen socket option for tcp_no_delay functionality" + }, + "tcp_quick_ack": { + "type": "boolean", + "description": "server listen socket option for tcp_quick_ack functionality" + } + } + }, + "socket_path": { + "type": "string", + "description": "mysql socket_path" + } + } + }, + "servicePort": { + "type": "integer", + "description": "REST server service port", + "maximum": 65535, + "minimum": 0 + } + } + } + } + } + } + }, "target": { "type": "object", "description": "target cluster location", diff --git a/charts/vald-benchmark-operator/scenario-values.schema.json b/charts/vald-benchmark-operator/scenario-values.schema.json index c4aadae03c6..7817e50c38b 100644 --- a/charts/vald-benchmark-operator/scenario-values.schema.json +++ b/charts/vald-benchmark-operator/scenario-values.schema.json @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", + "$schema": "https://json-schema.org/draft-07/schema#", "title": "Values", "type": "object", "properties": { diff --git a/charts/vald-benchmark-operator/schemas/job-values.yaml b/charts/vald-benchmark-operator/schemas/job-values.yaml index 021418e9f56..a795d321b6d 100644 --- a/charts/vald-benchmark-operator/schemas/job-values.yaml +++ b/charts/vald-benchmark-operator/schemas/job-values.yaml @@ -19,9 +19,8 @@ target: # @schema {"name": "target.host", "type": "string", "minLength": 1} # target.host -- target cluster host - host: - "vald-lb-gateway.default.svc.cluster.local" - # @schema {"name": "target.port", "type": "integer", "minimum": 0, "maximum": 65535} + host: "vald-lb-gateway.default.svc.cluster.local" + # @schema {"name": "target.port", "type": "integer", "minimum": 0, "maximum": 65535} # target.port -- target cluster port port: 8081 # @schema {"name": "dataset", "type": "object", "required": ["name", "indexes", "group", "range"]} @@ -57,6 +56,18 @@ replica: 1 # @schema {"name": "repetition", "type": "integer", "minimum": 1} # 
repetition -- the number of repeat job repetition: 1 +# @schema {"name": "rules", "type": "array", "items": {"type": "string"}} +# rules -- executing rule +rules: [] +# @schema {"name": "rps", "type": "integer", "minimum": 0, "maximum": 65535} +# rps -- desired request per sec +rps: 1000 +# @schema {"name": "concurrency_limit", "type": "integer", "minimum": 0, "maximum": 65535} +# concurrency_limit -- concurrency_limit represents the goroutine limit count. It affects the job performance. +concurrency_limit: 200 +# @schema {"name": "ttl_seconds_after_finished", "type": "integer", "minimum": 0, "maximum": 65535} +# ttl_seconds_after_finished -- limits the lifetime of a Job that has finished execution. +ttl_seconds_after_finished: 10 # @schema {"name": "job_type", "type": "string", "enum": ["insert", "update", "upsert", "search", "remove", "getobject", "exists"]} # job_type -- job type name job_type: "search" @@ -117,7 +128,6 @@ search_config: # @schema {"name": "search_config.aggregation_algorithm", "type": "string", "enum": ["Unknown", "ConcurrentQueue", "SortSlice", "SortPoolSlice", "PairingHeap"]} # search_config.aggregation_algorithm -- search result aggregation algorithm aggregation_algorithm: "Unknown" - # @schema {"name": "remove_config", "type": "object"} # remove_config -- remove config remove_config: @@ -361,15 +371,449 @@ client_config: # @schema {"name": "client_config.tls.insecure_skip_verify", "type": "boolean"} # client_config.tls.insecure_skip_verify -- enable/disable skip SSL certificate verification insecure_skip_verify: false -# @schema {"name": "rules", "type": "array", "items": {"type": "string"}} -# rules -- executing rule -rules: [] -# @schema {"name": "rps", "type": "integer", "minimum": 0, "maximum": 65535} -# rps -- desired request per sec -rps: 1000 -# @schema {"name": "concurrency_limit", "type": "integer", "minimum": 0, "maximum": 65535} -# concurrency_limit -- concurrency_limit represents the goroutine limit count. 
It affects the job performance. -concurrency_limit: 200 -# @schema {"name": "ttl_seconds_after_finished", "type": "integer", "minimum": 0, "maximum": 65535} -# ttl_seconds_after_finished -- limits the lifetime of a Job that has finished execution. -ttl_seconds_after_finished: 10 +# @schema {"name": "global_config", "type": "object"} +global_config: + # @schema {"name": "global_config.version", "type": "string", "default": "v0.0.1"} + # version -- version info + version: "v0.0.1" + # @schema {"name": "global_config.time_zone", "type": "string"} + # time_zone -- Time zone + time_zone: UTC + # @schema {"name": "global_config.logging", "type": "object", "anchor": "logging"} + logging: + # @schema {"name": "global_config.logging.logger", "type": "string", "enum": ["glg", "zap"]} + # logging.logger -- logger name. + # currently logger must be `glg` or `zap`. + logger: glg + # @schema {"name": "global_config.logging.level", "type": "string", "enum": ["debug", "info", "warn", "error", "fatal"]} + # logging.level -- logging level. + # logging level must be `debug`, `info`, `warn`, `error` or `fatal`. + level: debug + # @schema {"name": "global_config.logging.format", "type": "string", "enum": ["raw", "json"]} + # logging.format -- logging format. 
+ # logging format must be `raw` or `json` + format: raw +# @schema {"name": "server_config", "type": "object", "anchor": "server_config"} +server_config: + # @schema {"name": "server_config.servers", "type": "object"} + servers: + # @schema {"name": "server_config.servers.rest", "type": "object"} + rest: + # @schema {"name": "server_config.servers.rest.enabled", "type": "boolean"} + # server_config.servers.rest.enabled -- REST server enabled + enabled: false + # @schema {"name": "server_config.servers.rest.host", "type": "string"} + # server_config.servers.rest.host -- REST server host + host: 0.0.0.0 + # @schema {"name": "server_config.servers.rest.port", "type": "integer", "minimum": 0, "maximum": 65535} + # server_config.servers.rest.port -- REST server port + port: 8080 + # @schema {"name": "server_config.servers.rest.servicePort", "type": "integer", "minimum": 0, "maximum": 65535} + # server_config.servers.rest.servicePort -- REST server service port + servicePort: 8080 + # @schema {"name": "server_config.servers.rest.server", "type": "object"} + server: + # @schema {"name": "server_config.servers.rest.server.mode", "type": "string"} + # server_config.servers.rest.server.mode -- REST server server mode + mode: REST + # @schema {"name": "server_config.servers.rest.server.probe_wait_time", "type": "string"} + # server_config.servers.rest.server.probe_wait_time -- REST server probe wait time + probe_wait_time: 3s + # @schema {"name": "server_config.servers.rest.server.network", "type": "string", "enum": ["tcp", "tcp4", "tcp6", "udp", "udp4", "udp6", "unix", "unixgram", "unixpacket"]} + # server_config.servers.rest.server.network -- mysql network + network: tcp + # @schema {"name": "server_config.servers.rest.server.socket_path", "type": "string"} + # server_config.servers.rest.server.socket_path -- mysql socket_path + socket_path: "" + # @schema {"name": "server_config.servers.rest.server.http", "type": "object"} + http: + # @schema {"name": 
"server_config.servers.rest.server.http.shutdown_duration", "type": "string"} + # server_config.servers.rest.server.http.shutdown_duration -- REST server shutdown duration + shutdown_duration: 5s + # @schema {"name": "server_config.servers.rest.server.http.handler_timeout", "type": "string"} + # server_config.servers.rest.server.http.handler_timeout -- REST server handler timeout + handler_timeout: 5s + # @schema {"name": "server_config.servers.rest.server.http.idle_timeout", "type": "string"} + # server_config.servers.rest.server.http.idle_timeout -- REST server idle timeout + idle_timeout: 2s + # @schema {"name": "server_config.servers.rest.server.http.read_header_timeout", "type": "string"} + # server_config.servers.rest.server.http.read_header_timeout -- REST server read header timeout + read_header_timeout: 1s + # @schema {"name": "server_config.servers.rest.server.http.read_timeout", "type": "string"} + # server_config.servers.rest.server.http.read_timeout -- REST server read timeout + read_timeout: 1s + # @schema {"name": "server_config.servers.rest.server.http.write_timeout", "type": "string"} + # server_config.servers.rest.server.http.write_timeout -- REST server write timeout + write_timeout: 1s + # @schema {"name": "server_config.servers.rest.server.socket_option", "type": "object", "anchor": "socket_option"} + socket_option: + # @schema {"name": "server_config.servers.rest.server.socket_option.reuse_port", "type": "boolean"} + # server_config.servers.rest.server.socket_option.reuse_port -- server listen socket option for reuse_port functionality + reuse_port: true + # @schema {"name": "server_config.servers.rest.server.socket_option.reuse_addr", "type": "boolean"} + # server_config.servers.rest.server.socket_option.reuse_addr -- server listen socket option for reuse_addr functionality + reuse_addr: true + # @schema {"name": "server_config.servers.rest.server.socket_option.tcp_fast_open", "type": "boolean"} + # 
server_config.servers.rest.server.socket_option.tcp_fast_open -- server listen socket option for tcp_fast_open functionality + tcp_fast_open: true + # @schema {"name": "server_config.servers.rest.server.socket_option.tcp_no_delay", "type": "boolean"} + # server_config.servers.rest.server.socket_option.tcp_no_delay -- server listen socket option for tcp_no_delay functionality + tcp_no_delay: true + # @schema {"name": "server_config.servers.rest.server.socket_option.tcp_cork", "type": "boolean"} + # server_config.servers.rest.server.socket_option.tcp_cork -- server listen socket option for tcp_cork functionality + tcp_cork: false + # @schema {"name": "server_config.servers.rest.server.socket_option.tcp_quick_ack", "type": "boolean"} + # server_config.servers.rest.server.socket_option.tcp_quick_ack -- server listen socket option for tcp_quick_ack functionality + tcp_quick_ack: true + # @schema {"name": "server_config.servers.rest.server.socket_option.tcp_defer_accept", "type": "boolean"} + # server_config.servers.rest.server.socket_option.tcp_defer_accept -- server listen socket option for tcp_defer_accept functionality + tcp_defer_accept: true + # @schema {"name": "server_config.servers.rest.server.socket_option.ip_transparent", "type": "boolean"} + # server_config.servers.rest.server.socket_option.ip_transparent -- server listen socket option for ip_transparent functionality + ip_transparent: false + # @schema {"name": "server_config.servers.rest.server.socket_option.ip_recover_destination_addr", "type": "boolean"} + # server_config.servers.rest.server.socket_option.ip_recover_destination_addr -- server listen socket option for ip_recover_destination_addr functionality + ip_recover_destination_addr: false + # @schema {"name": "server_config.servers.grpc", "type": "object"} + grpc: + # @schema {"name": "server_config.servers.grpc.enabled", "type": "boolean"} + # server_config.servers.grpc.enabled -- gRPC server enabled + enabled: true + # @schema {"name": 
"server_config.servers.grpc.host", "type": "string"} + # server_config.servers.grpc.host -- gRPC server host + host: 0.0.0.0 + # @schema {"name": "server_config.servers.grpc.port", "type": "integer", "minimum": 0, "maximum": 65535} + # server_config.servers.grpc.port -- gRPC server port + port: 8081 + # @schema {"name": "server_config.servers.grpc.servicePort", "type": "integer", "minimum": 0, "maximum": 65535} + # server_config.servers.grpc.servicePort -- gRPC server service port + servicePort: 8081 + # @schema {"name": "server_config.servers.grpc.server", "type": "object"} + server: + # @schema {"name": "server_config.servers.grpc.server.mode", "type": "string"} + # server_config.servers.grpc.server.mode -- gRPC server server mode + mode: GRPC + # @schema {"name": "server_config.servers.grpc.server.probe_wait_time", "type": "string"} + # server_config.servers.grpc.server.probe_wait_time -- gRPC server probe wait time + probe_wait_time: "3s" + # @schema {"name": "server_config.servers.grpc.server.network", "type": "string", "enum": ["tcp", "tcp4", "tcp6", "udp", "udp4", "udp6", "unix", "unixgram", "unixpacket"]} + # server_config.servers.grpc.server.network -- mysql network + network: tcp + # @schema {"name": "server_config.servers.grpc.server.socket_path", "type": "string"} + # server_config.servers.grpc.server.socket_path -- mysql socket_path + socket_path: "" + # @schema {"name": "server_config.servers.grpc.server.grpc", "type": "object"} + grpc: + # @schema {"name": "server_config.servers.grpc.server.grpc.bidirectional_stream_concurrency", "type": "integer"} + # server_config.servers.grpc.server.grpc.bidirectional_stream_concurrency -- gRPC server bidirectional stream concurrency + bidirectional_stream_concurrency: 20 + # @schema {"name": "server_config.servers.grpc.server.grpc.max_receive_message_size", "type": "integer"} + # server_config.servers.grpc.server.grpc.max_receive_message_size -- gRPC server max receive message size + max_receive_message_size: 0 + 
# @schema {"name": "server_config.servers.grpc.server.grpc.max_send_message_size", "type": "integer"} + # server_config.servers.grpc.server.grpc.max_send_message_size -- gRPC server max send message size + max_send_message_size: 0 + # @schema {"name": "server_config.servers.grpc.server.grpc.initial_window_size", "type": "integer"} + # server_config.servers.grpc.server.grpc.initial_window_size -- gRPC server initial window size + initial_window_size: 0 + # @schema {"name": "server_config.servers.grpc.server.grpc.initial_conn_window_size", "type": "integer"} + # server_config.servers.grpc.server.grpc.initial_conn_window_size -- gRPC server initial connection window size + initial_conn_window_size: 0 + # @schema {"name": "server_config.servers.grpc.server.grpc.keepalive", "type": "object"} + keepalive: + # @schema {"name": "server_config.servers.grpc.server.grpc.keepalive.max_conn_idle", "type": "string"} + # server_config.servers.grpc.server.grpc.keepalive.max_conn_idle -- gRPC server keep alive max connection idle + max_conn_idle: "" + # @schema {"name": "server_config.servers.grpc.server.grpc.keepalive.max_conn_age", "type": "string"} + # server_config.servers.grpc.server.grpc.keepalive.max_conn_age -- gRPC server keep alive max connection age + max_conn_age: "" + # @schema {"name": "server_config.servers.grpc.server.grpc.keepalive.max_conn_age_grace", "type": "string"} + # server_config.servers.grpc.server.grpc.keepalive.max_conn_age_grace -- gRPC server keep alive max connection age grace + max_conn_age_grace: "" + # @schema {"name": "server_config.servers.grpc.server.grpc.keepalive.time", "type": "string"} + # server_config.servers.grpc.server.grpc.keepalive.time -- gRPC server keep alive time + time: "120s" + # @schema {"name": "server_config.servers.grpc.server.grpc.keepalive.timeout", "type": "string"} + # server_config.servers.grpc.server.grpc.keepalive.timeout -- gRPC server keep alive timeout + timeout: "30s" + # @schema {"name": 
"server_config.servers.grpc.server.grpc.keepalive.min_time", "type": "string"} + # server_config.servers.grpc.server.grpc.keepalive.min_time -- gRPC server keep alive min_time + min_time: "60s" + # @schema {"name": "server_config.servers.grpc.server.grpc.keepalive.permit_without_stream", "type": "boolean"} + # server_config.servers.grpc.server.grpc.keepalive.permit_without_stream -- gRPC server keep alive permit_without_stream + permit_without_stream: true + # @schema {"name": "server_config.servers.grpc.server.grpc.write_buffer_size", "type": "integer"} + # server_config.servers.grpc.server.grpc.write_buffer_size -- gRPC server write buffer size + write_buffer_size: 0 + # @schema {"name": "server_config.servers.grpc.server.grpc.read_buffer_size", "type": "integer"} + # server_config.servers.grpc.server.grpc.read_buffer_size -- gRPC server read buffer size + read_buffer_size: 0 + # @schema {"name": "server_config.servers.grpc.server.grpc.connection_timeout", "type": "string"} + # server_config.servers.grpc.server.grpc.connection_timeout -- gRPC server connection timeout + connection_timeout: "" + # @schema {"name": "server_config.servers.grpc.server.grpc.max_header_list_size", "type": "integer"} + # server_config.servers.grpc.server.grpc.max_header_list_size -- gRPC server max header list size + max_header_list_size: 0 + # @schema {"name": "server_config.servers.grpc.server.grpc.header_table_size", "type": "integer"} + # server_config.servers.grpc.server.grpc.header_table_size -- gRPC server header table size + header_table_size: 0 + # @schema {"name": "server_config.servers.grpc.server.grpc.interceptors", "type": "array", "items": {"type": "string", "enum": ["RecoverInterceptor", "AccessLogInterceptor", "TraceInterceptor", "MetricInterceptor"]}} + # server_config.servers.grpc.server.grpc.interceptors -- gRPC server interceptors + interceptors: + - "RecoverInterceptor" + # @schema {"name": "server_config.servers.grpc.server.grpc.enable_reflection", "type": 
"boolean"} + # server_config.servers.grpc.server.grpc.enable_reflection -- gRPC server reflection option + enable_reflection: true + # @schema {"name": "server_config.servers.grpc.server.socket_option", "alias": "socket_option"} + socket_option: + # server_config.servers.grpc.server.socket_option.reuse_port -- server listen socket option for reuse_port functionality + reuse_port: true + # server_config.servers.grpc.server.socket_option.reuse_addr -- server listen socket option for reuse_addr functionality + reuse_addr: true + # server_config.servers.grpc.server.socket_option.tcp_fast_open -- server listen socket option for tcp_fast_open functionality + tcp_fast_open: true + # server_config.servers.grpc.server.socket_option.tcp_no_delay -- server listen socket option for tcp_no_delay functionality + tcp_no_delay: true + # server_config.servers.grpc.server.socket_option.tcp_cork -- server listen socket option for tcp_cork functionality + tcp_cork: false + # server_config.servers.grpc.server.socket_option.tcp_quick_ack -- server listen socket option for tcp_quick_ack functionality + tcp_quick_ack: true + # server_config.servers.grpc.server.socket_option.tcp_defer_accept -- server listen socket option for tcp_defer_accept functionality + tcp_defer_accept: true + # server_config.servers.grpc.server.socket_option.ip_transparent -- server listen socket option for ip_transparent functionality + ip_transparent: false + # server_config.servers.grpc.server.socket_option.ip_recover_destination_addr -- server listen socket option for ip_recover_destination_addr functionality + ip_recover_destination_addr: false + # @schema {"name": "server_config.servers.grpc.server.restart", "type": "boolean"} + # server_config.servers.grpc.server.restart -- gRPC server restart + restart: true + # @schema {"name": "server_config.healths", "type": "object"} + healths: + # @schema {"name": "server_config.healths.startup", "type": "object"} + startup: + # @schema {"name": 
"server_config.healths.startup.enabled", "type": "boolean"} + # server_config.healths.startup.enabled -- startup server enabled + enabled: true + # @schema {"name": "server_config.healths.startup.port", "type": "integer", "minimum": 0, "maximum": 65535} + # server_config.healths.startup.port -- startup server port + port: 3000 + # @schema {"name": "server_config.healths.startup.startupProbe", "type": "object"} + startupProbe: + # @schema {"name": "server_config.healths.startup.startupProbe.httpGet", "type": "object"} + httpGet: + # @schema {"name": "server_config.healths.startup.startupProbe.httpGet.path", "type": "string"} + # server_config.healths.startup.startupProbe.httpGet.path -- startup probe path + path: /liveness + # @schema {"name": "server_config.healths.startup.startupProbe.httpGet.port", "type": "string"} + # server_config.healths.startup.startupProbe.httpGet.port -- startup probe port + port: liveness + # @schema {"name": "server_config.healths.startup.startupProbe.httpGet.scheme", "type": "string"} + # server_config.healths.startup.startupProbe.httpGet.scheme -- startup probe scheme + scheme: HTTP + # @schema {"name": "server_config.healths.startup.startupProbe.initialDelaySeconds", "type": "integer"} + # server_config.healths.startup.startupProbe.initialDelaySeconds -- startup probe initial delay seconds + initialDelaySeconds: 5 + # @schema {"name": "server_config.healths.startup.startupProbe.timeoutSeconds", "type": "integer"} + # server_config.healths.startup.startupProbe.timeoutSeconds -- startup probe timeout seconds + timeoutSeconds: 2 + # @schema {"name": "server_config.healths.startup.startupProbe.successThreshold", "type": "integer"} + # server_config.healths.startup.startupProbe.successThreshold -- startup probe success threshold + successThreshold: 1 + # @schema {"name": "server_config.healths.startup.startupProbe.failureThreshold", "type": "integer"} + # server_config.healths.startup.startupProbe.failureThreshold -- startup probe failure 
threshold + failureThreshold: 30 + # @schema {"name": "server_config.healths.startup.startupProbe.periodSeconds", "type": "integer"} + # server_config.healths.startup.startupProbe.periodSeconds -- startup probe period seconds + periodSeconds: 5 + # @schema {"name": "server_config.healths.liveness", "type": "object"} + liveness: + # @schema {"name": "server_config.healths.liveness.enabled", "type": "boolean"} + # server_config.healths.liveness.enabled -- liveness server enabled + enabled: true + # @schema {"name": "server_config.healths.liveness.host", "type": "string"} + # server_config.healths.liveness.host -- liveness server host + host: 0.0.0.0 + # @schema {"name": "server_config.healths.liveness.port", "type": "integer", "minimum": 0, "maximum": 65535} + # server_config.healths.liveness.port -- liveness server port + port: 3000 + # @schema {"name": "server_config.healths.liveness.servicePort", "type": "integer", "minimum": 0, "maximum": 65535} + # server_config.healths.liveness.servicePort -- liveness server service port + servicePort: 3000 + # @schema {"name": "server_config.healths.liveness.livenessProbe", "type": "object"} + livenessProbe: + # @schema {"name": "server_config.healths.liveness.livenessProbe.httpGet", "type": "object"} + httpGet: + # @schema {"name": "server_config.healths.liveness.livenessProbe.httpGet.path", "type": "string"} + # server_config.healths.liveness.livenessProbe.httpGet.path -- liveness probe path + path: /liveness + # @schema {"name": "server_config.healths.liveness.livenessProbe.httpGet.port", "type": "string"} + # server_config.healths.liveness.livenessProbe.httpGet.port -- liveness probe port + port: liveness + # @schema {"name": "server_config.healths.liveness.livenessProbe.httpGet.scheme", "type": "string"} + # server_config.healths.liveness.livenessProbe.httpGet.scheme -- liveness probe scheme + scheme: HTTP + # @schema {"name": "server_config.healths.liveness.livenessProbe.initialDelaySeconds", "type": "integer"} + # 
server_config.healths.liveness.livenessProbe.initialDelaySeconds -- liveness probe initial delay seconds + initialDelaySeconds: 5 + # @schema {"name": "server_config.healths.liveness.livenessProbe.timeoutSeconds", "type": "integer"} + # server_config.healths.liveness.livenessProbe.timeoutSeconds -- liveness probe timeout seconds + timeoutSeconds: 2 + # @schema {"name": "server_config.healths.liveness.livenessProbe.successThreshold", "type": "integer"} + # server_config.healths.liveness.livenessProbe.successThreshold -- liveness probe success threshold + successThreshold: 1 + # @schema {"name": "server_config.healths.liveness.livenessProbe.failureThreshold", "type": "integer"} + # server_config.healths.liveness.livenessProbe.failureThreshold -- liveness probe failure threshold + failureThreshold: 2 + # @schema {"name": "server_config.healths.liveness.livenessProbe.periodSeconds", "type": "integer"} + # server_config.healths.liveness.livenessProbe.periodSeconds -- liveness probe period seconds + periodSeconds: 3 + # @schema {"name": "server_config.healths.liveness.server", "type": "object"} + server: + # @schema {"name": "server_config.healths.liveness.server.mode", "type": "string"} + # server_config.healths.liveness.server.mode -- liveness server mode + mode: "" + # @schema {"name": "server_config.healths.liveness.server.probe_wait_time", "type": "string"} + # server_config.healths.liveness.server.probe_wait_time -- liveness server probe wait time + probe_wait_time: "3s" + # @schema {"name": "server_config.healths.liveness.server.network", "type": "string", "enum": ["tcp", "tcp4", "tcp6", "udp", "udp4", "udp6", "unix", "unixgram", "unixpacket"]} + # server_config.healths.liveness.server.network -- mysql network + network: tcp + # @schema {"name": "server_config.healths.liveness.server.socket_path", "type": "string"} + # server_config.healths.liveness.server.socket_path -- mysql socket_path + socket_path: "" + # @schema {"name": 
"server_config.healths.liveness.server.http", "type": "object"} + http: + # @schema {"name": "server_config.healths.liveness.server.http.shutdown_duration", "type": "string"} + # server_config.healths.liveness.server.http.shutdown_duration -- liveness server shutdown duration + shutdown_duration: "5s" + # @schema {"name": "server_config.healths.liveness.server.http.handler_timeout", "type": "string"} + # server_config.healths.liveness.server.http.handler_timeout -- liveness server handler timeout + handler_timeout: "" + # @schema {"name": "server_config.healths.liveness.server.http.idle_timeout", "type": "string"} + # server_config.healths.liveness.server.http.idle_timeout -- liveness server idle timeout + idle_timeout: "" + # @schema {"name": "server_config.healths.liveness.server.http.read_header_timeout", "type": "string"} + # server_config.healths.liveness.server.http.read_header_timeout -- liveness server read header timeout + read_header_timeout: "" + # @schema {"name": "server_config.healths.liveness.server.http.read_timeout", "type": "string"} + # server_config.healths.liveness.server.http.read_timeout -- liveness server read timeout + read_timeout: "" + # @schema {"name": "server_config.healths.liveness.server.http.write_timeout", "type": "string"} + # server_config.healths.liveness.server.http.write_timeout -- liveness server write timeout + write_timeout: "" + # @schema {"name": "server_config.healths.liveness.server.socket_option", "alias": "socket_option"} + socket_option: + # server_config.healths.liveness.server.socket_option.reuse_port -- server listen socket option for reuse_port functionality + reuse_port: true + # server_config.healths.liveness.server.socket_option.reuse_addr -- server listen socket option for reuse_addr functionality + reuse_addr: true + # server_config.healths.liveness.server.socket_option.tcp_fast_open -- server listen socket option for tcp_fast_open functionality + tcp_fast_open: true + # 
server_config.healths.liveness.server.socket_option.tcp_no_delay -- server listen socket option for tcp_no_delay functionality + tcp_no_delay: true + # server_config.healths.liveness.server.socket_option.tcp_cork -- server listen socket option for tcp_cork functionality + tcp_cork: false + # server_config.healths.liveness.server.socket_option.tcp_quick_ack -- server listen socket option for tcp_quick_ack functionality + tcp_quick_ack: true + # server_config.healths.liveness.server.socket_option.tcp_defer_accept -- server listen socket option for tcp_defer_accept functionality + tcp_defer_accept: true + # server_config.healths.liveness.server.socket_option.ip_transparent -- server listen socket option for ip_transparent functionality + ip_transparent: false + # server_config.healths.liveness.server.socket_option.ip_recover_destination_addr -- server listen socket option for ip_recover_destination_addr functionality + ip_recover_destination_addr: false + # @schema {"name": "server_config.healths.readiness", "type": "object"} + readiness: + # @schema {"name": "server_config.healths.readiness.enabled", "type": "boolean"} + # server_config.healths.readiness.enabled -- readiness server enabled + enabled: true + # @schema {"name": "server_config.healths.readiness.host", "type": "string"} + # server_config.healths.readiness.host -- readiness server host + host: 0.0.0.0 + # @schema {"name": "server_config.healths.readiness.port", "type": "integer", "minimum": 0, "maximum": 65535} + # server_config.healths.readiness.port -- readiness server port + port: 3001 + # @schema {"name": "server_config.healths.readiness.servicePort", "type": "integer", "minimum": 0, "maximum": 65535} + # server_config.healths.readiness.servicePort -- readiness server service port + servicePort: 3001 + # @schema {"name": "server_config.healths.readiness.readinessProbe", "type": "object"} + readinessProbe: + # @schema {"name": "server_config.healths.readiness.readinessProbe.httpGet", "type": "object"} 
+ httpGet: + # @schema {"name": "server_config.healths.readiness.readinessProbe.httpGet.path", "type": "string"} + # server_config.healths.readiness.readinessProbe.httpGet.path -- readiness probe path + path: /readiness + # @schema {"name": "server_config.healths.readiness.readinessProbe.httpGet.port", "type": "string"} + # server_config.healths.readiness.readinessProbe.httpGet.port -- readiness probe port + port: readiness + # @schema {"name": "server_config.healths.readiness.readinessProbe.httpGet.scheme", "type": "string"} + # server_config.healths.readiness.readinessProbe.httpGet.scheme -- readiness probe scheme + scheme: HTTP + # @schema {"name": "server_config.healths.readiness.readinessProbe.initialDelaySeconds", "type": "integer"} + # server_config.healths.readiness.readinessProbe.initialDelaySeconds -- readiness probe initial delay seconds + initialDelaySeconds: 10 + # @schema {"name": "server_config.healths.readiness.readinessProbe.timeoutSeconds", "type": "integer"} + # server_config.healths.readiness.readinessProbe.timeoutSeconds -- readiness probe timeout seconds + timeoutSeconds: 2 + # @schema {"name": "server_config.healths.readiness.readinessProbe.successThreshold", "type": "integer"} + # server_config.healths.readiness.readinessProbe.successThreshold -- readiness probe success threshold + successThreshold: 1 + # @schema {"name": "server_config.healths.readiness.readinessProbe.failureThreshold", "type": "integer"} + # server_config.healths.readiness.readinessProbe.failureThreshold -- readiness probe failure threshold + failureThreshold: 2 + # @schema {"name": "server_config.healths.readiness.readinessProbe.periodSeconds", "type": "integer"} + # server_config.healths.readiness.readinessProbe.periodSeconds -- readiness probe period seconds + periodSeconds: 3 + # @schema {"name": "server_config.healths.readiness.server", "type": "object"} + server: + # @schema {"name": "server_config.healths.readiness.server.mode", "type": "string"} + # 
server_config.healths.readiness.server.mode -- readiness server mode + mode: "" + # @schema {"name": "server_config.healths.readiness.server.probe_wait_time", "type": "string"} + # server_config.healths.readiness.server.probe_wait_time -- readiness server probe wait time + probe_wait_time: "3s" + # @schema {"name": "server_config.healths.readiness.server.network", "type": "string", "enum": ["tcp", "tcp4", "tcp6", "udp", "udp4", "udp6", "unix", "unixgram", "unixpacket"]} + # server_config.healths.readiness.server.network -- readiness server network + network: tcp + # @schema {"name": "server_config.healths.readiness.server.socket_path", "type": "string"} + # server_config.healths.readiness.server.socket_path -- readiness server socket_path + socket_path: "" + # @schema {"name": "server_config.healths.readiness.server.http", "type": "object"} + http: + # @schema {"name": "server_config.healths.readiness.server.http.shutdown_duration", "type": "string"} + # server_config.healths.readiness.server.http.shutdown_duration -- readiness server shutdown duration + shutdown_duration: "0s" + # @schema {"name": "server_config.healths.readiness.server.http.handler_timeout", "type": "string"} + # server_config.healths.readiness.server.http.handler_timeout -- readiness server handler timeout + handler_timeout: "" + # @schema {"name": "server_config.healths.readiness.server.http.idle_timeout", "type": "string"} + # server_config.healths.readiness.server.http.idle_timeout -- readiness server idle timeout + idle_timeout: "" + # @schema {"name": "server_config.healths.readiness.server.http.read_header_timeout", "type": "string"} + # server_config.healths.readiness.server.http.read_header_timeout -- readiness server read header timeout + read_header_timeout: "" + # @schema {"name": "server_config.healths.readiness.server.http.read_timeout", "type": "string"} + # server_config.healths.readiness.server.http.read_timeout -- readiness server read timeout + read_timeout: "" + # @schema {"name": 
"server_config.healths.readiness.server.http.write_timeout", "type": "string"} + # server_config.healths.readiness.server.http.write_timeout -- readiness server write timeout + write_timeout: "" + # @schema {"name": "server_config.healths.readiness.server.socket_option", "alias": "socket_option"} + socket_option: + # server_config.healths.readiness.server.socket_option.reuse_port -- server listen socket option for reuse_port functionality + reuse_port: true + # server_config.healths.readiness.server.socket_option.reuse_addr -- server listen socket option for reuse_addr functionality + reuse_addr: true + # server_config.healths.readiness.server.socket_option.tcp_fast_open -- server listen socket option for tcp_fast_open functionality + tcp_fast_open: true + # server_config.healths.readiness.server.socket_option.tcp_no_delay -- server listen socket option for tcp_no_delay functionality + tcp_no_delay: true + # server_config.healths.readiness.server.socket_option.tcp_cork -- server listen socket option for tcp_cork functionality + tcp_cork: false + # server_config.healths.readiness.server.socket_option.tcp_quick_ack -- server listen socket option for tcp_quick_ack functionality + tcp_quick_ack: true + # server_config.healths.readiness.server.socket_option.tcp_defer_accept -- server listen socket option for tcp_defer_accept functionality + tcp_defer_accept: true + # server_config.healths.readiness.server.socket_option.ip_transparent -- server listen socket option for ip_transparent functionality + ip_transparent: false + # server_config.healths.readiness.server.socket_option.ip_recover_destination_addr -- server listen socket option for ip_recover_destination_addr functionality + ip_recover_destination_addr: false diff --git a/charts/vald-benchmark-operator/values.schema.json b/charts/vald-benchmark-operator/values.schema.json index 57fb5cd25bb..8e4ded670dd 100644 --- a/charts/vald-benchmark-operator/values.schema.json +++ 
b/charts/vald-benchmark-operator/values.schema.json @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", + "$schema": "https://json-schema.org/draft-07/schema#", "title": "Values", "type": "object", "properties": { diff --git a/cmd/tools/benchmark/job/sample.yaml b/cmd/tools/benchmark/job/sample.yaml index b9ee53ab515..d49500d6b1a 100644 --- a/cmd/tools/benchmark/job/sample.yaml +++ b/cmd/tools/benchmark/job/sample.yaml @@ -24,58 +24,121 @@ server_config: - name: grpc host: 0.0.0.0 port: 8081 + probe_wait_time: 3s + socket_path: "" + mode: GRPC grpc: bidirectional_stream_concurrency: 20 - connection_timeout: "" - header_table_size: 0 - initial_conn_window_size: 0 - initial_window_size: 0 - interceptors: [] + max_receive_message_size: 0 + max_send_message_size: 0 + initial_window_size: 1048576 + initial_conn_window_size: 2097152 keepalive: + max_conn_idle: "" max_conn_age: "" max_conn_age_grace: "" - max_conn_idle: "" - time: "" - timeout: "" - max_header_list_size: 0 - max_receive_message_size: 0 - max_send_message_size: 0 - read_buffer_size: 0 + time: "3h" + timeout: "60s" + min_time: "10m" + permit_without_stream: true write_buffer_size: 0 - mode: GRPC - probe_wait_time: 60s + read_buffer_size: 0 + connection_timeout: "" + max_header_list_size: 0 + header_table_size: 0 + interceptors: + - "RecoverInterceptor" + enable_reflection: true + socket_option: + reuse_port: true + reuse_addr: true + tcp_fast_open: false + tcp_no_delay: false + tcp_cork: false + tcp_quick_ack: false + tcp_defer_accept: false + ip_transparent: false + ip_recover_destination_addr: false restart: true health_check_servers: - name: liveness host: 0.0.0.0 port: 3000 + mode: "" + probe_wait_time: "3s" + network: tcp + socket_path: "" http: + shutdown_duration: "5s" handler_timeout: "" idle_timeout: "" read_header_timeout: "" read_timeout: "" - shutdown_duration: 5s write_timeout: "" - mode: "" - probe_wait_time: 60s + socket_option: + reuse_port: true + reuse_addr: true + 
tcp_fast_open: true + tcp_no_delay: true + tcp_cork: false + tcp_quick_ack: true + tcp_defer_accept: false + ip_transparent: false + ip_recover_destination_addr: false - name: readiness host: 0.0.0.0 port: 3001 + mode: "" + probe_wait_time: "3s" + network: tcp + socket_path: "" http: + shutdown_duration: "0s" handler_timeout: "" idle_timeout: "" read_header_timeout: "" read_timeout: "" - shutdown_duration: 0s write_timeout: "" - mode: "" - probe_wait_time: 60s + socket_option: + reuse_port: true + reuse_addr: true + tcp_fast_open: true + tcp_no_delay: true + tcp_cork: false + tcp_quick_ack: true + tcp_defer_accept: false + ip_transparent: false + ip_recover_destination_addr: false metrics_servers: + - name: pprof + host: 0.0.0.0 + port: 6060 + probe_wait_time: "3s" + socket_path: "" + mode: REST + network: tcp + http: + handler_timeout: "5s" + idle_timeout: "2s" + read_header_timeout: "1s" + read_timeout: "1s" + shutdown_duration: "5s" + write_timeout: "1m" + socket_option: + reuse_port: true + reuse_addr: true + tcp_fast_open: false + tcp_no_delay: false + tcp_cork: false + tcp_quick_ack: false + tcp_defer_accept: false + ip_transparent: false + ip_recover_destination_addr: false startup_strategy: - liveness - - grpc - readiness - full_shutdown_duration: 600s + - grpc + full_shutdown_duration: 30s tls: ca: /path/to/ca cert: /path/to/cert @@ -83,86 +146,115 @@ server_config: key: /path/to/key observability: enabled: false - collector: - duration: 5s - metrics: - enable_cgo: true - enable_goroutine: true - enable_memory: true - enable_version_info: true - version_info_labels: - - vald_version - - server_name - - git_commit - - build_time - - go_version - - go_os - - go_arch - - ngt_version + otlp: + collector_endpoint: "" + attribute: + namespace: _MY_POD_NAMESPACE_ + pod_name: _MY_POD_NAME_ + node_name: _MY_NODE_NAME_ + service_name: vald-benchmark + trace_batch_timeout: "1s" + trace_export_timeout: "1m" + trace_max_export_batch_size: 1024 + trace_max_queue_size: 
256 + metrics: + enable_cgo: true + enable_goroutine: true + enable_memory: true + enable_version_info: true + version_info_labels: + - vald_version + - server_name + - git_commit + - build_time + - go_version + - go_os + - go_arch + - ngt_version trace: enabled: false - sampling_rate: 1 - prometheus: - enabled: false - endpoint: /metrics - namespace: vald - jaeger: - enabled: false - collector_endpoint: "" - agent_endpoint: "jaeger-agent.default.svc.cluster.local:6831" - username: "" - password: "" - service_name: "vald-benchmark-job" - buffer_max_count: 10 - stackdriver: - project_id: "" - client: - api_key: "" - audiences: [] - authentication_enabled: true - credentials_file: "" - credentials_json: "" - endpoint: "" - quota_project: "" - request_reason: "" - scopes: [] - telemetry_enabled: true - user_agent: "" - exporter: - bundle_count_threshold: 0 - bundle_delay_threshold: "0" - location: "" - metric_prefix: vald.vdaas.org - monitoring_enabled: false - number_of_workers: 1 - reporting_interval: 1m - skip_cmd: false - timeout: 5s - trace_spans_buffer_max_bytes: 0 - tracing_enabled: false - profiler: - enabled: false - service: "vald-benchmark-job" - service_version: "" - debug_logging: false - mutex_profiling: true - cpu_profiling: true - alloc_profiling: true - heap_profiling: true - goroutine_profiling: true - alloc_force_gc: false - api_addr: "" - instance: "" - zone: "" job: - job_type: "search" - dimension: 784 - iter: 100 - num: 10 - minNum: 10 - radius: -1 - epsilon: 0.1 - timeout: 5s - gateway_client: - addrs: - - vald-lb-gateway.default.svc.cluster.local:8081 + replica: 1 + repetition: 1 + before_job_name: "" + before_job_namespace: "" + rps: 200 + concurrency_limit: 200 + client_config: + health_check_duration: "1s" + connection_pool: + enable_dns_resolver: true + enable_rebalance: true + rebalance_duration: "30m" + size: 3 + old_conn_close_duration: "2m" + backoff: + initial_duration: "5ms" + backoff_time_limit: "5s" + maximum_duration: "5s" + 
jitter_limit: "100ms" + backoff_factor: 1.1 + retry_count: 100 + enable_error_log: true + circuit_breaker: + closed_error_rate: 0.7 + half_open_error_rate: 0.5 + min_samples: 1000 + open_timeout: "1s" + closed_refresh_timeout: "10s" + call_option: + wait_for_ready: true + max_retry_rpc_buffer_size: 0 + max_recv_msg_size: 0 + max_send_msg_size: 0 + dial_option: + write_buffer_size: 0 + read_buffer_size: 0 + initial_window_size: 1048576 + initial_connection_window_size: 2097152 + max_msg_size: 0 + backoff_max_delay: "120s" + backoff_base_delay: "1s" + backoff_multiplier: 1.6 + backoff_jitter: 0.2 + min_connection_timeout: "20s" + enable_backoff: false + insecure: true + timeout: "" + interceptors: [] + net: + dns: + cache_enabled: true + refresh_duration: "30m" + cache_expiration: "1h" + dialer: + timeout: "" + keepalive: "" + fallback_delay: "" + dual_stack_enabled: true + tls: + enabled: false + cert: /path/to/cert + key: /path/to/key + ca: /path/to/ca + insecure_skip_verify: false + socket_option: + reuse_port: true + reuse_addr: true + tcp_fast_open: false + tcp_no_delay: false + tcp_cork: false + tcp_quick_ack: false + tcp_defer_accept: false + ip_transparent: false + ip_recover_destination_addr: false + keepalive: + time: "120s" + timeout: "30s" + permit_without_stream: false + tls: + enabled: false + cert: /path/to/cert + key: /path/to/key + ca: /path/to/ca + insecure_skip_verify: false diff --git a/go.mod b/go.mod index 3cb075bd774..43dd3f361a1 100755 --- a/go.mod +++ b/go.mod @@ -388,7 +388,7 @@ require ( go.uber.org/automaxprocs v0.0.0-00010101000000-000000000000 go.uber.org/goleak v1.2.1 go.uber.org/zap v1.26.0 - go.uber.org/ratelimit v0.2.0 + go.uber.org/ratelimit v0.3.0 gocloud.dev v0.0.0-00010101000000-000000000000 golang.org/x/net v0.19.0 golang.org/x/oauth2 v0.15.0 @@ -420,7 +421,7 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/ajstarks/svgo 
v0.0.0-20211024235047-1546f124cd8b // indirect - github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 // indirect + github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/campoy/embedmd v1.0.0 // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect diff --git a/go.sum b/go.sum index e5d7cb78cd2..cbeeccd22e4 100644 --- a/go.sum +++ b/go.sum @@ -172,8 +172,6 @@ github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyR github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= -github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI= -github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/andybalholm/stroke v0.0.0-20221221101821-bd29b49d73f0/go.mod h1:ccdDYaY5+gO+cbnQdFxEXqfy0RkoV25H3jLXUDNM3wg= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= diff --git a/internal/config/config.go b/internal/config/config.go index 201b3d9c0b5..27e9be31f8b 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -19,9 +19,11 @@ package config import ( "bytes" + "fmt" "io/fs" "os" "path/filepath" + "reflect" "github.com/vdaas/vald/internal/conv" "github.com/vdaas/vald/internal/encoding/json" @@ -131,3 +133,131 @@ func ToRawYaml(data interface{}) string { } return buf.String() } + +// Merge merges multiple objects to one object. +// the value of each field is prioritized the value of last index of `objs`. +// if the length of `objs` is zero, it returns initial value of type T. 
+func Merge[T any](objs ...T) (dst T, err error) { + switch len(objs) { + case 0: + return dst, nil + case 1: + dst = objs[0] + return dst, nil + default: + dst = objs[0] + visited := make(map[uintptr]bool) + rdst := reflect.ValueOf(&dst) + for _, src := range objs[1:] { + err = deepMerge(rdst, reflect.ValueOf(&src), visited, "") + if err != nil { + return dst, err + } + } + } + return dst, err +} + +func deepMerge(dst, src reflect.Value, visited map[uintptr]bool, fieldPath string) (err error) { + if !src.IsValid() || src.IsZero() { + return nil + } else if !dst.IsValid() { + dst = src + log.Info(dst.Type(), dst, src) + } + dType := dst.Type() + sType := src.Type() + if dType != sType { + return errors.ErrNotMatchFieldType(fieldPath, dType, sType) + } + sKind := src.Kind() + if sKind == reflect.Ptr { + src = src.Elem() + } + if sKind == reflect.Struct && src.CanAddr() { + addr := src.Addr().Pointer() + if visited[addr] { + return nil + } + if src.NumField() > 1 { + visited[addr] = true + } + } + switch dst.Kind() { + case reflect.Ptr: + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + return deepMerge(dst.Elem(), src, visited, fieldPath) + case reflect.Struct: + dnum := dst.NumField() + snum := src.NumField() + if dnum != snum { + return errors.ErrNotMatchFieldNum(fieldPath, dnum, snum) + } + for i := 0; i < dnum; i++ { + dstField := dst.Field(i) + if dstField.CanSet() { + nf := fmt.Sprintf("%s.%s(%d)", fieldPath, dType.Field(i).Name, i) + if err = deepMerge(dstField, src.Field(i), visited, nf); err != nil { + return errors.ErrDeepMergeKind(dst.Kind().String(), nf, err) + } + } + } + case reflect.Slice: + srcLen := src.Len() + if srcLen > 0 { + if dst.IsNil() { + dst.Set(reflect.MakeSlice(dType, srcLen, srcLen)) + } else { + diffLen := srcLen - dst.Len() + if diffLen > 0 { + dst.Set(reflect.AppendSlice(dst, reflect.MakeSlice(dType, diffLen, diffLen))) + } + } + for i := 0; i < srcLen; i++ { + nf := fmt.Sprintf("%s[%d]", fieldPath, i) + if err = 
deepMerge(dst.Index(i), src.Index(i), visited, nf); err != nil { + return errors.ErrDeepMergeKind(dst.Kind().String(), nf, err) + } + } + } + case reflect.Array: + srcLen := src.Len() + if srcLen != dst.Len() { + return errors.ErrNotMatchArrayLength(fieldPath, dst.Len(), srcLen) + } + for i := 0; i < srcLen; i++ { + nf := fmt.Sprintf("%s[%d]", fieldPath, i) + if err = deepMerge(dst.Index(i), src.Index(i), visited, nf); err != nil { + return errors.ErrDeepMergeKind(dst.Kind().String(), nf, err) + } + } + case reflect.Map: + if dst.IsNil() { + dst.Set(reflect.MakeMapWithSize(dType, src.Len())) + } + dElem := dType.Elem() + for _, key := range src.MapKeys() { + vdst := dst.MapIndex(key) + // fmt.Println(vdst.IsValid(), key, vdst) + if !vdst.IsValid() { + vdst = reflect.New(dElem).Elem() + } + nf := fmt.Sprintf("%s[%s]", fieldPath, key) + if vdst.CanSet() { + if err = deepMerge(vdst, src.MapIndex(key), visited, nf); err != nil { + return errors.Errorf("error in array at %s: %w", nf, err) + } + dst.SetMapIndex(key, vdst) + } else { + dst.SetMapIndex(key, src.MapIndex(key)) + } + } + default: + if dst.CanSet() { + dst.Set(src) + } + } + return nil +} diff --git a/internal/config/config_test.go b/internal/config/config_test.go index deb563cbc19..7f931915a9c 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -18,6 +18,7 @@ package config import ( + "encoding/json" "io/fs" "os" "reflect" @@ -1431,4 +1432,460 @@ func TestToRawYaml(t *testing.T) { } } -// NOT IMPLEMENTED BELOW +func TestMerge(t *testing.T) { + type config struct { + // Config GlobalConfig + Discoverer *Discoverer + } + + type args struct { + objs []*config + } + type want struct { + wantDst *config + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, *config, error) error + beforeFunc func(*testing.T, args) + afterFunc func(*testing.T, args) + } + defaultCheckFunc := func(w want, gotDst *config, err error) error { + if 
!errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + if !reflect.DeepEqual(gotDst, w.wantDst) { + gb, _ := json.Marshal(gotDst) + wb, _ := json.Marshal(w.wantDst) + return errors.Errorf("got: \"%s\",\n\t\t\t\twant: \"%s\"", string(gb), string(wb)) + } + return nil + } + defaultBeforeFunc := func(t *testing.T, _ args) { + t.Helper() + } + defaultAfterFunc := func(t *testing.T, _ args) { + t.Helper() + } + + // dst + dst := &config{ + Discoverer: &Discoverer{ + Name: "dst", + Namespace: "dst", + DiscoveryDuration: "1m", + Net: &Net{ + DNS: &DNS{ + RefreshDuration: "2s", + CacheExpiration: "10s", + }, + Dialer: &Dialer{ + Timeout: "2s", + Keepalive: "1m", + FallbackDelay: "2s", + DualStackEnabled: true, + }, + SocketOption: &SocketOption{ + ReusePort: true, + ReuseAddr: true, + TCPFastOpen: false, + TCPNoDelay: true, + TCPCork: true, + TCPQuickAck: false, + TCPDeferAccept: true, + IPTransparent: true, + IPRecoverDestinationAddr: true, + }, + TLS: &TLS{ + Enabled: false, + Cert: "/path/to/cert", + Key: "/path/to/key", + CA: "/path/to/ca", + InsecureSkipVerify: false, + }, + }, + Selectors: &Selectors{ + Pod: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "dst", + "vald.vdaas.org/pod": "dst", + }, + Fields: map[string]string{ + "vald.vdaas.org": "dst", + "vald.vdaas.org/pod": "dst", + }, + }, + Node: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "dst", + "vald.vdaas.org/node": "dst", + }, + Fields: map[string]string{ + "vald.vdaas.org": "dst", + "vald.vdaas.org/node": "dst", + }, + }, + NodeMetrics: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "dst", + "vald.vdaas.org/node": "dst", + }, + Fields: map[string]string{ + "vald.vdaas.org": "dst", + "vald.vdaas.org/node": "dst", + }, + }, + PodMetrics: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "dst", + "vald.vdaas.org/pod": "dst", + }, + Fields: map[string]string{ + "vald.vdaas.org": "dst", + 
"vald.vdaas.org/pod": "dst", + }, + }, + }, + }, + } + // src + src := &config{ + Discoverer: &Discoverer{ + Name: "src", + Namespace: "src", + DiscoveryDuration: "10m", + Net: &Net{ + DNS: &DNS{ + RefreshDuration: "20s", + CacheExpiration: "1s", + }, + Dialer: &Dialer{ + Timeout: "20s", + Keepalive: "10m", + FallbackDelay: "20s", + DualStackEnabled: true, + }, + SocketOption: &SocketOption{ + TCPFastOpen: true, + }, + TLS: &TLS{ + Cert: "/path/to/cert", + Key: "/path/to/key", + CA: "/path/to/ca", + InsecureSkipVerify: false, + }, + }, + Selectors: &Selectors{ + Pod: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/pod": "src", + }, + Fields: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/pod": "src", + }, + }, + Node: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/src": "src", + }, + Fields: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/node": "src", + }, + }, + NodeMetrics: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/node": "src", + }, + Fields: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/node": "src", + }, + }, + PodMetrics: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/pod": "src", + }, + Fields: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/pod": "src", + }, + }, + }, + }, + } + w := &config{ + Discoverer: &Discoverer{ + Name: "src", + Namespace: "src", + DiscoveryDuration: "10m", + Net: &Net{ + DNS: &DNS{ + CacheEnabled: false, + RefreshDuration: "20s", + CacheExpiration: "1s", + }, + Dialer: &Dialer{ + Timeout: "20s", + Keepalive: "10m", + FallbackDelay: "20s", + DualStackEnabled: true, + }, + SocketOption: &SocketOption{ + ReusePort: true, + ReuseAddr: true, + TCPFastOpen: true, + TCPNoDelay: true, + TCPCork: true, + TCPQuickAck: false, + TCPDeferAccept: true, + IPTransparent: true, + IPRecoverDestinationAddr: true, + }, + TLS: &TLS{ 
+ Enabled: false, + Cert: "/path/to/cert", + Key: "/path/to/key", + CA: "/path/to/ca", + InsecureSkipVerify: false, + }, + }, + Selectors: &Selectors{ + Pod: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/pod": "src", + }, + Fields: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/pod": "src", + }, + }, + Node: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/node": "dst", + "vald.vdaas.org/src": "src", + }, + Fields: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/node": "src", + }, + }, + NodeMetrics: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/node": "src", + }, + Fields: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/node": "src", + }, + }, + PodMetrics: &Selector{ + Labels: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/pod": "src", + }, + Fields: map[string]string{ + "vald.vdaas.org": "src", + "vald.vdaas.org/pod": "src", + }, + }, + }, + }, + } + + tests := []test{ + { + name: "return nil config when len(objs) is 0.", + args: args{ + objs: []*config{}, + }, + want: want{}, + checkFunc: defaultCheckFunc, + beforeFunc: defaultBeforeFunc, + afterFunc: defaultAfterFunc, + }, + { + name: "return dst config when len(objs) is 1.", + args: args{ + objs: []*config{ + dst, + }, + }, + want: want{ + wantDst: dst, + }, + checkFunc: defaultCheckFunc, + beforeFunc: defaultBeforeFunc, + afterFunc: defaultAfterFunc, + }, + { + name: "return merged config when len(objs) is 2.", + args: args{ + objs: []*config{ + dst, + src, + }, + }, + want: want{ + wantDst: w, + }, + checkFunc: defaultCheckFunc, + beforeFunc: defaultBeforeFunc, + afterFunc: defaultAfterFunc, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) + if test.beforeFunc != nil { + test.beforeFunc(tt, test.args) + } + if 
test.afterFunc != nil { + defer test.afterFunc(tt, test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + gotDst, err := Merge(test.args.objs...) + t.Log("err: \t", err, "\n\t", gotDst, "\n\t", test.want.wantDst) + if err := checkFunc(test.want, gotDst, err); err != nil { + tt.Errorf("error: \n\t\t\t\t%v", err) + } + }) + } +} + +func Test_deepMerge(t *testing.T) { + type config struct { + Slice []int + GlobalConfig + } + type args struct { + dst reflect.Value + src reflect.Value + visited map[uintptr]bool + fieldPath string + } + type want struct { + err error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(*testing.T, args) + afterFunc func(*testing.T, args) + } + defaultCheckFunc := func(w want, err error) error { + if !errors.Is(err, w.err) { + return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + } + return nil + } + defaultBeforeFunc := func(t *testing.T, _ args) { + t.Helper() + } + defaultAfterFunc := func(t *testing.T, _ args) { + t.Helper() + } + tests := []test{ + func() test { + dst := &config{ + GlobalConfig: GlobalConfig{ + Version: "v0.0.1", + TZ: "UTC", + Logging: &Logging{ + Logger: "glg", + Level: "debug", + Format: "raw", + }, + }, + } + src := &config{ + GlobalConfig: GlobalConfig{ + Version: "v1.0.1", + TZ: "JST", + Logging: &Logging{ + Logger: "glg", + Format: "json", + }, + }, + } + visited := make(map[uintptr]bool) + return test{ + name: "success merge struct by src", + args: args{ + dst: reflect.ValueOf(dst), + src: reflect.ValueOf(src), + visited: visited, + fieldPath: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + beforeFunc: defaultBeforeFunc, + afterFunc: defaultAfterFunc, + } + }(), + func() test { + dst := &config{ + Slice: []int{1, 2, 3}, + } + src := &config{ + Slice: []int{4, 5}, + } + visited := make(map[uintptr]bool) + return test{ + name: "success merge struct by 
slice", + args: args{ + dst: reflect.ValueOf(dst), + src: reflect.ValueOf(src), + visited: visited, + fieldPath: "", + }, + want: want{}, + checkFunc: defaultCheckFunc, + beforeFunc: defaultBeforeFunc, + afterFunc: defaultAfterFunc, + } + }(), + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + tt.Parallel() + defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) + if test.beforeFunc != nil { + test.beforeFunc(tt, test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(tt, test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + err := deepMerge(test.args.dst, test.args.src, test.args.visited, test.args.fieldPath) + if err := checkFunc(test.want, err); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} diff --git a/internal/errors/config.go b/internal/errors/config.go index 5908c3cda00..d2b3f16fa7b 100644 --- a/internal/errors/config.go +++ b/internal/errors/config.go @@ -17,10 +17,28 @@ // Package errors provides error types and function package errors +import "reflect" + var ( ErrInvalidConfig = New("component config is invalid") ErrUnsupportedConfigFileType = func(ext string) error { return Errorf("unsupported file type: %s", ext) } + + ErrNotMatchFieldType = func(path string, dType, sType reflect.Type) error { + return Errorf("types do not match at %s: %v vs %v", path, dType, sType) + } + + ErrNotMatchFieldNum = func(path string, dNum, sNum int) error { + return Errorf("number of fields do not match at %s, dst: %d, src: %d", path, dNum, sNum) + } + + ErrNotMatchArrayLength = func(path string, dLen, sLen int) error { + return Errorf("array length do not match at %s, dst: %d, src: %d", path, dLen, sLen) + } + + ErrDeepMergeKind = func(kind string, nf string, err error) error { + return Errorf("error in %s at %s: %w", kind, nf, err) + } ) diff --git a/internal/k8s/vald/benchmark/api/v1/job_types.go b/internal/k8s/vald/benchmark/api/v1/job_types.go index 
729bf276299..9b8bbf1f0b9 100644 --- a/internal/k8s/vald/benchmark/api/v1/job_types.go +++ b/internal/k8s/vald/benchmark/api/v1/job_types.go @@ -23,23 +23,25 @@ import ( ) type BenchmarkJobSpec struct { - Target *BenchmarkTarget `json:"target,omitempty"` - Dataset *BenchmarkDataset `json:"dataset,omitempty"` - Dimension int `json:"dimension,omitempty"` - Replica int `json:"replica,omitempty"` - Repetition int `json:"repetition,omitempty"` - JobType string `json:"job_type,omitempty"` - InsertConfig *config.InsertConfig `json:"insert_config,omitempty"` - UpdateConfig *config.UpdateConfig `json:"update_config,omitempty"` - UpsertConfig *config.UpsertConfig `json:"upsert_config,omitempty"` - SearchConfig *config.SearchConfig `json:"search_config,omitempty"` - RemoveConfig *config.RemoveConfig `json:"remove_config,omitempty"` - ObjectConfig *config.ObjectConfig `json:"object_config,omitempty"` - ClientConfig *config.GRPCClient `json:"client_config,omitempty"` - Rules []*config.BenchmarkJobRule `json:"rules,omitempty"` - RPS int `json:"rps,omitempty"` - ConcurrencyLimit int `json:"concurrency_limit,omitempty"` - TTLSecondsAfterFinished int `json:"ttl_seconds_after_finished,omitempty"` + *config.GlobalConfig `json:",omitempty" yaml:""` + ServerConfig *config.Servers `json:"server_config,omitempty" yaml:"server_config"` + Target *BenchmarkTarget `json:"target,omitempty" yaml:"target"` + Dataset *BenchmarkDataset `json:"dataset,omitempty" yaml:"dataset"` + Dimension int `json:"dimension,omitempty" yaml:"dimension"` + Replica int `json:"replica,omitempty" yaml:"replica"` + Repetition int `json:"repetition,omitempty" yaml:"repetition"` + JobType string `json:"job_type,omitempty" yaml:"job_type"` + InsertConfig *config.InsertConfig `json:"insert_config,omitempty" yaml:"insert_config"` + UpdateConfig *config.UpdateConfig `json:"update_config,omitempty" yaml:"update_config"` + UpsertConfig *config.UpsertConfig `json:"upsert_config,omitempty" yaml:"upsert_config"` + SearchConfig 
*config.SearchConfig `json:"search_config,omitempty" yaml:"search_config"` + RemoveConfig *config.RemoveConfig `json:"remove_config,omitempty" yaml:"remove_config"` + ObjectConfig *config.ObjectConfig `json:"object_config,omitempty" yaml:"object_config"` + ClientConfig *config.GRPCClient `json:"client_config,omitempty" yaml:"client_config"` + Rules []*config.BenchmarkJobRule `json:"rules,omitempty" yaml:"rules"` + RPS int `json:"rps,omitempty" yaml:"rps"` + ConcurrencyLimit int `json:"concurrency_limit,omitempty" yaml:"concurrency_limit"` + TTLSecondsAfterFinished int `json:"ttl_seconds_after_finished,omitempty" yaml:"ttl_seconds_after_finished"` } type BenchmarkJobStatus string diff --git a/k8s/tools/benchmark/operator/crds/valdbenchmarkjob.yaml b/k8s/tools/benchmark/operator/crds/valdbenchmarkjob.yaml index d85ea0c12b7..f5669958ee6 100644 --- a/k8s/tools/benchmark/operator/crds/valdbenchmarkjob.yaml +++ b/k8s/tools/benchmark/operator/crds/valdbenchmarkjob.yaml @@ -280,6 +280,34 @@ spec: dimension: type: integer minimum: 1 + global_config: + type: object + properties: + logging: + type: object + properties: + format: + type: string + enum: + - raw + - json + level: + type: string + enum: + - debug + - info + - warn + - error + - fatal + logger: + type: string + enum: + - glg + - zap + time_zone: + type: string + version: + type: string insert_config: type: object properties: @@ -349,6 +377,416 @@ spec: type: number timeout: type: string + server_config: + type: object + properties: + healths: + type: object + properties: + liveness: + type: object + properties: + enabled: + type: boolean + host: + type: string + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + httpGet: + type: object + properties: + path: + type: string + port: + type: string + scheme: + type: string + initialDelaySeconds: + type: integer + periodSeconds: + type: integer + successThreshold: + type: integer + timeoutSeconds: + type: integer + port: + type: integer + 
maximum: 65535 + minimum: 0 + server: + type: object + properties: + http: + type: object + properties: + handler_timeout: + type: string + idle_timeout: + type: string + read_header_timeout: + type: string + read_timeout: + type: string + shutdown_duration: + type: string + write_timeout: + type: string + mode: + type: string + network: + type: string + enum: + - tcp + - tcp4 + - tcp6 + - udp + - udp4 + - udp6 + - unix + - unixgram + - unixpacket + probe_wait_time: + type: string + socket_option: + type: object + properties: + ip_recover_destination_addr: + type: boolean + ip_transparent: + type: boolean + reuse_addr: + type: boolean + reuse_port: + type: boolean + tcp_cork: + type: boolean + tcp_defer_accept: + type: boolean + tcp_fast_open: + type: boolean + tcp_no_delay: + type: boolean + tcp_quick_ack: + type: boolean + socket_path: + type: string + servicePort: + type: integer + maximum: 65535 + minimum: 0 + readiness: + type: object + properties: + enabled: + type: boolean + host: + type: string + port: + type: integer + maximum: 65535 + minimum: 0 + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + httpGet: + type: object + properties: + path: + type: string + port: + type: string + scheme: + type: string + initialDelaySeconds: + type: integer + periodSeconds: + type: integer + successThreshold: + type: integer + timeoutSeconds: + type: integer + server: + type: object + properties: + http: + type: object + properties: + handler_timeout: + type: string + idle_timeout: + type: string + read_header_timeout: + type: string + read_timeout: + type: string + shutdown_duration: + type: string + write_timeout: + type: string + mode: + type: string + network: + type: string + enum: + - tcp + - tcp4 + - tcp6 + - udp + - udp4 + - udp6 + - unix + - unixgram + - unixpacket + probe_wait_time: + type: string + socket_option: + type: object + properties: + ip_recover_destination_addr: + type: boolean + ip_transparent: + type: boolean + 
reuse_addr: + type: boolean + reuse_port: + type: boolean + tcp_cork: + type: boolean + tcp_defer_accept: + type: boolean + tcp_fast_open: + type: boolean + tcp_no_delay: + type: boolean + tcp_quick_ack: + type: boolean + socket_path: + type: string + servicePort: + type: integer + maximum: 65535 + minimum: 0 + startup: + type: object + properties: + enabled: + type: boolean + port: + type: integer + maximum: 65535 + minimum: 0 + startupProbe: + type: object + properties: + failureThreshold: + type: integer + httpGet: + type: object + properties: + path: + type: string + port: + type: string + scheme: + type: string + initialDelaySeconds: + type: integer + periodSeconds: + type: integer + successThreshold: + type: integer + timeoutSeconds: + type: integer + servers: + type: object + properties: + grpc: + type: object + properties: + enabled: + type: boolean + host: + type: string + port: + type: integer + maximum: 65535 + minimum: 0 + server: + type: object + properties: + grpc: + type: object + properties: + bidirectional_stream_concurrency: + type: integer + connection_timeout: + type: string + enable_reflection: + type: boolean + header_table_size: + type: integer + initial_conn_window_size: + type: integer + initial_window_size: + type: integer + interceptors: + type: array + items: + type: string + enum: + - RecoverInterceptor + - AccessLogInterceptor + - TraceInterceptor + - MetricInterceptor + keepalive: + type: object + properties: + max_conn_age: + type: string + max_conn_age_grace: + type: string + max_conn_idle: + type: string + min_time: + type: string + permit_without_stream: + type: boolean + time: + type: string + timeout: + type: string + max_header_list_size: + type: integer + max_receive_message_size: + type: integer + max_send_message_size: + type: integer + read_buffer_size: + type: integer + write_buffer_size: + type: integer + mode: + type: string + network: + type: string + enum: + - tcp + - tcp4 + - tcp6 + - udp + - udp4 + - udp6 + - unix + 
- unixgram + - unixpacket + probe_wait_time: + type: string + restart: + type: boolean + socket_option: + type: object + properties: + ip_recover_destination_addr: + type: boolean + ip_transparent: + type: boolean + reuse_addr: + type: boolean + reuse_port: + type: boolean + tcp_cork: + type: boolean + tcp_defer_accept: + type: boolean + tcp_fast_open: + type: boolean + tcp_no_delay: + type: boolean + tcp_quick_ack: + type: boolean + socket_path: + type: string + servicePort: + type: integer + maximum: 65535 + minimum: 0 + rest: + type: object + properties: + enabled: + type: boolean + host: + type: string + port: + type: integer + maximum: 65535 + minimum: 0 + server: + type: object + properties: + http: + type: object + properties: + handler_timeout: + type: string + idle_timeout: + type: string + read_header_timeout: + type: string + read_timeout: + type: string + shutdown_duration: + type: string + write_timeout: + type: string + mode: + type: string + network: + type: string + enum: + - tcp + - tcp4 + - tcp6 + - udp + - udp4 + - udp6 + - unix + - unixgram + - unixpacket + probe_wait_time: + type: string + socket_option: + type: object + properties: + ip_recover_destination_addr: + type: boolean + ip_transparent: + type: boolean + reuse_addr: + type: boolean + reuse_port: + type: boolean + tcp_cork: + type: boolean + tcp_defer_accept: + type: boolean + tcp_fast_open: + type: boolean + tcp_no_delay: + type: boolean + tcp_quick_ack: + type: boolean + socket_path: + type: string + servicePort: + type: integer + maximum: 65535 + minimum: 0 target: type: object properties: diff --git a/pkg/tools/benchmark/job/config/config.go b/pkg/tools/benchmark/job/config/config.go index 3b1492e25d0..afab9a93929 100644 --- a/pkg/tools/benchmark/job/config/config.go +++ b/pkg/tools/benchmark/job/config/config.go @@ -19,6 +19,7 @@ package config import ( "context" + "encoding/json" "os" "github.com/vdaas/vald/internal/config" @@ -45,7 +46,7 @@ type Config struct { Job 
*config.BenchmarkJob `json:"job" yaml:"job"` // K8sClient represents kubernetes clients - K8sClient client.Client + K8sClient client.Client `json:"k8s_client" yaml:"k8s_client"` } var ( @@ -61,7 +62,6 @@ func NewConfig(ctx context.Context, path string) (cfg *Config, err error) { if err != nil { return nil, err } - if cfg != nil { cfg.Bind() } @@ -102,93 +102,184 @@ func NewConfig(ctx context.Context, path string) (cfg *Config, err error) { if err != nil { log.Warn(err.Error()) } else { - cfg.Job.Target = (*config.BenchmarkTarget)(jobResource.Spec.Target) - cfg.Job.Dataset = (*config.BenchmarkDataset)(jobResource.Spec.Dataset) - cfg.Job.Dimension = jobResource.Spec.Dimension - cfg.Job.Replica = jobResource.Spec.Replica - cfg.Job.Repetition = jobResource.Spec.Repetition - cfg.Job.JobType = jobResource.Spec.JobType - cfg.Job.Rules = jobResource.Spec.Rules - cfg.Job.InsertConfig = jobResource.Spec.InsertConfig - cfg.Job.UpdateConfig = jobResource.Spec.UpdateConfig - cfg.Job.UpsertConfig = jobResource.Spec.UpsertConfig - cfg.Job.SearchConfig = jobResource.Spec.SearchConfig - cfg.Job.RemoveConfig = jobResource.Spec.RemoveConfig - cfg.Job.ObjectConfig = jobResource.Spec.ObjectConfig - cfg.Job.ClientConfig = jobResource.Spec.ClientConfig - cfg.Job.RPS = jobResource.Spec.RPS - cfg.Job.ConcurrencyLimit = jobResource.Spec.ConcurrencyLimit + // create override Config + overrideCfg := new(Config) + if jobResource.Spec.GlobalConfig != nil { + overrideCfg.GlobalConfig = *jobResource.Spec.Bind() + } + if jobResource.Spec.ServerConfig != nil { + overrideCfg.Server = (*jobResource.Spec.ServerConfig).Bind() + } + // jobResource.Spec has another field comparering Config.Job, so json.Marshal and Unmarshal are used for embedding field value of Config.Job from jobResource.Spec + var overrideJobCfg config.BenchmarkJob + b, err := json.Marshal(*jobResource.Spec.DeepCopy()) + if err == nil { + err = json.Unmarshal([]byte(b), &overrideJobCfg) + if err != nil { + log.Warn(err.Error()) + } + 
overrideCfg.Job = overrideJobCfg.Bind() + } if annotations := jobResource.GetAnnotations(); annotations != nil { - cfg.Job.BeforeJobName = annotations[JOBNAME_ANNOTATION] - cfg.Job.BeforeJobNamespace = annotations[JOBNAMESPACE_ANNOTATION] + overrideCfg.Job.BeforeJobName = annotations[JOBNAME_ANNOTATION] + overrideCfg.Job.BeforeJobNamespace = annotations[JOBNAMESPACE_ANNOTATION] } + return config.Merge(cfg, overrideCfg) } - return cfg, nil } // func FakeData() { // d := Config{ -// Version: "v0.0.1", +// GlobalConfig: config.GlobalConfig{ +// Version: "v0.0.1", +// TZ: "JST", +// Logging: &config.Logging{ +// Format: "raw", +// Level: "debug", +// Logger: "glg", +// }, +// }, // Server: &config.Servers{ // Servers: []*config.Server{ // { -// Name: "agent-rest", -// Host: "127.0.0.1", -// Port: 8080, -// Mode: "REST", +// Name: "grpc", +// Host: "0.0.0.0", +// Port: 8081, +// Mode: "GRPC", // ProbeWaitTime: "3s", +// SocketPath: "", +// GRPC: &config.GRPC{ +// BidirectionalStreamConcurrency: 20, +// MaxReceiveMessageSize: 0, +// MaxSendMessageSize: 0, +// InitialWindowSize: 1048576, +// InitialConnWindowSize: 2097152, +// Keepalive: &config.GRPCKeepalive{ +// MaxConnIdle: "", +// MaxConnAge: "", +// MaxConnAgeGrace: "", +// Time: "3h", +// Timeout: "60s", +// MinTime: "10m", +// PermitWithoutStream: true, +// }, +// WriteBufferSize: 0, +// ReadBufferSize: 0, +// ConnectionTimeout: "", +// MaxHeaderListSize: 0, +// HeaderTableSize: 0, +// Interceptors: []string{ +// "RecoverInterceptor", +// }, +// EnableReflection: true, +// }, +// SocketOption: &config.SocketOption{ +// ReusePort: true, +// ReuseAddr: true, +// TCPFastOpen: false, +// TCPNoDelay: false, +// TCPCork: false, +// TCPQuickAck: false, +// TCPDeferAccept: false, +// IPTransparent: false, +// IPRecoverDestinationAddr: false, +// }, +// Restart: true, +// }, +// }, +// HealthCheckServers: []*config.Server{ +// { +// Name: "livenesss", +// Host: "0.0.0.0", +// Port: 3000, +// Mode: "", +// Network: "tcp", 
+// ProbeWaitTime: "3s", +// SocketPath: "", // HTTP: &config.HTTP{ +// HandlerTimeout: "", +// IdleTimeout: "", +// ReadHeaderTimeout: "", +// ReadTimeout: "", // ShutdownDuration: "5s", -// HandlerTimeout: "5s", -// IdleTimeout: "2s", -// ReadHeaderTimeout: "1s", -// ReadTimeout: "1s", -// WriteTimeout: "1s", +// WriteTimeout: "", +// }, +// SocketOption: &config.SocketOption{ +// ReusePort: true, +// ReuseAddr: true, +// TCPFastOpen: true, +// TCPNoDelay: true, +// TCPCork: false, +// TCPQuickAck: true, +// TCPDeferAccept: false, +// IPTransparent: false, +// IPRecoverDestinationAddr: false, // }, // }, // { -// Name: "agent-grpc", -// Host: "127.0.0.1", -// Port: 8082, -// Mode: "GRPC", +// Name: "readiness", +// Host: "0.0.0.0", +// Port: 3001, +// Mode: "", +// Network: "tcp", +// ProbeWaitTime: "3s", +// SocketPath: "", +// HTTP: &config.HTTP{ +// HandlerTimeout: "", +// IdleTimeout: "", +// ReadHeaderTimeout: "", +// ReadTimeout: "", +// ShutdownDuration: "0s", +// WriteTimeout: "", +// }, +// SocketOption: &config.SocketOption{ +// ReusePort: true, +// ReuseAddr: true, +// TCPFastOpen: true, +// TCPNoDelay: true, +// TCPCork: false, +// TCPQuickAck: true, +// TCPDeferAccept: false, +// IPTransparent: false, +// IPRecoverDestinationAddr: false, +// }, // }, // }, // MetricsServers: []*config.Server{ // { // Name: "pprof", -// Host: "127.0.0.1", +// Host: "0.0.0.0", // Port: 6060, // Mode: "REST", +// Network: "tcp", // ProbeWaitTime: "3s", +// SocketPath: "", // HTTP: &config.HTTP{ -// ShutdownDuration: "5s", // HandlerTimeout: "5s", // IdleTimeout: "2s", // ReadHeaderTimeout: "1s", // ReadTimeout: "1s", -// WriteTimeout: "1s", +// ShutdownDuration: "5s", +// WriteTimeout: "1m", +// }, +// SocketOption: &config.SocketOption{ +// ReusePort: true, +// ReuseAddr: true, +// TCPFastOpen: false, +// TCPNoDelay: false, +// TCPCork: false, +// TCPQuickAck: false, +// TCPDeferAccept: false, +// IPTransparent: false, +// IPRecoverDestinationAddr: false, // }, -// }, 
-// }, -// HealthCheckServers: []*config.Server{ -// { -// Name: "livenesss", -// Host: "127.0.0.1", -// Port: 3000, -// }, -// { -// Name: "readiness", -// Host: "127.0.0.1", -// Port: 3001, // }, // }, // StartUpStrategy: []string{ // "livenesss", -// "pprof", -// "agent-grpc", -// "agent-rest", // "readiness", +// "pprof", // }, // ShutdownStrategy: []string{ // "readiness", @@ -205,6 +296,42 @@ func NewConfig(ctx context.Context, path string) (cfg *Config, err error) { // CA: "/path/to/ca", // }, // }, +// Observability: &config.Observability{ +// Enabled: true, +// OTLP: &config.OTLP{ +// CollectorEndpoint: "", +// Attribute: &config.OTLPAttribute{ +// Namespace: NAMESPACE, +// PodName: NAME, +// NodeName: "", +// ServiceName: "vald", +// }, +// TraceBatchTimeout: "1s", +// TraceExportTimeout: "1m", +// TraceMaxExportBatchSize: 1024, +// TraceMaxQueueSize: 256, +// MetricsExportInterval: "1s", +// MetricsExportTimeout: "1m", +// }, +// Metrics: &config.Metrics{ +// EnableVersionInfo: true, +// EnableMemory: true, +// EnableGoroutine: true, +// EnableCGO: true, +// VersionInfoLabels: []string{ +// "vald_version", +// "server_name", +// "git_commit", +// "build_time", +// "go_version", +// "go_arch", +// "ngt_version", +// }, +// }, +// Trace: &config.Trace{ +// Enabled: true, +// }, +// }, // Job: &config.BenchmarkJob{ // Target: &config.BenchmarkTarget{ // Host: "vald-lb-gateway.svc.local", @@ -262,15 +389,15 @@ func NewConfig(ctx context.Context, path string) (cfg *Config, err error) { // DialOption: &config.DialOption{ // WriteBufferSize: 0, // ReadBufferSize: 0, -// InitialWindowSize: 0, -// InitialConnectionWindowSize: 0, +// InitialWindowSize: 1048576, +// InitialConnectionWindowSize: 2097152, // MaxMsgSize: 0, // BackoffMaxDelay: "120s", // BackoffBaseDelay: "1s", // BackoffJitter: 0.2, // BackoffMultiplier: 1.6, // MinimumConnectionTimeout: "20s", -// EnableBackoff: true, +// EnableBackoff: false, // Insecure: true, // Timeout: "", // Interceptors: 
[]string{}, @@ -296,11 +423,11 @@ func NewConfig(ctx context.Context, path string) (cfg *Config, err error) { // SocketOption: &config.SocketOption{ // ReusePort: true, // ReuseAddr: true, -// TCPFastOpen: true, -// TCPNoDelay: true, -// TCPQuickAck: true, +// TCPFastOpen: false, +// TCPNoDelay: false, +// TCPQuickAck: false, // TCPCork: false, -// TCPDeferAccept: true, +// TCPDeferAccept: false, // IPTransparent: false, // IPRecoverDestinationAddr: false, // }, diff --git a/pkg/tools/benchmark/job/handler/grpc/handler.go b/pkg/tools/benchmark/job/handler/grpc/handler.go index 7febc72d11d..e3ec51c25d9 100644 --- a/pkg/tools/benchmark/job/handler/grpc/handler.go +++ b/pkg/tools/benchmark/job/handler/grpc/handler.go @@ -21,7 +21,6 @@ import ( "context" "github.com/vdaas/vald/apis/grpc/v1/benchmark" - "github.com/vdaas/vald/internal/singleflight" "github.com/vdaas/vald/pkg/tools/benchmark/job/service" ) @@ -33,8 +32,7 @@ type Benchmark interface { type server struct { benchmark.UnimplementedJobServer - job service.Job - group singleflight.Group + job service.Job } func New(opts ...Option) (bm Benchmark, err error) { @@ -47,8 +45,6 @@ func New(opts ...Option) (bm Benchmark, err error) { } } - b.group = singleflight.New() - return b, nil } diff --git a/pkg/tools/benchmark/job/service/insert.go b/pkg/tools/benchmark/job/service/insert.go index 69d16ecc270..00ba632e801 100644 --- a/pkg/tools/benchmark/job/service/insert.go +++ b/pkg/tools/benchmark/job/service/insert.go @@ -23,9 +23,9 @@ import ( "strconv" "github.com/vdaas/vald/apis/grpc/v1/payload" - "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" + "golang.org/x/sync/errgroup" ) func (j *job) insert(ctx context.Context, ech chan error) error { @@ -38,8 +38,10 @@ func (j *job) insert(ctx context.Context, ech chan error) error { if j.timestamp > int64(0) { cfg.Timestamp = j.timestamp } - eg, egctx := errgroup.New(ctx) - 
eg.Limitation(j.concurrencyLimit) + eg, egctx := errgroup.WithContext(ctx) + eg.SetLimit(j.concurrencyLimit) + // eg, egctx := errgroup.New(ctx) + // eg.Limitation(j.concurrencyLimit) for i := j.dataset.Range.Start; i <= j.dataset.Range.End; i++ { iter := i eg.Go(func() error { diff --git a/pkg/tools/benchmark/job/service/job.go b/pkg/tools/benchmark/job/service/job.go index 2f115ea157d..168046e2b7a 100644 --- a/pkg/tools/benchmark/job/service/job.go +++ b/pkg/tools/benchmark/job/service/job.go @@ -33,6 +33,7 @@ import ( "github.com/vdaas/vald/internal/k8s/client" v1 "github.com/vdaas/vald/internal/k8s/vald/benchmark/api/v1" "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/rand" "github.com/vdaas/vald/internal/safety" "github.com/vdaas/vald/internal/test/data/hdf5" "github.com/vdaas/vald/internal/timeutil/rate" @@ -245,34 +246,6 @@ func (j *job) PreStart(ctx context.Context) error { return err } } - // Wait for beforeJob completed if exists - if len(j.beforeJobName) != 0 { - var jobResource v1.ValdBenchmarkJob - log.Info("[benchmark job] check before benchjob is completed or not...") - j.eg.Go(safety.RecoverFunc(func() error { - dt := time.NewTicker(j.beforeJobDur) - defer dt.Stop() - for { - select { - case <-ctx.Done(): - return nil - case <-dt.C: - err := j.k8sClient.Get(ctx, j.beforeJobName, j.beforeJobNamespace, &jobResource) - if err != nil { - return err - } - if jobResource.Status == v1.BenchmarkJobCompleted { - log.Infof("[benchmark job ] before job (%s) is completed, job service will start soon.", j.beforeJobName) - return nil - } - log.Infof("[benchmark job] before job (%s) is not completed...", j.beforeJobName) - } - } - })) - if err := j.eg.Wait(); err != nil { - return err - } - } return nil } @@ -345,3 +318,12 @@ func calcRecall(linearRes, searchRes *payload.Search_Response) (recall float64) } return recall / float64(len(lres)) } + +// TODO: apply many object type +func addNoiseToVec(oVec []float32) []float32 { + noise := 
rand.Float32() + vec := oVec + idx := rand.LimitedUint32(uint64(len(oVec) - 1)) + vec[idx] += noise + return vec +} diff --git a/pkg/tools/benchmark/job/service/object.go b/pkg/tools/benchmark/job/service/object.go index eec7ea773f1..af3971798fe 100644 --- a/pkg/tools/benchmark/job/service/object.go +++ b/pkg/tools/benchmark/job/service/object.go @@ -22,15 +22,17 @@ import ( "strconv" "github.com/vdaas/vald/apis/grpc/v1/payload" - "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" + "golang.org/x/sync/errgroup" ) func (j *job) exists(ctx context.Context, ech chan error) error { log.Info("[benchmark job] Start benchmarking exists") - eg, egctx := errgroup.New(ctx) - eg.Limitation(j.concurrencyLimit) + eg, egctx := errgroup.WithContext(ctx) + eg.SetLimit(j.concurrencyLimit) + // eg, egctx := errgroup.New(ctx) + // eg.Limitation(j.concurrencyLimit) for i := j.dataset.Range.Start; i <= j.dataset.Range.End; i++ { idx := i eg.Go(func() error { @@ -78,8 +80,10 @@ func (j *job) exists(ctx context.Context, ech chan error) error { func (j *job) getObject(ctx context.Context, ech chan error) error { log.Info("[benchmark job] Start benchmarking getObject") - eg, egctx := errgroup.New(ctx) - eg.Limitation(j.concurrencyLimit) + eg, egctx := errgroup.WithContext(ctx) + eg.SetLimit(j.concurrencyLimit) + // eg, egctx := errgroup.New(ctx) + // eg.Limitation(j.concurrencyLimit) for i := j.dataset.Range.Start; i <= j.dataset.Range.End; i++ { log.Infof("[benchmark job] Start get object: iter = %d", i) ft := []*payload.Filter_Target{} diff --git a/pkg/tools/benchmark/job/service/remove.go b/pkg/tools/benchmark/job/service/remove.go index 66c604a6ddd..be3c91c3fa7 100644 --- a/pkg/tools/benchmark/job/service/remove.go +++ b/pkg/tools/benchmark/job/service/remove.go @@ -22,9 +22,9 @@ import ( "strconv" "github.com/vdaas/vald/apis/grpc/v1/payload" - "github.com/vdaas/vald/internal/errgroup" 
"github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" + "golang.org/x/sync/errgroup" ) func (j *job) remove(ctx context.Context, ech chan error) error { @@ -35,8 +35,10 @@ func (j *job) remove(ctx context.Context, ech chan error) error { if j.timestamp > int64(0) { cfg.Timestamp = j.timestamp } - eg, egctx := errgroup.New(ctx) - eg.Limitation(j.concurrencyLimit) + eg, egctx := errgroup.WithContext(ctx) + eg.SetLimit(j.concurrencyLimit) + // eg, egctx := errgroup.New(ctx) + // eg.Limitation(j.concurrencyLimit) for i := j.dataset.Range.Start; i <= j.dataset.Range.End; i++ { idx := i eg.Go(func() error { diff --git a/pkg/tools/benchmark/job/service/search.go b/pkg/tools/benchmark/job/service/search.go index d8fb0d25f97..c7d54937052 100644 --- a/pkg/tools/benchmark/job/service/search.go +++ b/pkg/tools/benchmark/job/service/search.go @@ -22,9 +22,9 @@ import ( "math" "github.com/vdaas/vald/apis/grpc/v1/payload" - "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" + "golang.org/x/sync/errgroup" ) func (j *job) search(ctx context.Context, ech chan error) error { @@ -47,8 +47,10 @@ func (j *job) search(ctx context.Context, ech chan error) error { }(), } sres := make([]*payload.Search_Response, j.dataset.Indexes) - eg, egctx := errgroup.New(ctx) - eg.Limitation(j.concurrencyLimit) + eg, egctx := errgroup.WithContext(ctx) + eg.SetLimit(j.concurrencyLimit) + // eg, egctx := errgroup.New(ctx) + // eg.Limitation(j.concurrencyLimit) for i := j.dataset.Range.Start; i <= j.dataset.Range.End; i++ { iter := i eg.Go(func() error { @@ -92,6 +94,9 @@ func (j *job) search(ctx context.Context, ech chan error) error { } if res != nil && j.searchConfig.EnableLinearSearch { sres[iter-j.dataset.Range.Start] = res + log.Debugf("[benchmark job] Finish search: iter = %d, len = %d", iter, len(res.Results)) + } else { + log.Debugf("[benchmark job] Finish search: iter = %d, res = %v", iter, res) } 
log.Debugf("[benchmark job] Finish search: iter = %d, len = %d", iter, len(res.Results)) return nil diff --git a/pkg/tools/benchmark/job/service/update.go b/pkg/tools/benchmark/job/service/update.go index 2c7ab12c49f..51a10a9ba2e 100644 --- a/pkg/tools/benchmark/job/service/update.go +++ b/pkg/tools/benchmark/job/service/update.go @@ -23,9 +23,9 @@ import ( "strconv" "github.com/vdaas/vald/apis/grpc/v1/payload" - "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" + "golang.org/x/sync/errgroup" ) func (j *job) update(ctx context.Context, ech chan error) error { @@ -39,8 +39,10 @@ func (j *job) update(ctx context.Context, ech chan error) error { if j.timestamp > int64(0) { cfg.Timestamp = j.timestamp } - eg, egctx := errgroup.New(ctx) - eg.Limitation(j.concurrencyLimit) + eg, egctx := errgroup.WithContext(ctx) + eg.SetLimit(j.concurrencyLimit) + // eg, egctx := errgroup.New(ctx) + // eg.Limitation(j.concurrencyLimit) for i := j.dataset.Range.Start; i <= j.dataset.Range.End; i++ { iter := i eg.Go(func() error { @@ -63,7 +65,7 @@ func (j *job) update(ctx context.Context, ech chan error) error { res, err := j.client.Update(egctx, &payload.Update_Request{ Vector: &payload.Object_Vector{ Id: strconv.Itoa(iter), - Vector: vecs[idx], + Vector: addNoiseToVec(vecs[idx]), }, Config: cfg, }) diff --git a/pkg/tools/benchmark/job/service/upsert.go b/pkg/tools/benchmark/job/service/upsert.go index a31f3379727..016c2963820 100644 --- a/pkg/tools/benchmark/job/service/upsert.go +++ b/pkg/tools/benchmark/job/service/upsert.go @@ -23,9 +23,9 @@ import ( "strconv" "github.com/vdaas/vald/apis/grpc/v1/payload" - "github.com/vdaas/vald/internal/errgroup" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" + "golang.org/x/sync/errgroup" ) func (j *job) upsert(ctx context.Context, ech chan error) error { @@ -39,8 +39,10 @@ func (j *job) upsert(ctx context.Context, ech chan error) error { if 
j.timestamp > int64(0) { cfg.Timestamp = j.timestamp } - eg, egctx := errgroup.New(ctx) - eg.Limitation(j.concurrencyLimit) + eg, egctx := errgroup.WithContext(ctx) + eg.SetLimit(j.concurrencyLimit) + // eg, egctx := errgroup.New(ctx) + // eg.Limitation(j.concurrencyLimit) for i := j.dataset.Range.Start; i <= j.dataset.Range.End; i++ { iter := i eg.Go(func() error { @@ -62,7 +64,7 @@ func (j *job) upsert(ctx context.Context, ech chan error) error { res, err := j.client.Upsert(egctx, &payload.Upsert_Request{ Vector: &payload.Object_Vector{ Id: strconv.Itoa(iter), - Vector: vecs[idx], + Vector: addNoiseToVec(vecs[idx]), }, Config: cfg, }) diff --git a/pkg/tools/benchmark/job/usecase/benchmarkd.go b/pkg/tools/benchmark/job/usecase/benchmarkd.go index 400c6d580ac..038f048c38d 100644 --- a/pkg/tools/benchmark/job/usecase/benchmarkd.go +++ b/pkg/tools/benchmark/job/usecase/benchmarkd.go @@ -143,39 +143,6 @@ func New(cfg *config.Config) (r runner.Runner, err error) { } } - if len(cfg.Server.MetricsServers) == 0 { - cfg.Server.MetricsServers = []*iconf.Server{ - { - Name: "pprof", - Host: "0.0.0.0", - Port: uint16(6060), - HTTP: &iconf.HTTP{ - HandlerTimeout: "5s", - IdleTimeout: "2s", - ReadHeaderTimeout: "1s", - ReadTimeout: "1s", - ShutdownDuration: "5s", - WriteTimeout: "1m", - }, - Mode: "REST", - Network: "tcp", - ProbeWaitTime: "3s", - SocketOption: &iconf.SocketOption{ - IPRecoverDestinationAddr: false, - IPTransparent: false, - ReuseAddr: true, - ReusePort: true, - TCPCork: false, - TCPDeferAccept: true, - TCPFastOpen: true, - TCPNoDelay: true, - TCPQuickAck: true, - }, - SocketPath: "", - }, - } - } - srv, err := starter.New( starter.WithConfig(cfg.Server), starter.WithREST(func(sc *iconf.Server) []server.Option { diff --git a/pkg/tools/benchmark/operator/handler/grpc/handler.go b/pkg/tools/benchmark/operator/handler/grpc/handler.go index a11cf4133fa..7809bc394cd 100644 --- a/pkg/tools/benchmark/operator/handler/grpc/handler.go +++ 
b/pkg/tools/benchmark/operator/handler/grpc/handler.go @@ -21,7 +21,6 @@ import ( "context" "github.com/vdaas/vald/apis/grpc/v1/benchmark" - "github.com/vdaas/vald/internal/singleflight" "github.com/vdaas/vald/pkg/tools/benchmark/operator/service" ) @@ -34,7 +33,6 @@ type server struct { benchmark.UnimplementedJobServer operator service.Operator - group singleflight.Group } func New(opts ...Option) (bm Benchmark, err error) { @@ -47,8 +45,6 @@ func New(opts ...Option) (bm Benchmark, err error) { } } - b.group = singleflight.New() - return b, nil }