# Copyright 2018-2020 Radicalbit S.r.l.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nsdb
  name: nsdb
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nsdb
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: nsdb
        actorSystemName: nsdb
    spec:
      containers:
      - name: nsdb
        image: weareradicalbit/nsdb:1.1.0-SNAPSHOT
        #health
        livenessProbe:
          httpGet:
            path: /alive
            port: management
        readinessProbe:
          httpGet:
            path: /ready
            port: management
        #health
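        # Both probe paths are served by Akka Management's health-check endpoints on the
        # management port. If the probes fire before that server is up, a start delay can
        # help; a sketch only (the timing values are assumptions, not NSDb defaults):
        #   readinessProbe:
        #     httpGet:
        #       path: /ready
        #       port: management
        #     initialDelaySeconds: 20
        #     periodSeconds: 10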
        ports:
        - name: remoting
          containerPort: 2552
          protocol: TCP
        - name: management
          containerPort: 8558
          protocol: TCP
        - name: http
          containerPort: 9000
          protocol: TCP
        - name: grpc
          containerPort: 7817
          protocol: TCP
        # when contact-point-discovery.port-name is set for cluster bootstrap,
        # the management port must be named accordingly:
        # name: management
        # (a sketch of the matching Akka settings follows this Deployment)
        env:
        # The Kubernetes API discovery will use this service name to look for
        # nodes with this value in the 'app' label.
        # This can be customized with the 'pod-label-selector' setting.
        - name: AKKA_CLUSTER_BOOTSTRAP_SERVICE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: "metadata.labels['app']"
        - name: CLUSTER_MODE
          value: "k8s-api"
        - name: SHARD_INTERVAL
          value: "10d"
#deployment
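# The port-name and pod-label-selector comments above refer to Akka Management's
# cluster-bootstrap and kubernetes-api discovery settings. A sketch of the relevant
# HOCON keys (how NSDb wires the CLUSTER_MODE / service-name env vars into its own
# configuration is an assumption here, not taken from this file):
#
#   akka.management.cluster.bootstrap.contact-point-discovery {
#     service-name = ${?AKKA_CLUSTER_BOOTSTRAP_SERVICE_NAME}
#     port-name    = "management"    # must match the container port name above
#   }
#   akka.discovery.kubernetes-api {
#     pod-label-selector = "app=%s"  # default; %s is replaced with service-name
#   }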
---
#rbac
#
# Create a role, `pod-reader`, that can list pods, and bind the default
# service account in the namespace where the binding is deployed to that role.
#
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: pod-reader
rules:
- apiGroups: [""] # "" indicates the core API group
  resources: ["pods"]
  verbs: ["get", "watch", "list"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: read-pods
subjects:
# Uses the default service account.
# Consider creating a dedicated service account to run your
# Akka Cluster services and binding the role to that one
# (a commented sketch follows this section).
- kind: ServiceAccount
  name: default
roleRef:
  kind: Role
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
#rbac
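# A sketch of the dedicated service account suggested above; the name 'nsdb' is an
# assumption, and the RoleBinding subject plus a 'serviceAccountName: nsdb' entry in
# the Deployment's pod spec would need to be updated to match:
#
#   kind: ServiceAccount
#   apiVersion: v1
#   metadata:
#     name: nsdb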
---
#service
kind: Service
apiVersion: v1
metadata:
  name: nsdb
spec:
  type: NodePort
  selector:
    app: nsdb
  ports:
  - protocol: TCP
    name: management
    port: 8558
    targetPort: management
  - protocol: TCP
    name: http
    port: 9000
    targetPort: http
  - protocol: TCP
    name: grpc
    port: 7817
    targetPort: grpc
#service
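# Usage sketch (plain kubectl; the local port mapping is an assumption):
#   kubectl apply -f nsdb-cluster-deployment.yml
#   kubectl get pods -l app=nsdb                # wait until all 3 replicas are Ready
#   kubectl port-forward svc/nsdb 9000:9000     # NSDb HTTP API on localhost:9000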