Skip to content

Commit ffbf0a9

Browse files
authored
Merge pull request #45 from HarrisChu/refactor_plugin_option
2 parents 98fa2df + 629c5cb commit ffbf0a9

File tree

9 files changed

+542
-324
lines changed

9 files changed

+542
-324
lines changed

Makefile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@ all: build
44
pairs := darwin/amd64 linux/amd64 linux/arm64
55
GOPATH ?= ~/go
66
export GO111MODULE=on
7-
VERSION ?= v1.1.0
87
K6_VERSION ?= v0.43.0
98

109
fmt:
@@ -15,7 +14,8 @@ lint :
1514

1615
build:
1716
go install github.com/k6io/xk6/cmd/[email protected]
18-
$(GOPATH)/bin/xk6 build $(K6_VERSION) --with github.com/vesoft-inc/k6-plugin@$(VERSION);
17+
version=$(git describe --tags `git rev-list --tags --max-count=1`) \
18+
$(GOPATH)/bin/xk6 build $(K6_VERSION) --with github.com/vesoft-inc/k6-plugin@$(version);
1919

2020
build-all: build-arm-v7
2121

README.md

Lines changed: 111 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ Used to test [NebulaGraph](https://github.com/vesoft-inc/nebula).
1212

1313
## Version match
1414

15-
k6-plugin now support NebulaGraph above v2.5.0.
15+
k6-plugin now supports NebulaGraph above v3.0.0.
1616

1717
## Build
1818

@@ -32,6 +32,13 @@ Then:
3232
2. Build the binary:
3333

3434
```bash
35+
# build with the latest version.
36+
make
37+
38+
# build with local source code
39+
make build-dev
40+
41+
# or build with specified version
3542
xk6 build --with github.com/vesoft-inc/k6-plugin@{version}
3643
# e.g. build v0.0.8
3744
xk6 build --with github.com/vesoft-inc/[email protected]
@@ -45,41 +52,46 @@ xk6 build --with github.com/vesoft-inc/k6-plugin@master
4552
import nebulaPool from 'k6/x/nebulagraph';
4653
import { check } from 'k6';
4754
import { Trend } from 'k6/metrics';
48-
import { sleep } from 'k6';
4955

50-
var latencyTrend = new Trend('latency');
51-
var responseTrend = new Trend('responseTime');
52-
// initial nebula connect pool
53-
// by default the channel buffer size is 20000, you can reset it with
54-
// var pool = nebulaPool.initWithSize("192.168.8.152:9669", {poolSize}, {bufferSize}); e.g.
55-
// var pool = nebulaPool.initWithSize("192.168.8.152:9669", 1000, 4000)
56-
var pool = nebulaPool.init("192.168.8.152:9669", 400);
56+
var latencyTrend = new Trend('latency', true);
57+
var responseTrend = new Trend('responseTime', true);
58+
59+
// option configuration, please refer more details in this doc.
60+
var graph_option = {
61+
address: "192.168.8.6:10010",
62+
space: "sf1",
63+
csv_path: "person.csv",
64+
csv_delimiter: "|",
65+
csv_with_header: true
66+
};
5767

68+
nebulaPool.setOption(graph_option);
69+
var pool = nebulaPool.init();
5870
// initial session for every vu
59-
var session = pool.getSession("root", "nebula")
60-
session.execute("USE sf1")
71+
var session = pool.getSession()
6172

73+
String.prototype.format = function () {
74+
var formatted = this;
75+
var data = arguments[0]
6276

63-
export function setup() {
64-
// config csv file
65-
pool.configCSV("person.csv", "|", false)
66-
// config output file, save every query information
67-
pool.configOutput("output.csv")
68-
sleep(1)
69-
}
77+
formatted = formatted.replace(/\{(\d+)\}/g, function (match, key) {
78+
return data[key]
79+
})
80+
return formatted
81+
};
7082

7183
export default function (data) {
7284
// get csv data from csv file
7385
let d = session.getData()
74-
// d[0] means the first column data in the csv file
75-
let ngql = 'go 2 steps from ' + d[0] + ' over KNOWS '
86+
// {0} means the first column data in the csv file
87+
let ngql = 'go 2 steps from {0} over KNOWS'.format(d)
7688
let response = session.execute(ngql)
7789
check(response, {
7890
"IsSucceed": (r) => r.isSucceed() === true
7991
});
8092
// add trend
81-
latencyTrend.add(response.getLatency());
82-
responseTrend.add(response.getResponseTime());
93+
latencyTrend.add(response.getLatency()/1000);
94+
responseTrend.add(response.getResponseTime()/1000);
8395
};
8496

8597
export function teardown() {
@@ -95,45 +107,39 @@ export function teardown() {
95107
# -d means the duration that test running, e.g. `3s` means 3 seconds, `5m` means 5 minutes.
96108
>./k6 run nebula-test.js -u 3 -d 3s
97109

110+
98111
/\ |‾‾| /‾‾/ /‾‾/
99112
/\ / \ | |/ / / /
100113
/ \/ \ | ( / ‾‾\
101114
/ \ | |\ \ | (‾) |
102115
/ __________ \ |__| \__\ \_____/ .io
103116

104-
INFO[0000] 2021/07/07 16:50:25 [INFO] begin init the nebula pool
105-
INFO[0000] 2021/07/07 16:50:25 [INFO] connection pool is initialized successfully
106-
INFO[0000] 2021/07/07 16:50:25 [INFO] finish init the pool
117+
testing option: {"pool_policy":"connection","output":"output.csv","output_channel_size":10000,"address":"192.168.8.6:10010","timeout_us":0,"idletime_us":0,"max_size":400,"min_size":0,"username":"root","password":"nebula","space":"sf1","csv_path":"person.csv","csv_delimiter":"|","csv_with_header":true,"csv_channel_size":10000,"csv_data_limit":500000,"retry_times":0,"retry_interval_us":0,"retry_timeout_us":0,"ssl_ca_pem_path":"","ssl_client_pem_path":"","ssl_client_key_path":""}
107118
execution: local
108119
script: nebula-test.js
109-
output: -
120+
output: engine
110121

111122
scenarios: (100.00%) 1 scenario, 3 max VUs, 33s max duration (incl. graceful stop):
112123
* default: 3 looping VUs for 3s (gracefulStop: 30s)
113124

114-
INFO[0004] 2021/07/07 16:50:29 [INFO] begin close the nebula pool
115-
116-
running (04.1s), 0/3 VUs, 570 complete and 0 interrupted iterations
117-
default ✓ [======================================] 3 VUs 3s
118-
INFO[0004] 2021/07/07 16:50:29 [INFO] begin init the nebula pool
119-
INFO[0004] 2021/07/07 16:50:29 [INFO] connection pool is initialized successfully
120-
INFO[0004] 2021/07/07 16:50:29 [INFO] finish init the pool
121125

122126
✓ IsSucceed
123127

124-
█ setup
125-
126128
█ teardown
127129

128-
checks...............: 100.00% ✓ 570 ✗ 0
130+
checks...............: 100.00% ✓ 3529 ✗ 0
129131
data_received........: 0 B 0 B/s
130132
data_sent............: 0 B 0 B/s
131-
iteration_duration...: avg=17.5ms min=356.6µs med=11.44ms max=1s p(90)=29.35ms p(95)=38.73ms
132-
iterations...........: 570 139.877575/s
133-
latency..............: avg=2986.831579 min=995 med=2663 max=18347 p(90)=4518.4 p(95)=5803
134-
responseTime.........: avg=15670.263158 min=4144 med=11326.5 max=108286 p(90)=28928.9 p(95)=38367.1
135-
vus..................: 3 min=0 max=3
136-
vus_max..............: 3 min=3 max=3
133+
iteration_duration...: avg=2.54ms min=129.28µs med=1.78ms max=34.99ms p(90)=5.34ms p(95)=6.79ms
134+
iterations...........: 3529 1174.135729/s
135+
latency..............: avg=1.98ms min=439µs med=1.42ms max=27.77ms p(90)=4.11ms p(95)=5.12ms
136+
responseTime.........: avg=2.48ms min=495µs med=1.72ms max=34.93ms p(90)=5.27ms p(95)=6.71ms
137+
vus..................: 3 min=3 max=3
138+
vus_max..............: 3 min=3 max=3
139+
140+
141+
running (03.0s), 0/3 VUs, 3529 complete and 0 interrupted iterations
142+
default ✓ [======================================] 3 VUs 3s
137143
```
138144
139145
* `checks`, one check per iteration, verify `isSucceed` by default.
@@ -154,38 +160,71 @@ The `output.csv` saves data as below:
154160
```bash
155161
>head output.csv
156162

157-
timestamp,nGQL,latency,responseTime,isSucceed,rows,errorMsg
158-
1625647825,USE sf1,7808,10775,true,0,
159-
1625647825,USE sf1,4055,7725,true,0,
160-
1625647825,USE sf1,3431,10231,true,0,
161-
1625647825,USE sf1,2938,5600,true,0,
162-
1625647825,USE sf1,2917,5410,true,0,
163-
1625647826,go 2 steps from 933 over KNOWS ,6022,24537,true,1680,
164-
1625647826,go 2 steps from 1129 over KNOWS ,6141,25861,true,1945,
165-
1625647826,go 2 steps from 4194 over KNOWS ,6317,26309,true,1581,
166-
1625647826,go 2 steps from 8698 over KNOWS ,4388,22597,true,1530,
167-
```
168-
169-
## Advanced usage
170-
171-
By default, all vus use the same channel to read the csv data.
172-
173-
You can change the strategy before `getSession` function.
174-
175-
As each vu uses a separate channel, you can reduce channel buffer size to save memory.
176-
177-
```js
178-
// initial nebula connect pool, channel buffer size is 4000
179-
var pool = nebulaPool.initWithSize("192.168.8.61:9669", 400, 4000);
180-
181-
// set csv strategy, 1 means each vu has a separate csv reader.
182-
pool.configCsvStrategy(1)
183-
184-
// initial session for every vu
185-
var session = pool.getSession("root", "nebula")
163+
timestamp,nGQL,latency,responseTime,isSucceed,rows,firstRecord,errorMsg
164+
1689576531,go 2 steps from 4194 over KNOWS yield dst(edge),4260,5151,true,1581,32985348838665,
165+
1689576531,go 2 steps from 8333 over KNOWS yield dst(edge),4772,5772,true,2063,32985348833536,
166+
1689576531,go 2 steps from 1129 over KNOWS yield dst(edge),5471,6441,true,1945,19791209302529,
167+
1689576531,go 2 steps from 8698 over KNOWS yield dst(edge),3453,4143,true,1530,28587302322946,
168+
1689576531,go 2 steps from 8853 over KNOWS yield dst(edge),4361,5368,true,2516,28587302324992,
169+
1689576531,go 2 steps from 2199023256684 over KNOWS yield dst(edge),2259,2762,true,967,32985348833796,
170+
1689576531,go 2 steps from 2199023262818 over KNOWS yield dst(edge),638,732,true,0,,
171+
1689576531,go 2 steps from 10027 over KNOWS yield dst(edge),5182,6701,true,3288,30786325580290,
172+
1689576531,go 2 steps from 2199023261211 over KNOWS yield dst(edge),2131,2498,true,739,32985348833794,
186173
```
187174
188-
Please refer to [nebula-test-insert.js](./example/nebula-test-insert.js) for more details.
175+
## Plugin Option
176+
177+
Pool options
178+
179+
---
180+
| Key | Type | Default | Description |
181+
|---|---|---|---|
182+
|pool_policy|string|connection|'connection' or 'session', using which pool to test |
183+
|address |string||NebulaGraph address, e.g. '192.168.8.6:9669,192.168.8.7:9669'|
184+
|timeout_us|int|0|client connection timeout, 0 means no timeout|
185+
|idletime_us|int|0|client connection idle timeout, 0 means no timeout|
186+
|max_size|int|400|max client connections in pool|
187+
|min_size|int|0|min client connections in pool|
188+
|username|string|root|NebulaGraph username|
189+
|password|string|nebula|NebulaGraph password|
190+
|space|string||NebulaGraph space|
191+
192+
Output options
193+
194+
---
195+
| Key | Type | Default | Description |
196+
|---|---|---|---|
197+
|output|string||output file path|
198+
|output_channel_size|int|10000| size of output channel|
199+
200+
CSV options
201+
202+
---
203+
| Key | Type | Default | Description |
204+
|---|---|---|---|
205+
|csv_path|string||csv file path|
206+
|csv_delimiter|string|,|delimiter of csv file|
207+
|csv_with_header|bool|false|if true, would ignore the first record|
208+
|csv_channel_size|int|10000|size of csv reader channel|
209+
|csv_data_limit|int|500000|would load [x] rows in memory, and then send to channel in loop|
210+
211+
Retry options
212+
213+
---
214+
| Key | Type | Default | Description |
215+
|---|---|---|---|
216+
|retry_times|int|0|max retry times|
217+
|retry_interval_us|int|0|interval duration for next retry|
218+
|retry_timeout_us|int|0|retry timeout|
219+
220+
SSL options
221+
222+
---
223+
| Key | Type | Default | Description |
224+
|---|---|---|---|
225+
|ssl_ca_pem_path|string||if it is not blank, would use SSL connection. ca pem path|
226+
|ssl_client_pem_path|string||client pem path|
227+
|ssl_client_key_path|string||client key path|
189228
190229
## Batch insert
191230

example/nebula-test-insert-limit-rate.js

Lines changed: 27 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -7,19 +7,23 @@
77
import nebulaPool from 'k6/x/nebulagraph';
88
import { check } from 'k6';
99
import { Trend } from 'k6/metrics';
10-
import { sleep } from 'k6';
1110

12-
var latencyTrend = new Trend('latency');
13-
var responseTrend = new Trend('responseTime');
14-
// initial nebula connect pool
15-
var pool = nebulaPool.initWithSize("192.168.8.61:9669,192.168.8.62:9669,192.168.8.63:9669", 400, 4000);
11+
var latencyTrend = new Trend('latency', true);
12+
var responseTrend = new Trend('responseTime', true);
1613

17-
// set csv strategy, 1 means each vu has a separate csv reader.
18-
pool.configCsvStrategy(1)
14+
var graph_option = {
15+
address: "192.168.8.6:10010",
16+
space: "sf1",
17+
csv_path: "person.csv",
18+
csv_delimiter: "|",
19+
csv_with_header: true,
20+
output: "output.csv"
21+
};
1922

23+
nebulaPool.setOption(graph_option);
24+
var pool = nebulaPool.init();
2025
// initial session for every vu
21-
var session = pool.getSession("root", "nebula")
22-
session.execute("USE ldbc")
26+
var session = pool.getSession()
2327

2428
// concurrent 300, and each second, 1000 iterations would be made.
2529
export const options = {
@@ -35,32 +39,27 @@ export const options = {
3539
},
3640
};
3741

38-
export function setup() {
39-
// config csv file
40-
pool.configCSV("person.csv", "|", false)
41-
// config output file, save every query information
42-
pool.configOutput("output.csv")
43-
sleep(1)
44-
}
42+
String.prototype.format = function() {
43+
var formatted = this;
44+
var data = arguments[0]
45+
46+
formatted = formatted.replace(/\{(\d+)\}/g, function(match, key) {
47+
return data[key]
48+
})
49+
return formatted
50+
};
4551

46-
export default function (data) {
52+
export default function(data) {
4753
// get csv data from csv file
4854
let ngql = 'INSERT VERTEX Person(firstName, lastName, gender, birthday, creationDate, locationIP, browserUsed) VALUES '
4955
let batches = []
50-
let batchSize = 1
51-
// batch size
56+
let batchSize = 10
5257
for (let i = 0; i < batchSize; i++) {
5358
let d = session.getData();
54-
let values = []
55-
// concat the insert value
56-
for (let index = 1; index < 8; index++) {
57-
let value = '"' + d[index] + '"'
58-
values.push(value)
59-
}
60-
let batch = d[0] + ":(" + values.join(",") + ")"
61-
batches.push(batch)
59+
let value = "{0}:(\"{1}\",\"{2}\", \"{3}\", \"{4}\", datetime(\"{5}\"), \"{6}\", \"{7}\")".format(d)
60+
batches.push(value)
6261
}
63-
ngql = ngql + batches.join(',')
62+
ngql = ngql + " " + batches.join(',')
6463
let response = session.execute(ngql)
6564
check(response, {
6665
"IsSucceed": (r) => r.isSucceed() === true
@@ -74,5 +73,3 @@ export default function (data) {
7473
export function teardown() {
7574
pool.close()
7675
}
77-
78-

0 commit comments

Comments
 (0)