Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

PMM-13391 Integrate VictoriaLogs #3269

Open
wants to merge 18 commits into
base: v3
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions build/ansible/pmm-docker/victorialogs.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
---
# Playbook run inside the pmm-server image build (see
# build/ansible/roles/victorialogs/files/Dockerfile.victorialogs) to install
# VictoriaLogs via the "victorialogs" role.
- hosts: all
  # Use "true"/"false" booleans instead of "yes"/"no" (ansible-lint "truthy" rule).
  become: true
  gather_facts: true
  vars:
    # VictoriaLogs release to install; consumed by the victorialogs role.
    victorialogs_version: "0.37.0"
  roles:
    - victorialogs
2 changes: 1 addition & 1 deletion build/ansible/roles/grafana/files/grafana.ini
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ enabled = true

[plugins]
# Enter a comma-separated list of plugin identifiers to identify plugins that are allowed to be loaded even if they lack a valid signature.
allow_loading_unsigned_plugins = grafana-polystat-panel,pmm-app,pmm-check-panel-home,pmm-update,pmm-qan-app-panel,pmm-pt-summary-panel,pmm-pt-summary-datasource
allow_loading_unsigned_plugins = grafana-polystat-panel,pmm-app,pmm-check-panel-home,pmm-update,pmm-qan-app-panel,pmm-pt-summary-panel,pmm-pt-summary-datasource,victorialogs-datasource

[feature_toggles]
# there are currently two ways to enable feature toggles in the `grafana.ini`.
Expand Down
16 changes: 16 additions & 0 deletions build/ansible/roles/victorialogs/files/Dockerfile.victorialogs
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# TODO: the image should be percona/pmm-server:3 once PMM v3 is released.

# To build the image, run the following in the project root directory (mind the dot!):
# docker buildx build --platform=linux/amd64 --progress=plain -t perconalab/pmm-server:victorialogs-0.37.0 -f ./build/ansible/roles/victorialogs/files/Dockerfile.victorialogs .
FROM perconalab/pmm-server:3-dev-latest

# Disable Grafana update checks and usage reporting in this dev image.
ENV GF_ANALYTICS_CHECK_FOR_UPDATES=false
ENV GF_ANALYTICS_REPORTING_ENABLED=false

# Root privileges are required for the Ansible provisioning step below.
USER root

# Copy the repository's Ansible roles and playbooks into the image.
COPY build/ansible /opt/ansible

# Provision VictoriaLogs into the image by running the playbook against localhost.
RUN ansible-playbook -vvv -i 'localhost,' -c local /opt/ansible/pmm-docker/victorialogs.yml

# Drop back to the unprivileged pmm user for runtime.
USER pmm
BupycHuk marked this conversation as resolved.
Show resolved Hide resolved
62 changes: 62 additions & 0 deletions build/ansible/roles/victorialogs/files/nginx.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
# Top-level nginx configuration for pmm-server; per-site config is pulled in
# from /etc/nginx/conf.d/*.conf (see the include at the bottom of the http block).
# user pmm; ## It's ignored when the master process is not run by root.
worker_processes 2;

# Run in the foreground so the container's process supervisor manages nginx.
daemon off;

# Log to the container's stdio streams instead of files.
error_log /dev/stderr warn;
pid /run/nginx.pid;

events {
worker_connections 4096;
}

http {
# Map used to forward WebSocket upgrade requests ("Connection: upgrade"
# when the client sends an Upgrade header, "close" otherwise).
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}

include /etc/nginx/mime.types;
default_type application/octet-stream;

log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';

# key=value access-log format, easier to parse by log collectors than "main".
log_format ext_format
'time="$time_iso8601" '
'host=$http_host '
'remote_addr=$remote_addr '
'request_method=$request_method '
'request="$request_uri" '
'request_time=$request_time '
'body_bytes_sent=$body_bytes_sent '
'server_protocol="$server_protocol" '
'status=$status '
'http_referrer="$http_referer" '
'http_x_forwarded_for="$http_x_forwarded_for" '
'http_user_agent="$http_user_agent"';

access_log /dev/stdout ext_format;

sendfile on;
gzip on;
etag on;

keepalive_timeout 65;

# NOTE(review): hardcoded public Google resolvers — assumes outbound DNS to
# 8.8.8.8/8.8.4.4 is reachable; verify behavior in air-gapped deployments.
resolver 8.8.8.8 8.8.4.4 valid=300s;
resolver_timeout 3s;

## TODO https://jira.percona.com/browse/PMM-4670
# CWE-693, CWE-16
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
# TODO X-XSS-Protection useless for modern browsers which support CSP. We need to implement CSP instead.
add_header X-XSS-Protection "1; mode=block";
# CWE-524, CWE-525
add_header Cache-control "no-cache";
add_header Pragma "no-cache";

include /etc/nginx/conf.d/*.conf;
}
268 changes: 268 additions & 0 deletions build/ansible/roles/victorialogs/files/pmm.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,268 @@
# Local backend pools for the services proxied by the server block below.

# pmm-managed gRPC API.
upstream managed-grpc {
server 127.0.0.1:7771;
keepalive 32;
}
# pmm-managed JSON (gRPC-gateway) API.
upstream managed-json {
server 127.0.0.1:7772;
keepalive 32;
keepalive_requests 100;
keepalive_timeout 75s;
}

# qan-api gRPC API (not exposed externally; see comment in the server block).
upstream qan-api-grpc {
server 127.0.0.1:9911;
keepalive 32;
}
# qan-api JSON API.
upstream qan-api-json {
server 127.0.0.1:9922;
keepalive 32;
keepalive_requests 100;
keepalive_timeout 75s;
}

# vmproxy, fronting the Prometheus-compatible query API.
upstream vmproxy {
server localhost:8430;
keepalive 32;
keepalive_requests 100;
keepalive_timeout 75s;
}

# Main pmm-server virtual host: plain HTTP on 8080, TLS on 8443.
server {
listen 8080;
listen 8443 ssl http2;
server_name _;
server_tokens off;

# allow huge requests
large_client_header_buffers 128 64k;

client_max_body_size 10m;

ssl_certificate /srv/nginx/certificate.crt;
ssl_certificate_key /srv/nginx/certificate.key;
ssl_trusted_certificate /srv/nginx/ca-certs.pem;
ssl_dhparam /srv/nginx/dhparam.pem;

# This block checks for the maintenance.html file and, if it exists, redirects all requests to the maintenance page.
# There are two exceptions to it: the /v1/updates/Status and /auth_request endpoints.
set $maintenance_mode 0;

if (-f /usr/share/pmm-server/maintenance/maintenance.html) {
set $maintenance_mode 1;
}

# Keep update-status polling and auth subrequests working during maintenance.
if ($request_uri ~* "^/v1/updates/Status|^/auth_request") {
set $maintenance_mode 0;
}

if ($maintenance_mode = 1) {
return 503;
}

error_page 503 @maintenance;

# Serve the static maintenance page for any 503 response.
location @maintenance {
auth_request off;
root /usr/share/pmm-server/maintenance;
rewrite ^(.*)$ /maintenance.html break;
}


# Enable passing of the remote user's IP address to all
# proxied services using the X-Forwarded-For header.
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

# Enable auth_request for all locations, including root
# (but excluding /auth_request).
auth_request /auth_request;

# Store the value of the X-Proxy-Filter header of the auth_request subrequest response in a variable,
# and forward it to the proxied backends.
auth_request_set $auth_request_proxy_filter $upstream_http_x_proxy_filter;
proxy_set_header X-Proxy-Filter $auth_request_proxy_filter;

# nginx completely ignores the auth_request subrequest response body.
# We use this directive to send the same request to the same location as a normal request
# to get a response body or redirect and return it to the client.
# auth_request supports only 401 and 403 statuses; 401 is reserved for this configuration,
# and 403 is used for normal pmm-managed API errors.
error_page 401 = /auth_request;

# Internal location for authentication via pmm-managed/Grafana.
# First, nginx sends a subrequest here to authenticate the request. If it is not authenticated by pmm-managed/Grafana,
# it is sent to this location a second time (as a normal request) by the error_page directive above.
location /auth_request {
internal;

# Do not run auth_request recursively for the auth endpoint itself.
auth_request off;

proxy_pass http://managed-json/auth_request;

# nginx always strips the body from authentication subrequests.
# Overwrite Content-Length to avoid problems on the Go side and to keep the connection alive.
proxy_pass_request_body off;
proxy_set_header Content-Length 0;

proxy_http_version 1.1;
proxy_set_header Connection "";

# These headers are set for both the subrequest and the normal request.
proxy_set_header X-Original-Uri $request_uri;
proxy_set_header X-Original-Method $request_method;
}

# PMM UI (static single-page app).
location /pmm-ui {
# Will redirect on FE to login page if user is not authenticated
auth_request off;

alias /usr/share/pmm-ui;
# SPA fallback: unknown paths are served by index.html.
try_files $uri /index.html break;
}

# Grafana: "/" and "/graph" are normalized to "/graph/", then proxied with the
# prefix stripped. WebSocket upgrades are passed through (live features).
rewrite ^/$ $scheme://$http_host/graph/;
rewrite ^/graph$ /graph/;
location /graph {
# NOTE(review): the trailing "/;" in the rewritten cookie path looks intentional
# (cookie-scoping quirk) — confirm before changing.
proxy_cookie_path / "/;";
proxy_pass http://127.0.0.1:3000;
rewrite ^/graph/(.*) /$1 break;
proxy_read_timeout 600;
proxy_http_version 1.1;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Host $http_host;
proxy_set_header X-Proxy-Filter $auth_request_proxy_filter;
}

# Prometheus-compatible endpoints, served by the backend on 127.0.0.1:9090.
location /prometheus {
proxy_pass http://127.0.0.1:9090;
proxy_read_timeout 600;
proxy_http_version 1.1;
proxy_set_header Connection "";
}
# The query API goes through vmproxy (port 8430; see the upstream definition)
# rather than directly to the metrics backend.
location /prometheus/api/v1 {
proxy_pass http://vmproxy;
proxy_read_timeout 600;
proxy_http_version 1.1;
proxy_set_header Connection "";
}

# VictoriaMetrics native paths, mapped onto its /prometheus/ prefix.
location /victoriametrics/ {
proxy_pass http://127.0.0.1:9090/prometheus/;
proxy_read_timeout 600;
proxy_http_version 1.1;
proxy_set_header Connection "";
# Larger buffer for metric write payloads.
client_body_buffer_size 10m;
}

# VMAlert: expose its rules/alerts APIs under Prometheus-style paths.
location /prometheus/rules {
proxy_pass http://127.0.0.1:8880/api/v1/rules;
proxy_read_timeout 600;
proxy_http_version 1.1;
proxy_set_header Connection "";
}
location /prometheus/alerts {
proxy_pass http://127.0.0.1:8880/api/v1/alerts;
proxy_read_timeout 600;
proxy_http_version 1.1;
proxy_set_header Connection "";
}

# Swagger UI (static files); legacy paths are redirected to the canonical ones.
rewrite ^/swagger/swagger.json$ $scheme://$http_host/swagger.json permanent;
rewrite ^(/swagger)/(.*)$ $scheme://$http_host/swagger permanent;
location /swagger {
auth_request off;
root /usr/share/pmm-managed/swagger;
try_files $uri /index.html break;
}

# pmm-managed gRPC APIs, matched by gRPC service-name prefix.
location /agent. {
grpc_pass grpc://managed-grpc;
# Disable request body size check for gRPC streaming, see https://trac.nginx.org/nginx/ticket/1642.
# pmm-managed uses grpc.MaxRecvMsgSize for that.
client_max_body_size 0;
}
location /inventory. {
grpc_pass grpc://managed-grpc;
}
location /management. {
grpc_pass grpc://managed-grpc;
}
location /server. {
grpc_pass grpc://managed-grpc;
}

# pmm-managed JSON APIs (gRPC-gateway).
location /v1/ {
proxy_pass http://managed-json/v1/;
proxy_http_version 1.1;
proxy_set_header Connection "";
}

# qan-api gRPC APIs should not be exposed.

# qan-api JSON APIs; matches before the generic /v1/ location (longest prefix wins).
location /v1/qan {
proxy_pass http://qan-api-json/v1/qan;
proxy_http_version 1.1;
proxy_set_header Connection "";
}

# VictoriaLogs web UI (VictoriaLogs listens on 127.0.0.1:9428).
location /vmui {
proxy_pass http://127.0.0.1:9428/select/vmui;
proxy_http_version 1.1;
proxy_read_timeout 600;
proxy_set_header Connection "";
}

# VictoriaLogs LogsQL query API, passed through with the original URI intact.
location /select/logsql {
proxy_pass http://127.0.0.1:9428$request_uri;
proxy_http_version 1.1;
proxy_read_timeout 600;
proxy_set_header Connection "";
}

# compatibility with PMM 1.x
rewrite ^/ping$ /v1/server/readyz;
# compatibility with PMM 2.x
rewrite ^/v1/readyz$ /v1/server/readyz;
rewrite ^/v1/version$ /v1/server/version;
rewrite ^/logs.zip$ /v1/server/logs.zip;

# logs.zip in both PMM 1.x and 2.x variants.
# Fix: this location previously read "/v1/server/logz.zip" ("z" typo), so it
# never matched the "/v1/server/logs.zip" target of the rewrite above and
# requests fell through to the generic /v1/ location instead.
location /v1/server/logs.zip {
proxy_pass http://managed-json;
proxy_http_version 1.1;
proxy_set_header Connection "";
}

# pmm-dump artifacts, served directly from disk.
location /dump {
alias /srv/dump/;
}

# This location stores static content for general pmm-server purposes.
# E.g. local-rss.xml contains Percona's news for when there is no internet connection.
location /pmm-static {
auth_request off;
alias /usr/share/pmm-server/static;
}

# Proxy requests to Percona's blog feed;
# fall back to the local RSS file if pmm-server is isolated from the internet.
# https://jira.percona.com/browse/PMM-6153
location = /percona-blog/feed {
auth_request off;
proxy_ssl_server_name on;

# Using a variable forces runtime DNS resolution via the configured resolver.
set $feed https://www.percona.com/blog/feed/;
proxy_pass $feed;
proxy_set_header User-Agent "$http_user_agent pmm-server/3.x";
error_page 500 502 503 504 /pmm-static/local-rss.xml;
}
}
Loading
Loading