diff --git a/README.md b/README.md
index 1e490f7..66cd934 100644
--- a/README.md
+++ b/README.md
@@ -90,4 +90,4 @@ and includes the full set of
 [free features](https://www.elastic.co/subscriptions).
 
 View the detailed release notes
-[here](https://www.elastic.co/guide/en/elasticsearch/reference/8.17/es-release-notes.html).
+[here](https://www.elastic.co/guide/en/elasticsearch/reference/9.3/es-release-notes.html).
diff --git a/elasticsearch/Dockerfile b/elasticsearch/Dockerfile
new file mode 100644
index 0000000..4c8c2b3
--- /dev/null
+++ b/elasticsearch/Dockerfile
@@ -0,0 +1,142 @@
+
+
+################################################################################
+# Build stage 1 `builder`:
+# Extract Elasticsearch artifact
+################################################################################
+
+FROM redhat/ubi9-minimal:latest AS builder
+
+RUN microdnf install -y findutils tar gzip
+
+# `tini` is a tiny but valid init for containers. This is used to cleanly
+# control how ES and any child processes are shut down.
+#
+# The tini GitHub page gives instructions for verifying the binary using
+# gpg, but the keyservers are slow to return the key and this can fail the
+# build. Instead, we check the binary against the published checksum.
+RUN set -eux; \
+    arch="$(rpm --query --queryformat='%{ARCH}' rpm)"; \
+    case "$arch" in \
+        aarch64) tini_bin='tini-arm64'; tini_sum='07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81' ;; \
+        x86_64) tini_bin='tini-amd64'; tini_sum='93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c' ;; \
+        *) echo >&2 "Unsupported architecture $arch"; exit 1 ;; \
+    esac ; \
+    curl -f --retry 10 -S -L -o /tmp/tini https://github.com/krallin/tini/releases/download/v0.19.0/${tini_bin}; \
+    echo "${tini_sum} /tmp/tini" | sha256sum -c -; \
+    mv /tmp/tini /bin/tini; \
+    chmod 0555 /bin/tini
+
+WORKDIR /usr/share/elasticsearch
+RUN arch="$(rpm --query --queryformat='%{ARCH}' rpm)" && curl -f --retry 10 -S -L --output /tmp/elasticsearch.tar.gz https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-9.3.0-linux-$arch.tar.gz
+RUN tar -zxf /tmp/elasticsearch.tar.gz --strip-components=1 && \
+# Configure the distribution for Docker
+    sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elasticsearch-env && \
+# Create required directory
+    mkdir data && \
+# Reset permissions on all directories
+    find . -type d -exec chmod 0555 {} + && \
+# Keep the default Elasticsearch log4j config available for file-style logging
+    mv config/log4j2.properties config/log4j2.file.properties && \
+# Reset permissions on all files
+    find . -type f -exec chmod 0444 {} + && \
+# Make CLI tools executable
+    chmod 0555 bin/* jdk/bin/* jdk/lib/jspawnhelper modules/x-pack-ml/platform/linux-*/bin/* && \
+# Make some directories writable. `bin` must be writable because
+# plugins can install their own CLI utilities.
+    chmod 0775 bin config config/jvm.options.d data logs plugins && \
+# Make some files writable
+    find config -type f -exec chmod 0664 {} + && \
+# Tighten up permissions on the ES home dir (the permissions of the contents are handled below)
+    chmod 0775 . && \
+# You can't install plugins that include configuration when running as `elasticsearch` and the `config`
+# dir is owned by `root`, because the installer tries to manipulate the permissions on the plugin's
+# config directory.
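+# (Illustrative, hypothetical example: a plugin installed at run time with
+# `bin/elasticsearch-plugin install <plugin>` as UID 1000 must be able to
+# write and chmod files under `config/`, hence the chown below.)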
+ chown 1000:1000 bin config config/jvm.options.d data logs plugins + +# The distribution includes a `config` directory, no need to create it +COPY --chmod=664 config/elasticsearch.yml config/log4j2.properties config/ + +################################################################################ +# Build stage 2 (the actual Elasticsearch image): +# +# Copy elasticsearch from stage 1 +# Add entrypoint +################################################################################ + +FROM redhat/ubi9-minimal:latest + +RUN microdnf install --setopt=tsflags=nodocs -y \ + nc shadow-utils zip unzip findutils procps-ng && \ + microdnf clean all + +RUN groupadd -g 1000 elasticsearch && \ + adduser -u 1000 -g 1000 -G 0 -d /usr/share/elasticsearch elasticsearch && \ + chown -R 0:0 /usr/share/elasticsearch + +ENV ELASTIC_CONTAINER=true + +COPY --from=builder /bin/tini /bin/tini + +WORKDIR /usr/share/elasticsearch + +COPY --from=builder --chown=0:0 /usr/share/elasticsearch . + +# Replace OpenJDK's built-in CA certificate keystore with the one from the OS +# vendor. The latter is superior in several ways. +# REF: https://github.com/elastic/elasticsearch-docker/issues/171 +RUN ln -sf /etc/pki/ca-trust/extracted/java/cacerts jdk/lib/security/cacerts + +ENV PATH=/usr/share/elasticsearch/bin:$PATH +ENV SHELL=/bin/bash + +COPY --chmod=0555 bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh + +RUN chmod g=u /etc/passwd && \ + find / -xdev -perm -4000 -exec chmod ug-s {} + && \ + chmod 0775 /usr/share/elasticsearch && \ + chown elasticsearch bin config config/jvm.options.d data logs plugins + +EXPOSE 9200 9300 + +LABEL org.label-schema.build-date="2026-01-29T10:05:46.708397977Z" \ + org.label-schema.license="Elastic-License-2.0" \ + org.label-schema.name="Elasticsearch" \ + org.label-schema.schema-version="1.0" \ + org.label-schema.url="https://www.elastic.co/products/elasticsearch" \ + org.label-schema.usage="https://www.elastic.co/guide/en/elasticsearch/reference/index.html" \ + org.label-schema.vcs-ref="17b451d8979a29e31935fe1eb901310350b30e62" \ + org.label-schema.vcs-url="https://github.com/elastic/elasticsearch" \ + org.label-schema.vendor="Elastic" \ + org.label-schema.version="9.3.0" \ + org.opencontainers.image.created="2026-01-29T10:05:46.708397977Z" \ + org.opencontainers.image.documentation="https://www.elastic.co/guide/en/elasticsearch/reference/index.html" \ + org.opencontainers.image.licenses="Elastic-License-2.0" \ + org.opencontainers.image.revision="17b451d8979a29e31935fe1eb901310350b30e62" \ + org.opencontainers.image.source="https://github.com/elastic/elasticsearch" \ + org.opencontainers.image.title="Elasticsearch" \ + org.opencontainers.image.url="https://www.elastic.co/products/elasticsearch" \ + org.opencontainers.image.vendor="Elastic" \ + org.opencontainers.image.version="9.3.0" + +LABEL name="Elasticsearch" \ + maintainer="infra@elastic.co" \ + vendor="Elastic" \ + version="9.3.0" \ + release="1" \ + summary="Elasticsearch" \ + description="You know, for search." + +RUN mkdir /licenses && ln LICENSE.txt /licenses/LICENSE + +# Our actual entrypoint is `tini`, a minimal but functional init program. It +# calls the entrypoint we provide, while correctly forwarding signals. 
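+#
+# The dummy CMD below can be overridden at run time to pass settings straight
+# through the entrypoint, e.g. (illustrative invocation):
+#
+#   docker run <image> elasticsearch -E cluster.name=demo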
+ENTRYPOINT ["/bin/tini", "--", "/usr/local/bin/docker-entrypoint.sh"]
+# Dummy overridable parameter parsed by entrypoint
+CMD ["eswrapper"]
+
+USER 1000:0
+
+################################################################################
+# End of multi-stage Dockerfile
+################################################################################
diff --git a/elasticsearch/bin/docker-entrypoint.sh b/elasticsearch/bin/docker-entrypoint.sh
new file mode 100644
index 0000000..d7b41b8
--- /dev/null
+++ b/elasticsearch/bin/docker-entrypoint.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+set -e
+
+# Files created by Elasticsearch should always be group-writable too
+umask 0002
+
+# Allow the user to specify a custom CMD, which may be bin/elasticsearch itself,
+# for example to pass `-E`-style parameters directly to Elasticsearch on k8s,
+# or simply to run /bin/bash to inspect the image
+if [[ "$1" == "eswrapper" || $(basename "$1") == "elasticsearch" ]]; then
+  # Rewrite the CMD args to remove the explicit command, so that we remain
+  # backwards compatible with the docs and configuration options from
+  # Elasticsearch versions < 6:
+  # https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docker.html#_d_override_the_image_8217_s_default_ulink_url_https_docs_docker_com_engine_reference_run_cmd_default_command_or_options_cmd_ulink
+  # Without this, a user could specify `elasticsearch -E x.y=z` but
+  # `bin/elasticsearch -E x.y=z` would not work. In any case, we want to
+  # continue through this script, and not exec early.
+  set -- "${@:2}"
+else
+  # Run whatever command the user wanted
+  exec "$@"
+fi
+
+# Allow environment variables to be set by creating a file with the
+# contents, and setting an environment variable with the suffix _FILE to
+# point to it. This can be used to provide secrets to a container, without
+# the values being specified explicitly when running the container.
+#
+# This is also sourced in elasticsearch-env, and is only needed here
+# as well because we use ELASTIC_PASSWORD below. Sourcing this script
+# is idempotent.
+source /usr/share/elasticsearch/bin/elasticsearch-env-from-file
+
+if [[ -f bin/elasticsearch-users ]]; then
+  # Check for the ELASTIC_PASSWORD environment variable to set the
+  # bootstrap password for Security.
+  #
+  # This is only required for the first node in a cluster with Security
+  # enabled, but we have no way of knowing which node we are yet. We'll just
+  # honor the variable if it's present.
+  if [[ -n "$ELASTIC_PASSWORD" ]]; then
+    [[ -f /usr/share/elasticsearch/config/elasticsearch.keystore ]] || (elasticsearch-keystore create)
+    if ! (elasticsearch-keystore has-passwd --silent) ; then
+      # keystore is unencrypted
+      if ! (elasticsearch-keystore list | grep -q '^bootstrap.password$'); then
+        (echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x 'bootstrap.password')
+      fi
+    else
+      # keystore requires password
+      if ! (echo "$KEYSTORE_PASSWORD" \
+          | elasticsearch-keystore list | grep -q '^bootstrap.password$') ; then
+        COMMANDS="$(printf "%s\n%s" "$KEYSTORE_PASSWORD" "$ELASTIC_PASSWORD")"
+        (echo "$COMMANDS" | elasticsearch-keystore add -x 'bootstrap.password')
+      fi
+    fi
+  fi
+fi
+
+if [[ -n "$ES_LOG_STYLE" ]]; then
+  case "$ES_LOG_STYLE" in
+    console)
+      # This is the default. Nothing to do.
+      ;;
+    file)
+      # Overwrite the default config with the stack config. Do this as a
+      # copy, not a move, in case the container is restarted.
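+      # For example (illustrative invocation), starting the container with
+      # `docker run -e ES_LOG_STYLE=file <image>` selects the rolling-file
+      # layout preserved earlier as log4j2.file.properties.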
+      cp -f /usr/share/elasticsearch/config/log4j2.file.properties /usr/share/elasticsearch/config/log4j2.properties
+      ;;
+    *)
+      echo "ERROR: ES_LOG_STYLE set to [$ES_LOG_STYLE]. Expected [console] or [file]" >&2
+      exit 1 ;;
+  esac
+fi
+
+if [[ -n "$ENROLLMENT_TOKEN" ]]; then
+  POSITIONAL_PARAMETERS="--enrollment-token $ENROLLMENT_TOKEN"
+else
+  POSITIONAL_PARAMETERS=""
+fi
+
+# Signal forwarding and child reaping are handled by `tini`, which is the
+# actual entrypoint of the container
+exec /usr/share/elasticsearch/bin/elasticsearch "$@" $POSITIONAL_PARAMETERS <<<"$KEYSTORE_PASSWORD"
diff --git a/elasticsearch/bin/docker-openjdk b/elasticsearch/bin/docker-openjdk
new file mode 100644
index 0000000..f3fe072
--- /dev/null
+++ b/elasticsearch/bin/docker-openjdk
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -Eeuo pipefail
+
+# Update the "cacerts" bundle to use the OS vendor's CA certificates (and make
+# sure it stays up to date with changes to the OS trust store)
+
+trust extract \
+    --overwrite \
+    --format=java-cacerts \
+    --filter=ca-anchors \
+    --purpose=server-auth \
+    /usr/share/elasticsearch/jdk/lib/security/cacerts
diff --git a/elasticsearch/config/elasticsearch.yml b/elasticsearch/config/elasticsearch.yml
new file mode 100644
index 0000000..50b1547
--- /dev/null
+++ b/elasticsearch/config/elasticsearch.yml
@@ -0,0 +1,2 @@
+cluster.name: "docker-cluster"
+network.host: 0.0.0.0
diff --git a/elasticsearch/config/log4j2.properties b/elasticsearch/config/log4j2.properties
new file mode 100644
index 0000000..3a5ab75
--- /dev/null
+++ b/elasticsearch/config/log4j2.properties
@@ -0,0 +1,225 @@
+status = error
+
+######## Server JSON ############################
+appender.rolling.type = Console
+appender.rolling.name = rolling
+appender.rolling.layout.type = ECSJsonLayout
+appender.rolling.layout.dataset = elasticsearch.server
+
+################################################
+
+################################################
+
+rootLogger.level = info
+rootLogger.appenderRef.rolling.ref = rolling
+
+######## Deprecation JSON #######################
+appender.deprecation_rolling.type = Console
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.layout.type = ECSJsonLayout
+# Intentionally follows a different pattern to above
+appender.deprecation_rolling.layout.dataset = elasticsearch.deprecation
+appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter
+
+appender.header_warning.type = HeaderWarningAppender
+appender.header_warning.name = header_warning
+#################################################
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = WARN
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.appenderRef.header_warning.ref = header_warning
+logger.deprecation.additivity = false
+
+######## Search slowlog JSON ####################
+appender.index_search_slowlog_rolling.type = Console
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout
+appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog
+
+#################################################
+
+#################################################
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false + +######## Indexing slowlog JSON ################## +appender.index_indexing_slowlog_rolling.type = Console +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout +appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog + +################################################# + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false + +######## ES|QL query log JSON #################### +appender.esql_querylog_rolling.type = Console +appender.esql_querylog_rolling.name = esql_querylog_rolling +appender.esql_querylog_rolling.layout.type = ECSJsonLayout +appender.esql_querylog_rolling.layout.dataset = elasticsearch.esql_querylog + +################################################# + +logger.esql_querylog_rolling.name = esql.querylog +logger.esql_querylog_rolling.level = trace +logger.esql_querylog_rolling.appenderRef.esql_querylog_rolling.ref = esql_querylog_rolling +logger.esql_querylog_rolling.additivity = false + +# Suppress spurious entitlement warnings for exceptions that are caught +logger.entitlements_awt.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.(server).java.desktop.java.awt +logger.entitlements_awt.level = error + +logger.org_apache_pdfbox.name = org.apache.pdfbox +logger.org_apache_pdfbox.level = off + +logger.org_apache_poi.name = org.apache.poi +logger.org_apache_poi.level = off + +logger.org_apache_fontbox.name = org.apache.fontbox +logger.org_apache_fontbox.level = off + +logger.org_apache_xmlbeans.name = org.apache.xmlbeans +logger.org_apache_xmlbeans.level = off + +logger.entitlements_ingest_attachment.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.ingest-attachment.ALL-UNNAMED +logger.entitlements_ingest_attachment.level = error + +logger.entitlements_repository_gcs.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.repository-gcs.ALL-UNNAMED +logger.entitlements_repository_gcs.level = error + +logger.com_amazonaws.name = com.amazonaws +logger.com_amazonaws.level = warn + +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error + +logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics +logger.com_amazonaws_metrics_AwsSdkMetrics.level = error + +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error + +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error + +logger.entitlements_repository_s3.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.repository-s3.ALL-UNNAMED +logger.entitlements_repository_s3.level = error + +appender.audit_rolling.type = Console +appender.audit_rolling.name = audit_rolling +appender.audit_rolling.layout.type = PatternLayout +appender.audit_rolling.layout.pattern = {\ + "type":"audit", \ + 
"timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\ + %varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\ + %varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\ + %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\ + %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\ + %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\ + %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\ + %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\ + %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\ + %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\ + %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\ + %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\ + %varsNotEmpty{, "user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.roles":%map{user.roles}}\ + %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\ + %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\ + %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\ + %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\ + %varsNotEmpty{, "cross_cluster_access":%map{cross_cluster_access}}\ + %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\ + %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\ + %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\ + %varsNotEmpty{, "realm_domain":"%enc{%map{realm_domain}}{JSON}"}\ + %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\ + %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\ + %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\ + %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\ + %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\ + %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\ + %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\ + %varsNotEmpty{, "indices":%map{indices}}\ + %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\ + %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\ + %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\ + %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\ + %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\ + %varsNotEmpty{, "put":%map{put}}\ + %varsNotEmpty{, "delete":%map{delete}}\ + %varsNotEmpty{, "change":%map{change}}\ + %varsNotEmpty{, "create":%map{create}}\ + %varsNotEmpty{, "invalidate":%map{invalidate}}\ + }%n +# "node.name" node name from the `elasticsearch.yml` settings +# "node.id" node id which should not change between cluster restarts +# "host.name" unresolved hostname of the local node +# "host.ip" the local bound ip (i.e. the ip listening for connections) +# "origin.type" a received REST request is translated into one or more transport requests. 
This indicates which processing layer generated the event "rest" or "transport" (internal) +# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc. +# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal" +# "user.name" the subject name as authenticated by a realm +# "user.run_by.name" the original authenticated subject name that is impersonating another one. +# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as. +# "user.realm" the name of the realm that authenticated "user.name" +# "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain +# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name") +# "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain +# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from +# "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain +# "user.roles" the roles array of the user; these are the roles that are granting privileges +# "apikey.id" this field is present if and only if the "authentication.type" is "api_key" +# "apikey.name" this field is present if and only if the "authentication.type" is "api_key" +# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token +# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token +# "cross_cluster_access" this field is present if and only if the associated authentication occurred cross cluster +# "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change" +# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node +# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated +# "realm_domain" if "realm" is under a domain, this is the name of the domain +# "url.path" the URI component between the port and the query string; it is percent (URL) encoded +# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded +# "request.method" the method of the HTTP request, i.e. 
one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT +# "request.body" the content of the request body entity, JSON escaped +# "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request +# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal) +# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) +# "indices" the array of indices that the "action" is acting upon +# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header +# "trace_id" an identifier conveyed by the part of "traceparent" request header +# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) +# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event +# "rule" name of the applied rule if the "origin.type" is "ip_filter" +# the "put", "delete", "change", "create", "invalidate" fields are only present +# when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect + +logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail +logger.xpack_security_audit_logfile.level = info +logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling +logger.xpack_security_audit_logfile.additivity = false + +logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature +logger.xmlsig.level = error +logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter +logger.samlxml_decrypt.level = fatal +logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter +logger.saml2_decrypt.level = fatal + +logger.entitlements_xpack_security.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.x-pack-security.org.elasticsearch.security +logger.entitlements_xpack_security.level = error + +logger.entitlements_inference.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.x-pack-inference.software.amazon.awssdk.profiles +logger.entitlements_inference.level = error diff --git a/kibana/Dockerfile b/kibana/Dockerfile new file mode 100644 index 0000000..d9e5475 --- /dev/null +++ b/kibana/Dockerfile @@ -0,0 +1,142 @@ +################################################################################ +# This Dockerfile was generated from the template at: +# src/dev/build/tasks/os_packages/docker_generator/templates/Dockerfile +# +# Beginning of multi stage Dockerfile +################################################################################ + +################################################################################ +# Build stage 0 `builder`: +# Extract Kibana artifact +################################################################################ +FROM redhat/ubi9-minimal:latest AS builder + +RUN microdnf install -y findutils tar gzip + +RUN cd /tmp && \ + arch="$(rpm --query --queryformat='%{ARCH}' rpm)" && \ + curl -f --retry 8 -s -L \ + --output kibana.tar.gz \ + https://artifacts.elastic.co/downloads/kibana/kibana-9.3.0-linux-${arch}.tar.gz && \ + cd - + +RUN mkdir /usr/share/kibana +WORKDIR /usr/share/kibana +RUN tar \ + --strip-components=1 \ + -zxf /tmp/kibana.tar.gz + +# Ensure that group permissions are the 
same as user permissions. +# This will help when relying on GID-0 to run Kibana, rather than UID-1000. +# OpenShift does this, for example. +# REF: https://docs.openshift.org/latest/creating_images/guidelines.html +RUN chmod -R g=u /usr/share/kibana + +# Add an init process, check the checksum to make sure it's a match +RUN set -e ; \ + TINI_BIN="" ; \ + arch="$(rpm --query --queryformat='%{ARCH}' rpm)"; \ + case "$arch" in \ + aarch64) \ + TINI_BIN='tini-arm64' ; \ + TINI_CHECKSUM='07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81' ; \ + ;; \ + x86_64) \ + TINI_BIN='tini-amd64' ; \ + TINI_CHECKSUM='93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c' ; \ + ;; \ + *) echo >&2 "Unsupported architecture $arch" ; exit 1 ;; \ + esac ; \ + TINI_VERSION='v0.19.0' ; \ + curl -f --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}" ; \ + echo "${TINI_CHECKSUM} ${TINI_BIN}" | sha256sum -c - ; \ + mv "${TINI_BIN}" /bin/tini ; \ + chmod +x /bin/tini +RUN mkdir -p /usr/share/fonts/local && \ + curl --retry 8 -S -L -o /usr/share/fonts/local/NotoSansCJK-Regular.ttc https://github.com/googlefonts/noto-cjk/raw/NotoSansV2.001/NotoSansCJK-Regular.ttc && \ + echo "5dcd1c336cc9344cb77c03a0cd8982ca8a7dc97d620fd6c9c434e02dcb1ceeb3 /usr/share/fonts/local/NotoSansCJK-Regular.ttc" | sha256sum -c - + + +################################################################################ +# Build stage 1 (the actual Kibana image): +# +# Copy kibana from stage 0 +# Add entrypoint +################################################################################ +FROM redhat/ubi9-minimal:latest +EXPOSE 5601 + +RUN microdnf install --setopt=tsflags=nodocs -y \ + fontconfig liberation-fonts-common freetype shadow-utils nss findutils && \ + microdnf clean all + +# Bring in Kibana from the initial stage. +COPY --from=builder --chown=1000:0 /usr/share/kibana /usr/share/kibana +COPY --from=builder --chown=0:0 /bin/tini /bin/tini +# Load reporting fonts +COPY --from=builder --chown=0:0 /usr/share/fonts/local/NotoSansCJK-Regular.ttc /usr/share/fonts/local/NotoSansCJK-Regular.ttc +RUN fc-cache -v +WORKDIR /usr/share/kibana + +RUN ln -s /usr/share/kibana /opt/kibana + +ENV ELASTIC_CONTAINER=true +ENV PATH=/usr/share/kibana/bin:$PATH + +# Set some Kibana configuration defaults. +COPY --chown=1000:0 config/kibana.yml /usr/share/kibana/config/kibana.yml + +# Add the launcher/wrapper script. It knows how to interpret environment +# variables and translate them to Kibana CLI options. +COPY bin/kibana-docker /usr/local/bin/ + +# Ensure gid 0 write permissions for OpenShift. +RUN chmod g+ws /usr/share/kibana && \ + find /usr/share/kibana -gid 0 -and -not -perm /g+w -exec chmod g+w {} \; + +# Remove the suid bit everywhere to mitigate "Stack Clash" +RUN find / -xdev -perm -4000 -exec chmod u-s {} + + +# Provide a non-root user to run the process. 
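+# (Since group permissions mirror user permissions, via `chmod -R g=u` above,
+# the container can also run under an arbitrary UID belonging to GID 0,
+# e.g. `docker run --user 12345:0 <image>`; illustrative invocation.)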
+RUN groupadd --gid 1000 kibana && \ + useradd --uid 1000 --gid 1000 -G 0 \ + --home-dir /usr/share/kibana --no-create-home \ + kibana + +LABEL org.label-schema.build-date="2026-01-29T09:38:21.004Z" \ + org.label-schema.license="Elastic License" \ + org.label-schema.name="Kibana" \ + org.label-schema.schema-version="1.0" \ + org.label-schema.url="https://www.elastic.co/products/kibana" \ + org.label-schema.usage="https://www.elastic.co/guide/en/kibana/reference/index.html" \ + org.label-schema.vcs-ref="30ab63cc0017fe2da7a84fb9b285dd762468802d" \ + org.label-schema.vcs-url="https://github.com/elastic/kibana" \ + org.label-schema.vendor="Elastic" \ + org.label-schema.version="9.3.0" \ + org.opencontainers.image.created="2026-01-29T09:38:21.004Z" \ + org.opencontainers.image.documentation="https://www.elastic.co/guide/en/kibana/reference/index.html" \ + org.opencontainers.image.licenses="Elastic License" \ + org.opencontainers.image.revision="30ab63cc0017fe2da7a84fb9b285dd762468802d" \ + org.opencontainers.image.source="https://github.com/elastic/kibana" \ + org.opencontainers.image.title="Kibana" \ + org.opencontainers.image.url="https://www.elastic.co/products/kibana" \ + org.opencontainers.image.vendor="Elastic" \ + org.opencontainers.image.version="9.3.0" + +LABEL name="Kibana" \ + maintainer="infra@elastic.co" \ + vendor="Elastic" \ + version="9.3.0" \ + release="1" \ + summary="Kibana" \ + description="Your window into the Elastic Stack." + +RUN mkdir /licenses && ln LICENSE.txt /licenses/LICENSE + +ENTRYPOINT ["/bin/tini", "--"] + + +CMD ["/usr/local/bin/kibana-docker"] + + +USER 1000 diff --git a/kibana/bin/kibana-docker b/kibana/bin/kibana-docker new file mode 100755 index 0000000..8e6192d --- /dev/null +++ b/kibana/bin/kibana-docker @@ -0,0 +1,490 @@ +#!/bin/bash + +# Run Kibana, using environment variables to set longopts defining Kibana's +# configuration. +# +# eg. 
Setting the environment variable: +# +# ELASTICSEARCH_LOGQUERIES=true +# +# will cause Kibana to be invoked with: +# +# --elasticsearch.logQueries=true + +kibana_vars=( + apm_oss.apmAgentConfigurationIndex + apm_oss.errorIndices + apm_oss.indexPattern + apm_oss.metricsIndices + apm_oss.onboardingIndices + apm_oss.sourcemapIndices + apm_oss.spanIndices + apm_oss.transactionIndices + console.proxyConfig + console.proxyFilter + csp.strict + csp.warnLegacyBrowsers + csp.disableUnsafeEval + csp.script_src + csp.worker_src + csp.style_src + csp.connect_src + csp.default_src + csp.font_src + csp.frame_src + csp.img_src + csp.object_src + csp.frame_ancestors + csp.report_uri + csp.report_to + csp.report_only.form_action + csp.report_only.object_src + permissionsPolicy.report_to + data.autocomplete.valueSuggestions.terminateAfter + data.autocomplete.valueSuggestions.timeout + data.search.asyncSearch.waitForCompletion + data.search.asyncSearch.keepAlive + data.search.asyncSearch.batchedReduceSize + data.search.asyncSearch.pollInterval + data.search.sessions.defaultExpiration + data.search.sessions.enabled + data.search.sessions.maxUpdateRetries + data.search.sessions.notTouchedInProgressTimeout + data.search.sessions.notTouchedTimeout + data.search.sessions.pageSize + data.search.sessions.trackingInterval + unifiedSearch.autocomplete.valueSuggestions.terminateAfter + unifiedSearch.autocomplete.valueSuggestions.timeout + unifiedSearch.autocomplete.querySuggestions.enabled + unifiedSearch.autocomplete.valueSuggestions.enabled + unifiedSearch.autocomplete.valueSuggestions.tiers + elasticsearch.customHeaders + elasticsearch.hosts + elasticsearch.logQueries + elasticsearch.password + elasticsearch.pingTimeout + elasticsearch.publicBaseUrl + elasticsearch.requestHeadersWhitelist + elasticsearch.requestTimeout + elasticsearch.serviceAccountToken + elasticsearch.shardTimeout + elasticsearch.sniffInterval + elasticsearch.sniffOnConnectionFault + elasticsearch.sniffOnStart + elasticsearch.ssl.alwaysPresentCertificate + elasticsearch.ssl.certificate + elasticsearch.ssl.certificateAuthorities + elasticsearch.ssl.key + elasticsearch.ssl.keyPassphrase + elasticsearch.ssl.keystore.password + elasticsearch.ssl.keystore.path + elasticsearch.ssl.truststore.password + elasticsearch.ssl.truststore.path + elasticsearch.ssl.verificationMode + elasticsearch.username + externalUrl.policy + i18n.locale + interactiveSetup.enabled + interactiveSetup.connectionCheck.interval + kibana.autocompleteTerminateAfter + kibana.autocompleteTimeout + kibana.index + logging.appenders + logging.appenders.console + logging.appenders.file + logging.loggers + logging.loggers.appenders + logging.loggers.level + logging.loggers.name + logging.root + logging.root.appenders + logging.root.level + map.emsUrl + map.includeElasticMapsService + map.tilemap.options.attribution + map.tilemap.options.maxZoom + map.tilemap.options.minZoom + map.tilemap.options.subdomains + map.tilemap.url + migrations.batchSize + migrations.maxBatchSizeBytes + migrations.pollInterval + migrations.retryAttempts + migrations.scrollDuration + migrations.skip + migrations.useCumulativeLogger + monitoring.cluster_alerts.email_notifications.email_address + monitoring.kibana.collection.enabled + monitoring.kibana.collection.interval + monitoring.ui.ccs.enabled + monitoring.ui.container.elasticsearch.enabled + monitoring.ui.container.logstash.enabled + monitoring.ui.elasticsearch.hosts + monitoring.ui.elasticsearch.logFetchCount + monitoring.ui.elasticsearch.password + 
monitoring.ui.elasticsearch.pingTimeout + monitoring.ui.elasticsearch.ssl.certificateAuthorities + monitoring.ui.elasticsearch.ssl.verificationMode + monitoring.ui.elasticsearch.username + monitoring.ui.enabled + monitoring.ui.logs.index + monitoring.ui.max_bucket_size + monitoring.ui.min_interval_seconds + newsfeed.enabled + node.roles + ops.cGroupOverrides.cpuAcctPath + ops.cGroupOverrides.cpuPath + ops.interval + path.data + pid.file + profiler.signal + regionmap + savedObjects.maxImportExportSize + savedObjects.maxImportPayloadBytes + savedObjects.allowHttpApiAccess + security.showInsecureClusterWarning + server.basePath + server.cdn.url + server.compression.enabled + server.compression.referrerWhitelist + server.cors + server.cors.allowCredentials + server.cors.allowOrigin + server.cors.enabled + server.cors.origin + server.customResponseHeaders + server.defaultRoute + server.host + server.http2.allowUnsecure + server.keepAliveTimeout + server.maxPayload + server.maxPayloadBytes + server.name + server.port + server.protocol + server.prototypeHardening + server.publicBaseUrl + server.requestId.allowFromAnyIp + server.requestId.ipAllowlist + server.rewriteBasePath + server.restrictInternalApis + server.securityResponseHeaders.disableEmbedding + server.securityResponseHeaders.permissionsPolicy + server.securityResponseHeaders.referrerPolicy + server.securityResponseHeaders.strictTransportSecurity + server.securityResponseHeaders.xContentTypeOptions + server.securityResponseHeaders.crossOriginOpenerPolicy + server.shutdownTimeout + server.socketTimeout + server.ssl.cert + server.ssl.certificate + server.ssl.certificateAuthorities + server.ssl.cipherSuites + server.ssl.clientAuthentication + server.ssl.enabled + server.ssl.key + server.ssl.keyPassphrase + server.ssl.keystore.password + server.ssl.keystore.path + server.ssl.redirectHttpFromPort + server.ssl.supportedProtocols + server.ssl.truststore.password + server.ssl.truststore.path + server.uuid + server.xsrf.allowlist + server.xsrf.disableProtection + status.allowAnonymous + status.v6ApiFormat + telemetry.allowChangingOptInStatus + telemetry.enabled + telemetry.hidePrivacyStatement + telemetry.optIn + telemetry.sendUsageTo + telemetry.sendUsageFrom + telemetry.metrics.enabled + telemetry.metrics.interval + telemetry.metrics.exporters # Allow specifying the array here or.. + telemetry.metrics.exporters.grpc.url # ... or a single exporter by specifying these 2. 
+ telemetry.metrics.exporters.grpc.headers + telemetry.metrics.exporters.grpc.exportIntervalMillis + telemetry.metrics.exporters.grpc.temporalityPreference + tilemap.options.attribution + tilemap.options.maxZoom + tilemap.options.minZoom + tilemap.options.subdomains + tilemap.url + usageCollection.maxCollectorConcurrency + vega.enableExternalUrls + vis_type_vega.enableExternalUrls + xpack.actions.allowedHosts + xpack.actions.customHostSettings + xpack.actions.email.domain_allowlist + xpack.actions.email.recipient_allowlist + xpack.actions.email.maximum_body_length + xpack.actions.email.services.ses.host + xpack.actions.email.services.ses.port + xpack.actions.email.services.enabled + xpack.actions.enableFooterInEmail + xpack.actions.enabledActionTypes + xpack.actions.maxResponseContentLength + xpack.actions.preconfigured + xpack.actions.preconfiguredAlertHistoryEsIndex + xpack.actions.proxyBypassHosts + xpack.actions.proxyHeaders + xpack.actions.proxyOnlyHosts + xpack.actions.proxyUrl + xpack.actions.responseTimeout + xpack.actions.ssl.proxyVerificationMode + xpack.actions.ssl.verificationMode + xpack.actions.webhook.ssl.pfx.enabled + xpack.alerting.healthCheck.interval + xpack.alerting.invalidateApiKeysTask.interval + xpack.alerting.invalidateApiKeysTask.removalDelay + xpack.alerting.rules.run.timeout + xpack.alerting.rules.run.ruleTypeOverrides + xpack.alerting.cancelAlertsOnRuleTimeout + xpack.alerting.rules.minimumScheduleInterval.value + xpack.alerting.rules.minimumScheduleInterval.enforce + xpack.alerting.rules.run.actions.max + xpack.alerting.rules.run.alerts.max + xpack.alerting.rules.run.actions.connectorTypeOverrides + xpack.alerting.rules.maxScheduledPerMinute + xpack.alerting.disabledRuleTypes + xpack.alerting.enabledRuleTypes + xpack.apm.indices.error + xpack.apm.indices.metric + xpack.apm.indices.onboarding + xpack.apm.indices.sourcemap + xpack.apm.indices.span + xpack.apm.indices.transaction + xpack.apm.maxServiceEnvironments + xpack.apm.searchAggregatedTransactions + xpack.apm.serviceMapEnabled + xpack.apm.serviceMapFingerprintBucketSize + xpack.apm.serviceMapFingerprintGlobalBucketSize + xpack.apm.ui.enabled + xpack.apm.ui.maxTraceItems + xpack.apm.ui.transactionGroupBucketSize + xpack.banners.backgroundColor + xpack.banners.disableSpaceBanners + xpack.banners.placement + xpack.banners.textColor + xpack.banners.textContent + xpack.cases.enabled + xpack.cases.analytics.index.enabled + xpack.cases.incrementalId.enabled + xpack.cases.incrementalId.taskIntervalMinutes + xpack.cases.incrementalId.taskStartDelayMinutes + xpack.cases.files.allowedMimeTypes + xpack.cases.files.maxSize + xpack.cases.stack.enabled + xpack.code.disk.thresholdEnabled + xpack.code.disk.watermarkLow + xpack.code.indexRepoFrequencyMs + xpack.code.lsp.verbose + xpack.code.maxWorkspace + xpack.code.security.enableGitCertCheck + xpack.code.security.gitHostWhitelist + xpack.code.security.gitProtocolWhitelist + xpack.code.ui.enabled + xpack.code.updateRepoFrequencyMs + xpack.code.verbose + xpack.data_enhanced.search.sessions.defaultExpiration + xpack.data_enhanced.search.sessions.enabled + xpack.data_enhanced.search.sessions.maxUpdateRetries + xpack.data_enhanced.search.sessions.notTouchedInProgressTimeout + xpack.data_enhanced.search.sessions.notTouchedTimeout + xpack.data_enhanced.search.sessions.pageSize + xpack.data_enhanced.search.sessions.trackingInterval + xpack.discoverEnhanced.actions.exploreDataInChart.enabled + xpack.discoverEnhanced.actions.exploreDataInContextMenu.enabled + 
xpack.encryptedSavedObjects.encryptionKey + xpack.encryptedSavedObjects.keyRotation.decryptionOnlyKeys + xpack.event_log.indexEntries + xpack.event_log.logEntries + xpack.fleet.agentPolicies + xpack.fleet.agents.elasticsearch.host + xpack.fleet.agents.elasticsearch.hosts + xpack.fleet.agents.enabled + xpack.fleet.agents.fleet_server.hosts + xpack.fleet.agents.kibana.host + xpack.fleet.agents.tlsCheckDisabled + xpack.fleet.packages + xpack.fleet.packageVerification.gpgKeyPath + xpack.fleet.registryProxyUrl + xpack.fleet.registryUrl + xpack.graph.canEditDrillDownUrls + xpack.graph.savePolicy + xpack.infra.query.partitionFactor + xpack.infra.query.partitionSize + xpack.infra.sources.default.fields.container + xpack.infra.sources.default.fields.host + xpack.infra.sources.default.fields.message + xpack.infra.sources.default.fields.pod + xpack.infra.sources.default.fields.tiebreaker + xpack.infra.sources.default.fields.timestamp + xpack.infra.sources.default.logAlias + xpack.infra.sources.default.metricAlias + xpack.ingestManager.fleet.tlsCheckDisabled + xpack.ingestManager.registryUrl + xpack.observability.annotations.index + xpack.observability.unsafe.alertDetails.metrics.enabled + xpack.observability.unsafe.alertDetails.logs.enabled + xpack.observability.unsafe.alertDetails.uptime.enabled + xpack.observability.unsafe.alertDetails.observability.enabled + xpack.observability.unsafe.thresholdRule.enabled + xpack.productDocBase.artifactRepositoryUrl + xpack.reporting.capture.browser.autoDownload + xpack.reporting.capture.browser.chromium.disableSandbox + xpack.reporting.capture.browser.chromium.maxScreenshotDimension + xpack.reporting.capture.browser.chromium.proxy.bypass + xpack.reporting.capture.browser.chromium.proxy.enabled + xpack.reporting.capture.browser.chromium.proxy.server + xpack.reporting.capture.browser.type + xpack.reporting.capture.concurrency + xpack.reporting.capture.loadDelay + xpack.reporting.capture.maxAttempts + xpack.reporting.capture.networkPolicy + xpack.reporting.capture.settleTime + xpack.reporting.capture.timeout + xpack.reporting.capture.timeouts.openUrl + xpack.reporting.capture.timeouts.openUrl + xpack.reporting.capture.timeouts.renderComplete + xpack.reporting.capture.timeouts.waitForElements + xpack.reporting.capture.viewport.height + xpack.reporting.capture.viewport.width + xpack.reporting.capture.zoom + xpack.reporting.csv.checkForFormulas + xpack.reporting.csv.enablePanelActionDownload + xpack.reporting.csv.escapeFormulaValues + xpack.reporting.csv.maxSizeBytes + xpack.reporting.csv.scroll.duration + xpack.reporting.csv.scroll.size + xpack.reporting.csv.scroll.strategy + xpack.reporting.csv.useByteOrderMarkEncoding + xpack.reporting.csv.maxRows + xpack.reporting.enabled + xpack.reporting.encryptionKey + xpack.reporting.kibanaApp + xpack.reporting.kibanaServer.hostname + xpack.reporting.kibanaServer.port + xpack.reporting.kibanaServer.protocol + xpack.reporting.poll.jobCompletionNotifier.interval + xpack.reporting.poll.jobCompletionNotifier.intervalErrorMultiplier + xpack.reporting.poll.jobsRefresh.interval + xpack.reporting.poll.jobsRefresh.intervalErrorMultiplier + xpack.reporting.queue.indexInterval + xpack.reporting.queue.pollEnabled + xpack.reporting.queue.pollInterval + xpack.reporting.queue.pollIntervalErrorMultiplier + xpack.reporting.queue.timeout + xpack.reporting.roles.allow + xpack.reporting.roles.enabled + xpack.ruleRegistry.write.enabled + xpack.screenshotting.browser.chromium.disableSandbox + xpack.security.accessAgreement.message + 
xpack.security.audit.appender.fileName
+  xpack.security.audit.appender.layout.highlight
+  xpack.security.audit.appender.layout.pattern
+  xpack.security.audit.appender.layout.type
+  xpack.security.audit.appender.legacyLoggingConfig
+  xpack.security.audit.appender.policy.interval
+  xpack.security.audit.appender.policy.modulate
+  xpack.security.audit.appender.policy.size
+  xpack.security.audit.appender.policy.type
+  xpack.security.audit.appender.strategy.max
+  xpack.security.audit.appender.strategy.pattern
+  xpack.security.audit.appender.strategy.type
+  xpack.security.audit.appender.type
+  xpack.security.audit.enabled
+  xpack.security.audit.include_saved_object_names
+  xpack.security.audit.ignore_filters
+  xpack.security.authc.http.autoSchemesEnabled
+  xpack.security.authc.http.enabled
+  xpack.security.authc.http.schemes
+  xpack.security.authc.oidc.realm
+  xpack.security.authc.providers
+  xpack.security.authc.saml.maxRedirectURLSize
+  xpack.security.authc.saml.realm
+  xpack.security.authc.selector.enabled
+  xpack.security.cookieName
+  xpack.security.encryptionKey
+  xpack.security.fipsMode.enabled
+  xpack.security.loginAssistanceMessage
+  xpack.security.loginHelp
+  xpack.security.sameSiteCookies
+  xpack.security.secureCookies
+  xpack.security.session.cleanupInterval
+  xpack.security.session.concurrentSessions.maxSessions
+  xpack.security.session.idleTimeout
+  xpack.security.session.lifespan
+  xpack.security.sessionTimeout
+  xpack.security.showInsecureClusterWarning
+  xpack.securitySolution.alertMergeStrategy
+  xpack.securitySolution.alertIgnoreFields
+  xpack.securitySolution.maxExceptionsImportSize
+  xpack.securitySolution.maxRuleImportExportSize
+  xpack.securitySolution.maxRuleImportPayloadBytes
+  xpack.securitySolution.maxTimelineImportExportSize
+  xpack.securitySolution.maxTimelineImportPayloadBytes
+  xpack.securitySolution.packagerTaskInterval
+  xpack.securitySolution.prebuiltRulesPackageVersion
+  xpack.spaces.maxSpaces
+  xpack.spaces.defaultSolution
+  xpack.task_manager.capacity
+  xpack.task_manager.claim_strategy
+  xpack.task_manager.auto_calculate_default_ech_capacity
+  xpack.task_manager.discovery.active_nodes_lookback
+  xpack.task_manager.discovery.interval
+  xpack.task_manager.invalidate_api_key_task.interval
+  xpack.task_manager.invalidate_api_key_task.removalDelay
+  xpack.task_manager.kibanas_per_partition
+  xpack.task_manager.max_attempts
+  xpack.task_manager.max_workers
+  xpack.task_manager.monitored_aggregated_stats_refresh_rate
+  xpack.task_manager.monitored_stats_required_freshness
+  xpack.task_manager.monitored_stats_running_average_window
+  xpack.task_manager.monitored_stats_health_verbose_log.enabled
+  xpack.task_manager.monitored_stats_health_verbose_log.warn_delayed_task_start_in_seconds
+  xpack.task_manager.monitored_task_execution_thresholds
+  xpack.task_manager.poll_interval
+  xpack.task_manager.request_capacity
+  xpack.task_manager.version_conflict_threshold
+  xpack.task_manager.event_loop_delay.monitor
+  xpack.task_manager.event_loop_delay.warn_threshold
+  xpack.task_manager.worker_utilization_running_average_window
+  xpack.uptime.index
+  serverless
+)
+
+longopts=''
+for kibana_var in ${kibana_vars[*]}; do
+  # 'elasticsearch.hosts' -> 'ELASTICSEARCH_HOSTS'
+  env_var=$(echo ${kibana_var^^} | tr . _)
+
+  # Indirectly look up env var values via the name of the var.
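+  # For example (illustrative), kibana_var='elasticsearch.hosts' yields
+  # env_var='ELASTICSEARCH_HOSTS', and ${!env_var} below expands to the
+  # contents of $ELASTICSEARCH_HOSTS, if that variable is set.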
+  # REF: http://tldp.org/LDP/abs/html/bashver2.html#EX78
+  value=${!env_var}
+  if [[ -n $value ]]; then
+    longopt="--${kibana_var}=${value}"
+    longopts+=" ${longopt}"
+  fi
+done
+
+# Files created at run-time should be group-writable, for OpenShift's sake.
+umask 0002
+
+# The virtual file /proc/self/cgroup should list the current cgroup
+# membership. For each hierarchy, you can follow the cgroup path from
+# this file to the cgroup filesystem (usually /sys/fs/cgroup/) and
+# introspect the statistics for the cgroup for the given
+# hierarchy. Alas, Docker breaks this by mounting the container
+# statistics at the root while leaving the cgroup paths as the actual
+# paths. Kibana therefore provides a mechanism to override reading the
+# cgroup path from /proc/self/cgroup and instead use the cgroup paths
+# defined in the configuration properties ops.cGroupOverrides.cpuPath
+# and ops.cGroupOverrides.cpuAcctPath. We set those values here so that
+# cgroup statistics are available for the container this process will
+# run in.
+
+exec /usr/share/kibana/bin/kibana --ops.cGroupOverrides.cpuPath=/ --ops.cGroupOverrides.cpuAcctPath=/ ${longopts} "$@"
diff --git a/kibana/config/kibana.yml b/kibana/config/kibana.yml
new file mode 100644
index 0000000..230ba1c
--- /dev/null
+++ b/kibana/config/kibana.yml
@@ -0,0 +1,9 @@
+#
+# ** THIS IS AN AUTO-GENERATED FILE **
+#
+
+# Default Kibana configuration for docker target
+server.host: "0.0.0.0"
+server.shutdownTimeout: "5s"
+elasticsearch.hosts: [ "http://elasticsearch:9200" ]
+monitoring.ui.container.elasticsearch.enabled: true
\ No newline at end of file
diff --git a/logstash/Dockerfile b/logstash/Dockerfile
new file mode 100644
index 0000000..72c04db
--- /dev/null
+++ b/logstash/Dockerfile
@@ -0,0 +1,76 @@
+# This Dockerfile was generated from templates/Dockerfile.erb
+
+
+FROM redhat/ubi9-minimal:latest
+
+ENV ELASTIC_CONTAINER=true
+ENV PATH=/usr/share/logstash/bin:$PATH
+ENV LANG=C.UTF-8 LC_ALL=C.UTF-8
+
+WORKDIR /usr/share
+
+# Install packages
+RUN \
+  microdnf install -y procps findutils tar gzip && \
+  microdnf install -y openssl && \
+  microdnf install -y which shadow-utils && \
+  microdnf clean all
+
+# Provide a non-root user to run the process
+# Add Logstash itself and set permissions
+RUN groupadd --gid 1000 logstash && \
+    adduser --uid 1000 --gid 1000 \
+      --home "/usr/share/logstash" \
+      --no-create-home \
+      logstash && \
+    arch="$(rpm --query --queryformat='%{ARCH}' rpm)" && \
+    curl --fail --location --output logstash.tar.gz https://artifacts.elastic.co/downloads/logstash/logstash-9.3.0-linux-${arch}.tar.gz && \
+    tar -zxf logstash.tar.gz -C /usr/share && \
+    rm logstash.tar.gz && \
+    mv /usr/share/logstash-9.3.0 /usr/share/logstash && \
+    chown -R logstash:root /usr/share/logstash && \
+    chmod -R g=u /usr/share/logstash && \
+    mkdir /licenses && \
+    mv /usr/share/logstash/NOTICE.TXT /licenses/NOTICE.TXT && \
+    mv /usr/share/logstash/LICENSE.txt /licenses/LICENSE.txt && \
+    find /usr/share/logstash -type d -exec chmod g+s {} \; && \
+    ln -s /usr/share/logstash /opt/logstash
+
+
+COPY --chown=logstash:root env2yaml/classes /usr/share/logstash/env2yaml/classes/
+COPY --chown=logstash:root env2yaml/lib /usr/share/logstash/env2yaml/lib/
+COPY --chmod=0755 env2yaml/env2yaml /usr/local/bin/env2yaml
+COPY --chown=logstash:root config/pipelines.yml config/log4j2.properties config/log4j2.file.properties /usr/share/logstash/config/
+COPY --chown=logstash:root config/logstash-full.yml /usr/share/logstash/config/logstash.yml
+COPY
--chown=logstash:root pipeline/default.conf /usr/share/logstash/pipeline/logstash.conf +COPY --chmod=0755 bin/docker-entrypoint /usr/local/bin/ + + +WORKDIR /usr/share/logstash + +USER 1000 + +EXPOSE 9600 5044 + +LABEL org.label-schema.build-date=2026-01-22T01:49:14+00:00 \ + org.label-schema.license="Elastic License" \ + org.label-schema.name="logstash" \ + org.label-schema.schema-version="1.0" \ + org.label-schema.url="https://www.elastic.co/products/logstash" \ + org.label-schema.vcs-url="https://github.com/elastic/logstash" \ + org.label-schema.vendor="Elastic" \ + org.label-schema.version="9.3.0" \ + org.opencontainers.image.created=2026-01-22T01:49:14+00:00 \ + org.opencontainers.image.description="Logstash is a free and open server-side data processing pipeline that ingests data from a multitude of sources, transforms it, and then sends it to your favorite 'stash.'" \ + org.opencontainers.image.licenses="Elastic License" \ + org.opencontainers.image.title="logstash" \ + org.opencontainers.image.vendor="Elastic" \ + org.opencontainers.image.version="9.3.0" \ + description="Logstash is a free and open server-side data processing pipeline that ingests data from a multitude of sources, transforms it, and then sends it to your favorite 'stash.'" \ + license="Elastic License" \ + maintainer="info@elastic.co" \ + name="logstash" \ + summary="Logstash is a free and open server-side data processing pipeline that ingests data from a multitude of sources, transforms it, and then sends it to your favorite 'stash.'" \ + vendor="Elastic" + +ENTRYPOINT ["/usr/local/bin/docker-entrypoint"] diff --git a/logstash/bin/docker-entrypoint b/logstash/bin/docker-entrypoint new file mode 100755 index 0000000..e2fd33c --- /dev/null +++ b/logstash/bin/docker-entrypoint @@ -0,0 +1,31 @@ +#!/bin/bash -e + +# Map environment variables to entries in logstash.yml. +# Note that this will mutate logstash.yml in place if any such settings are found. +# This may be undesirable, especially if logstash.yml is bind-mounted from the +# host system. +env2yaml /usr/share/logstash/config/logstash.yml + +if [[ -n "$LOG_STYLE" ]]; then + case "$LOG_STYLE" in + console) + # This is the default. Nothing to do. + ;; + file) + # Overwrite the default config with the stack config. Do this as a + # copy, not a move, in case the container is restarted. + cp -f /usr/share/logstash/config/log4j2.file.properties /usr/share/logstash/config/log4j2.properties + ;; + *) + echo "ERROR: LOG_STYLE set to [$LOG_STYLE]. 
Expected [console] or [file]" >&2 + exit 1 ;; + esac +fi + +export LS_JAVA_OPTS="-Dls.cgroup.cpuacct.path.override=/ -Dls.cgroup.cpu.path.override=/ $LS_JAVA_OPTS" + +if [[ -z $1 ]] || [[ ${1:0:1} == '-' ]] ; then + exec logstash "$@" +else + exec "$@" +fi diff --git a/logstash/config/log4j2.file.properties b/logstash/config/log4j2.file.properties new file mode 100644 index 0000000..234b23d --- /dev/null +++ b/logstash/config/log4j2.file.properties @@ -0,0 +1,147 @@ +status = error +name = LogstashPropertiesConfig + +appender.console.type = Console +appender.console.name = plain_console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n + +appender.json_console.type = Console +appender.json_console.name = json_console +appender.json_console.layout.type = JSONLayout +appender.json_console.layout.compact = true +appender.json_console.layout.eventEol = true + +appender.rolling.type = RollingFile +appender.rolling.name = plain_rolling +appender.rolling.fileName = ${sys:ls.logs}/logstash-plain.log +appender.rolling.filePattern = ${sys:ls.logs}/logstash-plain-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n +appender.rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling.policies.size.size = 100MB +appender.rolling.strategy.type = DefaultRolloverStrategy +appender.rolling.strategy.max = 30 +appender.rolling.avoid_pipelined_filter.type = PipelineRoutingFilter + +appender.json_rolling.type = RollingFile +appender.json_rolling.name = json_rolling +appender.json_rolling.fileName = ${sys:ls.logs}/logstash-json.log +appender.json_rolling.filePattern = ${sys:ls.logs}/logstash-json-%d{yyyy-MM-dd}-%i.log.gz +appender.json_rolling.policies.type = Policies +appender.json_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.json_rolling.policies.time.interval = 1 +appender.json_rolling.policies.time.modulate = true +appender.json_rolling.layout.type = JSONLayout +appender.json_rolling.layout.compact = true +appender.json_rolling.layout.eventEol = true +appender.json_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.json_rolling.policies.size.size = 100MB +appender.json_rolling.strategy.type = DefaultRolloverStrategy +appender.json_rolling.strategy.max = 30 +appender.json_rolling.avoid_pipelined_filter.type = PipelineRoutingFilter + +appender.routing.type = PipelineRouting +appender.routing.name = pipeline_routing_appender +appender.routing.pipeline.type = RollingFile +appender.routing.pipeline.name = appender-${ctx:pipeline.id} +appender.routing.pipeline.fileName = ${sys:ls.logs}/pipeline_${ctx:pipeline.id}.log +appender.routing.pipeline.filePattern = ${sys:ls.logs}/pipeline_${ctx:pipeline.id}.%i.log.gz +appender.routing.pipeline.layout.type = PatternLayout +appender.routing.pipeline.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n +appender.routing.pipeline.policy.type = SizeBasedTriggeringPolicy +appender.routing.pipeline.policy.size = 100MB +appender.routing.pipeline.strategy.type = DefaultRolloverStrategy +appender.routing.pipeline.strategy.max = 30 + +rootLogger.level = ${sys:ls.log.level} 
+rootLogger.level = ${sys:ls.log.level}
+rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console
+rootLogger.appenderRef.rolling.ref = ${sys:ls.log.format}_rolling
+rootLogger.appenderRef.routing.ref = pipeline_routing_appender
+
+# Slowlog
+
+appender.console_slowlog.type = Console
+appender.console_slowlog.name = plain_console_slowlog
+appender.console_slowlog.layout.type = PatternLayout
+appender.console_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n
+
+appender.json_console_slowlog.type = Console
+appender.json_console_slowlog.name = json_console_slowlog
+appender.json_console_slowlog.layout.type = JSONLayout
+appender.json_console_slowlog.layout.compact = true
+appender.json_console_slowlog.layout.eventEol = true
+
+appender.rolling_slowlog.type = RollingFile
+appender.rolling_slowlog.name = plain_rolling_slowlog
+appender.rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-plain.log
+appender.rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-plain-%d{yyyy-MM-dd}-%i.log.gz
+appender.rolling_slowlog.policies.type = Policies
+appender.rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling_slowlog.policies.time.interval = 1
+appender.rolling_slowlog.policies.time.modulate = true
+appender.rolling_slowlog.layout.type = PatternLayout
+appender.rolling_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n
+appender.rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling_slowlog.policies.size.size = 100MB
+appender.rolling_slowlog.strategy.type = DefaultRolloverStrategy
+appender.rolling_slowlog.strategy.max = 30
+
+appender.json_rolling_slowlog.type = RollingFile
+appender.json_rolling_slowlog.name = json_rolling_slowlog
+appender.json_rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-json.log
+appender.json_rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-json-%d{yyyy-MM-dd}-%i.log.gz
+appender.json_rolling_slowlog.policies.type = Policies
+appender.json_rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy
+appender.json_rolling_slowlog.policies.time.interval = 1
+appender.json_rolling_slowlog.policies.time.modulate = true
+appender.json_rolling_slowlog.layout.type = JSONLayout
+appender.json_rolling_slowlog.layout.compact = true
+appender.json_rolling_slowlog.layout.eventEol = true
+appender.json_rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy
+appender.json_rolling_slowlog.policies.size.size = 100MB
+appender.json_rolling_slowlog.strategy.type = DefaultRolloverStrategy
+appender.json_rolling_slowlog.strategy.max = 30
+
+logger.slowlog.name = slowlog
+logger.slowlog.level = trace
+logger.slowlog.appenderRef.console_slowlog.ref = ${sys:ls.log.format}_console_slowlog
+logger.slowlog.appenderRef.rolling_slowlog.ref = ${sys:ls.log.format}_rolling_slowlog
+logger.slowlog.additivity = false
+
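+# Note: the slowlog logger only produces output once thresholds are configured
+# in logstash.yml, e.g. (illustrative): slowlog.threshold.warn: 2s
+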
+logger.licensereader.name = logstash.licensechecker.licensereader
+logger.licensereader.level = error
+
+# Silence http-client by default
+logger.apache_http_client.name = org.apache.http
+logger.apache_http_client.level = fatal
+
+# Deprecation log
+appender.deprecation_rolling.type = RollingFile
+appender.deprecation_rolling.name = deprecation_plain_rolling
+appender.deprecation_rolling.fileName = ${sys:ls.logs}/logstash-deprecation.log
+appender.deprecation_rolling.filePattern = ${sys:ls.logs}/logstash-deprecation-%d{yyyy-MM-dd}-%i.log.gz
+appender.deprecation_rolling.policies.type = Policies
+appender.deprecation_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.time.interval = 1
+appender.deprecation_rolling.policies.time.modulate = true
+appender.deprecation_rolling.layout.type = PatternLayout
+appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n
+appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.size.size = 100MB
+appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling.strategy.max = 30
+
+logger.deprecation.name = org.logstash.deprecation, deprecation
+logger.deprecation.level = WARN
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_plain_rolling
+logger.deprecation.additivity = false
+
+logger.deprecation_root.name = deprecation
+logger.deprecation_root.level = WARN
+logger.deprecation_root.appenderRef.deprecation_rolling.ref = deprecation_plain_rolling
+logger.deprecation_root.additivity = false
diff --git a/logstash/config/log4j2.properties b/logstash/config/log4j2.properties
new file mode 100644
index 0000000..663a015
--- /dev/null
+++ b/logstash/config/log4j2.properties
@@ -0,0 +1,16 @@
+status = error
+name = LogstashPropertiesConfig
+
+appender.console.type = Console
+appender.console.name = plain_console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n
+
+appender.json_console.type = Console
+appender.json_console.name = json_console
+appender.json_console.layout.type = JSONLayout
+appender.json_console.layout.compact = true
+appender.json_console.layout.eventEol = true
+
+rootLogger.level = ${sys:ls.log.level}
+rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console
diff --git a/logstash/config/logstash-full.yml b/logstash/config/logstash-full.yml
new file mode 100644
index 0000000..949b467
--- /dev/null
+++ b/logstash/config/logstash-full.yml
@@ -0,0 +1,2 @@
+api.http.host: "0.0.0.0"
+xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]
diff --git a/logstash/config/logstash-oss.yml b/logstash/config/logstash-oss.yml
new file mode 100644
index 0000000..979a5ca
--- /dev/null
+++ b/logstash/config/logstash-oss.yml
@@ -0,0 +1 @@
+api.http.host: "0.0.0.0"
diff --git a/logstash/config/pipelines.yml b/logstash/config/pipelines.yml
new file mode 100644
index 0000000..aed22ce
--- /dev/null
+++ b/logstash/config/pipelines.yml
@@ -0,0 +1,6 @@
+# This file is where you define your pipelines. You can define multiple.
+# For more information on multiple pipelines, see the documentation:
+# https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html
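+#
+# Additional pipelines can be declared alongside `main`, for example
+# (illustrative; the id and path below are hypothetical):
+#
+# - pipeline.id: beats
+#   path.config: "/usr/share/logstash/pipeline-beats"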
+
+- pipeline.id: main
+  path.config: "/usr/share/logstash/pipeline"
diff --git a/logstash/env2yaml/classes/org/logstash/env2yaml/Env2Yaml$SettingValidator.class b/logstash/env2yaml/classes/org/logstash/env2yaml/Env2Yaml$SettingValidator.class
new file mode 100644
index 0000000..32f0d45
Binary files /dev/null and b/logstash/env2yaml/classes/org/logstash/env2yaml/Env2Yaml$SettingValidator.class differ
diff --git a/logstash/env2yaml/classes/org/logstash/env2yaml/Env2Yaml.class b/logstash/env2yaml/classes/org/logstash/env2yaml/Env2Yaml.class
new file mode 100644
index 0000000..ae7e476
Binary files /dev/null and b/logstash/env2yaml/classes/org/logstash/env2yaml/Env2Yaml.class differ
diff --git a/logstash/env2yaml/env2yaml b/logstash/env2yaml/env2yaml
new file mode 100644
index 0000000..c709c30
--- /dev/null
+++ b/logstash/env2yaml/env2yaml
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# Execute the env2yaml Java program with a small fixed heap, making sure the
+# snakeyaml-engine jar is on the classpath.
+exec /usr/share/logstash/jdk/bin/java \
+  -XX:+UseSerialGC \
+  -Xms32m \
+  -Xmx32m \
+  -cp "/usr/share/logstash/env2yaml/classes:/usr/share/logstash/env2yaml/lib/*" \
+  org.logstash.env2yaml.Env2Yaml "$@"
diff --git a/logstash/env2yaml/lib/snakeyaml-engine-2.9.jar b/logstash/env2yaml/lib/snakeyaml-engine-2.9.jar
new file mode 100644
index 0000000..ba1ec26
Binary files /dev/null and b/logstash/env2yaml/lib/snakeyaml-engine-2.9.jar differ
diff --git a/logstash/pipeline/default.conf b/logstash/pipeline/default.conf
new file mode 100644
index 0000000..11ce14c
--- /dev/null
+++ b/logstash/pipeline/default.conf
@@ -0,0 +1,12 @@
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
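+
+# To ship events to Elasticsearch instead of printing them, the stdout output
+# above could be swapped for something like the following (illustrative; the
+# host depends on your deployment):
+#
+# output {
+#   elasticsearch {
+#     hosts => ["http://elasticsearch:9200"]
+#   }
+# }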