spring-boot - 如何在发现选项卡中获取日志中提供的字段的字段

标签 spring-boot elasticsearch kibana logback

首先,我不仅对Kibana的DevOps方面很陌生,对索引映射(index mapping)也很陌生。
有人可以帮我把日志级别(level)和价格(price)等值提取为索引中的独立字段吗?
提供给kibana的示例日志消息:

{"@timestamp":"2020-09-28T19:09:05.438Z","message":"Received Message: {\n \"timestamp\": \"2020-09-28T20:00:00\",\n \"type\": \"usd\",\n \"price\": 10877.07,\n \"volume\":39.065\n}","logger_name":"project.me.consumers.PriceConsumer","thread_name":"DefaultMessageListenerContainer-2","level":"INFO"}\n


我目前的字段:
My Current Fields
它在日志下显示为json的内容
{
"_index": "logstash-2020.09.28",
"_type": "_doc",
"_id": "------------",
"_version": 1,
"_score": null,
"_source": {
"log": "{\"@timestamp\":\"2020-09-28T19:09:05.438Z\",\"message\":\"Received Message: {\\n    \\\"timestamp\\\": \\\"2020-09-28T20:00:00\\\",\\n    \\\"type\\\": \\\"usd\\\",\\n    \\\"price\\\": 10877.07,\\n \\\"volume\\\":39.065\\n}\",\"logger_name\":\"project.me.consumers.PriceConsumer\",\"thread_name\":\"DefaultMessageListenerContainer-2\",\"level\":\"INFO\"}\n",
"stream": "stdout",
"docker": {
  "container_id": "------------------------------"
},
"kubernetes": {
  "container_name": "db-gateway",
  "namespace_name": "------------------------------",
  "pod_name": "------------------------------",
  "container_image": "------------------------------:1.0.0-b40",
  "container_image_id": "docker-pullable://------------------------------",
  "pod_id": "------------------------------",
  "labels": {
    "app": "db-gateway",
    "pod-template-hash": "------------------------------"
  },
  "host": "------------------------------",
  "master_url": "https://------------------------------:443/api",
  "namespace_id": "------------------------------"
},
"@timestamp": "2020-09-28T19:09:05.438928347+00:00",
"tag": "------------------------------"
},
"fields": {
"@timestamp": [
  "2020-09-28T19:09:05.438Z"
]
},
"highlight": {
"kubernetes.container_name": [
  "@kibana-highlighted-field@db@/kibana-highlighted-field@-gateway"
]
},
"sort": [
1601320145438
]
}
使用fluentD和logback,一切都在我的kubernetes集群上。
我在日志字段上尝试过的配置:
<filter log.**>
  @type parser
  format json # apache2, nginx, etc...
  key_name log
  reserve_data true
</filter>

<filter log.**>
  @type concat
  key log
  stream_identity_key container_id
  multiline_start_regexp /^-e:2:in `\/'/
  multiline_end_regexp /^-e:4:in/
</filter>

<filter _source.log.**>
  @type parser
  format json # apache2, nginx, etc...
  key_name log
  reserve_data true
</filter>
我的 fluentd 配置:
<match fluent.**>
  @type null
</match>
<source>
  @type tail
  @id in_tail_container_logs
  path /var/log/containers/*.log
  pos_file /var/log/fluentd-containers.log.pos
  tag kubernetes.*
  read_from_head true
  <parse>
    @type "#{ENV['FLUENT_CONTAINER_TAIL_PARSER_TYPE'] || 'json'}"
    time_format %Y-%m-%dT%H:%M:%S.%NZ
  </parse>
</source>
<source>
  @type tail
  @id in_tail_minion
  path /var/log/salt/minion
  pos_file /var/log/fluentd-salt.pos
  tag salt
  <parse>
    @type regexp
    expression /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
    time_format %Y-%m-%d %H:%M:%S
  </parse>
</source>
<source>
  @type tail
  @id in_tail_startupscript
  path /var/log/startupscript.log
  pos_file /var/log/fluentd-startupscript.log.pos
  tag startupscript
  <parse>
    @type syslog
  </parse>
</source>
<source>
  @type tail
  @id in_tail_docker
  path /var/log/docker.log
  pos_file /var/log/fluentd-docker.log.pos
  tag docker
  <parse>
    @type regexp
    expression /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=(?<status_code>\d+))?/
  </parse>
</source>
<source>
  @type tail
  @id in_tail_etcd
  path /var/log/etcd.log
  pos_file /var/log/fluentd-etcd.log.pos
  tag etcd
  <parse>
    @type none
  </parse>
</source>
<source>
  @type tail
  @id in_tail_kubelet
  multiline_flush_interval 5s
  path /var/log/kubelet.log
  pos_file /var/log/fluentd-kubelet.log.pos
  tag kubelet
  <parse>
    @type kubernetes
  </parse>
</source>
<source>
  @type tail
  @id in_tail_kube_proxy
  multiline_flush_interval 5s
  path /var/log/kube-proxy.log
  pos_file /var/log/fluentd-kube-proxy.log.pos
  tag kube-proxy
  <parse>
    @type kubernetes
  </parse>
</source>
<source>
  @type tail
  @id in_tail_kube_apiserver
  multiline_flush_interval 5s
  path /var/log/kube-apiserver.log
  pos_file /var/log/fluentd-kube-apiserver.log.pos
  tag kube-apiserver
  <parse>
    @type kubernetes
  </parse>
</source>
<source>
  @type tail
  @id in_tail_kube_controller_manager
  multiline_flush_interval 5s
  path /var/log/kube-controller-manager.log
  pos_file /var/log/fluentd-kube-controller-manager.log.pos
  tag kube-controller-manager
  <parse>
    @type kubernetes
  </parse>
</source>
<source>
  @type tail
  @id in_tail_kube_scheduler
  multiline_flush_interval 5s
  path /var/log/kube-scheduler.log
  pos_file /var/log/fluentd-kube-scheduler.log.pos
  tag kube-scheduler
  <parse>
    @type kubernetes
  </parse>
</source>
<source>
  @type tail
  @id in_tail_rescheduler
  multiline_flush_interval 5s
  path /var/log/rescheduler.log
  pos_file /var/log/fluentd-rescheduler.log.pos
  tag rescheduler
  <parse>
    @type kubernetes
  </parse>
</source>
<source>
  @type tail
  @id in_tail_glbc
  multiline_flush_interval 5s
  path /var/log/glbc.log
  pos_file /var/log/fluentd-glbc.log.pos
  tag glbc
  <parse>
    @type kubernetes
  </parse>
</source>
<source>
  @type tail
  @id in_tail_cluster_autoscaler
  multiline_flush_interval 5s
  path /var/log/cluster-autoscaler.log
  pos_file /var/log/fluentd-cluster-autoscaler.log.pos
  tag cluster-autoscaler
  <parse>
    @type kubernetes
  </parse>
</source>
# Example:
# 2017-02-09T00:15:57.992775796Z AUDIT: id="90c73c7c-97d6-4b65-9461-f94606ff825f" ip="104.132.1.72" method="GET" user="kubecfg" as="<self>" asgroups="<lookup>" namespace="default" uri="/api/v1/namespaces/default/pods"
# 2017-02-09T00:15:57.993528822Z AUDIT: id="90c73c7c-97d6-4b65-9461-f94606ff825f" response="200"
<source>
  @type tail
  @id in_tail_kube_apiserver_audit
  multiline_flush_interval 5s
  path /var/log/kubernetes/kube-apiserver-audit.log
  pos_file /var/log/kube-apiserver-audit.log.pos
  tag kube-apiserver-audit
  <parse>
    @type multiline
    format_firstline /^\S+\s+AUDIT:/
    # Fields must be explicitly captured by name to be parsed into the record.
    # Fields may not always be present, and order may change, so this just looks
    # for a list of key="\"quoted\" value" pairs separated by spaces.
    # Unknown fields are ignored.
    # Note: We can't separate query/response lines as format1/format2 because
    #       they don't always come one after the other for a given query.
format1 /^(?<time>\S+) AUDIT:(?: (?:id="(?<id>(?:[^"\\]|\\.)*)"|ip="(?<ip>(?:[^"\\]|\\.)*)"|method="(?<method>(?:[^"\\]|\\.)*)"|user="(?<user>(?:[^"\\]|\\.)*)"|groups="(?<groups>(?:[^"\\]|\\.)*)"|as="(?<as>(?:[^"\\]|\\.)*)"|asgroups="(?<asgroups>(?:[^"\\]|\\.)*)"|namespace="(?<namespace>(?:[^"\\]|\\.)*)"|uri="(?<uri>(?:[^"\\]|\\.)*)"|response="(?<response>(?:[^"\\]|\\.)*)"|\w+="(?:[^"\\]|\\.)*"))*/
    time_format %Y-%m-%dT%T.%L%Z
  </parse>
</source>
<filter kubernetes.**>
  @type kubernetes_metadata
  @id filter_kube_metadata
</filter>

最佳答案

问题在于为该索引类型设置的 mapping 中,log 字段被视为文本(text,甚至可能是 keyword)。Elasticsearch 不会尝试把以文本形式提交的字段解析成 JSON 对象结构。因此,有效载荷需要在进入 Elasticsearch 之前,在管道的更早阶段完成转换。
您可以为此使用fluentd json解析插件。请参阅the docs about it here。还有some special article on docker logging and parsing the log field

关于spring-boot - 如何在发现选项卡中获取日志中提供的字段的字段,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/64109706/

相关文章:

elasticsearch - Grok模式在Kibana Grok调试器上可以正常工作,但不能与Logstash一起使用

java - 如何使用Spring Boot为传记后端制作实体类和 Controller ?

elasticsearch - Elasticsearch:带annotated_text字段的全文本搜索

docker - 如何更改 Kibana 的 Docker 镜像的端口号

docker - Docker上的Elastic 6:无法设置密码

elasticsearch - Painless Scripting Kibana 6.4.2 不使用匹配器进行匹配,但使用条件表达式进行匹配

spring-boot - 对于@SpringBootTest @TestConfiguration 类的@Import 什么都不做,而@ContextConfiguration 按预期覆盖

java - 不使用方面跟踪 Spring 方法调用

java - 为什么 Spring Boot 中的 Keycloak 允许(预期未经授权的)用户?

java - 在 Java 中更新集合项