diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4073ec5
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+input_ocibuild/
+output_ocibuild_ruby_build/
+output_ocibuild_dockerize_api/
+.DS_Store
+fluent-plugin-oci-logging-analytics*
+lib/.DS_Store
+lib/fluent/.DS_Store
diff --git a/CHANGELOG.md b/CHANGELOG.md
old mode 100644
new mode 100755
index b6ecc45..012ce70
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,40 @@
 # Change Log
+## 2025-08-13
+### Changed
+- As per the recent service name change from `Logging Analytics` to `Log Analytics`, updated the relevant documentation and descriptions to reflect the name change.
+  - This is a non-breaking change that maintains backward compatibility.
+  - Updated service references in documentation, descriptions, and comments.
+
+## 2.0.8 - 2024-11-18
+### Added
+- Support for new OCI Regions which are not yet supported through the OCI Ruby SDK by default.
+
+## 2.0.7 - 2024-10-10
+### Added
+- Support for timezone override for logs where the timezone identifier is missing.
+- Support for Workload Identity based authorization.
+
+## 2.0.6 - 2024-02-05
+### Added
+- Support for endpoint override for instance principal auth mode.
+
+## 2.0.5 - 2023-04-12
+### Added
+- Prometheus metrics support for multi worker configuration.
+- 'FAQ' section to help customers triage issues when they are encountered.
+### Changed
+- Supporting both 'memory' and 'file' buffer types. The recommended and default buffer type is still 'file'.
+- Using Yajl over the default JSON library for handling multi byte character logs gracefully.
+- Plugin log defaults to STDOUT.
+  - The 'oci-logging-analytics.log' file will no longer be created when 'plugin_log_location' is not provided explicitly in the match block section.
+### Bug fix
+- 'tag' field is no longer mandatory in the filter block.
+
+## 2.0.4 - 2022-06-20
+### Changed
+- Updated prometheus-client dependency to v4.0.0.
+
 ## 2.0.3 - 2022-04-20
 ### Added
 - Added Prometheus-client Api based internal metrics functionality.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..637430b
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,55 @@
+# Contributing to this repository
+
+We welcome your contributions! There are multiple ways to contribute.
+
+## Opening issues
+
+For bugs or enhancement requests, please file a GitHub issue unless it's
+security related. When filing a bug, remember that the better written the bug is,
+the more likely it is to be fixed. If you think you've found a security
+vulnerability, do not raise a GitHub issue; instead, follow the instructions in our
+[security policy](./SECURITY.md).
+
+## Contributing code
+
+We welcome your code contributions. Before submitting code via a pull request,
+you will need to have signed the [Oracle Contributor Agreement][OCA] (OCA) and
+your commits need to include the following line using the name and e-mail
+address you used to sign the OCA:
+
+```text
+Signed-off-by: Your Name
+```
+
+This can be automatically added to pull requests by committing with `--sign-off`
+or `-s`, e.g.
+
+```text
+git commit --signoff
+```
+
+Only pull requests from committers that can be verified as having signed the OCA
+can be accepted.
+
+## Pull request process
+
+1. Ensure there is an issue created to track and discuss the fix or enhancement
+   you intend to submit.
+1. Fork this repository.
+1. Create a branch in your fork to implement the changes. We recommend using
+   the issue number as part of your branch name, e.g. `1234-fixes`.
+1. Ensure that any documentation is updated to reflect the changes introduced
+   by your change.
+1. Ensure that any samples are updated if the base image has been changed.
+1. Submit the pull request. *Do not leave the pull request blank*. Explain exactly
+   what your changes are meant to do and provide simple steps on how to validate
+   your changes. Ensure that you reference the issue you created as well.
+1. We will assign the pull request to 2-3 people for review before it is merged.
+
+## Code of conduct
+
+Follow the [Golden Rule](https://en.wikipedia.org/wiki/Golden_Rule). If you'd
+like more specific guidelines, see the [Contributor Covenant Code of Conduct][COC].
+
+[OCA]: https://oca.opensource.oracle.com
+[COC]: https://www.contributor-covenant.org/version/1/4/code-of-conduct/
diff --git a/FAQ.md b/FAQ.md
new file mode 100644
index 0000000..90582a6
--- /dev/null
+++ b/FAQ.md
@@ -0,0 +1,227 @@
+# Frequently Asked Questions
+
+- [Why am I getting this error - "Error occurred while initializing LogAnalytics Client" ?](#why-am-i-getting-this-error---error-occurred-while-initializing-loganalytics-client-)
+- [Why am I getting this error - "Error occurred while parsing oci_la_log_set" ?](#why-am-i-getting-this-error---error-occurred-while-parsing-oci_la_log_set-)
+- [Why am I getting this error - "Error while uploading the payload" or "execution expired" or "status : 0" ?](#why-am-i-getting-this-error---error-while-uploading-the-payload-or--execution-expired-or--status--0-)
+- [How to find fluentd/output plugin logs ?](#how-to-find-fluentdoutput-plugin-logs-)
+- [Fluentd successfully uploaded data, but still it is not visible in LogExplorer. How to triage ?](#fluentd-successfully-uploaded-data-but-still-it-is-not-visible-in-logexplorer-how-to-triage-)
+- [How to extract specific K8s metadata field that I am interested in to a Log Analytics field ?](#how-to-extract-specific-k8s-metadata-field-that-i-am-interested-in-to-a-log-analytics-field-)
+- [How to make Fluentd process the log data from the beginning of a file when using tail input plugin ?](#how-to-make-fluentd-process-the-log-data-from-the-beginning-of-a-file-when-using-tail-input-plugin-)
+- [How to make Fluentd process the last line from the file when using tail input plugin ?](#how-to-make-fluentd-process-the-last-line-from-the-file-when-using-tail-input-plugin-)
+- [In multi worker setup, prometheus is not displaying all the worker's metrics. How to fix it ?](#in-multi-worker-setup-prometheus-is-not-displaying-all-the-workers-metrics-how-to-fix-it-)
+- [Why am I getting this error - "ConcatFilter::TimeoutError" ?](#why-am-i-getting-this-error---concatfiltertimeouterror-)
+- [Fluentd is failing to parse the log data. What can be the reason ?](#fluentd-is-failing-to-parse-the-log-data-what-can-be-the-reason-)
+
+
+## Why am I getting this error - "Error occurred while initializing LogAnalytics Client" ?
+- This mostly occurs due to an incorrect authorization type or configuration.
+- This plugin uses either config file based auth or Instance Principal based auth, with Instance Principal based auth being the default.
+- For config file based auth, ensure valid "config_file_location" and "profile_name" details are provided in the match block, as shown below.
+  ```
+
+    @type oci-logging-analytics
+    config_file_location  #REPLACE_ME
+    profile_name DEFAULT
+
+  ```
+
+## Why am I getting this error - "Error occurred while parsing oci_la_log_set" ?
+- The provided regex does not match the incoming key; the regex might need a correction.
+- This might also be expected behaviour with the configured regex, where not all keys need to be matched. In such scenarios, the plugin falls back to the logSet value set using the oci_la_log_set parameter.
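+- For example, a static logSet can be supplied as the fallback through the record_transformer filter. A minimal sketch (the match pattern and the logSet value are placeholders to replace):
+  ```
+  <filter **>
+    @type record_transformer
+    <record>
+      oci_la_log_set  #REPLACE_ME
+    </record>
+  </filter>
+  ```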
+- You may also apply logSet using the alternative approach documented [here](https://docs.oracle.com/en-us/iaas/log-analytics/doc/manage-log-partitioning.html#LOGAN-GUID-2EC8EEDE-9BBD-4872-8083-A44F77611524)
+
+## Why am I getting this error - "Error while uploading the payload" or "execution expired" or "status : 0" ?
+- Sample logs:
+  ```
+  I, [2023-01-18T10:39:49.483789 #11] INFO -- : Received new chunk, started processing ...
+  I, [2023-01-18T10:39:49.495771 #11] INFO -- : Generating payload with 31 records for oci_la_log_group_id: ocid1.loganalyticsloggroup.oc1.iad.amaaaaaa....
+  E, [2023-01-18T10:39:59.502747 #11] ERROR -- : oci upload exception : Error while uploading the payload. { 'message': 'execution expired', 'status': 0, 'opc-request-id': 'C37D1DE643E24D778FC5FA22835FE024', 'response-body': '' }
+  ```
+
+- This occurs due to a connectivity issue with the OCI endpoint. If a proxy is configured, ensure the proxy details provided are valid, and confirm that you have network connectivity to reach the OCI endpoint from the host where Fluentd is running.
+
+## How to find fluentd/output plugin logs ?
+- By default (starting from version 2.0.5), the OCI Log Analytics output plugin logs go to STDOUT and are available as part of the Fluentd logs themselves, unless a location is explicitly configured using the following plugin parameter.
+  ```
+  plugin_log_location "#{ENV['FLUENT_OCI_LOG_LOCATION'] || '/var/log'}"
+  # Log file named 'oci-logging-analytics.log' will be generated in the above location
+  ```
+- For a td-agent (rpm/deb) based setup, the Fluentd logs are located at /var/log/td-agent/td-agent.log.
+
+
+## Fluentd successfully uploaded data, but still it is not visible in LogExplorer. How to triage ?
+- Check if the selected time range in the log explorer is in line with the timestamps of the actual log messages.
+- Check again after some time - as the data is processed asynchronously, in some cases it may take a while before the data appears in the log explorer.
+  - The processing of the data may fail in the subsequent validations that happen at OCI.
+  - Check for any [processing errors](https://docs.oracle.com/en-us/iaas/log-analytics/doc/troubleshoot-ingestion-pipeline.html).
+  - If the issue still persists, raise an SR providing the following information: tenancy_ocid, region, and a sample opc-request-id/opc-object-id.
+  - You may get the opc-request-id/opc-object-id from the Fluentd output plugin log. A sample log for a successful upload looks like the following:
+
+  ```
+  I, [2023-01-18T10:39:49.483789 #11] INFO -- : Received new chunk, started processing ...
+  I, [2023-01-18T10:39:49.495771 #11] INFO -- : Generating payload with 30 records for oci_la_log_group_id: ocid1.loganalyticsloggroup.oc1.iad.amaaaaaa....
+  I, [2023-01-18T10:39:59.502747 #11] INFO -- : The payload has been successfully uploaded to logAnalytics -
+                  oci_la_log_group_id: ocid1.loganalyticsloggroup.oc1.iad.amaaaaaa....,
+                  ConsumedRecords: #30,
+                  opc-request-id: 'C37D1DE643E24D778FC5FA22835FE024',
+                  opc-object-id: 'C37D1DE643E24D778FC5FA22835FE024-D37D1DE643E24D778FC5FA22835FE024'
+  ```
+## How to extract specific K8s metadata field that I am interested in to a Log Analytics field ?
+- This scenario typically arises when collecting logs from Kubernetes clusters and using kubernetes_metadata_filter to enrich the data at the Fluentd level.
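+- For reference, a minimal enrichment filter looks like the following (a sketch assuming the fluent-plugin-kubernetes_metadata_filter gem is installed; the match pattern is an example):
+  ```
+  <filter oci.**>
+    @type kubernetes_metadata
+  </filter>
+  ```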
+- By default, the Fluentd output plugin fetches the fields "container_name", "namespace_name", "pod_name", "container_image", and "host" from the Kubernetes metadata when available, and maps them to the fields "Container", "Namespace", "Pod", "Container Image Name", and "Node" respectively.
+- In case a new field needs to be extracted, or to modify the default mappings, add "kubernetes_metadata_keys_mapping" in the match block as shown below.
+  - When you are adding a new field mapping, ensure the corresponding Log Analytics field is already defined.
+
+  ```
+
+    @type oci-logging-analytics
+    namespace  #REPLACE_ME
+    config_file_location ~/.oci/config  #REPLACE_ME
+    profile_name DEFAULT
+    kubernetes_metadata_keys_mapping {"container_name":"Container","namespace_name":"Namespace","pod_name":"Pod","container_image":"Container Image Name","host":"Node"}
+
+      @type file
+      path /var/log/fluent_oci_outplugin/buffer/  #REPLACE_ME
+      disable_chunk_backup true
+
+
+  ```
+
+## How to make Fluentd process the log data from the beginning of a file when using tail input plugin ?
+- The default behaviour of the tail plugin is to read from the latest entry (tail). This behaviour can be altered by modifying the "read_from_head" parameter.
+- Below is an example tail plugin configuration to read from the beginning of a file named foo.log.
+
+  ```
+
+    @type tail
+
+      @type none
+
+    path foo.log
+    pos_file foo.pos
+    tag oci.foo
+    read_from_head true
+
+  ```
+
+## How to make Fluentd process the last line from the file when using tail input plugin ?
+- In case of multi-line events, consumption of the last line might be delayed until the next log message is written to the log file, since Fluentd only parses the last line once a line break is appended at the end of the line.
+- To fix this, add or increase the multiline_flush_interval property in the source block.
+
+  ```
+
+    @type tail
+    multiline_flush_interval 5s
+
+      @type multiline
+      format_firstline /\d{4}-[01]\d-[0-3]\d\s[0-2]\d((:[0-5]\d)?){2}\s+(\w+)\s+\[([\w-]+)?\]\s([\w._$]+)\s+([-\w]+)\s+(.*)/
+      format1 /^(?<message>.*)/
+
+    path foo.log
+    pos_file foo.pos
+    tag oci.foo
+    read_from_head true
+
+  ```
+
+## In multi worker setup, prometheus is not displaying all the worker's metrics. How to fix it ?
+- In case of a multi worker setup, each worker needs its own port binding. To ensure the prometheus engine scrapes metrics from the multiple ports, provide "aggregated_metrics_path /aggregated_metrics" in the prometheus source config as shown below.
+- Multi worker config example
+
+  ```
+
+    @type prometheus
+    bind 0.0.0.0
+    port 24231
+    aggregated_metrics_path /aggregated_metrics
+
+  ```
+
+- Single worker config example
+
+  ```
+
+    @type prometheus
+    bind 0.0.0.0
+    port 24231
+    metrics_path /metrics
+
+  ```
+
+## Why am I getting this error - "ConcatFilter::TimeoutError" ?
+- This error occurs when using the Concat plugin to handle multiline log messages.
+- When the incoming log flow is very slow, the concat plugin throws this error to avoid waiting indefinitely for the next multiline start expression match.
+- This issue can be avoided by increasing the flush_interval of the concat filter to an appropriate value.
+- We recommend using "timeout_label" to redirect the timed-out log messages and handle them appropriately to avoid data loss.
+- When using "timeout_label", you may ignore this error.
+
+  ```
+  # Concat filter to handle multi-line log records.
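+  # Explanatory notes: flush_interval is the number of seconds concat waits for the next matching line
+  # before raising the timeout; timeout_label is the label that timed-out records are routed to,
+  # so they can be processed further instead of being dropped.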
+
+    @type concat
+    key message
+    flush_interval 15
+    timeout_label @NORMAL
+    multiline_start_regexp /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z/
+    separator ""
+
+
+  ```
+
+
+## Fluentd is failing to parse the log data. What can be the reason ?
+- In the source block, check whether the regex/format_firstline expression matches your log data.
+- Sample logs:
+  ```
+  2021-02-06 01:44:03 +0000 [warn]: #0 dump an error event:
+  error_class=Fluent::Plugin::Parser::ParserError error="pattern not match with data
+  ```
+- Multiline
+
+  ```
+
+    @type tail
+    multiline_flush_interval 5s
+
+      @type multiline
+      format_firstline /\d{4}-[01]\d-[0-3]\d\s[0-2]\d((:[0-5]\d)?){2}\s+(\w+)\s+\[([\w-]+)?\]\s([\w._$]+)\s+([-\w]+)\s+(.*)/
+      format1 /^(?<message>.*)/
+
+    path foo.log
+    pos_file foo.pos
+    tag oci.foo
+    read_from_head true
+
+  ```
+
+- Regexp
+
+  ```
+
+    @type tail
+    multiline_flush_interval 5s
+    # regexp
+
+      @type regexp
+      expression ^(?[^ ]*) (?[^ ]*) (?\d*)$
+
+    path foo.log
+    pos_file foo.pos
+    tag oci.foo
+    read_from_head true
+
+  ```
\ No newline at end of file
diff --git a/Gemfile b/Gemfile
index 784e23a..05752e7 100755
--- a/Gemfile
+++ b/Gemfile
@@ -1,7 +1,6 @@
-## Copyright (c) 2021, 2022 Oracle and/or its affiliates.
+## Copyright (c) 2021, 2024 Oracle and/or its affiliates.
 ## The Universal Permissive License (UPL), Version 1.0 as shown at https://oss.oracle.com/licenses/upl/
 source "https://rubygems.org"
 gemspec
-
diff --git a/README.md b/README.md
old mode 100644
new mode 100755
index 17fd947..3fe26ec
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
-# OCI Logging Analytics Fluentd Output Plugin
+# OCI Log Analytics Fluentd Output Plugin
 ## Overview
-OCI Logging Analytics Fluentd output plugin collects event logs, buffer into local file system, periodically creates payload and uploads it to OCI Logging Analytics.
+The OCI Log Analytics Fluentd output plugin collects event logs, buffers them in the local file system, periodically creates a payload, and uploads it to OCI Log Analytics.
 ## Installation Instructions
@@ -21,6 +21,7 @@ Refer [Prerequisites](https://docs.oracle.com/en/learn/oci_logging_analytics_flu
 rubyzip
 oci
 prometheus-client
+yajl-ruby >= 2.0.0
@@ -29,6 +30,7 @@ Refer [Prerequisites](https://docs.oracle.com/en/learn/oci_logging_analytics_flu
 ~> 2.3.2
 ~> 2.16
 ~> 2.1.0
+~>1.4.3
@@ -59,10 +61,25 @@ Or install it manually as:
 ### Buffer Configuration
 - [Buffer configuration parameters](https://docs.oracle.com/en/learn/oci_logging_analytics_fluentd/#buffer-configuration-parameters)
+  - Note: Buffer type 'file' is recommended but no longer mandatory. Customers can configure any of the other available options.
+
+#### Advantages of 'file' based Buffer plugin
+  - In case of a fast input plugin and a slow output plugin, the buffer keeps growing; a file based buffer with its default 50GB limit (configurable to a higher value) can absorb the output plugin delay.
+    A memory based buffer is unlikely to have that much memory configured, which would result in data loss.
+
+  - In case the back-end service is not available (5XX exceptions), the output plugin keeps retrying with the existing chunk while new chunks keep getting scheduled.
+    With each chunk being 2MB in size and a chunk interval of 30 sec, a 30 min outage (it can be longer in unforeseen cases) needs a minimum of 120MB of memory allocated for a memory buffer.
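+    (Sizing sketch: a 30 min outage at one 2MB chunk every 30 sec is 60 chunks, and 60 chunks × 2MB = 120MB.)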
+
+  - In any container based deployment, logs are not saved inside the containers and are completely lost if not consumed; a file based buffer provides a persistent buffer implementation.
+
+  - For container based deployments, all these edge cases and proper sizing need to be considered while creating the Fluentd config file, in order to arrive at the required memory size.
+
+  - As data loss is critical and not all customers are aware of these cases, the memory buffer was previously disallowed. That limitation has now been removed, letting informed customers do the proper sizing and decide which option works best for them.
+
 ### Input Plugin Configuration
-The incoming log events must be in a specific format so that the Fluentd plugin provided by Oracle can process the log data, chunk them, and transfer them to OCI Logging Analytics.
+The incoming log events must be in a specific format so that the Fluentd plugin provided by Oracle can process the log data, chunk it, and transfer it to OCI Log Analytics.
 - [Verify the format of the incoming log events](https://docs.oracle.com/en/learn/oci_logging_analytics_fluentd/#verify-the-format-of-the-incoming-log-events)
@@ -70,7 +87,7 @@ The incoming log events must be in a specific format so that the Fluentd plugin
 ### Filter Configuration
-Use filter plugin (record_transformer) to transform the input log events to add OCI Logging Analytics specific fields/metadata.
+Use filter plugin (record_transformer) to transform the input log events to add OCI Log Analytics specific fields/metadata.
 - [Filter configuration](https://docs.oracle.com/en/learn/oci_logging_analytics_fluentd/#filter-configuration)
@@ -83,49 +100,68 @@ Use filter plugin (record_transformer) to transform the input log events to add
 - Example configuration that can be used for monitoring [kafka log](examples/kafka.conf)
-## Start Viewing the Logs in Logging Analytics
+## Start Viewing the Logs in Log Analytics
-Refer [Viewing the Logs in Logging Analytics](https://docs.oracle.com/en/learn/oci_logging_analytics_fluentd/#start-viewing-the-logs-in-logging-analytics)
+Refer [Viewing the Logs in Log Analytics](https://docs.oracle.com/en/learn/oci_logging_analytics_fluentd/#start-viewing-the-logs-in-logging-analytics)
 ## Metrics
-The plugin emits following metrics in Prometheus format, which provides stats/insights about the data being collected and processed by the plugin. Refer [monitoring-prometheus](https://docs.fluentd.org/monitoring-fluentd/monitoring-prometheus) for details on how to expose these and other various Fluentd metrics to Prometheus (*If the requirement is to collect and monitor core Fluentd and this plugin metrics alone using Prometheus then Step1 and Step2 from the referred document can be skipped*).
+The plugin emits the following metrics in Prometheus format, which provide stats/insights about the data being collected and processed by the plugin.
+Refer to [monitoring-prometheus](https://docs.fluentd.org/monitoring-fluentd/monitoring-prometheus) for details on how to expose these and other Fluentd metrics to Prometheus (*if the requirement is to collect and monitor only core Fluentd and this plugin's metrics using Prometheus, then Step 1 and Step 2 from the referred document can be skipped*).
+
+#### Note
+For Prometheus metrics to work properly, add 'tag' and 'worker_id' (in case of a multi worker configuration) to the filter block:
+
+    tag ${tag}
+    worker_id ${ENV['SERVERENGINE_WORKER_ID']}
+
+#### Metrics details
 Metric Name: oci_la_fluentd_output_plugin_records_received
-    labels: [:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]
-    Description: Number of records received by the OCI Logging Analytics Fluentd output plugin.
+    labels: [:worker_id,:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]
+    Description: Number of records received by the OCI Log Analytics Fluentd output plugin.
     Type : Gauge

 Metric Name: oci_la_fluentd_output_plugin_records_valid
-    labels: [:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]
-    Description: Number of valid records received by the OCI Logging Analytics Fluentd output plugin.
+    labels: [:worker_id,:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]
+    Description: Number of valid records received by the OCI Log Analytics Fluentd output plugin.
     Type : Gauge

 Metric Name: oci_la_fluentd_output_plugin_records_invalid
-    labels: [:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set,:reason]
-    Description: Number of invalid records received by the OCI Logging Analytics Fluentd output plugin.
+    labels: [:worker_id,:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set,:reason]
+    Description: Number of invalid records received by the OCI Log Analytics Fluentd output plugin.
     Type : Gauge

 Metric Name: oci_la_fluentd_output_plugin_records_post_error
-    labels: [:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set,:error_code, :reason]
-    Description: Number of records failed posting to OCI Logging Analytics by the Fluentd output plugin.
+    labels: [:worker_id,:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set,:error_code, :reason]
+    Description: Number of records failed posting to OCI Log Analytics by the Fluentd output plugin.
     Type : Gauge

 Metric Name: oci_la_fluentd_output_plugin_records_post_success
-    labels: [:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]
-    Description: Number of records posted by the OCI Logging Analytics Fluentd output plugin.
+    labels: [:worker_id,:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]
+    Description: Number of records posted by the OCI Log Analytics Fluentd output plugin.
     Type : Gauge

 Metric Name: oci_la_fluentd_output_plugin_chunk_time_to_receive
-    labels: [:tag]
-    Description: Average time taken by Fluentd to deliver the collected records from Input plugin to OCI Logging Analytics output plugin.
+    labels: [:worker_id,:tag]
+    Description: Average time taken by Fluentd to deliver the collected records from Input plugin to OCI Log Analytics output plugin.
     Type : Histogram

 Metric Name: oci_la_fluentd_output_plugin_chunk_time_to_post
-    labels: [:oci_la_log_group_id]
-    Description: Average time taken for posting the received records to OCI Logging Analytics by the Fluentd output plugin.
+    labels: [:worker_id,:oci_la_log_group_id]
+    Description: Average time taken for posting the received records to OCI Log Analytics by the Fluentd output plugin.
     Type : Histogram
+## Contributing
+
+This project welcomes contributions from the community. Before submitting a pull request, please [review our contribution guide](./CONTRIBUTING.md).
+
+## Security
+
+Please consult the [security guide](./SECURITY.md) for our responsible security vulnerability disclosure process.
+
+## FAQ
+
+See [FAQ](FAQ.md).
 ## Changes
@@ -136,4 +172,3 @@
 See [CHANGELOG](CHANGELOG.md).
 ## License
 Copyright (c) 2021, 2022 Oracle and/or its affiliates. See [LICENSE](LICENSE.txt) for more details.
-
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..2ca8102
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,38 @@
+# Reporting security vulnerabilities
+
+Oracle values the independent security research community and believes that
+responsible disclosure of security vulnerabilities helps us ensure the security
+and privacy of all our users.
+
+Please do NOT raise a GitHub Issue to report a security vulnerability. If you
+believe you have found a security vulnerability, please submit a report to
+[secalert_us@oracle.com][1] preferably with a proof of concept. Please review
+some additional information on [how to report security vulnerabilities to Oracle][2].
+We encourage people who contact Oracle Security to use email encryption using
+[our encryption key][3].
+
+We ask that you do not use other channels or contact the project maintainers
+directly.
+
+Non-vulnerability related security issues, including ideas for new or improved
+security features, are welcome on GitHub Issues.
+
+## Security updates, alerts and bulletins
+
+Security updates will be released on a regular cadence. Many of our projects
+will typically release security fixes in conjunction with the
+Oracle Critical Patch Update program. Additional
+information, including past advisories, is available on our [security alerts][4]
+page.
+
+## Security-related information
+
+We will provide security related information such as a threat model, considerations
+for secure use, or any known security issues in our documentation. Please note
+that labs and sample code are intended to demonstrate a concept and may not be
+sufficiently hardened for production use.
+
+[1]: mailto:secalert_us@oracle.com
+[2]: https://www.oracle.com/corporate/security-practices/assurance/vulnerability/reporting.html
+[3]: https://www.oracle.com/security-alerts/encryptionkey.html
+[4]: https://www.oracle.com/security-alerts/
diff --git a/THIRD_PARTY_LICENSES.txt b/THIRD_PARTY_LICENSES.txt
index f04c4c9..8caf51c 100644
--- a/THIRD_PARTY_LICENSES.txt
+++ b/THIRD_PARTY_LICENSES.txt
@@ -4,6 +4,7 @@
 - BSD 2-clause
 - Ruby License
 - UPL-1.0
+- MIT
 -------------------------------- Notices ----------------------------------------
 ======================== Third Party Components =================================
@@ -25,12 +26,26 @@ oci
 * Source code: https://github.com/oracle/oci-java-sdk
 * Project home: https://docs.cloud.oracle.com/en-us/iaas/Content/API/SDKDocs/rubysdk.htm
+prometheus-client
+* Copyright 2013-2015 The Prometheus Authors
+* License: Apache License 2.0
+* Source code: https://github.com/prometheus/client_ruby
+* Project home: https://github.com/prometheus/client_ruby
+
+yajl-ruby
+* Copyright (c) 2014 Brian Lopez
+* License: MIT
+* Source code: https://github.com/brianmario/yajl-ruby
+* Project home: https://github.com/brianmario/yajl-ruby
+
 =============================== Licenses ========================================
 -------------------------- Apache License 2.0 -----------------------------------
 Copyright 2011-2018 Fluentd Authors
+Copyright 2013-2015 The Prometheus Authors
+
 Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
 Apache License
@@ -362,3 +377,29 @@
 OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 DAMAGE.
--------------------------------------------------------------------------------- + +------------------------------- MIT --------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2014 Brian Lopez + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------------------------------------------------------------------- diff --git a/examples/apache.conf b/examples/apache.conf index 94fd70e..4f3c8a3 100644 --- a/examples/apache.conf +++ b/examples/apache.conf @@ -1,61 +1,64 @@ # The following label is to ignore Fluentd warning events. - @type tail - @id in_tail_apacheError - path /var/log/apacheError.log - pos_file /var/log/fluentd-apacheError.log.pos - read_from_head true - path_key tailed_path - tag oci.apacheError - - @type none - + @type tail + @id in_tail_apacheError + path /var/log/apacheError.log + pos_file /var/log/fluentd-apacheError.log.pos + read_from_head true + path_key tailed_path + tag oci.apacheError + + @type none + + + +# Add below prometheus config block only when you need output plugin metrics. + + @type prometheus + bind 0.0.0.0 + port 24231 + metrics_path /metrics - @type record_transformer - enable_ruby true - - oci_la_global_metadata ${{: , :}} - oci_la_entity_id # If same across sources. Else keep this in individual filters - oci_la_entity_type # If same across sources. Else keep this in individual filters - + @type record_transformer + enable_ruby true + + oci_la_global_metadata ${{: , :}} + oci_la_entity_id # If same across sources. Else keep this in individual filters + oci_la_entity_type # If same across sources. 
Else keep this in individual filters + - @type record_transformer - enable_ruby true - - oci_la_metadata ${{: , :}} - oci_la_log_source_name - oci_la_log_group_id - oci_la_log_path "${record['tailed_path']}" - tag ${tag} - + @type record_transformer + enable_ruby true + + oci_la_metadata ${{: , :}} + oci_la_log_source_name + oci_la_log_group_id + oci_la_log_path "${record['tailed_path']}" + oci_la_timezone + tag ${tag} + - @type oci-logging-analytics + @type oci-logging-analytics namespace - -# Auth config file details config_file_location ~/.oci/config profile_name DEFAULT - -# Configuration for plugin (oci-logging-analytics) generated logs - plugin_log_location /var/log - plugin_log_level info -# Buffer Configuration + # Buffer Configuration @type file - Path /var/log + path /var/log retry_forever true disable_chunk_backup true diff --git a/examples/kafka.conf b/examples/kafka.conf index 51083d9..c27f30d 100644 --- a/examples/kafka.conf +++ b/examples/kafka.conf @@ -1,62 +1,65 @@ # The following label is to ignore Fluentd warning events. - @type tail - @id in_tail_kafka - path /var/log/kafka.log - pos_file /var/log/fluentd-kafka.log.pos - read_from_head true - path_key tailed_path - tag oci.kafka - - @type json - + @type tail + @id in_tail_kafka + path /var/log/kafka.log + pos_file /var/log/fluentd-kafka.log.pos + read_from_head true + path_key tailed_path + tag oci.kafka + + @type json + + + +# Add below prometheus config block only when you need output plugin metrics. + + @type prometheus + bind 0.0.0.0 + port 24231 + metrics_path /metrics - @type record_transformer - enable_ruby true - - oci_la_global_metadata ${{: , :}} - oci_la_entity_id # If same across sources. Else keep this in individual filters - oci_la_entity_type # If same across sources. Else keep this in individual filters - + @type record_transformer + enable_ruby true + + oci_la_global_metadata ${{: , :}} + oci_la_entity_id # If same across sources. Else keep this in individual filters + oci_la_entity_type # If same across sources. Else keep this in individual filters + - @type record_transformer - enable_ruby true - - oci_la_metadata ${{: , :}} - oci_la_log_source_name - oci_la_log_group_id - oci_la_log_path "${record['tailed_path']}" - message ${record["log"]} # Will assign the 'log' key value from json wrapped message to 'message' field - tag ${tag} - + @type record_transformer + enable_ruby true + + oci_la_metadata ${{: , :}} + oci_la_log_source_name + oci_la_log_group_id + oci_la_log_path "${record['tailed_path']}" + message ${record["log"]} # Will assign the 'log' key value from json wrapped message to 'message' field + tag ${tag} + - @type oci-logging-analytics + @type oci-logging-analytics namespace - -# Auth config file details + # Auth config file details config_file_location ~/.oci/config profile_name DEFAULT - -# Configuration for plugin (oci-logging-analytics) generated logs - plugin_log_location /var/log - plugin_log_level info -# Buffer Configuration + # Buffer Configuration @type file - Path /var/log + path /var/log retry_forever true disable_chunk_backup true diff --git a/examples/multi_worker.conf b/examples/multi_worker.conf new file mode 100644 index 0000000..ff7b5cc --- /dev/null +++ b/examples/multi_worker.conf @@ -0,0 +1,189 @@ +# Four worker set up. + + workers 4 + + +# Below prometheus source block will be applicable for all the workers with plugins which support multi-process workers feature. 
+# For these workers, the effective prometheus port will be the original port provided (in this example 24232) + worker_id. + + @type prometheus + bind 0.0.0.0 + port 24232 + aggregated_metrics_path /aggregated_metrics + + +# work on worker 0 HTTP + + + @type http + port 9880 + bind 0.0.0.0 + tag oci.apacheError + + @type none + + + + + @type record_transformer + enable_ruby + + oci_la_metadata ${{: , :}} + oci_la_log_source_name + oci_la_log_group_id + oci_la_log_path "${record['tailed_path']}" + tag ${tag} + worker_id 0 + + + + + @type oci-logging-analytics + namespace + # Auth config file details + config_file_location ~/.oci/config + profile_name DEFAULT + # Buffer Configuration + + @type file + path /var/log + retry_forever true + disable_chunk_backup true + + + +# work on worker 1 UDP + + + @type udp + tag oci.audit.log # required + port 20001 # optional. 5160 by default + bind 0.0.0.0 # optional. 0.0.0.0 by default + message_length_limit 1MB # optional. 4096 bytes by default + + @type none + + + + + @type record_transformer + enable_ruby + + oci_la_metadata ${{: , :}} + oci_la_log_source_name + oci_la_log_group_id + oci_la_log_path "${record['tailed_path']}" + tag ${tag} + worker_id 1 + + + + + @type oci-logging-analytics + namespace + # Auth config file details + config_file_location ~/.oci/config + profile_name DEFAULT + # Buffer Configuration + + @type file + path /var/log + retry_forever true + disable_chunk_backup true + + + +# work on worker 2 TAIL + + + @type tail + @id in_tail_apacheError + path /var/log/apacheError.log + pos_file /var/log/fluentd-apacheError.log.pos + read_from_head true + path_key tailed_path + tag oci.apacheError + + @type none + + + +# As the tail plugin does not support the multi-process workers feature, we need to provide a dedicated prometheus source block with the exact port details, as shown below. + + @type prometheus + bind 0.0.0.0 + port 24234 + aggregated_metrics_path /aggregated_metrics + + + + @type record_transformer + enable_ruby + + oci_la_metadata ${{: , :}} + oci_la_log_source_name + oci_la_log_group_id + oci_la_log_path "${record['tailed_path']}" + tag ${tag} + worker_id 2 + + + + + @type oci-logging-analytics + namespace + # Auth config file details + config_file_location ~/.oci/config + profile_name DEFAULT + # Buffer Configuration + + @type file + path /var/log + retry_forever true + disable_chunk_backup true + + + +# work on worker 3 TCP with buffer as memory + + + @type tcp + tag oci.apache.kafka # required + port 5170 # optional. 5170 by default + bind 0.0.0.0 # optional. 0.0.0.0 by default + + @type none + + + + + @type record_transformer + enable_ruby + + oci_la_metadata ${{: , :}} + oci_la_log_source_name + oci_la_log_group_id + oci_la_log_path "${record['tailed_path']}" + tag ${tag} + worker_id 3 + + + + + @type oci-logging-analytics + namespace + # Auth config file details + config_file_location ~/.oci/config + profile_name DEFAULT + # Buffer Configuration (memory buffer, matching the comment above) + + @type memory + retry_forever true + + + \ No newline at end of file diff --git a/examples/syslog.conf b/examples/syslog.conf index 6513fc2..ecdf7c1 100644 --- a/examples/syslog.conf +++ b/examples/syslog.conf @@ -1,64 +1,67 @@ # The following label is to ignore Fluentd warning events.
- @type tail - @id in_tail_syslog - multiline_flush_interval 5s - path /var/log/messages* - pos_file /var/log/messages*.log.pos - read_from_head true - path_key tailed_path - tag oci.syslog - - @type multiline - format_firstline /^\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/ - format1 /^(?.*)/ - + @type tail + @id in_tail_syslog + multiline_flush_interval 5s + path /var/log/messages* + pos_file /var/log/messages*.log.pos + read_from_head true + path_key tailed_path + tag oci.syslog + + @type multiline + format_firstline /^\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/ + format1 /^(?.*)/ + + + +# Add below prometheus config block only when you need output plugin metrics. + + @type prometheus + bind 0.0.0.0 + port 24231 + metrics_path /metrics - @type record_transformer - enable_ruby true - - oci_la_global_metadata ${{: , :}} - oci_la_entity_id # If same across sources. Else keep this in individual filters - oci_la_entity_type # If same across sources. Else keep this in individual filters - + @type record_transformer + enable_ruby true + + oci_la_global_metadata ${{: , :}} + oci_la_entity_id # If same across sources. Else keep this in individual filters + oci_la_entity_type # If same across sources. Else keep this in individual filters + - @type record_transformer - enable_ruby true - - oci_la_metadata ${{: , :}} - oci_la_log_source_name - oci_la_log_group_id - oci_la_log_path "${record['tailed_path']}" - tag ${tag} - + @type record_transformer + enable_ruby true + + oci_la_metadata ${{: , :}} + oci_la_log_source_name + oci_la_log_group_id + oci_la_log_path "${record['tailed_path']}" + tag ${tag} + - @type oci-logging-analytics + @type oci-logging-analytics namespace - -# Auth config file details + # Auth config file details config_file_location ~/.oci/config profile_name DEFAULT - -# Configuration for plugin (oci-logging-analytics) generated logs - plugin_log_location /var/log - plugin_log_level info -# Buffer Configuration + # Buffer Configuration @type file - Path /var/log + path /var/log retry_forever true disable_chunk_backup true diff --git a/fluent-plugin-oci-logging-analytics.gemspec b/fluent-plugin-oci-logging-analytics.gemspec index f19988d..07aad52 100755 --- a/fluent-plugin-oci-logging-analytics.gemspec +++ b/fluent-plugin-oci-logging-analytics.gemspec @@ -1,17 +1,18 @@ -## Copyright (c) 2021, 2022 Oracle and/or its affiliates. +## Copyright (c) 2021, 2024 Oracle and/or its affiliates. 
## The Universal Permissive License (UPL), Version 1.0 as shown at https://oss.oracle.com/licenses/upl/ +require_relative './lib/fluent/version/version' lib = File.expand_path("../lib", __FILE__) $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) Gem::Specification.new do |spec| spec.name = "fluent-plugin-oci-logging-analytics" - spec.version = "2.0.3" + spec.version = Version::VERSION spec.authors = ["Oracle","OCI Observability: Logging Analytics"] spec.email = ["oci_la_plugins_grp@oracle.com"] - spec.summary = %q{Fluentd output plugin for OCI Logging Analytics.} - spec.description = %q{OCI Logging Analytics Fluentd output plugin for ingesting the collected log events to OCI Logging Analytics.} + spec.summary = %q{Fluentd output plugin for OCI Log Analytics.} + spec.description = %q{Fluentd output plugin for ingesting the collected log events to OCI Log Analytics (previously known as OCI Logging Analytics).} spec.license = "UPL-1.0" spec.files = Dir.glob("{bin,lib}/**/*") @@ -30,6 +31,7 @@ Gem::Specification.new do |spec| spec.add_runtime_dependency "fluentd", [">= 0.14.10", "< 2"] spec.add_runtime_dependency 'rubyzip', '~> 2.3.2' spec.add_runtime_dependency "oci", "~>2.16" - spec.add_runtime_dependency "prometheus-client", "~>2.1.0" + spec.add_runtime_dependency "prometheus-client", "~>4.0" + spec.add_runtime_dependency "yajl-ruby", '~> 1.4', '>= 1.4.3' end diff --git a/lib/fluent/dto/logEvents.rb b/lib/fluent/dto/logEvents.rb index 959f1b9..78b4442 100755 --- a/lib/fluent/dto/logEvents.rb +++ b/lib/fluent/dto/logEvents.rb @@ -1,10 +1,10 @@ -## Copyright (c) 2021, 2022 Oracle and/or its affiliates. +## Copyright (c) 2021, 2024 Oracle and/or its affiliates. ## The Universal Permissive License (UPL), Version 1.0 as shown at https://oss.oracle.com/licenses/upl/ class LogEvents - attr_accessor :entityId, :entityType, :logSourceName, :logPath, :logRecords , :metadata + attr_accessor :entityId, :entityType, :logSourceName, :logPath, :logRecords , :metadata, :timezone def initialize(lrpe_key, fluentd_records) - @metadata, @entityId, @entityType, @logSourceName, @logPath = lrpe_key + @metadata, @entityId, @entityType, @logSourceName, @logPath, @timezone = lrpe_key @logRecords = fluentd_records.map{ |record| record['message'] } @@ -17,8 +17,8 @@ def to_hash entityType: @entityType, logSourceName: @logSourceName, logPath: @logPath, - logRecords: @logRecords + logRecords: @logRecords, + timezone:@timezone }.compact end - end diff --git a/lib/fluent/dto/logEventsJson.rb b/lib/fluent/dto/logEventsJson.rb index a5cd758..cf28da3 100755 --- a/lib/fluent/dto/logEventsJson.rb +++ b/lib/fluent/dto/logEventsJson.rb @@ -1,4 +1,4 @@ -## Copyright (c) 2021, 2022 Oracle and/or its affiliates. +## Copyright (c) 2021, 2024 Oracle and/or its affiliates. ## The Universal Permissive License (UPL), Version 1.0 as shown at https://oss.oracle.com/licenses/upl/ require_relative './logEvents' @@ -20,5 +20,4 @@ def to_hash end }.compact end - end diff --git a/lib/fluent/enums/source.rb b/lib/fluent/enums/source.rb new file mode 100644 index 0000000..ab3b373 --- /dev/null +++ b/lib/fluent/enums/source.rb @@ -0,0 +1,7 @@ +## Copyright (c) 2024 Oracle and/or its affiliates. 
+## The Universal Permissive License (UPL), Version 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +module Source + FLUENTD = :fluentd + KUBERNETES_SOLUTION = :kubernetes_solution +end diff --git a/lib/fluent/metrics/metricsLabels.rb b/lib/fluent/metrics/metricsLabels.rb index d52c85b..4d263f3 100644 --- a/lib/fluent/metrics/metricsLabels.rb +++ b/lib/fluent/metrics/metricsLabels.rb @@ -1,6 +1,10 @@ +## Copyright (c) 2021, 2024 Oracle and/or its affiliates. +## The Universal Permissive License (UPL), Version 1.0 as shown at https://oss.oracle.com/licenses/upl/ + class MetricsLabels - attr_accessor :tag, :logGroupId, :logSourceName, :logSet, :invalid_reason, :records_valid, :records_per_tag, :latency + attr_accessor :worker_id, :tag, :logGroupId, :logSourceName, :logSet, :invalid_reason, :records_valid, :records_per_tag, :latency,:timezone def initialize + @worker_id = nil @tag = nil @logGroupId = nil @logSourceName = nil @@ -9,5 +13,6 @@ def initialize @records_valid = 0 @records_per_tag = 0 @latency = 0 + @timezone = nil end -end \ No newline at end of file +end diff --git a/lib/fluent/metrics/prometheusMetrics.rb b/lib/fluent/metrics/prometheusMetrics.rb old mode 100644 new mode 100755 index d8fc60d..491246c --- a/lib/fluent/metrics/prometheusMetrics.rb +++ b/lib/fluent/metrics/prometheusMetrics.rb @@ -1,3 +1,6 @@ +## Copyright (c) 2021, 2024 Oracle and/or its affiliates. +## The Universal Permissive License (UPL), Version 1.0 as shown at https://oss.oracle.com/licenses/upl/ + require 'prometheus/client' require 'prometheus/client/registry' require 'prometheus/client/gauge' @@ -14,16 +17,15 @@ def initialize end def createMetrics gauge = Prometheus::Client::Gauge - @records_received = gauge.new(:oci_la_fluentd_output_plugin_records_received, docstring: 'Number of records received by the OCI Logging Analytics Fluentd output plugin.', labels: [:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]) - @records_valid = gauge.new(:oci_la_fluentd_output_plugin_records_valid, docstring: 'Number of valid records received by the OCI Logging Analytics Fluentd output plugin.', labels: [:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]) - @records_invalid = gauge.new(:oci_la_fluentd_output_plugin_records_invalid, docstring: 'Number of invalid records received by the OCI Logging Analytics Fluentd output plugin.', labels: [:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set,:reason]) - @records_error = gauge.new(:oci_la_fluentd_output_plugin_records_post_error, docstring: 'Number of records failed posting to OCI Logging Analytics by the Fluentd output plugin.', labels: [:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set,:error_code, :reason]) - @records_posted = gauge.new(:oci_la_fluentd_output_plugin_records_post_success, docstring: 'Number of records posted by the OCI Logging Analytics Fluentd output plugin.', labels: [:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]) - #@bytes_received = gauge.new(:oci_la_bytes_received, docstring: '...', labels: [:tag]) - #@bytes_posted = gauge.new(:oci_la_bytes_posted, docstring: '...', labels: [:oci_la_log_group_id]) + @records_received = gauge.new(:oci_la_fluentd_output_plugin_records_received, docstring: 'Number of records received by the OCI Log Analytics Fluentd output plugin.', labels: [:worker_id,:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]) + @records_valid = gauge.new(:oci_la_fluentd_output_plugin_records_valid, docstring: 'Number of valid records 
received by the OCI Log Analytics Fluentd output plugin.', labels: [:worker_id,:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]) + @records_invalid = gauge.new(:oci_la_fluentd_output_plugin_records_invalid, docstring: 'Number of invalid records received by the OCI Log Analytics Fluentd output plugin.', labels: [:worker_id,:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set,:reason]) + @records_error = gauge.new(:oci_la_fluentd_output_plugin_records_post_error, docstring: 'Number of records failed posting to OCI Log Analytics by the Fluentd output plugin.', labels: [:worker_id,:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set,:error_code, :reason]) + @records_posted = gauge.new(:oci_la_fluentd_output_plugin_records_post_success, docstring: 'Number of records posted by the OCI Log Analytics Fluentd output plugin.', labels: [:worker_id,:tag,:oci_la_log_group_id,:oci_la_log_source_name,:oci_la_log_set]) + histogram = Prometheus::Client::Histogram - @chunk_time_to_receive = histogram.new(:oci_la_fluentd_output_plugin_chunk_time_to_receive, docstring: 'Average time taken by Fluentd to deliver the collected records from Input plugin to OCI Logging Analytics output plugin.', labels: [:tag]) - @chunk_time_to_upload = histogram.new(:oci_la_fluentd_output_plugin_chunk_time_to_post, docstring: 'Average time taken for posting the received records to OCI Logging Analytics by the Fluentd output plugin.', labels: [:oci_la_log_group_id]) + @chunk_time_to_receive = histogram.new(:oci_la_fluentd_output_plugin_chunk_time_to_receive, docstring: 'Average time taken by Fluentd to deliver the collected records from Input plugin to OCI Log Analytics output plugin.', labels: [:worker_id,:tag]) + @chunk_time_to_upload = histogram.new(:oci_la_fluentd_output_plugin_chunk_time_to_post, docstring: 'Average time taken for posting the received records to OCI Log Analytics by the Fluentd output plugin.', labels: [:worker_id,:oci_la_log_group_id]) end def registerMetrics @@ -33,9 +35,7 @@ def registerMetrics registry.register(@records_invalid) unless registry.exist?('oci_la_fluentd_output_plugin_records_invalid') registry.register(@records_error) unless registry.exist?('oci_la_fluentd_output_plugin_records_post_error') registry.register(@records_posted) unless registry.exist?('oci_la_fluentd_output_plugin_records_post_success') - #registry.register(@bytes_received) unless registry.exist?('oci_la_bytes_received') - #registry.register(@bytes_posted) unless registry.exist?('oci_la_bytes_valid') registry.register(@chunk_time_to_receive) unless registry.exist?('oci_la_fluentd_output_plugin_chunk_time_to_receive') registry.register(@chunk_time_to_upload) unless registry.exist?('oci_la_fluentd_output_plugin_chunk_time_to_post') end -end \ No newline at end of file +end diff --git a/lib/fluent/plugin/out_oci-logging-analytics.rb b/lib/fluent/plugin/out_oci-logging-analytics.rb index cdfcc27..a6d24d2 100755 --- a/lib/fluent/plugin/out_oci-logging-analytics.rb +++ b/lib/fluent/plugin/out_oci-logging-analytics.rb @@ -1,15 +1,19 @@ -## Copyright (c) 2021, 2022 Oracle and/or its affiliates. +## Copyright (c) 2021, 2024 Oracle and/or its affiliates. 
## The Universal Permissive License (UPL), Version 1.0 as shown at https://oss.oracle.com/licenses/upl/ require 'fluent/plugin/output' require "benchmark" require 'zip' +require 'yajl' +require 'yajl/json_gem' +# require 'tzinfo' require 'logger' require_relative '../dto/logEventsJson' require_relative '../dto/logEvents' require_relative '../metrics/prometheusMetrics' require_relative '../metrics/metricsLabels' +require_relative '../enums/source' # Import only specific OCI modules to improve load times and reduce the memory requirements. require 'oci/auth/auth' @@ -34,7 +38,6 @@ require 'oci/waiter' require 'oci/retry/retry' require 'oci/object_storage/object_storage' - module OCI class << self attr_accessor :sdk_name @@ -75,6 +78,8 @@ class OutOracleOCILogAnalytics < Output @@loganalytics_client = nil @@prometheusMetrics = nil @@logger_config_errors = [] + @@worker_id = '0' + @@encoded_messages_count = 0 desc 'OCI Tenancy Namespace.' @@ -87,13 +92,16 @@ class OutOracleOCILogAnalytics < Output config_param :endpoint, :string, :default => nil desc 'AuthType to be used.' config_param :auth_type, :string, :default => 'InstancePrincipal' + desc 'OCI Domain' + config_param :oci_domain, :string, :default => nil desc 'Enable local payload dump.' config_param :dump_zip_file, :bool, :default => false desc 'Payload zip File Location.' config_param :zip_file_location, :string, :default => nil desc 'The kubernetes_metadata_keys_mapping.' - config_param :kubernetes_metadata_keys_mapping, :hash, :default => {"container_name":"Kubernetes Container Name","namespace_name":"Kubernetes Namespace Name","pod_name":"Kubernetes Pod Name","container_image":"Kubernetes Container Image","host":"Kubernetes Node Name","master_url":"Kubernetes Master Url"} - + config_param :kubernetes_metadata_keys_mapping, :hash, :default => {"container_name":"Container","namespace_name":"Namespace","pod_name":"Pod","container_image":"Container Image Name","host":"Node"} + desc 'opc-meta-properties' + config_param :collection_source, :string, :default => Source::FLUENTD #**************************************************************** desc 'The http proxy to be used.' @@ -146,7 +154,7 @@ class OutOracleOCILogAnalytics < Output desc 'The number of threads of output plugins, which is used to write chunks in parallel.' config_set_default :flush_thread_count, 1 desc 'The max size of each chunks: events will be written into chunks until the size of chunks become this size.' - config_set_default :chunk_limit_size, 2 * 1024 * 1024 # 2MB + config_set_default :chunk_limit_size, 4 * 1024 * 1024 # 4MB desc 'The size limitation of this buffer plugin instance.' config_set_default :total_limit_size, 5 * (1024**3) # 5GB desc 'Flush interval' @@ -170,67 +178,72 @@ def initialize end def initialize_logger() - filename = nil - is_default_log_location = false - if is_valid(@plugin_log_location) - filename = @plugin_log_location[-1] == '/' ? @plugin_log_location : @plugin_log_location +'/' - else - is_default_log_location = true - end - if !is_valid_log_level(@plugin_log_level) - @plugin_log_level = @@default_log_level - end - oci_fluent_output_plugin_log = nil - if is_default_log_location - oci_fluent_output_plugin_log = 'oci-logging-analytics.log' - else - oci_fluent_output_plugin_log = filename+'oci-logging-analytics.log' - end - - logger_config = nil - - if is_valid_number_of_logs(@plugin_log_file_count) && is_valid_log_size(@plugin_log_file_size) - # When customer provided valid log_file_count and log_file_size. 
- # logger will rotate with max log_file_count with each file having max log_file_size. - # Older logs purged automatically. - @@logger = Logger.new(oci_fluent_output_plugin_log, @plugin_log_file_count, @@validated_log_size) - logger_config = 'USER_CONFIG' - elsif is_valid_log_rotation(@plugin_log_rotation) - # When customer provided only log_rotation. - # logger will create a new log based on log_rotation (new file everyday if the rotation is daily). - # This will create too many logs over a period of time as log purging is not done. - @@logger = Logger.new(oci_fluent_output_plugin_log, @plugin_log_rotation) - logger_config = 'FALLBACK_CONFIG' - else - # When customer provided invalid log config, default config is considered. - # logger will rotate with max default log_file_count with each file having max default log_file_size. - # Older logs purged automatically. - @@logger = Logger.new(oci_fluent_output_plugin_log, @@default_number_of_logs, @@default_log_size) - logger_config = 'DEFAULT_CONFIG' - end - - logger_set_level(@plugin_log_level) + begin + filename = nil + is_default_log_location = false + if is_valid(@plugin_log_location) + filename = @plugin_log_location[-1] == '/' ? @plugin_log_location : @plugin_log_location +'/' + else + @@logger = log + return + end + if !is_valid_log_level(@plugin_log_level) + @plugin_log_level = @@default_log_level + end + oci_fluent_output_plugin_log = nil + if is_default_log_location + oci_fluent_output_plugin_log = 'oci-logging-analytics.log' + else + oci_fluent_output_plugin_log = filename+'oci-logging-analytics.log' + end + logger_config = nil + + if is_valid_number_of_logs(@plugin_log_file_count) && is_valid_log_size(@plugin_log_file_size) + # When customer provided valid log_file_count and log_file_size. + # logger will rotate with max log_file_count with each file having max log_file_size. + # Older logs purged automatically. + @@logger = Logger.new(oci_fluent_output_plugin_log, @plugin_log_file_count, @@validated_log_size) + logger_config = 'USER_CONFIG' + elsif is_valid_log_rotation(@plugin_log_rotation) + # When customer provided only log_rotation. + # logger will create a new log based on log_rotation (new file everyday if the rotation is daily). + # This will create too many logs over a period of time as log purging is not done. + @@logger = Logger.new(oci_fluent_output_plugin_log, @plugin_log_rotation) + logger_config = 'FALLBACK_CONFIG' + else + # When customer provided invalid log config, default config is considered. + # logger will rotate with max default log_file_count with each file having max default log_file_size. + # Older logs purged automatically. + @@logger = Logger.new(oci_fluent_output_plugin_log, @@default_number_of_logs, @@default_log_size) + logger_config = 'DEFAULT_CONFIG' + end - @@logger.info {"Initializing oci-logging-analytics plugin"} - if is_default_log_location - @@logger.info {"plugin_log_location is not specified. oci-logging-analytics.log will be generated under directory from where fluentd is executed."} - end + logger_set_level(@plugin_log_level) + @@logger.info {"Initializing oci-logging-analytics plugin"} + if is_default_log_location + @@logger.info {"plugin_log_location is not specified. 
oci-logging-analytics.log will be generated under directory from where fluentd is executed."} + end - case logger_config - when 'USER_CONFIG' - @@logger.info {"Logger for oci-logging-analytics.log is initialized with config values log size: #{@plugin_log_file_size}, number of logs: #{@plugin_log_file_count}"} - when 'FALLBACK_CONFIG' - @@logger.info {"Logger for oci-logging-analytics.log is initialized with log rotation: #{@plugin_log_rotation}"} - when 'DEFAULT_CONFIG' - @@logger.info {"Logger for oci-logging-analytics.log is initialized with default config values log size: #{@@default_log_size}, number of logs: #{@@default_number_of_logs}"} - end - if @@logger_config_errors.length > 0 - @@logger_config_errors. each {|logger_config_error| - @@logger.warn {"#{logger_config_error}"} - } - end - if is_valid_log_age(@plugin_log_age) - @@logger.warn {"'plugin_log_age' field is deprecated. Use 'plugin_log_file_size' and 'plugin_log_file_count' instead."} + case logger_config + when 'USER_CONFIG' + @@logger.info {"Logger for oci-logging-analytics.log is initialized with config values log size: #{@plugin_log_file_size}, number of logs: #{@plugin_log_file_count}"} + when 'FALLBACK_CONFIG' + @@logger.info {"Logger for oci-logging-analytics.log is initialized with log rotation: #{@plugin_log_rotation}"} + when 'DEFAULT_CONFIG' + @@logger.info {"Logger for oci-logging-analytics.log is initialized with default config values log size: #{@@default_log_size}, number of logs: #{@@default_number_of_logs}"} + end + if @@logger_config_errors.length > 0 + @@logger_config_errors. each {|logger_config_error| + @@logger.warn {"#{logger_config_error}"} + } + end + if is_valid_log_age(@plugin_log_age) + @@logger.warn {"'plugin_log_age' field is deprecated. Use 'plugin_log_file_size' and 'plugin_log_file_count' instead."} + end + rescue => ex + @@logger = log + @@logger.error {"Error while initializing logger:#{ex.inspect}"} + @@logger.info {"Redirecting oci log analytics logs to STDOUT"} end end @@ -238,21 +251,62 @@ def initialize_loganalytics_client() if is_valid(@config_file_location) @auth_type = "ConfigFile" end + case @auth_type - when "InstancePrincipal" - instance_principals_signer = OCI::Auth::Signers::InstancePrincipalsSecurityTokenSigner.new - @@loganalytics_client = OCI::LogAnalytics::LogAnalyticsClient.new(config: OCI::Config.new, signer: instance_principals_signer) - when "ConfigFile" - my_config = OCI::ConfigFileLoader.load_config(config_file_location: @config_file_location, profile_name: @profile_name) - if is_valid(endpoint) - @@loganalytics_client = OCI::LogAnalytics::LogAnalyticsClient.new(config:my_config, endpoint:@endpoint) - @@logger.info {"loganalytics_client initialised with endpoint: #{@endpoint}"} - else - @@loganalytics_client = OCI::LogAnalytics::LogAnalyticsClient.new(config:my_config) - end + when "InstancePrincipal" + instance_principals_signer = nil + la_endpoint = nil + if is_valid(@oci_domain) + fedration_endpoint = "https://auth.#{@oci_domain}/v1/x509" + instance_principals_signer = OCI::Auth::Signers::InstancePrincipalsSecurityTokenSigner.new( + federation_endpoint: fedration_endpoint) + @@logger.info "Custom Federation Endpoint: #{fedration_endpoint}" else - raise Fluent::ConfigError, "Invalid authType @auth_type, authType must be either InstancePrincipal or ConfigFile." 
-          abort
+            instance_principals_signer = OCI::Auth::Signers::InstancePrincipalsSecurityTokenSigner.new
+          end
+          if is_valid(@endpoint)
+            la_endpoint = @endpoint
+            @@logger.info "Initializing loganalytics_client with endpoint: #{la_endpoint}"
+          elsif is_valid(@oci_domain)
+            la_endpoint = "https://loganalytics.#{@oci_domain}"
+            @@logger.info "Initializing loganalytics_client with custom domain endpoint: #{la_endpoint}"
+          end
+          @@loganalytics_client = OCI::LogAnalytics::LogAnalyticsClient.new(
+            config: OCI::Config.new,
+            endpoint: la_endpoint,
+            signer: instance_principals_signer)
+          @@logger.info 'loganalytics_client initialized.'
+        when "WorkloadIdentity"
+          la_endpoint = nil
+          workload_identity_signer = OCI::Auth::Signers::oke_workload_resource_principal_signer
+          if is_valid(@endpoint)
+            la_endpoint = @endpoint
+            @@logger.info "Initializing loganalytics_client with endpoint: #{@endpoint}"
+          elsif is_valid(@oci_domain)
+            la_endpoint = "https://loganalytics.#{@oci_domain}"
+            @@logger.info "Initializing loganalytics_client with custom domain endpoint: #{la_endpoint}"
+          end
+          @@loganalytics_client = OCI::LogAnalytics::LogAnalyticsClient.new(
+            config: OCI::Config.new,
+            endpoint: la_endpoint,
+            signer: workload_identity_signer)
+          @@logger.info 'loganalytics_client initialized.'
+        when "ConfigFile"
+          my_config = OCI::ConfigFileLoader.load_config(
+            config_file_location: @config_file_location,
+            profile_name: @profile_name)
+          la_endpoint = nil
+          if is_valid(@endpoint)
+            la_endpoint = @endpoint
+            @@logger.info "Initializing loganalytics_client with endpoint: #{la_endpoint}"
+          elsif is_valid(@oci_domain)
+            la_endpoint = "https://loganalytics.#{@oci_domain}"
+            @@logger.info "Initializing loganalytics_client with custom domain endpoint: #{la_endpoint}"
+          end
+          @@loganalytics_client = OCI::LogAnalytics::LogAnalyticsClient.new(config: my_config, endpoint: la_endpoint)
+          @@logger.info 'loganalytics_client initialized.'
+        else
+          raise Fluent::ConfigError, "Invalid authType: #{@auth_type}, valid inputs are - InstancePrincipal, ConfigFile, WorkloadIdentity"
        end

        if is_valid(@proxy_ip) && is_number(@proxy_port)
@@ -264,19 +318,24 @@ def initialize_loganalytics_client()
      end

      rescue => ex
-      @@logger.error {"Error occurred while initializing LogAnalytics Client:
-                      authType: #{@auth_type},
-                      errorMessage: #{ex}"}
+        @@logger.error {"Error occurred while initializing LogAnalytics Client:
+                        authType: #{@auth_type},
+                        errorMessage: #{ex}"}
    end

    def configure(conf)
      super
+
+      if is_valid(@oci_domain) && !@oci_domain.match(/\S\.oci\.\S/)
+        raise Fluent::ConfigError, "Invalid oci_domain: #{@oci_domain}, expected format: <prefix>.oci.<domain>, e.g. us-ashburn-1.oci.oraclecloud.com"
+      end
+
      @@prometheusMetrics = PrometheusMetrics.instance
      initialize_logger
      initialize_loganalytics_client

-      @@logger.error {"Error in config file : Buffer plugin must be of @type file."} unless buffer_config['@type'] == 'file'
-      raise Fluent::ConfigError, "Error in config file : Buffer plugin must be of @type file." unless buffer_config['@type'] == 'file'
+      #@@logger.error {"Error in config file : Buffer plugin must be of @type file."} unless buffer_config['@type'] == 'file'
+      #raise Fluent::ConfigError, "Error in config file : Buffer plugin must be of @type file." unless buffer_config['@type'] == 'file'

      is_mandatory_fields_valid,invalid_field_name = mandatory_field_validator
      if !is_mandatory_fields_valid
@@ -288,8 +347,8 @@ def configure(conf)
      unless conf.elements(name: 'buffer').empty?
        buffer_conf = conf.elements(name: 'buffer').first
        chunk_limit_size_from_conf = buffer_conf['chunk_limit_size']
-       unless chunk_limit_size_from_conf.nil?
-         log.debug "chunk limit size as per the configuration file is #{chunk_limit_size_from_conf}"
+       unless chunk_limit_size_from_conf.nil? && buffer_config['@type'] != 'file'
+         @@logger.debug "chunk limit size as per the configuration file is #{chunk_limit_size_from_conf}"
          case chunk_limit_size_from_conf.to_s
          when /([0-9]+)k/i
            chunk_limit_size_bytes = $~[1].to_i * 1024
@@ -299,13 +358,13 @@ def configure(conf)
            chunk_limit_size_bytes = $~[1].to_i * (1024 ** 3)
          when /([0-9]+)t/i
            chunk_limit_size_bytes = $~[1].to_i * (1024 ** 4)
-         else
-           raise Fluent::ConfigError, "error parsing chunk_limit_size"
+         #else
+           #raise Fluent::ConfigError, "error parsing chunk_limit_size"
          end
-         log.debug "chunk limit size in bytes as per the configuration file is #{chunk_limit_size_bytes}"
-         if !chunk_limit_size_bytes.between?(1048576, 2097152)
-           raise Fluent::ConfigError, "chunk_limit_size must be between 1MB and 2MB"
+         @@logger.debug "chunk limit size in bytes as per the configuration file is #{chunk_limit_size_bytes}"
+         if chunk_limit_size_bytes != nil && !chunk_limit_size_bytes.between?(1048576, 4194304)
+           raise Fluent::ConfigError, "chunk_limit_size must be between 1MB and 4MB"
          end
        end
      end
@@ -577,7 +636,7 @@ def get_kubernetes_metadata(oci_la_metadata,record)
          kubernetes_metadata.each do |key, value|
            if kubernetes_metadata_keys_mapping.has_key?(key)
              if !is_valid(oci_la_metadata[kubernetes_metadata_keys_mapping[key]])
-               oci_la_metadata[kubernetes_metadata_keys_mapping[key]] = json_message_handler(value)
+               oci_la_metadata[kubernetes_metadata_keys_mapping[key]] = json_message_handler(key, value)
              end
            end
          end
@@ -588,14 +647,21 @@ def get_kubernetes_metadata(oci_la_metadata,record)
        return oci_la_metadata
      end

-     def json_message_handler(message)
-       if message.is_a?(Hash)
-         return JSON.generate(message)
-       else
-         return message
-       end
-       rescue => ex
-         return message
+     def json_message_handler(key, message)
+       begin
+         if !is_valid(message)
+           return nil
+         end
+         if message.is_a?(Hash)
+           return Yajl.dump(message) # JSON.generate(message)
+         end
+         return message
+       rescue => ex
+         @@logger.error {"Error occurred while generating json for
+                          field: #{key}
+                          exception: #{ex}"}
+         return nil
+       end
      end

      def group_by_logGroupId(chunk)
@@ -607,6 +673,8 @@ def group_by_logGroupId(chunk)
          latency = 0
          records_per_tag = 0
+
+
          tag_metrics_set = Hash.new
          logGroup_labels_set = Hash.new
@@ -616,14 +684,18 @@ def group_by_logGroupId(chunk)
          tags_per_logGroupId = Hash.new
          tag_logSet_map = Hash.new
          tag_metadata_map = Hash.new
+         timezoneValuesByTag = Hash.new
          incoming_records = 0
-
          chunk.each do |time, record|
            incoming_records += 1
            metricsLabels = MetricsLabels.new
            if !record.nil?
              begin
                record_hash = record.keys.map {|x| [x,true]}.to_h
+               if record_hash.has_key?("worker_id") && is_valid(record["worker_id"])
+                 metricsLabels.worker_id = record["worker_id"] ||= '0'
+                 @@worker_id = record["worker_id"] ||= '0'
+               end
                is_tag_exists = false
                if record_hash.has_key?("tag") && is_valid(record["tag"])
                  is_tag_exists = true
@@ -697,27 +769,30 @@ def group_by_logGroupId(chunk)
                  end
                  next
                end
+
+               # metricsLabels.timezone = record["oci_la_timezone"]
                metricsLabels.logGroupId = record["oci_la_log_group_id"]
                metricsLabels.logSourceName = record["oci_la_log_source_name"]
                if record["oci_la_log_set"] != nil
                  metricsLabels.logSet = record["oci_la_log_set"]
                end
+               record["message"] = json_message_handler("message", record["message"])
+
+               # This will check for null or empty messages; only such records are ignored.
                if !is_valid(record["message"])
                  metricsLabels.invalid_reason = OutOracleOCILogAnalytics::METRICS_INVALID_REASON_MESSAGE
                  if is_tag_exists
-                   @@logger.warn {"'message' field has empty value, Skipping records associated with tag : #{record["tag"]}."}
                    if invalid_records_per_tag.has_key?(record["tag"])
                      invalid_records_per_tag[record["tag"]] += 1
                    else
                      invalid_records_per_tag[record["tag"]] = 1
+                     @@logger.warn {"'message' field is empty or encoded; skipping records associated with tag: #{record["tag"]}."}
                    end
                  else
-                   @@logger.warn {"'message' field has empty value, Skipping record."}
+                   @@logger.warn {"'message' field is empty or encoded; skipping record."}
                  end
                  next
-               else
-                 record["message"] = json_message_handler(record["message"])
                end

                if record_hash.has_key?("kubernetes")
@@ -744,6 +819,25 @@ def group_by_logGroupId(chunk)
                      tags_per_logGroupId[record["oci_la_log_group_id"]] = record["tag"]
                    end
                  end
+                 # validating the timezone field
+                 if !timezoneValuesByTag.has_key?(record["tag"])
+                   begin
+                     timezoneIdentifier = record["oci_la_timezone"]
+                     unless is_valid(timezoneIdentifier)
+                       record["oci_la_timezone"] = nil
+                     else
+                       isTimezoneExist = timezone_exist? timezoneIdentifier
+                       unless isTimezoneExist
+                         @@logger.warn { "Invalid timezone '#{timezoneIdentifier}', using default UTC." }
+                         record["oci_la_timezone"] = "UTC"
+                       end
+
+                     end
+                     timezoneValuesByTag[record["tag"]] = record["oci_la_timezone"]
+                   end
+                 else
+                   record["oci_la_timezone"] = timezoneValuesByTag[record["tag"]]
+                 end
                records << record
              ensure
@@ -773,7 +867,7 @@ def group_by_logGroupId(chunk)
          tag_metrics_set.each do |tag,metricsLabels|
            latency_avg = (metricsLabels.latency / metricsLabels.records_per_tag).round(3)
-           @@prometheusMetrics.chunk_time_to_receive.observe(latency_avg, labels: { tag: tag})
+           @@prometheusMetrics.chunk_time_to_receive.observe(latency_avg, labels: { worker_id: metricsLabels.worker_id, tag: tag})
          end

          lrpes_for_logGroupId = {}
@@ -827,17 +921,20 @@ def write(chunk)
              logGroup_metrics_map[metricsLabels.logGroupId] = metricsLabels_array
            end
-           @@prometheusMetrics.records_received.set(value.to_i, labels: { tag: key,
+           @@prometheusMetrics.records_received.set(value.to_i, labels: { worker_id: metricsLabels.worker_id,
+             tag: key,
              oci_la_log_group_id: metricsLabels.logGroupId,
              oci_la_log_source_name: metricsLabels.logSourceName,
              oci_la_log_set: metricsLabels.logSet})
-           @@prometheusMetrics.records_invalid.set(dropped_messages, labels: { tag: key,
+           @@prometheusMetrics.records_invalid.set(dropped_messages, labels: { worker_id: metricsLabels.worker_id,
+             tag: key,
              oci_la_log_group_id: metricsLabels.logGroupId,
              oci_la_log_source_name: metricsLabels.logSourceName,
              oci_la_log_set: metricsLabels.logSet,
              reason: metricsLabels.invalid_reason})
-           @@prometheusMetrics.records_valid.set(valid_messages, labels: { tag: key,
+           @@prometheusMetrics.records_valid.set(valid_messages, labels: { worker_id: metricsLabels.worker_id,
+             tag: key,
              oci_la_log_group_id: metricsLabels.logGroupId,
              oci_la_log_source_name: metricsLabels.logSourceName,
              oci_la_log_set: metricsLabels.logSet})
@@ -875,7 +972,7 @@ def write(chunk)
              end
            end
          }.real.round(3)
-         @@prometheusMetrics.chunk_time_to_upload.observe(chunk_upload_time_taken, labels: { oci_la_log_group_id: oci_la_log_group_id})
+         @@prometheusMetrics.chunk_time_to_upload.observe(chunk_upload_time_taken, labels: { worker_id: @@worker_id, oci_la_log_group_id: oci_la_log_group_id})
        end
      ensure
@@ -887,6 +984,14 @@ def write(chunk)
          end
        end
      end
+     def timezone_exist?(tz)
+       begin
+         TZInfo::Timezone.get(tz)
+         return true
+       rescue TZInfo::InvalidTimezoneIdentifier
+         return false
+       end
+     end

      # Each oci_la_log_set will correspond to a separate file in the zip
      # Only MAX_FILES_PER_ZIP files are allowed per zip.
@@ -929,6 +1034,21 @@ def get_logSets_map_per_logGroupId(oci_la_log_group_id,records_per_logGroupId)
      # takes a fluentD chunk and converts it to an in-memory zipfile, populating metrics hash provided
      # Any exception raised is passed into the metrics hash, to be re-thrown from write()
+     def getCollectionSource(input)
+       collections_src = []
+       if !is_valid(input)
+         collections_src.unshift("source:#{Source::FLUENTD}")
+       else
+         if input == Source::FLUENTD.to_s || input == Source::KUBERNETES_SOLUTION.to_s
+           collections_src.unshift("source:#{input}")
+         else
+           # source not defined; using default source 'fluentd'
+           collections_src.unshift("source:#{Source::FLUENTD}")
+         end
+       end
+       collections_src
+     end
+
      def get_zipped_stream(oci_la_log_group_id,oci_la_global_metadata,records_per_logSet_map)
        begin
          current, = Time.now
@@ -941,8 +1061,9 @@ def get_zipped_stream(oci_la_log_group_id,oci_la_global_metadata,records_per_log
                          record['oci_la_metadata'],
                          record['oci_la_entity_id'],
                          record['oci_la_entity_type'],
-                         record['oci_la_log_source_name'] ,
-                         record['oci_la_log_path']
+                         record['oci_la_log_source_name'],
+                         record['oci_la_log_path'],
+                         record['oci_la_timezone']
                        ]}.map { |lrpe_key, records_per_lrpe|
                          number_of_records += records_per_lrpe.length
                          LogEvents.new(lrpe_key, records_per_lrpe)
@@ -956,7 +1077,7 @@ def get_zipped_stream(oci_la_log_group_id,oci_la_global_metadata,records_per_log
              @@logger.debug {"Added entry #{nextEntry} for oci_la_log_set #{oci_la_log_set} into the zip."}
              zos.put_next_entry(nextEntry)
              logEventsJsonFinal = LogEventsJson.new(oci_la_global_metadata,lrpes_for_logEvents)
-             zos.write logEventsJsonFinal.to_hash.to_json
+             zos.write Yajl.dump(logEventsJsonFinal.to_hash)
            end
          }
          zippedstream.rewind
@@ -992,9 +1113,10 @@ def save_zip_to_local(oci_la_log_group_id, zippedstream, current_s)

      # upload zipped stream to oci
      def upload_to_oci(oci_la_log_group_id, number_of_records, zippedstream, metricsLabels_array)
        begin
+         collection_src_prop = getCollectionSource(@collection_source)
          error_reason = nil
          error_code = nil
-         opts = {payload_type: "ZIP"}
+         opts = { payload_type: "ZIP", opc_meta_properties: collection_src_prop }

          response = @@loganalytics_client.upload_log_events_file(namespace_name=@namespace,
            logGroupId=oci_la_log_group_id ,
@@ -1002,13 +1124,15 @@ def upload_to_oci(oci_la_log_group_id, number_of_records, zippedstream, metricsL
            opts)
          if !response.nil? && response.status == 200 then
            headers = response.headers
-
-           metricsLabels_array.each { |metricsLabels|
-             @@prometheusMetrics.records_posted.set(metricsLabels.records_valid, labels: { tag: metricsLabels.tag,
-               oci_la_log_group_id: metricsLabels.logGroupId,
-               oci_la_log_source_name: metricsLabels.logSourceName,
-               oci_la_log_set: metricsLabels.logSet})
-           }
+           if metricsLabels_array != nil
+             metricsLabels_array.each { |metricsLabels|
+               @@prometheusMetrics.records_posted.set(metricsLabels.records_valid, labels: { worker_id: metricsLabels.worker_id,
+                 tag: metricsLabels.tag,
+                 oci_la_log_group_id: metricsLabels.logGroupId,
+                 oci_la_log_source_name: metricsLabels.logSourceName,
+                 oci_la_log_set: metricsLabels.logSet})
+             }
+           end

            #zippedstream.rewind #reposition buffer pointer to the beginning
            #zipfile = zippedstream&.sysread&.dup
@@ -1087,9 +1211,10 @@ def upload_to_oci(oci_la_log_group_id, number_of_records, zippedstream, metricsL
          error_reason = ex
          @@logger.error {"oci upload exception : Error while uploading the payload.
#{ex}"} ensure - if error_reason != nil + if error_reason != nil && metricsLabels_array != nil metricsLabels_array.each { |metricsLabels| - @@prometheusMetrics.records_error.set(metricsLabels.records_valid, labels: { tag: metricsLabels.tag, + @@prometheusMetrics.records_error.set(metricsLabels.records_valid, labels: {worker_id: metricsLabels.worker_id, + tag: metricsLabels.tag, oci_la_log_group_id: metricsLabels.logGroupId, oci_la_log_source_name: metricsLabels.logSourceName, oci_la_log_set: metricsLabels.logSet, diff --git a/lib/fluent/version/version.rb b/lib/fluent/version/version.rb new file mode 100644 index 0000000..b7d011f --- /dev/null +++ b/lib/fluent/version/version.rb @@ -0,0 +1,8 @@ +## Copyright (c) 2021, 2024 Oracle and/or its affiliates. +## The Universal Permissive License (UPL), Version 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +# frozen_string_literal: true + +module Version + VERSION = "2.0.8".freeze +end