diff --git a/build/defaultOfflinePackagePlugins.txt b/build/defaultOfflinePackagePlugins.txt index eedf1d2f1..b54bd960d 100644 --- a/build/defaultOfflinePackagePlugins.txt +++ b/build/defaultOfflinePackagePlugins.txt @@ -1,51 +1 @@ -filter-plugin/logstash-filter-mysql-guardium/logstash-filter-mysql_filter_guardium -filter-plugin/logstash-filter-oua-guardium/logstash-filter-oua_filter -filter-plugin/logstash-filter-mariadb-aws-guardium/logstash-filter-awsmariadb_guardium_filter -filter-plugin/logstash-filter-progressdb-guardium/logstash-filter-progress_guardium_plugin_filter -filter-plugin/logstash-filter-redshift-aws-guardium/logstash-filter-redshift_guardium_connector -filter-plugin/logstash-filter-mongodb-guardium/logstash-filter-mongodb_guardium_filter -filter-plugin/logstash-filter-s3-guardium/logstash-filter-logstash_filter_s3_guardium -filter-plugin/logstash-filter-hdfs-guardium/logstash-filter-hdfs_guardium_filter -filter-plugin/logstash-filter-mysql-percona-guardium/logstash-filter-mysql_percona_filter -filter-plugin/logstash-filter-generic-guardium/logstash-filter-generic_guardium_filter -filter-plugin/logstash-filter-saphana-guardium/logstash-filter-saphana_guardium_plugin_filter -filter-plugin/logstash-filter-cassandra-guardium/logstash-filter-cassandra_guardium_plugin_filter -filter-plugin/logstash-filter-aurora-mysql-guardium/logstash-filter-auroramysqlguardiumpluginfilter -filter-plugin/logstash-filter-dynamodb-guardium/logstash-filter-dynamodb_guardium_plugin_filter -filter-plugin/logstash-filter-neptune-aws-guardium/logstash-filter-neptune_guardium_filter -filter-plugin/logstash-filter-cockroachdb-guardium/logstash-filter-cockroachdb_guardium_filter -filter-plugin/logstash-filter-couchbasedb-guardium/logstash-filter-couchbasedb_guardium_plugin_filter -filter-plugin/logstash-filter-couchdb-guardium/logstash-filter-couchdb_guardium_filter -filter-plugin/logstash-filter-mariadb-guardium/logstash-filter-mariadb_guardium_filter -filter-plugin/logstash-filter-azure-apachesolr-guardium/logstash-filter-apache_solr_azure_connector -filter-plugin/logstash-filter-onPremGreenplumdb-guardium/logstash-filter-greenplumdb_guardium_filter -filter-plugin/logstash-filter-documentdb-aws-guardium/logstash-filter-documentdb_guardium_filter -filter-plugin/logstash-filter-azure-postgresql-guardium/logstash-filter-azure_postgresql_guardium_plugin_filter -filter-plugin/logstash-filter-azure-sql-guardium/logstash-filter-azuresql_guardium_plugin_filter -filter-plugin/logstash-filter-neo4j-guardium/logstash-filter-neodb_guardium_filter -filter-plugin/logstash-filter-snowflake-guardium/logstash-filter-guardium_snowflake_filter filter-plugin/logstash-filter-yugabyte-guardium/logstash-filter-yugabytedb_guardium_filter -filter-plugin/logstash-filter-teradatadb-guardium/logstash-filter-teradatadb_guardium_plugin_filter -filter-plugin/logstash-filter-pubsub-apachesolr-guardium/logstash-filter-apache_solr_gcp_connector -filter-plugin/logstash-filter-pubsub-bigquery-guardium/logstash-filter-big_query_guardium_filter -filter-plugin/logstash-filter-pubsub-bigtable-guardium/logstash-filter-big_table_guardium_filter -filter-plugin/logstash-filter-pubsub-firebase-realtime-guardium/logstash-filter-fire_base_guardium_filter -filter-plugin/logstash-filter-pubsub-firestore-guardium/logstash-filter-fire_store_guardium_filter -filter-plugin/logstash-filter-pubsub-spanner-guardium/logstash-filter-spanner_db_guardium_filter -filter-plugin/logstash-filter-cosmos-azure-guardium/logstash-filter-azure_cosmos_guardium_filter 
-filter-plugin/logstash-filter-intersystems-iris-guardium/logstash-filter-intersystems_iris_guardium_filter -filter-plugin/logstash-filter-postgres-ibmcloud-guardium/logstash-filter-icd_postgresql_guardium_filter -filter-plugin/logstash-filter-mysql-azure-guardium/logstash-filter-azure_mysql_guardium_filter -filter-plugin/logstash-filter-scylldb-guardium/logstash-filter-scylladb_guardium_filter -filter-plugin/logstash-filter-databricks-guardium/logstash-filter-databricks_guardium_filter -filter-plugin/logstash-filter-trino-guardium/logstash-filter-trino_guardium_filter -filter-plugin/logstash-filter-capella-guardium/logstash-filter-capella_guardium_filter -filter-plugin/logstash-filter-opensearch-guardium/logstash-filter-opensearch_guardium_filter -filter-plugin/logstash-filter-milvus-guardium/logstash-filter-milvus_guardium_filter -filter-plugin/logstash-filter-singlestore-guardium/logstash-filter-singlestoredb_guardium_filter -filter-plugin/logstash-filter-alloydb-guardium/logstash-filter-alloydb_guardium_filter -input-plugin/logstash-input-couchbase-capella/logstash-input-couchbase_capella_input -input-plugin/logstash-input-http/logstash-offline-input-http-plugins -input-plugin/logstash-input-mongo-atlas/logstash-input-mongo_atlas_input -input-plugin/logstash-input-s3sqs/logstash-input-s3_sqs_input - diff --git a/build/pluginsToBuild_GDP.txt b/build/pluginsToBuild_GDP.txt index a4f5fa82d..a66b86f6c 100644 --- a/build/pluginsToBuild_GDP.txt +++ b/build/pluginsToBuild_GDP.txt @@ -1,50 +1 @@ -filter-plugin/logstash-filter-mongodb-guardium -filter-plugin/logstash-filter-cosmos-azure-guardium -filter-plugin/logstash-filter-oua-guardium -filter-plugin/logstash-filter-mariadb-aws-guardium -filter-plugin/logstash-filter-redshift-aws-guardium -filter-plugin/logstash-filter-progressdb-guardium -filter-plugin/logstash-filter-mysql-guardium -filter-plugin/logstash-filter-s3-guardium -filter-plugin/logstash-filter-hdfs-guardium -filter-plugin/logstash-filter-mysql-percona-guardium -filter-plugin/logstash-filter-generic-guardium -filter-plugin/logstash-filter-saphana-guardium -filter-plugin/logstash-filter-cassandra-guardium -filter-plugin/logstash-filter-aurora-mysql-guardium -filter-plugin/logstash-filter-dynamodb-guardium -filter-plugin/logstash-filter-neptune-aws-guardium -filter-plugin/logstash-filter-cockroachdb-guardium -filter-plugin/logstash-filter-couchbasedb-guardium -filter-plugin/logstash-filter-couchdb-guardium -filter-plugin/logstash-filter-mariadb-guardium -filter-plugin/logstash-filter-azure-apachesolr-guardium -filter-plugin/logstash-filter-onPremGreenplumdb-guardium -filter-plugin/logstash-filter-documentdb-aws-guardium -filter-plugin/logstash-filter-azure-postgresql-guardium -filter-plugin/logstash-filter-azure-sql-guardium -filter-plugin/logstash-filter-neo4j-guardium -filter-plugin/logstash-filter-snowflake-guardium -filter-plugin/logstash-filter-yugabyte-guardium -filter-plugin/logstash-filter-teradatadb-guardium -filter-plugin/logstash-filter-pubsub-apachesolr-guardium -filter-plugin/logstash-filter-pubsub-bigquery-guardium -filter-plugin/logstash-filter-pubsub-bigtable-guardium -filter-plugin/logstash-filter-pubsub-firebase-realtime-guardium -filter-plugin/logstash-filter-pubsub-firestore-guardium -filter-plugin/logstash-filter-pubsub-spanner-guardium -filter-plugin/logstash-filter-intersystems-iris-guardium -filter-plugin/logstash-filter-postgres-ibmcloud-guardium -filter-plugin/logstash-filter-mysql-azure-guardium -filter-plugin/logstash-filter-scylldb-guardium 
-filter-plugin/logstash-filter-databricks-guardium -filter-plugin/logstash-filter-trino-guardium -filter-plugin/logstash-filter-capella-guardium -filter-plugin/logstash-filter-opensearch-guardium -filter-plugin/logstash-filter-milvus-guardium -filter-plugin/logstash-filter-singlestore-guardium -filter-plugin/logstash-filter-alloydb-guardium -input-plugin/logstash-input-mongo-atlas -input-plugin/logstash-input-http -input-plugin/logstash-input-s3sqs -input-plugin/logstash-input-couchbase-capella +filter-plugin/logstash-filter-yugabyte-guardium \ No newline at end of file diff --git a/build/verifiedUCPlugins_gdp.txt b/build/verifiedUCPlugins_gdp.txt index 05c1c1376..dd0ae8c56 100644 --- a/build/verifiedUCPlugins_gdp.txt +++ b/build/verifiedUCPlugins_gdp.txt @@ -1,65 +1 @@ -#Filter plugins read to zip: -filter-plugin/logstash-filter-documentdb-aws-guardium/DocumentDBOverCloudwatchPackage -filter-plugin/logstash-filter-aurora-mysql-guardium/AuroraMysqlOverCloudwatchPackage -filter-plugin/logstash-filter-dynamodb-guardium/DynamodbOverCloudwatch/DynamodbOverCloudwatchPackage -#filter-plugin/logstash-filter-mariadb-aws-guardium/MariaDBOverCloudWatchPackage -filter-plugin/logstash-filter-mysql-aws-guardium/MysqlOverCloudwatchLogsPackage -filter-plugin/logstash-filter-neptune-aws-guardium/NeptuneOverCloudWatchPackage -filter-plugin/logstash-filter-postgres-guardium/PostgresOverCloudWatchPackageß -filter-plugin/logstash-filter-s3-guardium/S3OverCloudwatchLogsPackage -filter-plugin/logstash-filter-s3-guardium/S3OverSQSPackage -#Filebeat plug-ins -filter-plugin/logstash-filter-azure-apachesolr-guardium/ApacheSolrOverFilebeatPackage -filter-plugin/logstash-filter-cassandra-guardium/CassandraOverFilebeatPackage -filter-plugin/logstash-filter-couchbasedb-guardium/CouchbasedbOverFilebeatPackage -filter-plugin/logstash-filter-couchdb-guardium/CouchdbOverFilebeatPackage -filter-plugin/logstash-filter-hdfs-guardium/HdfsOverFilebeatPackage -filter-plugin/logstash-filter-mariadb-guardium/MariaDBOverFilebeatPackage -filter-plugin/logstash-filter-mongodb-guardium/MongodbOverFilebeatPackage -filter-plugin/logstash-filter-milvus-guardium/MilvusOverFilebeatPackage -filter-plugin/logstash-filter-mysql-guardium/MysqlOverFilebeatPackage -filter-plugin/logstash-filter-mysql-percona-guardium/MysqlPerconaOverFilebeatPackage -filter-plugin/logstash-filter-neo4j-guardium/NeodbOverFilebeatPackage -filter-plugin/logstash-filter-onPremGreenplumdb-guardium/GreenplumdbOverFilebeatPackage -filter-plugin/logstash-filter-onPremPostgres-guardium/PostgresOverFilebeatPackage -filter-plugin/logstash-filter-saphana-guardium/SaphanaOverFilebeatPackage -filter-plugin/logstash-filter-singlestore-guardium/SingleStoreOverFilebeatPackage -filter-plugin/logstash-filter-yugabyte-guardium/YugabytedbOverFilebeatPackage -#JDBC plug-ins -filter-plugin/logstash-filter-snowflake-guardium/SnowflakeOverJbdcPackage -filter-plugin/logstash-filter-azure-sql-guardium/AzureSQLOverJdbcPackage -filter-plugin/logstash-filter-mssql-guardium/MssqlAWSOverJdbcPackage -filter-plugin/logstash-filter-mssql-guardium/MssqlOnPremOverJdbcPackage -filter-plugin/logstash-filter-saphana-guardium/SaphanaOverJdbcPackage -#GCP plug-ins -filter-plugin/logstash-filter-pubsub-apachesolr-guardium/PubSubApacheSolrPackage -filter-plugin/logstash-filter-pubsub-bigquery-guardium/BigQueryOverPubSubPackage -filter-plugin/logstash-filter-pubsub-firebase-realtime-guardium/PubSubFirebasePackage -filter-plugin/logstash-filter-pubsub-firestore-guardium/PubSubFireStorePackage 
-filter-plugin/logstash-filter-pubsub-mysql-guardium/PubSubMySQLPackage -filter-plugin/logstash-filter-pubsub-postgresql-guardium/PubSubPostgreSQLPackage -filter-plugin/logstash-filter-pubsub-bigtable-guardium/gdp-pubsub-bigtable-package -filter-plugin/logstash-filter-alloydb-guardium/AlloyDBoverPubSubPackage -#Syslog plug-ins -filter-plugin/logstash-filter-cockroachdb-guardium/CockroachDBOverSyslogPackage -filter-plugin/logstash-filter-mongodb-guardium/MongoDBOverSyslogPackage -filter-plugin/logstash-filter-mysql-guardium/MySQLOverSyslogPackage -#Other -filter-plugin/logstash-filter-mongodb-guardium/MongodbOverMongoAtlasPackage -filter-plugin/logstash-filter-azure-postgresql-guardium/AzurePostgresqlOverAzureEventHub -filter-plugin/logstash-filter-databricks-guardium/AzureDatabricksOverAzureEventHub -filter-plugin/logstash-filter-trino-guardium/TrinoOverSyslogPackage -filter-plugin/logstash-filter-capella-guardium/CapellaCouchbaseOverCapellaPackage -filter-plugin/logstash-filter-opensearch-guardium/OpenSearchOverCloudwatchPackage -#Input plug-ins -input-plugin/logstash-input-azure-event-hubs/AzureEventHubsInputPackage -input-plugin/logstash-input-beats/FilebeatInputPackage -input-plugin/logstash-input-couchbase-capella/InputCouchbaseCapellaPackage -input-plugin/logstash-input-cloudwatch-logs/CloudwatchLogsInputPackage -input-plugin/logstash-input-jdbc/JdbcInputPackage -input-plugin/logstash-input-mongo-atlas/InputMongoAtlasPackage -input-plugin/logstash-input-sqs/SQSInputPackage -input-plugin/logstash-input-s3sqs/InputS3SQSPackage; -input-plugin/logstash-input-tcp-syslog/SyslogInputPackage -input-plugin/logstash-input-http/httpInputPackage -input-plugin/logstash-input-google-pubsub/GooglePubSubPackage +filter-plugin/logstash-filter-yugabyte-guardium/YugabytedbOverFilebeatPackage \ No newline at end of file diff --git a/build/verified_UC_plugins_full_list.txt b/build/verified_UC_plugins_full_list.txt index ad5c03d66..fd9480364 100644 --- a/build/verified_UC_plugins_full_list.txt +++ b/build/verified_UC_plugins_full_list.txt @@ -1,69 +1 @@ -#Filter plugins read to zip: -#The format is: path/to/package/dir;offline plugin name according to the zip name from the offline_packages file (optional) -filter-plugin/logstash-filter-documentdb-aws-guardium/DocumentDBOverCloudwatchPackage;logstash-filter-documentdb_guardium_filter.zip -filter-plugin/logstash-filter-aurora-mysql-guardium/AuroraMysqlOverCloudwatchPackage;logstash-filter-auroramysqlguardiumpluginfilter.zip -filter-plugin/logstash-filter-dynamodb-guardium/DynamodbOverCloudwatch/DynamodbOverCloudwatchPackage;logstash-filter-dynamodb_guardium_plugin_filter.zip -filter-plugin/logstash-filter-mariadb-aws-guardium/MariaDBOverCloudWatchPackage;logstash-filter-mariadb_guardium_filter.zip -filter-plugin/logstash-filter-mysql-aws-guardium/MysqlOverCloudwatchLogsPackage; -filter-plugin/logstash-filter-neptune-aws-guardium/NeptuneOverCloudWatchPackage;logstash-filter-neptune_guardium_filter.zip -filter-plugin/logstash-filter-postgres-guardium/PostgresOverCloudWatchPackage; -filter-plugin/logstash-filter-s3-guardium/S3OverCloudwatchLogsPackage;logstash-filter-logstash_filter_s3_guardium.zip -filter-plugin/logstash-filter-s3-guardium/S3OverSQSPackage;logstash-filter-logstash_filter_s3_guardium.zip -#Filebeat plug-ins -filter-plugin/logstash-filter-azure-apachesolr-guardium/ApacheSolrOverFilebeatPackage;logstash-filter-apache_solr_azure_connector.zip 
-filter-plugin/logstash-filter-cassandra-guardium/CassandraOverFilebeatPackage;logstash-filter-cassandra_guardium_plugin_filter.zip -filter-plugin/logstash-filter-couchbasedb-guardium/CouchbasedbOverFilebeatPackage;logstash-filter-couchbasedb_guardium_plugin_filter.zip -filter-plugin/logstash-filter-couchdb-guardium/CouchdbOverFilebeatPackage;logstash-filter-couchdb_guardium_filter.zip -filter-plugin/logstash-filter-hdfs-guardium/HdfsOverFilebeatPackage;logstash-filter-hdfs_guardium_filter.zip -filter-plugin/logstash-filter-mariadb-guardium/MariaDBOverFilebeatPackage;logstash-filter-mariadb_guardium_filter.zip -filter-plugin/logstash-filter-mongodb-guardium/MongodbOverFilebeatPackage;logstash-filter-mongodb_guardium_filter.zip -filter-plugin/logstash-filter-milvus-guardium/MilvusOverFilebeatPackage;logstash-filter-milvus_guardium_filter.zip -filter-plugin/logstash-filter-mysql-guardium/MysqlOverFilebeatPackage;logstash-filter-mysql_filter_guardium.zip -filter-plugin/logstash-filter-mysql-percona-guardium/MysqlPerconaOverFilebeatPackage;logstash-filter-mysql_percona_filter.zip -filter-plugin/logstash-filter-neo4j-guardium/NeodbOverFilebeatPackage;logstash-filter-neodb_guardium_filter.zip -filter-plugin/logstash-filter-onPremGreenplumdb-guardium/GreenplumdbOverFilebeatPackage;logstash-filter-greenplumdb_guardium_filter.zip -filter-plugin/logstash-filter-onPremPostgres-guardium/PostgresOverFilebeatPackage; -filter-plugin/logstash-filter-saphana-guardium/SaphanaOverFilebeatPackage;logstash-filter-saphana_guardium_plugin_filter.zip -filter-plugin/logstash-filter-singlestore-guardium/SingleStoreOverFilebeatPackage;logstash-filter-singlestoredb_guardium_filter.zip -filter-plugin/logstash-filter-yugabyte-guardium/YugabytedbOverFilebeatPackage;logstash-filter-yugabytedb_guardium_filter.zip -#JDBC plug-ins -filter-plugin/logstash-filter-snowflake-guardium/SnowflakeOverJbdcPackage; -filter-plugin/logstash-filter-azure-sql-guardium/AzureSQLOverJdbcPackage;logstash-filter-azuresql_guardium_plugin_filter.zip -filter-plugin/logstash-filter-mssql-guardium/MssqlAWSOverJdbcPackage; -filter-plugin/logstash-filter-mssql-guardium/MssqlOnPremOverJdbcPackage; -filter-plugin/logstash-filter-saphana-guardium/SaphanaOverJdbcPackage;logstash-filter-saphana_guardium_plugin_filter.zip -filter-plugin/logstash-filter-intersystems-iris-guardium/gi-filter-intersystems-iris-package;gi-filter-intersystems-iris-package.zip -#GCP plug-ins -filter-plugin/logstash-filter-alloydb-guardium/AlloyDBoverPubSubPackage;logstash-filter-alloydb_guardium_filter.zip -filter-plugin/logstash-filter-pubsub-apachesolr-guardium/PubSubApacheSolrPackage;logstash-filter-apache_solr_gcp_connector.zip -filter-plugin/logstash-filter-pubsub-bigquery-guardium/BigQueryOverPubSubPackage;logstash-filter-big_query_guardium_filter.zip -filter-plugin/logstash-filter-pubsub-firebase-realtime-guardium/PubSubFirebasePackage;logstash-filter-fire_base_guardium_filter.zip -filter-plugin/logstash-filter-pubsub-firestore-guardium/PubSubFireStorePackage;logstash-filter-fire_store_guardium_filter.zip -filter-plugin/logstash-filter-pubsub-mysql-guardium/PubSubMySQLPackage;logstash-filter-pubsub-mysql-guardium.zip -filter-plugin/logstash-filter-pubsub-postgresql-guardium/PubSubPostgreSQLPackage;logstash-filter-pubsub-postgresql-guardium.zip -#Pubsub plug-ins -filter-plugin/logstash-filter-pubsub-spanner-guardium/gi-pubsub-spanner-package;logstash-filter-spanner_db_guardium_filter.zip 
-filter-plugin/logstash-filter-pubsub-firebase-realtime-guardium/gi-pubsub-firebase-package;logstash-filter-fire_base_guardium_filter.zip
-filter-plugin/logstash-filter-pubsub-firestore-guardium/gi-pubsub-firestore-package;logstash-filter-fire_store_guardium_filter.zip
-filter-plugin/logstash-filter-pubsub-mysql-guardium/gi-pubsub-mysql-package;logstash-filter-pubsub-mysql-guardium.zip
-filter-plugin/logstash-filter-pubsub-bigquery-guardium/gi-pubsub-bigquery-package;logstash-filter-big_query_guardium_filter.zip
-filter-plugin/logstash-filter-pubsub-apachesolr-guardium/gi-pubsub-apachsolr-package;logstash-filter-apache_solr_gcp_connector.zip
-filter-plugin/logstash-filter-pubsub-bigtable-guardium/gi-pubsub-bigtable-package;logstash-filter-big_table_guardium_filter.zip
-#Syslog plug-ins
-filter-plugin/logstash-filter-cockroachdb-guardium/CockroachDBOverSyslogPackage;logstash-filter-cockroachdb_guardium_filter.zip
-filter-plugin/logstash-filter-onPremPostgres-guardium/PostgresOverSyslogPackage;
-filter-plugin/logstash-filter-yugabyte-guardium/YugabyteOverSyslogPackage;
-#Other
-filter-plugin/logstash-filter-mongodb-guardium/MongodbOverMongoAtlasPackage;logstash-filter-mongodb_guardium_filter.zip
-filter-plugin/logstash-filter-azure-postgresql-guardium/AzurePostgresqlOverAzureEventHub;logstash-filter-azure_postgresql_guardium_plugin_filter.zip
-#Input plug-ins
-input-plugin/logstash-input-azure-event-hubs/AzureEventHubsInputPackage;
-input-plugin/logstash-input-beats/FilebeatInputPackage;
-input-plugin/logstash-input-cloudwatch-logs/CloudwatchLogsInputPackage
-input-plugin/logstash-input-jdbc/JdbcInputPackage;
-input-plugin/logstash-input-mongo-atlas/InputMongoAtlasPackage;logstash-input-mongo_atlas_input.zip
-input-plugin/logstash-input-sqs/SQSInputPackage;
-input-plugin/logstash-input-s3sqs/InputS3SQSPackage;
-input-plugin/logstash-input-tcp-syslog/TCPInputPackage;
-input-plugin/logstash-input-google-pubsub/GooglePubSubPackage;
-input-plugin/logstash-input-google-pubsub/gi-pubsub-package;
+filter-plugin/logstash-filter-mysql-aws-guardium/MySQLOverS3SQS;logstash-filter-mysql_guardium_plugin_filter.zip
diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/AWSMySQLS3SQS.md b/filter-plugin/logstash-filter-mysql-aws-guardium/AWSMySQLS3SQS.md
new file mode 100644
index 000000000..3a6fd47d3
--- /dev/null
+++ b/filter-plugin/logstash-filter-mysql-aws-guardium/AWSMySQLS3SQS.md
@@ -0,0 +1,32 @@
+# AWS MySQL S3SQS Setup
+
+Follow the [S3SQSWithFirehose](../../input-plugin/logstash-input-s3sqs/S3SQSWithFirehose.md) guide to set up S3SQS for AWS MySQL using Kinesis Data Firehose; it covers the full setup and configuration details.
+
+## Configuring the AWS MySQL filter in Guardium
+
+The Guardium universal connector is the Guardium entry point for native audit logs. It identifies and parses the received events, and converts them to a standard Guardium format. The output of the Guardium universal connector is forwarded to the Guardium sniffer on the collector for policy and auditing enforcement.
+
+### Before you begin
+
+* Configure the policies you need. For more information, see [Policies](/docs/#policies).
+* You must have permissions for the S-Tap Management role. By default, the admin user is assigned the S-Tap Management role.
+* Download the [logstash-filter-mysql_guardium_plugin_filter](https://github.com/IBM/universal-connectors/releases) plug-in.
+* Download the [logstash-input-s3_sqs](https://github.com/IBM/universal-connectors/releases) plug-in.
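For orientation before the procedure: steps 7 and 8 below reduce to pasting the bodies of the `input` and `filter` sections from [MySQLOverS3SQS.conf](./MySQLOverS3SQS/MySQLOverS3SQS.conf) into the connector configuration, without the `input{`/`filter{` wrappers. A minimal sketch of the pasted input portion follows; the queue URL, region, credentials, and numeric values are placeholders, not working values:

```
s3_sqs {
  queue_url => "https://sqs.us-east-1.amazonaws.com/123456789012/mysql-audit-queue" # placeholder
  region => "us-east-1"               # placeholder
  access_key_id => "<ACCESS_KEY_ID>"  # placeholder credential
  secret_access_key => "<SECRET_KEY>" # placeholder credential
  role_arn => ""                      # leave empty if not using role-based access
  max_messages => 10                  # illustrative value
  wait_time => 20                     # must be >= 0 and <= 20
  polling_frequency => 20             # illustrative value
  type => "S3SQS_MYSQL"               # must match the type tested in the filter section
}
```

The `type` value ties the input to the `if [type] == "S3SQS_MYSQL"` guard in the filter section, which is why step 9 asks you to keep the two aligned.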
+
+### Procedure
+
+1. On the collector, go to **Setup** > **Tools and Views** > **Configure Universal Connector**.
+2. Enable the universal connector if it is disabled.
+3. Click **Upload File** and select the offline [logstash-filter-mysql_guardium_plugin_filter](https://github.com/IBM/universal-connectors/releases) plug-in. After it is uploaded, click **OK**. This step is not necessary for Guardium Data Protection v11.0p490 or later, v11.0p540 or later, or v12.0 or later.
+4. Click **Upload File** and select the offline [logstash-input-s3_sqs](https://github.com/IBM/universal-connectors/releases) plug-in. After it is uploaded, click **OK**. This step is not necessary for Guardium Data Protection v11.0p490 or later, v11.0p540 or later, or v12.0 or later.
+5. Click the **Plus** sign to open the Connector Configuration dialog box.
+6. In the **Connector name** field, enter a name.
+7. Update the input section with the `input` section of the [MySQLOverS3SQS.conf](./MySQLOverS3SQS/MySQLOverS3SQS.conf) file, omitting the keyword `input{` at the beginning and its corresponding `}` at the end. More details on configuring the relevant input plug-in can be found [here](../../input-plugin/logstash-input-s3sqs/README.md).
+8. Update the filter section with the `filter` section of the [MySQLOverS3SQS.conf](./MySQLOverS3SQS/MySQLOverS3SQS.conf) file, omitting the keyword `filter{` at the beginning and its corresponding `}` at the end.
+9. Make sure that the `type` fields in the `input` and `filter` configuration sections align. This field must be unique for each connector added to the system. Starting with v12p20 and v12.1, this is no longer required.
+10. Click **Save**. Guardium validates the new connector and displays it in the Configure Universal Connector page.
+11. After the offline plug-in is installed and the configuration is uploaded and saved on the Guardium machine, restart the universal connector by using the **Disable/Enable** button.
+
+## Limitations
+
+- When a login attempt fails, the MySQL audit log does not capture the database name. As a result, a new S-TAP entry may be created with the host displayed as `:unknown`.
\ No newline at end of file
diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/CHANGELOG.md b/filter-plugin/logstash-filter-mysql-aws-guardium/CHANGELOG.md
new file mode 100644
index 000000000..29a17075a
--- /dev/null
+++ b/filter-plugin/logstash-filter-mysql-aws-guardium/CHANGELOG.md
@@ -0,0 +1,9 @@
+# Changelog
+Notable changes will be documented in this file.
+
+## [Unreleased]
+## [1.0.1]
+- Updated the filter configuration to handle grok parsing errors
+
+## [1.0.0] - 2025-08-25
+
diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/Gemfile b/filter-plugin/logstash-filter-mysql-aws-guardium/Gemfile
new file mode 100644
index 000000000..ea0d321ef
--- /dev/null
+++ b/filter-plugin/logstash-filter-mysql-aws-guardium/Gemfile
@@ -0,0 +1,12 @@
+# AUTOGENERATED BY THE GRADLE SCRIPT. EDITS WILL BE OVERWRITTEN.
+source 'https://rubygems.org'
+
+gemspec
+
+logstash_path = ENV["LOGSTASH_PATH"] || "../../logstash"
+use_logstash_source = ENV["LOGSTASH_SOURCE"] && ENV["LOGSTASH_SOURCE"].to_s == "1"
+
+if Dir.exist?(logstash_path) && use_logstash_source
+  gem 'logstash-core', :path => "#{logstash_path}/logstash-core"
+  gem 'logstash-core-plugin-api', :path => "#{logstash_path}/logstash-core-plugin-api"
+end
diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/MySQLOverS3SQS/MySQLOverS3SQS.conf b/filter-plugin/logstash-filter-mysql-aws-guardium/MySQLOverS3SQS/MySQLOverS3SQS.conf
new file mode 100644
index 000000000..c6c9ff625
--- /dev/null
+++ b/filter-plugin/logstash-filter-mysql-aws-guardium/MySQLOverS3SQS/MySQLOverS3SQS.conf
@@ -0,0 +1,119 @@
+input {
+  s3_sqs {
+    queue_url => ""           # e.g. https://sqs..amazonaws.com//
+    region => ""
+    access_key_id => ""
+    secret_access_key => ""
+    role_arn => ""            # Leave empty if not using role-based access
+    max_messages =>
+    wait_time =>              # Must be >= 0 and <= 20
+    polling_frequency =>
+    type => "S3SQS_MYSQL"
+  }
+}
+
+filter {
+  if [type] == "S3SQS_MYSQL" {
+
+    # Step 1: Parse the JSON message from the S3 event into [cloudwatch]
+    json {
+      source => "message"
+      target => "cloudwatch"
+    }
+
+    # Step 2: Split the logEvents array into separate events
+    split {
+      field => "[cloudwatch][logEvents]"
+    }
+
+    # Step 3: Extract each log message and promote it to the top-level [message]
+    mutate {
+      rename => { "[cloudwatch][logEvents][message]" => "message" }
+      add_field => {
+        "logGroup" => "%{[cloudwatch][logGroup]}"
+        "logStream" => "%{[cloudwatch][logStream]}"
+      }
+    }
+
+    # Step 4: Drop known noise events based on [message] content
+    if [message] =~ /(session\.transaction_read_only|information_schema\.TABLES|GLOBAL\.read_only|information_schema\.rds_events_threads_waits_current|SELECT\s+1|oscar_local_only_replica_host_status|replica_host_status)/ {
+      drop { }
+    }
+
+    # Step 5: Parse the message into fields using grok (use the exact field names the plugin expects)
+    grok {
+      match => {
+        "message" => [
+          # Format 1: QUERY with database and query
+          "%{NUMBER:timestamp_micro},%{DATA:db_instance},%{DATA:user},%{DATA:client_ip},%{NUMBER:thread_id},%{NUMBER:query_id},%{WORD:command},%{DATA:database},'%{GREEDYDATA:query}',%{NUMBER:status_code}",
+
+          # Format 2: QUERY without database (empty field - double comma)
+          "%{NUMBER:timestamp_micro},%{DATA:db_instance},%{DATA:user},%{DATA:client_ip},%{NUMBER:thread_id},%{NUMBER:query_id},%{WORD:command},,'%{GREEDYDATA:query}',%{NUMBER:status_code}",
+
+          # Format 3: READ/WRITE operations (ends with a comma, no query)
+          "%{NUMBER:timestamp_micro},%{DATA:db_instance},%{DATA:user},%{DATA:client_ip},%{NUMBER:thread_id},%{NUMBER:query_id},%{WORD:command},%{DATA:database},%{DATA:table_name},",
+
+          # Format 4: CONNECT/DISCONNECT (multiple empty fields)
+          "%{NUMBER:timestamp_micro},%{DATA:db_instance},%{DATA:user},%{DATA:client_ip},%{NUMBER:thread_id},%{NUMBER:query_id},%{WORD:command},,,,%{NUMBER:status_code}"
+        ]
+      }
+      remove_field => ["message"]
+      tag_on_failure => ["_grokparsefailure_custom"]
+    }
+
+    # Step 5.1: Convert the microsecond timestamp to a datetime
+    date {
+      match => ["timestamp_micro", "UNIX_MS"]
+      target => "timestamp"
+    }
+
+    # Step 5.2: Drop events that failed grok parsing
+    if "_grokparsefailure_custom" in [tags] {
+      drop { }
+    }
+
+    # Step 5.3: Drop events from the rdsadmin user (case-insensitive)
+    if [user] =~ /(?i)rdsadmin/ {
+      drop { }
+    }
+
+    # Step 6: Set defaults for missing fields (CRITICAL: prevent NullPointerException)
+    if ![user] or [user] == "" {
+      mutate { replace => { "user" => "unknown" } }
+    }
+
+    if ![command] or [command] == "" {
+      mutate { replace => { "command" => "UNKNOWN" } }
+    }
+
+    if ![database] or [database] == "" {
+      mutate { replace => { "database" => "unknown" } }
+    }
+
+    # CRITICAL: Set a default status_code to prevent a NullPointerException in Parser.java:83
+    if ![status_code] {
+      mutate { add_field => { "status_code" => "0" } }
+    }
+
+    # Step 7: Escape quotes in the query field to prevent JSON malformation
+    if [query] {
+      ruby {
+        code => '
+          query = event.get("query")
+          if query
+            query = query.gsub("\\", "\\\\\\\\").gsub("\"", "\\\\\"")
+            event.set("query", query)
+          end
+        '
+      }
+    }
+
+    # Step 8: Run the Guardium plug-in
+    mysql_guardium_plugin_filter{}
+
+    # Optional: Keep only the GuardRecord field if desired
+    prune {
+      whitelist_names => [ "GuardRecord" ]
+    }
+  }
+}
diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/VERSION b/filter-plugin/logstash-filter-mysql-aws-guardium/VERSION
new file mode 100644
index 000000000..3eefcb9dd
--- /dev/null
+++ b/filter-plugin/logstash-filter-mysql-aws-guardium/VERSION
@@ -0,0 +1 @@
+1.0.0
diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/build.gradle b/filter-plugin/logstash-filter-mysql-aws-guardium/build.gradle
new file mode 100644
index 000000000..bb1d7c456
--- /dev/null
+++ b/filter-plugin/logstash-filter-mysql-aws-guardium/build.gradle
@@ -0,0 +1,174 @@
+import java.nio.file.Files
+import static java.nio.file.StandardCopyOption.REPLACE_EXISTING
+
+apply plugin: 'java'
+apply plugin: 'com.github.johnrengelman.shadow'
+apply plugin: 'jacoco'
+apply plugin: 'org.barfuin.gradle.jacocolog'
+
+// Load Ruby plugin support
+apply from: LOGSTASH_CORE_PATH + "/../rubyUtils.gradle"
+
+// Plugin Info
+group = 'com.ibm.guardium.mysql'
+version = file("VERSION").text.trim()
+description = "mysql-Guardium filter plugin"
+pluginInfo.licenses = ['Apache-2.0']
+pluginInfo.longDescription = "This gem is a Logstash mysql filter plugin required to be installed as part of IBM Security Guardium, Guardium Universal Connector configuration. This gem is not a stand-alone program."
+pluginInfo.authors = ['IBM'] +pluginInfo.email = [''] +pluginInfo.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html" +pluginInfo.pluginType = "filter" +pluginInfo.pluginClass = "MySQLGuardiumPluginFilter" +pluginInfo.pluginName = "mysql_guardium_plugin_filter" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 + +// Code coverage +def jacocoVersion = '0.8.4' +def minimumCoverageStr = System.getenv("MINIMUM_COVERAGE") ?: "50.0%" +if (minimumCoverageStr.endsWith("%")) { + minimumCoverageStr = minimumCoverageStr.substring(0, minimumCoverageStr.length() - 1) +} +def minimumCoverage = Float.valueOf(minimumCoverageStr) / 100 + +buildscript { + repositories { + maven { url "https://plugins.gradle.org/m2/" } + mavenCentral() + jcenter() + } + dependencies { + classpath 'com.github.jengelman.gradle.plugins:shadow:4.0.4' + classpath 'org.barfuin.gradle.jacocolog:gradle-jacoco-log:3.0.0-RC2' + } +} + +repositories { + mavenCentral() +} + +dependencies { + // Runtime dependencies + implementation 'commons-validator:commons-validator:1.7' + implementation 'org.apache.logging.log4j:log4j-core:2.17.1' + implementation 'org.apache.commons:commons-text:1.10.0' + implementation 'com.google.code.gson:gson:2.8.9' + implementation fileTree(dir: LOGSTASH_CORE_PATH, include: "build/libs/logstash-core*.jar") + implementation fileTree(dir: GUARDIUM_UNIVERSALCONNECTOR_COMMONS_PATH, include: "guardium-universalconnector-commons*.jar") + + // Test dependencies + testImplementation 'org.junit.jupiter:junit-jupiter:5.10.0' + testImplementation 'org.mockito:mockito-core:5.12.0' + testRuntimeOnly 'org.junit.platform:junit-platform-launcher:1.10.0' + testImplementation 'org.jruby:jruby-complete:9.2.7.0' + testImplementation fileTree(dir: GUARDIUM_UNIVERSALCONNECTOR_COMMONS_PATH, include: "guardium-universalconnector-commons*.jar") +} + +test { + useJUnitPlatform() + testLogging { + events "passed", "skipped", "failed" + exceptionFormat "full" + showStandardStreams = true + } + finalizedBy jacocoTestReport +} + +jacoco { + toolVersion = "${jacocoVersion}" + reportsDir = file("$buildDir/reports/jacoco") +} + +jacocoTestReport { + reports { + html.enabled true + xml.enabled true + csv.enabled true + html.destination file("${buildDir}/reports/jacoco") + csv.destination file("${buildDir}/reports/jacoco/all.csv") + } + executionData.from fileTree(dir: "${buildDir}/jacoco/", includes: ['**/*.exec']) + afterEvaluate { + classDirectories.setFrom(files(classDirectories.files.collect { + fileTree(dir: it, exclude: []) + })) + } + doLast { + println "Code coverage report -> file://${buildDir}/reports/jacoco/index.html" + } +} + +jacocoTestCoverageVerification { + violationRules { + rule { + limit { + minimum = minimumCoverage + } + } + } + executionData.from fileTree(dir: "${buildDir}/jacoco/", includes: ['**/*.exec']) + afterEvaluate { + classDirectories.setFrom(files(classDirectories.files.collect { + fileTree(dir: it, exclude: []) + })) + } +} + +check.dependsOn jacocoTestCoverageVerification, jacocoTestReport + +shadowJar { + classifier = null +} + +// Custom Tasks +tasks.register("vendor") { + dependsOn shadowJar + doLast { + String vendorPathPrefix = "vendor/jar-dependencies" + String projectGroupPath = project.group.replaceAll('\\.', '/') + File projectJarFile = file("${vendorPathPrefix}/${projectGroupPath}/${pluginInfo.pluginFullName()}/${project.version}/${pluginInfo.pluginFullName()}-${project.version}.jar") + projectJarFile.mkdirs() + 
Files.copy(file("$buildDir/libs/${project.name}-${project.version}.jar").toPath(), projectJarFile.toPath(), REPLACE_EXISTING) + validatePluginJar(projectJarFile, project.group) + } +} + +tasks.register("generateRubySupportFiles") { + doLast { + generateRubySupportFilesForPlugin(project.description, project.group, version) + } +} + +tasks.register("removeObsoleteJars") { + doLast { + new FileNameFinder().getFileNames( + projectDir.toString(), + "vendor/**/" + pluginInfo.pluginFullName() + "*.jar", + "vendor/**/" + pluginInfo.pluginFullName() + "-" + version + ".jar").each { f -> + delete f + } + } +} + +tasks.register("gem") { + dependsOn = [downloadAndInstallJRuby, removeObsoleteJars, vendor, generateRubySupportFiles] + doLast { + buildGem(projectDir, buildDir, pluginInfo.pluginFullName() + ".gemspec") + } +} + +clean { + delete "${projectDir}/Gemfile" + delete "${projectDir}/${pluginInfo.pluginFullName()}.gemspec" + delete "${projectDir}/lib/" + delete "${projectDir}/vendor/" + new FileNameFinder().getFileNames(projectDir.toString(), pluginInfo.pluginFullName() + "-*.*.*.gem").each { + delete it + } +} + +tasks.withType(JavaCompile) { + options.encoding = 'UTF-8' +} \ No newline at end of file diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/gradle/wrapper/gradle-wrapper.jar b/filter-plugin/logstash-filter-mysql-aws-guardium/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 000000000..7454180f2 Binary files /dev/null and b/filter-plugin/logstash-filter-mysql-aws-guardium/gradle/wrapper/gradle-wrapper.jar differ diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/gradle/wrapper/gradle-wrapper.properties b/filter-plugin/logstash-filter-mysql-aws-guardium/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 000000000..aa991fcea --- /dev/null +++ b/filter-plugin/logstash-filter-mysql-aws-guardium/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.4.2-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/gradlew b/filter-plugin/logstash-filter-mysql-aws-guardium/gradlew new file mode 100755 index 000000000..cccdd3d51 --- /dev/null +++ b/filter-plugin/logstash-filter-mysql-aws-guardium/gradlew @@ -0,0 +1,172 @@ +#!/usr/bin/env sh + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). 
+cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting 
and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "$@" diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/gradlew.bat b/filter-plugin/logstash-filter-mysql-aws-guardium/gradlew.bat new file mode 100644 index 000000000..107acd32c --- /dev/null +++ b/filter-plugin/logstash-filter-mysql-aws-guardium/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/gradlew.unix b/filter-plugin/logstash-filter-mysql-aws-guardium/gradlew.unix new file mode 100644 index 000000000..cccdd3d51 --- /dev/null +++ b/filter-plugin/logstash-filter-mysql-aws-guardium/gradlew.unix @@ -0,0 +1,172 @@ +#!/usr/bin/env sh + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "$@" diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/mainREADME.md b/filter-plugin/logstash-filter-mysql-aws-guardium/mainREADME.md new file mode 100644 index 000000000..82e9a3eaf --- /dev/null +++ b/filter-plugin/logstash-filter-mysql-aws-guardium/mainREADME.md @@ -0,0 +1,13 @@ +# AWS MySQL Universal Connector + +## Follow this link to set up and use AWS MySQL Universal Connector over CloudWatch Logstash Plugin + +[AwsMySqlOverCloudwatch](./README.md) + + +## Follow this link to set up and use RDS MySQL Universal Connector over CloudWatch Connect + +[RDSMySqlOverConnectCloudwatch](../../docs/KafkaBasedUCs/RDSMySqlCloudwatchKafkaConnect.md) + + + diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/src/main/java/com/ibm/guardium/mysql/Constants.java b/filter-plugin/logstash-filter-mysql-aws-guardium/src/main/java/com/ibm/guardium/mysql/Constants.java new file 
mode 100644 index 000000000..5b760ae6d --- /dev/null +++ b/filter-plugin/logstash-filter-mysql-aws-guardium/src/main/java/com/ibm/guardium/mysql/Constants.java @@ -0,0 +1,126 @@ +package com.ibm.guardium.mysql; + + +public interface Constants { + + + String NOT_AVAILABLE = "NA"; + + + + String LOGSTASH_TAG_SKIP_NOT_PROGRESS = "LOGSTASH_TAG_SKIP_NOT_PROGRESS"; + + + + String SERVER_IP = "server_ip"; + + + + String SERVER_PORT = "portNum"; + + + + + String SERVER_HOST = "host"; //db machine + + String CLIENT_HOST = "Client_Name"; + + String CLIENT_SESSION_ID = "clientSessionId"; + + + + + + + + + + String SOURCE_PROGRAM = "SOURCE_PROGRAM"; + + String USER_ID = "user"; + + + + + + String EVENT_CONTEXT = "eventContext"; + + + + String DATE_TIME_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"; + + String LOGIN_FAILED = "LOGIN_FAILED"; + + + + String SQL_TEXT = "sql_text"; + + + + String EVENT_NOW_MYSQL = "MySQL data {} "; + + + String DATABASE_NAME = "database"; + + String DATABASE_USER_NAME = "user"; + + + String TIMESTAMP = "timestamp"; + + int MIN_OFFSET_FROM_GMT = 0; + + int MIN_DST = 0; + + String CLIENT_IP = "client_ip"; + + int DEFAULT_PORT = -1; + + String DEFAULT_IP = "0.0.0.0"; + + String UNKNOWN_STRING = ""; + + String SERVER_TYPE = "MySQL"; + + String ACCOUNT_ID = "account_id"; + + String DB_PROTOCOL = "MYSQL"; + + String TEXT = "TEXT"; + + String QUERY = "query"; + + String QUERY_CONST = "QUERY"; + + String STATUS_CODE = "status_code"; + + String COMMAND_TYPE = "command_type"; + + String FAILED_CONNECT = "FAILED_CONNECT"; + + String ACTION = "action"; + + String DESCRIPTION_MESSAGE = "The Query has failed with Error code "; + + String CONNECTION_FAILED_DESCRIPTION_MESSAGE = "Login Connection request failed with Error code "; + + String GUARD_RECORD = "GuardRecord {}"; + + String EVENT_DATA = "Event Data {}"; + + String RECORDS = "records"; + + String TIMESTAMP_ERROR = "Invalid timestamp format: {}"; + + String COMMAND_UNKNOWN = "UNKNOWN"; + + String COMMAND = "command"; + + String RDS_ADMIN = "rdsadmin"; + + String LOG_GROUP = "logGroup"; + + String MESSAGE = "message"; + + String SQL_ERROR = "SQL_ERROR"; +} + diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/src/main/java/com/ibm/guardium/mysql/MySQLGuardiumPluginFilter.java b/filter-plugin/logstash-filter-mysql-aws-guardium/src/main/java/com/ibm/guardium/mysql/MySQLGuardiumPluginFilter.java new file mode 100644 index 000000000..6af3276d5 --- /dev/null +++ b/filter-plugin/logstash-filter-mysql-aws-guardium/src/main/java/com/ibm/guardium/mysql/MySQLGuardiumPluginFilter.java @@ -0,0 +1,101 @@ +// +// Copyright 2020-2021 IBM Inc. 
All rights reserved +// SPDX-License-Identifier: Apache2.0 +// + +package com.ibm.guardium.mysql; + +import java.io.File; +import java.text.ParseException; +import java.util.*; + +import com.google.gson.*; +import org.apache.commons.text.StringEscapeUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import com.ibm.guardium.universalconnector.commons.GuardConstants; +import com.ibm.guardium.universalconnector.commons.structures.Record; +import co.elastic.logstash.api.Configuration; +import co.elastic.logstash.api.Context; +import co.elastic.logstash.api.Event; +import co.elastic.logstash.api.Filter; +import co.elastic.logstash.api.FilterMatchListener; +import co.elastic.logstash.api.LogstashPlugin; +import co.elastic.logstash.api.PluginConfigSpec; + +@LogstashPlugin(name = "mysql_guardium_plugin_filter") +public class MySQLGuardiumPluginFilter implements Filter { + + public static final String LOG42_CONF = "log4j2uc.properties"; + + static { + try { + String uc_etc = System.getenv("UC_ETC"); + LoggerContext context = (LoggerContext) LogManager.getContext(false); + File file = new File(uc_etc + File.separator + LOG42_CONF); + context.setConfigLocation(file.toURI()); + } catch (Exception e) { + e.printStackTrace(); + } + } + + private String id; + public static final PluginConfigSpec SOURCE_CONFIG = PluginConfigSpec.stringSetting("source", "message"); + private static Logger log = LogManager.getLogger(MySQLGuardiumPluginFilter.class); + + public MySQLGuardiumPluginFilter(String id, Configuration config, Context context) { + this.id = id; + } + + @Override + public Collection> configSchema() { + // should return a list of all configuration options for this plugin + return Collections.singletonList(SOURCE_CONFIG); + } + + @Override + public String getId() { + return this.id; + } + + + @Override + public Collection filter(Collection events, FilterMatchListener matchListener) { + for (Event e : events) { + log.info(Constants.EVENT_NOW_MYSQL, e.getData()); + try { + + if(null != e.getData() + && null != e.getData().get(Constants.COMMAND) && e.getData().get(Constants.COMMAND).toString() + .equals(Constants.COMMAND_UNKNOWN) && null != e.getData().get(Constants.DATABASE_USER_NAME) + && e.getData().get(Constants.DATABASE_USER_NAME).toString().equals(Constants.RDS_ADMIN)){ + continue; + } + + Record record = Parser.parseRecord(e); + + final GsonBuilder builder = new GsonBuilder(); + + builder.serializeNulls(); + + final Gson gson = builder.disableHtmlEscaping().create(); + + String jsonRecord = gson.toJson(record); + + jsonRecord = StringEscapeUtils.unescapeJson(jsonRecord); + + e.setField(GuardConstants.GUARDIUM_RECORD_FIELD_NAME, jsonRecord); + + matchListener.filterMatched(e); + + } catch (ParseException ex) { + log.error("Given Event Is Not An Instance Of String " + e.getField(Constants.RECORDS)); + ex.printStackTrace(); + } + } + return events; + } + +} + diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/src/main/java/com/ibm/guardium/mysql/Parser.java b/filter-plugin/logstash-filter-mysql-aws-guardium/src/main/java/com/ibm/guardium/mysql/Parser.java new file mode 100644 index 000000000..bbcf153a7 --- /dev/null +++ b/filter-plugin/logstash-filter-mysql-aws-guardium/src/main/java/com/ibm/guardium/mysql/Parser.java @@ -0,0 +1,278 @@ +package com.ibm.guardium.mysql; + +import co.elastic.logstash.api.Event; + +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import 
com.google.gson.JsonParser; +import com.google.gson.JsonSyntaxException; +import com.ibm.guardium.universalconnector.commons.structures.*; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + + +import java.text.ParseException; +import java.time.*; + +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + + +public class Parser { + + private static final Logger log = LogManager.getLogger(Parser.class); + + public static Record parseRecord(final Event event) throws ParseException { + + Map data = event.getData(); + + log.debug(Constants.EVENT_DATA, data); + + Record record = new Record(); + if (data != null) { + // set default value since we don't have sessionId + record.setSessionId(Constants.UNKNOWN_STRING); + + // accountId:dbName + record.setDbName(getAccountIdDBName(data)); + + + if(null != data.get(Constants.DATABASE_USER_NAME) + && !data.get(Constants.DATABASE_USER_NAME).toString().isEmpty()){ + record.setAppUserName(data.get(Constants.DATABASE_USER_NAME).toString()); + } + + record.setTime(getTime(data)); + + record.setSessionLocator(getSessionLocator(data)); + + record.setAccessor(getAccessor(data)); + + setQueryORExceptionRecord(data, record); + } + log.debug(Constants.GUARD_RECORD, record); + + return record; + + } + + private static void setQueryORExceptionRecord(Map data, Record record) { + if(null != data.get(Constants.STATUS_CODE) && null != data.get(Constants.COMMAND) + && Integer.parseInt(data.get(Constants.STATUS_CODE).toString()) == 0 + && !data.get(Constants.COMMAND).toString().isEmpty() + && data.get(Constants.COMMAND).toString().equals(Constants.QUERY_CONST)){ + setQueryData(data, record); + } else if (null != data.get(Constants.COMMAND) + && !data.get(Constants.COMMAND).toString().isEmpty() + && data.get(Constants.COMMAND).toString().equals(Constants.FAILED_CONNECT)) { + setExceptionRecord(data, record); + } else if (null != data.get(Constants.STATUS_CODE) + && Integer.parseInt(data.get(Constants.STATUS_CODE).toString()) != 0) { + setExceptionRecord (data, record); + } + } + + private static void setExceptionRecord(Map data, Record record) { + ExceptionRecord exceptionRecord = new ExceptionRecord(); + + String description = Constants.DESCRIPTION_MESSAGE + data.get(Constants.STATUS_CODE).toString(); + + exceptionRecord.setExceptionTypeId(Constants.SQL_ERROR); + + exceptionRecord.setDescription(description); + + if(null != data.get(Constants.QUERY) && !data.get(Constants.QUERY).toString().isEmpty()){ + exceptionRecord.setSqlString(data.get(Constants.QUERY).toString()); + } else { + exceptionRecord.setSqlString(Constants.NOT_AVAILABLE); + } + + if (null != data.get(Constants.COMMAND_TYPE) + && !data.get(Constants.COMMAND_TYPE).toString().isEmpty() + && null != data.get(Constants.ACTION) + && data.get(Constants.ACTION).toString().equals(Constants.FAILED_CONNECT)) { + exceptionRecord.setExceptionTypeId(Constants.LOGIN_FAILED); + description = Constants.CONNECTION_FAILED_DESCRIPTION_MESSAGE + data.get(Constants.STATUS_CODE).toString(); + exceptionRecord.setDescription((description)); + exceptionRecord.setSqlString(Constants.NOT_AVAILABLE); + } + record.setException(exceptionRecord); + } + + private static void setQueryData(Map data, Record record) { + if (data.get(Constants.QUERY) != null && !data.get(Constants.QUERY).toString().isEmpty()) { + + String rawQuery 
+    private static void setQueryData(Map<String, Object> data, Record record) {
+        if (data.get(Constants.QUERY) != null && !data.get(Constants.QUERY).toString().isEmpty()) {
+
+            String rawQuery = data.get(Constants.QUERY).toString();
+
+            Data queryData = new Data();
+            queryData.setConstruct(null);
+            queryData.setOriginalSqlCommand(rawQuery);
+
+            record.setData(queryData);
+        }
+    }
+
+
+    private static Accessor getAccessor(Map<String, Object> data) {
+        Accessor accessor = new Accessor();
+
+        if (null != data.get(Constants.DATABASE_USER_NAME)
+                && !data.get(Constants.DATABASE_USER_NAME).toString().isEmpty()) {
+            accessor.setDbUser(data.get(Constants.DATABASE_USER_NAME).toString());
+        }
+
+        accessor.setServerType(Constants.SERVER_TYPE);
+        accessor.setServerOs(Constants.UNKNOWN_STRING);
+        accessor.setClientOs(Constants.UNKNOWN_STRING);
+        accessor.setClientHostName(Constants.UNKNOWN_STRING);
+
+        // accountId:dbName
+        accessor.setServerHostName(getAccountIdDBName(data));
+
+        accessor.setCommProtocol(Constants.UNKNOWN_STRING);
+        accessor.setDbProtocol(Constants.DB_PROTOCOL);
+        accessor.setDbProtocolVersion(Constants.UNKNOWN_STRING);
+        accessor.setOsUser(Constants.UNKNOWN_STRING);
+        accessor.setSourceProgram(null);
+        accessor.setClient_mac(Constants.UNKNOWN_STRING);
+
+        // accountId:dbName
+        accessor.setServiceName(getAccountIdDBName(data));
+
+        accessor.setLanguage(Constants.DB_PROTOCOL);
+        accessor.setDataType(Constants.TEXT);
+
+        return accessor;
+    }
+
+    private static String getAccountIdDBName(Map<String, Object> data) {
+
+        String dbNameAccountId = Constants.UNKNOWN_STRING;
+
+        if (null != data.get(Constants.ACCOUNT_ID)
+                && !data.get(Constants.ACCOUNT_ID).toString().isEmpty()
+                && null != data.get(Constants.DATABASE_NAME)
+                && !data.get(Constants.DATABASE_NAME).toString().isEmpty()) {
+
+            String accountId = Constants.UNKNOWN_STRING;
+            String database = data.get(Constants.DATABASE_NAME).toString();
+
+            if (data.get(Constants.ACCOUNT_ID) instanceof String) {
+                accountId = data.get(Constants.ACCOUNT_ID).toString();
+            } else if (data.get(Constants.ACCOUNT_ID) instanceof List) {
+                List<?> rawList = (List<?>) data.get(Constants.ACCOUNT_ID);
+
+                if (!rawList.isEmpty()) {
+                    accountId = String.valueOf(rawList.get(0));
+                }
+            }
+            dbNameAccountId = accountId + ":" + database;
+        }
+        return dbNameAccountId;
+    }
+
+    private static Time getTime(Map<String, Object> data) {
+        if (null != data.get(Constants.TIMESTAMP) && !data.get(Constants.TIMESTAMP).toString().isEmpty()) {
+            return getEpochTime(data.get(Constants.TIMESTAMP).toString());
+        }
+        return null;
+    }
+
+    private static SessionLocator getSessionLocator(Map<String, Object> data) {
+        String serverIp = getAccountIdDBName(data);
+        SessionLocator sessionLocator = new SessionLocator();
+        if (null != data.get(Constants.CLIENT_IP)
+                && !data.get(Constants.CLIENT_IP).toString().isEmpty()) {
+            sessionLocator.setClientIp(data.get(Constants.CLIENT_IP).toString());
+        }
+        sessionLocator.setClientPort(Constants.DEFAULT_PORT);
+        sessionLocator.setServerPort(Constants.DEFAULT_PORT);
+        sessionLocator.setServerIp(serverIp);
+        sessionLocator.setIpv6(false);
+        sessionLocator.setClientIpv6(Constants.UNKNOWN_STRING);
+        sessionLocator.setServerIpv6(Constants.UNKNOWN_STRING);
+        return sessionLocator;
+    }
+
+    private static Time getEpochTime(String timeStamp) {
+        ZonedDateTime zonedDateTime;
+
+        try {
+            // Try to parse as ISO-8601 (with 'Z' and nanoseconds) i.e.
2025-07-22T13:14:45.644605897Z + Instant instant = Instant.parse(timeStamp); + zonedDateTime = instant.atZone(ZoneId.systemDefault()); + } catch (DateTimeParseException e) { + // Fallback: try to parse using the custom format + DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyyMMdd HH:mm:ss"); + LocalDateTime localDateTime = LocalDateTime.parse(timeStamp, formatter); + zonedDateTime = localDateTime.atZone(ZoneId.systemDefault()); + } + + long millis = zonedDateTime.toInstant().toEpochMilli(); + int minOffset = zonedDateTime.getOffset().getTotalSeconds() / 60; + + return new Time(millis, minOffset, 0); + } + + private static String getDBInstanceName(String logGroupName) { + if (logGroupName == null || logGroupName.isEmpty()) { + return null; + } + + // Split by "/" and get the element at index 4 + String[] parts = logGroupName.split("/"); + if (parts.length >= 5) { + return parts[4]; + } + return null; + } + + + private static JsonObject getJSON(String jsonString) { + try { + JsonElement element = JsonParser.parseString(jsonString); + + if (element.isJsonObject()) { + return element.getAsJsonObject(); + } else { + throw new IllegalArgumentException("Provided string is not a JSON object"); + } + + } catch (JsonSyntaxException | IllegalArgumentException e) { + // Handle invalid JSON or wrong type + log.error("Error parsing JSON: " + e.getMessage()); + return null; + } + } + + private static boolean isValidJson(String json) { + if (json == null || json.trim().isEmpty()) { + log.warn("JSON string is null or empty"); + return false; + } + + try { + JsonElement element = JsonParser.parseString(json); + + if (element == null || element.isJsonNull()) { + log.warn("Parsed JSON is null: {}", json); + return false; + } + + return true; + } catch (JsonSyntaxException e) { + log.warn("Invalid JSON syntax: {} , {}", json, e); + return false; + } + } + +} diff --git a/filter-plugin/logstash-filter-mysql-aws-guardium/src/test/java/com/ibm/guardium/mysql/ParserTest.java b/filter-plugin/logstash-filter-mysql-aws-guardium/src/test/java/com/ibm/guardium/mysql/ParserTest.java new file mode 100644 index 000000000..670fd3969 --- /dev/null +++ b/filter-plugin/logstash-filter-mysql-aws-guardium/src/test/java/com/ibm/guardium/mysql/ParserTest.java @@ -0,0 +1,324 @@ +package com.ibm.guardium.mysql; + +import co.elastic.logstash.api.Event; +import com.ibm.guardium.universalconnector.commons.structures.Record; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import java.text.ParseException; +import java.util.*; + +import static org.junit.jupiter.api.Assertions.*; + +class ParserTest { + + @Test + void testParseRecord_withValidInput_shouldReturnValidRecord() throws ParseException { + // Mock the Event + Event event = Mockito.mock(Event.class); + + // Prepare data for the mocked Event + Map mockData = new HashMap<>(); + + // Nested cloudwatch data as a Map + Map logEvents = new HashMap<>(); + logEvents.put("id", "12345678901234567890123456789012345678901234567890123456"); + logEvents.put("timestamp", 1753323600000L); + + Map cloudwatch = new HashMap<>(); + cloudwatch.put("logEvents", logEvents); + cloudwatch.put("logStream", "perfmysqlsv"); + cloudwatch.put("messageType", "DATA_MESSAGE"); + + List subscriptionFiltersList = new ArrayList(); + subscriptionFiltersList.add("PostgresCloudWatchLogstoS3"); + Map> subscriptionFiltersMap = new HashMap(); + subscriptionFiltersMap.put("delegate",subscriptionFiltersList); + + cloudwatch.put("subscriptionFilters",subscriptionFiltersMap); + + 
cloudwatch.put("owner", "123456789012"); + cloudwatch.put("logGroup", "/aws/rds/instance/sample-mysql-instance/audit"); + + mockData.put("cloudwatch", cloudwatch); + mockData.put("query_timestamp", "20250724 02:20:00"); + mockData.put("client_port", 734); + mockData.put("GuardRecord_did_not_exist", true); + mockData.put("logStream", "sample-mysql-instance"); + mockData.put("db_user", "unknown"); + mockData.put("client_ip", "192.168.1.100"); + mockData.put("fileKey", "mysql-logs/2025/07/24/CloudWatchLogstoS3-22-2025-07-24-02-19-30-sample-file-key.gz"); + mockData.put("command_type", "UNKNOWN"); + mockData.put("bucketName", "sample-mysql-audit-bucket"); + mockData.put("command", "QUERY"); + mockData.put("@timestamp", "2025-07-24T02:20:35.622185651Z"); + mockData.put("user", "admin"); + mockData.put("timestamp", "2025-07-24T02:20:35.622126440Z"); + mockData.put("database", "testdb"); + mockData.put("host", "sample-host-01"); + mockData.put("connection_id", 366361); + mockData.put("@version", 1); + mockData.put("type", "MYSQL_S3SQS"); + mockData.put("status_code", 0); + + List accountId = new ArrayList(); + accountId.add("123456789012"); + accountId.add("123456789012"); + + mockData.put("account_id", accountId); + mockData.put("query", "/* ApplicationName=DBeaver 25.0.3 - SQLEditor */ CREATE TABLE employees (\n" + + " id INT AUTO_INCREMENT PRIMARY KEY,\n" + + " first_name VARCHAR(50),\n" + + " last_name VARCHAR(50),\n" + + " email VARCHAR(100),\n" + + " hire_date DATE\n)"); + + // Embed the cloudwatch object as JSON string under "message" for getAccountIdDBName + mockData.put("message", new com.google.gson.Gson().toJson(cloudwatch)); + + // Set mock behavior + Mockito.when(event.getData()).thenReturn(mockData); + + // Act + Record record = Parser.parseRecord(event); + + // Assert + assertNotNull(record); + assertNotNull(record.getAccessor()); + assertNotNull(record.getSessionLocator()); + assertEquals("admin", record.getAccessor().getDbUser()); + assertNotNull(record.getData()); + assertTrue(record.getData().getOriginalSqlCommand().startsWith("/* ApplicationName=DBeaver")); + + // Validate db name format + assertTrue(record.getDbName().contains("123456789012:testdb")); + } + + @Test + void testParseRecord_withQuery() throws ParseException { + // Mock the Event + Event event = Mockito.mock(Event.class); + + // Prepare data for the mocked Event + Map mockData = new HashMap<>(); + + // Nested cloudwatch data as a Map + Map logEvents = new HashMap<>(); + logEvents.put("id", "12345678901234567890123456789012345678901234567890123456"); + logEvents.put("timestamp", 1753323600000L); + + Map cloudwatch = new HashMap<>(); + cloudwatch.put("logEvents", logEvents); + cloudwatch.put("logStream", "perfmysqlsv"); + cloudwatch.put("messageType", "DATA_MESSAGE"); + + List subscriptionFiltersList = new ArrayList(); + subscriptionFiltersList.add("PostgresCloudWatchLogstoS3"); + Map> subscriptionFiltersMap = new HashMap(); + subscriptionFiltersMap.put("delegate",subscriptionFiltersList); + + cloudwatch.put("subscriptionFilters",subscriptionFiltersMap); + + cloudwatch.put("owner", "123456789012"); + cloudwatch.put("logGroup", "/aws/rds/instance/sample-mysql-instance/audit"); + + mockData.put("cloudwatch", cloudwatch); + mockData.put("query_timestamp", "20250724 02:20:00"); + mockData.put("client_port", 734); + mockData.put("GuardRecord_did_not_exist", true); + mockData.put("logStream", "sample-mysql-instance"); + mockData.put("db_user", "unknown"); + mockData.put("client_ip", "192.168.1.100"); + 
mockData.put("fileKey", "mysql-logs/2025/07/24/CloudWatchLogstoS3-22-2025-07-24-02-19-30-sample-file-key.gz"); + mockData.put("command_type", "UNKNOWN"); + mockData.put("bucketName", "sample-mysql-audit-bucket"); + mockData.put("command", "QUERY"); + mockData.put("@timestamp", "2025-07-24T02:20:35.622185651Z"); + mockData.put("user", "admin"); + mockData.put("timestamp", "2025-07-24T02:20:35.622126440Z"); + mockData.put("database", "testdb"); + mockData.put("host", "sample-host-01"); + mockData.put("connection_id", 366361); + mockData.put("@version", 1); + mockData.put("type", "MYSQL_S3SQS"); + mockData.put("status_code", 0); + + List accountId = new ArrayList(); + accountId.add("123456789012"); + accountId.add("123456789012"); + + mockData.put("account_id", accountId); + mockData.put("query", "/* ApplicationName=DBeaver 25.0.3 - SQLEditor */ INSERT INTO Star123 (first_name, last_name, email, hire_date) VALUES(\\'John\\', \\'Doe\\', \\'john.doe@example.com\\', \\'2022-01-15\\')"); + + // Embed the cloudwatch object as JSON string under "message" for getAccountIdDBName + mockData.put("message", new com.google.gson.Gson().toJson(cloudwatch)); + + // Set mock behavior + Mockito.when(event.getData()).thenReturn(mockData); + + // Act + Record record = Parser.parseRecord(event); + + // Assert + assertNotNull(record); + assertNotNull(record.getAccessor()); + assertNotNull(record.getSessionLocator()); + assertEquals("admin", record.getAccessor().getDbUser()); + assertNotNull(record.getData()); + assertTrue(record.getData().getOriginalSqlCommand().startsWith("/* ApplicationName=DBeaver")); + + // Validate db name format + assertTrue(record.getDbName().contains("123456789012:testdb")); + } + + + @Test + void testParseRecordSQLSyntaxError() throws ParseException { + // Mock the Event + Event event = Mockito.mock(Event.class); + + // Prepare data for the mocked Event + Map mockData = new HashMap<>(); + + // Nested cloudwatch data as a Map + Map logEvents = new HashMap<>(); + logEvents.put("id", "98765432109876543210987654321098765432109876543210987654"); + logEvents.put("timestamp", 1753342566000L); + + Map cloudwatch = new HashMap<>(); + cloudwatch.put("logEvents", logEvents); + cloudwatch.put("logStream", "perfmysqlsv"); + cloudwatch.put("messageType", "DATA_MESSAGE"); + + List subscriptionFiltersList = new ArrayList<>(); + subscriptionFiltersList.add("PostgresCloudWatchLogstoS3"); + + Map> subscriptionFiltersMap = new HashMap<>(); + subscriptionFiltersMap.put("delegate", subscriptionFiltersList); + + cloudwatch.put("subscriptionFilters", subscriptionFiltersMap); + cloudwatch.put("owner", "123456789012"); + cloudwatch.put("logGroup", "/aws/rds/instance/sample-mysql-instance/audit"); + + mockData.put("cloudwatch", cloudwatch); + mockData.put("query_timestamp", "20250724 07:36:06"); + mockData.put("client_port", 734); + mockData.put("GuardRecord_did_not_exist", true); + mockData.put("logStream", "sample-mysql-instance"); + mockData.put("db_user", "unknown"); + mockData.put("client_ip", "192.168.1.100"); + mockData.put("fileKey", "mysql-logs/2025/07/24/CloudWatchLogstoS3-22-2025-07-24-07-35-25-sample-file-key.gz"); + mockData.put("command_type", "UNKNOWN"); + mockData.put("bucketName", "sample-mysql-audit-bucket"); + mockData.put("command", "QUERY"); + mockData.put("@timestamp", "2025-07-24T07:36:30.492230782Z"); + mockData.put("timestamp", "2025-07-24T07:36:30.492191791Z"); + mockData.put("user", "admin"); + mockData.put("host", "sample-host-01"); + mockData.put("connection_id", 417016); + 
mockData.put("database", "testdb"); + mockData.put("@version", 1); + mockData.put("type", "S3SQS__MYSQL"); + mockData.put("status_code", 1064); + + List accountId = new ArrayList<>(); + accountId.add("123456789012"); + accountId.add("123456789012"); + mockData.put("account_id", accountId); + + mockData.put("query", "/* ApplicationName=DBeaver 25.0.3 - SQLEditor */ DRGOP TABLE 24July2025_05"); + + // Embed the cloudwatch object as JSON string under "message" + mockData.put("message", new com.google.gson.Gson().toJson(cloudwatch)); + + // Set mock behavior + Mockito.when(event.getData()).thenReturn(mockData); + + // Act + Record record = Parser.parseRecord(event); + + // Assert + assertNotNull(record); + assertNotNull(record.getAccessor()); + assertNotNull(record.getSessionLocator()); + assertEquals("admin", record.getAccessor().getDbUser()); + assertNotNull(record.getException().getDescription()); + + assertTrue(record.getDbName().contains("123456789012:testdb")); + } + + @Test + void testParseRecordFailedConnect() throws ParseException { + // Mock the Event + Event event = Mockito.mock(Event.class); + + // Prepare data for the mocked Event + Map mockData = new HashMap<>(); + + // Nested cloudwatch data as a Map + Map logEvents = new HashMap<>(); + logEvents.put("id", "11111111111111111111111111111111111111111111111111111111"); + logEvents.put("timestamp", 1753346386000L); + + Map cloudwatch = new HashMap<>(); + cloudwatch.put("logEvents", logEvents); + cloudwatch.put("logStream", "perfmysqlsv"); + cloudwatch.put("messageType", "DATA_MESSAGE"); + + List subscriptionFiltersList = new ArrayList<>(); + subscriptionFiltersList.add("PostgresCloudWatchLogstoS3"); + + Map> subscriptionFiltersMap = new HashMap<>(); + subscriptionFiltersMap.put("delegate", subscriptionFiltersList); + + cloudwatch.put("subscriptionFilters", subscriptionFiltersMap); + cloudwatch.put("owner", "123456789012"); + cloudwatch.put("logGroup", "/aws/rds/instance/sample-mysql-instance/audit"); + + mockData.put("cloudwatch", cloudwatch); + mockData.put("client_port", 876); + mockData.put("GuardRecord_did_not_exist", true); + mockData.put("logStream", "sample-mysql-instance"); + mockData.put("db_user", "sample_user"); + mockData.put("client_ip", "192.168.1.100"); + mockData.put("fileKey", "mysql-logs/2025/07/24/CloudWatchLogstoS3-22-2025-07-24-08-39-00-sample-file-key.gz"); + mockData.put("command_type", "UNKNOWN"); + mockData.put("bucketName", "sample-mysql-audit-bucket"); + mockData.put("action", "FAILED_CONNECT"); + mockData.put("@timestamp", "2025-07-24T08:40:05.632510305Z"); + mockData.put("timestamp", "2025-07-24T08:40:05.632260750Z"); + mockData.put("connection_type", "TCP/IP"); + mockData.put("database", "unknown"); + mockData.put("status_code", 1045); + mockData.put("type", "S3SQS__MYSQL"); + mockData.put("@version", 1); + mockData.put("error_timestamp", "20250724 08:39:46"); + mockData.put("db_instance", "sample-host-01"); + + List accountIdList = new ArrayList<>(); + accountIdList.add("123456789012"); + accountIdList.add("123456789012"); + mockData.put("account_id", accountIdList); + + // Embed the cloudwatch object as JSON string under "message" + mockData.put("message", new com.google.gson.Gson().toJson(cloudwatch)); + + // Set mock behavior + Mockito.when(event.getData()).thenReturn(mockData); + + // Act + Record record = Parser.parseRecord(event); + + // Assert + assertNotNull(record); + assertNotNull(record.getAccessor()); + assertNotNull(record.getSessionLocator()); + + assertEquals("192.168.1.100", 
record.getSessionLocator().getClientIp());
+
+        assertTrue(record.getDbName().contains("123456789012:unknown"));
+        assertNotNull(record.getException());
+        assertEquals("LOGIN_FAILED", record.getException().getExceptionTypeId());
+    }
+
+}
diff --git a/filter-plugin/logstash-filter-postgres-guardium/CHANGELOG.md b/filter-plugin/logstash-filter-postgres-guardium/CHANGELOG.md
new file mode 100644
index 000000000..0c44e1a16
--- /dev/null
+++ b/filter-plugin/logstash-filter-postgres-guardium/CHANGELOG.md
@@ -0,0 +1,10 @@
+# Changelog
+
+## [1.0.2]
+Updating the configuration in path `/PostgresOverSQSPackage/postgreSQS.conf` to enable custom certificate upload for UC.
+
+## [1.0.1]
+Adding the PostgresOverS3SQS filter parser implementation logic for Guardium records, with logic to remove extra quotes from SQL statements.
+
+## [1.0.0]
+Adding the PostgresOverS3SQS filter with Java implementation
\ No newline at end of file
diff --git a/filter-plugin/logstash-filter-postgres-guardium/Gemfile b/filter-plugin/logstash-filter-postgres-guardium/Gemfile
new file mode 100644
index 000000000..ea0d321ef
--- /dev/null
+++ b/filter-plugin/logstash-filter-postgres-guardium/Gemfile
@@ -0,0 +1,12 @@
+# AUTOGENERATED BY THE GRADLE SCRIPT. EDITS WILL BE OVERWRITTEN.
+source 'https://rubygems.org'
+
+gemspec
+
+logstash_path = ENV["LOGSTASH_PATH"] || "../../logstash"
+use_logstash_source = ENV["LOGSTASH_SOURCE"] && ENV["LOGSTASH_SOURCE"].to_s == "1"
+
+if Dir.exist?(logstash_path) && use_logstash_source
+  gem 'logstash-core', :path => "#{logstash_path}/logstash-core"
+  gem 'logstash-core-plugin-api', :path => "#{logstash_path}/logstash-core-plugin-api"
+end
diff --git a/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/AWSS3SQSProstgre.conf b/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/AWSS3SQSProstgre.conf
new file mode 100644
index 000000000..2e4d09892
--- /dev/null
+++ b/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/AWSS3SQSProstgre.conf
@@ -0,0 +1,91 @@
+input {
+  s3_sqs {
+    queue_url => ""
+    region => ""
+    access_key_id => ""
+    secret_access_key => ""
+    role_arn => "" # Leave empty if not using role-based access
+    max_messages =>
+    wait_time =>  # Must be >= 0 and <= 20
+    polling_frequency =>
+    type => ""
+    add_field => {
+      "account_id" => ""
+      "instance_name" => ""
+    }
+  }
+}
+
+filter {
+
+  if [type] == "PostgresS3SQS" {
+
+    json {
+      source => "message"
+      target => "parsed_message"
+      remove_field => ["message"]
+    }
+
+    # Drop known noise patterns
+    if [parsed_message][message] =~ /(pg_sleep|export_postgres_logs_to_s3|cron\.job_run_details|create_foreign_table_for_log_file|logs.postgres_logs|public\.list_postgres_log_files|information_schema\.tables|aws_s3\.query_export_to_s3)/ {
+      drop { }
+    }
+
+    # Drop ALTER/CREATE/CREATE FOREIGN TABLE statements on postgres_logs_
+    if [parsed_message][message] =~ /((ALTER|CREATE(\s+FOREIGN)?)\s+TABLE)\s+postgres_logs_\d{8}_\d{4}/ {
+      drop { }
+    }
+
+    # Sanitize multiline and quote issues
+    mutate {
+      gsub => [
+        "[parsed_message][message]", "\n", "",
+        "[parsed_message][message]", "¶", " ",
+        "[parsed_message][message]", '\\"', '"'
+      ]
+    }
+
+    # Promote nested parsed_message.message to top-level for grok
+    mutate {
+      add_field => {
+        "log_message" => "%{[parsed_message][message]}"
+      }
+    }
+
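+    # For reference, a pgaudit session-audit entry that the grok pattern below
+    # is meant to match looks roughly like this (hypothetical values):
+    #   AUDIT: SESSION,1,1,DDL,CREATE TABLE,TABLE,public.employees,"CREATE TABLE employees (id INT)",<not logged>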
+    grok {
+      match => {
+        "log_message" => [
+          # Match structured AUDIT logs
+          'AUDIT: %{WORD:audit_level},%{INT:session_id},%{INT:transaction_id},%{WORD:operation_type},%{DATA:command},%{DATA:object_type},%{DATA:object_name},%{GREEDYDATA:full_sql_query},%{GREEDYDATA:details}',
+
+          # Catch-all error message fallback
+          '%{GREEDYDATA:ErrorMessage}'
+        ]
+      }
+      remove_field => ["log_message"]
+    }
+
+    # Drop events that couldn't be parsed by grok
+    if "_grokparsefailure" in [tags] {
+      drop { }
+    }
+
+    # Trim quotes from SQL query if it exists
+    mutate {
+      gsub => [
+        "full_sql_query", '^\"', '',
+        "full_sql_query", '\"$', ''
+      ]
+    }
+
+    # Call your custom Guardium plugin
+    s3sqs_postgresql_guardium_plugin_filter { }
+
+    # Retain only processed GuardRecord
+    prune {
+      whitelist_names => ["GuardRecord"]
+    }
+
+  }
+
+}
\ No newline at end of file
diff --git a/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/AwsPostgresS3SQS_README.md b/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/AwsPostgresS3SQS_README.md
new file mode 100644
index 000000000..e3cbcff5f
--- /dev/null
+++ b/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/AwsPostgresS3SQS_README.md
@@ -0,0 +1,177 @@
+## Meet AWS Postgres
+* Environment: AWS
+* Supported inputs: S3 (pull), SQS (pull)
+* Supported Guardium versions:
+    * Guardium Data Protection: 11.4 and above
+    * Guardium Data Security Center: 3.2 and above
+#### Notes
+This is a [Logstash](https://github.com/elastic/logstash) filter plug-in for the universal connector that is featured in IBM Security Guardium. The filter supports events sent through CloudWatch or SQS.
+
+## 1. Configuring the AWS Postgres service
+
+1. Go to https://console.aws.amazon.com/
+2. Click ```Services```
+3. In the Database section, click ```RDS```
+4. Select the region in the top right corner
+5. In the central panel of the Amazon RDS Dashboard, click ```Create database```
+6. Choose a database creation method
+7. In the Engine options, select ```PostgreSQL```, and then select the appropriate version
+8. Select an appropriate template (Production, Dev/Test, or Free Tier)
+9. In the Settings section, type the database instance name and create the master account with the username and password to log in to the database
+10. Select the database instance size according to your requirements
+11. Select appropriate storage options (for example, you may want to enable auto-scaling)
+12. Select the availability and durability options
+13. Select the connectivity settings that are appropriate for your environment. To make the database accessible, set the Public access option to Publicly Accessible within Additional Configuration
+14. Select the type of Authentication for the database (choose from Password Authentication, Password and IAM database authentication, and Password and Kerberos authentication)
+15. Expand the Additional Configuration options:
+
+    a. Configure the database options
+
+    b. Select options for Backup
+
+    c. If desired, enable Encryption on the database instances
+
+    d. In Log exports, select the PostgreSQL log type to publish to Amazon CloudWatch
+
+    e. Select the options for Deletion protection
+
+16. Click ```Create Database```
+17. To view the database, click ```Databases``` under Amazon RDS in the left panel
+18. To authorize inbound traffic, edit the security group:
+
+    a. In the database summary page, select the Connectivity and Security tab. Under Security, click VPC security group
+
+    b. Click the group name that you selected while creating a database (each database has one active group)
+
+    c. In the Inbound rule section, choose to edit the inbound rules
+
+    d. Set this rule:
+
+       • Type: PostgreSQL
+
+       • Protocol: TCP
+
+       • Port Range: 5432
+
+       Note: Depending on your requirements, the source can be set to a specific IP address or it can be opened to all hosts.
+
+    e. Click ```Add Rule``` and then click ```Save changes```. The database may need to be restarted
+
+## 2. Enabling the PGAudit extension
+
+There are different ways of auditing and logging in PostgreSQL. For this exercise, we will use PGAudit, the open
+source audit logging extension for PostgreSQL 9.5+. This extension supports logging for Sessions or Objects.
+Configure either Session Auditing or Object Auditing. You cannot enable both at the same time.
+
+### Steps to enable PGAudit
+
+1. Creating the database parameter group
+2. Enabling Auditing using **either one** of the following:
+
+   a. Enabling PGAudit Session Auditing
+
+   b. Enabling PGAudit Object Auditing
+
+3. Associating the DB Parameter Group with the database Instance
+
+#### Creating the database parameter group
+
+When you create a database instance, it is associated with the default parameter group. Follow these
+steps to create a new parameter group:
+
+1. Go to ```Services``` > ```Database``` > ```Parameter groups```
+2. Click Create Parameter Group in the left pane
+3. Enter the parameter group details
+
+   • Select the parameter group family. For example, aurora-postgres12. This version should match the version of the database that is created and with which this parameter group is to be associated
+
+   • Enter the DB parameter group name
+
+   • Enter the DB parameter group description
+
+4. Click ```Save```. The new group appears in the Parameter Groups section
+
+#### Enabling PGAudit Session Auditing
+
+Session Auditing allows you to log activities that are selected in the pgaudit.log for logging. Be cautious when you select which activities will be logged, as logged activities can affect the performance of the database instance.
+
+1. In the left-hand Amazon RDS panel, select Parameter Groups.
+2. Select the parameter group that you created.
+3. Click Edit parameters and add these settings:
+
+   • pgaudit.log = all
+     (Select the options from the Allowed values list. You can specify multiple values, and separate them with ",". The values that are marked with "-" are excluded while logging.)
+
+   • pgaudit.log_catalog = 0
+
+   • pgaudit.log_parameter = 0
+
+   • shared_preload_libraries = pgaudit,pg_cron
+
+   • log_error_verbosity = default
+
+   • pgaudit.role = rds_pgaudit
+
+   • log_destination = csvlog
+
+   • cron.database_name = ```<database-name>```
+
+#### Associating the DB Parameter Group with the database Instance
+
+1. Go to ```Services``` > ```Database``` > ```RDS``` > ```Databases```
+2. Click the Postgres database instance to be updated
+3. Click ```Modify```
+4. Go to the Additional Configuration ```section``` > ```database options``` > ```DB Parameter Group menu``` and select the ```newly-created group```
+5. Click ```Continue```
+6. Select the database instance in its configuration section. The state of the DB Parameter Group is pending-reboot
+7. Reboot the database instance for the changes to take effect
+
+## 3. Viewing the PGAudit logs
+
+The PGAudit logs (both Session and Object logs) can be seen in log files in RDS, and also on CloudWatch:
+
+### Viewing the auditing details in RDS log files
+
+The RDS log files can be viewed, watched, and downloaded.
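+For reference, they can also be listed and fetched with the AWS CLI; the commands below are a quick sketch, with the instance identifier and log file name left as placeholders:
+
+```bash
+# List the log files available for the instance
+aws rds describe-db-log-files --db-instance-identifier <db-instance-identifier>
+
+# Download a portion of a specific log file as plain text
+aws rds download-db-log-file-portion \
+    --db-instance-identifier <db-instance-identifier> \
+    --log-file-name error/postgresql.log.2025-07-24-02 \
+    --output text
+```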
+The name of the RDS log file is modifiable and is controlled by the parameter log_filename.
+
+1. Go to Services > Database > RDS > Databases
+2. Select the database instance
+3. Select the Logs & Events section
+4. The end of the Logs section lists the files that contain the auditing details. The newest file is the last page
+
+### Viewing the logs entries on CloudWatch
+
+By default, each database instance has an associated log group with a name in this format: /aws/rds/instance/<db_instance_name>/postgresql. You can use this log group, or you can create a new one and associate it with the database instance.
+
+1. On the AWS Console page, open the Services menu
+2. Enter the CloudWatch string in the search box
+3. Click CloudWatch to redirect to the CloudWatch dashboard
+4. In the left panel, select Logs
+5. Click Log Groups
+
+### Configuration
+1. On the collector, go to ```Setup``` > ```Tools and Views``` > ```Configure Universal Connector```.
+2. Enable the universal connector if it is disabled.
+3. Click ```Upload File``` and select the offline [logstash-filter-s3sqs_postgresql_guardium_plugin_filter.zip](../../logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/logstash-filter-s3sqs_postgresql_guardium_plugin_filter.zip) plugin. After it is uploaded, click ```OK```. This step is not necessary for Guardium Data Protection v11.0p490 or later, v11.0p540 or later, v12.0 or later.
+4. Click ```Upload File``` and select the offline [logstash-input-s3_sqs.zip](../../../input-plugin/logstash-input-s3sqs/InputS3SQSPackage/S3SQS/logstash-input-s3_sqs.zip) plugin. After it is uploaded, click ```OK```. This step is not necessary for Guardium Data Protection v11.0p490 or later, v11.0p540 or later, v12.0 or later.
+5. Click the Plus sign to open the Connector Configuration dialog box.
+6. Type a name in the ```Connector name``` field.
+7. Because the audit logs are fetched from S3 via SQS, use the details from the [AWSS3SQSProstgre.conf](../../logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/AWSS3SQSProstgre.conf) file. Update the input section to add the details from the corresponding file's input part, omitting the keyword "input{" at the beginning and its corresponding "}" at the end; a sketch of the result follows this list. More details on how to configure the relevant input plugin can be found [here](../../../input-plugin/logstash-input-s3sqs/README.md)
+8. Likewise, update the filter section to add the details from the corresponding file's filter part, omitting the keyword "filter{" at the beginning and its corresponding "}" at the end.
+9. The "type" fields should match in the input and the filter configuration sections. This field should be unique for every individual connector added. This is no longer required starting v12p20 and v12.1.
+10. Click ```Save```. Guardium validates the new connector and displays it in the Configure Universal Connector page.
+11. After the offline plug-in is installed and the configuration is uploaded and saved in the Guardium machine, restart the Universal Connector using the ```Disable/Enable``` button.
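+For example, after pasting the input part into the connector configuration (with the outer `input{ }` wrapper removed), the input section would look roughly like the sketch below; all values are placeholders:
+
+```
+s3_sqs {
+    queue_url => "<SQS-queue-URL>"
+    region => "<region>"
+    access_key_id => "<access-key-id>"
+    secret_access_key => "<secret-access-key>"
+    type => "PostgresS3SQS"
+    add_field => {
+        "account_id" => "<aws-account-id>"
+        "instance_name" => "<db-instance-name>"
+    }
+}
+```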
+## Note:
+
+### Exporting PostgreSQL or Aurora PostgreSQL Audit Logs to S3
+
+You can export PostgreSQL or Aurora PostgreSQL audit logs to an S3 bucket using the following method:
+
+1. **Using Extensions (`log_fdw`, `aws_s3`, and `pg_cron`)**
+   Refer to the [PostgresExtLogsExport](../PostgresOverS3SQSPackage/PostgresExtLogsExport.md) guide for detailed instructions.
+
+### Limitations:
+
+System-generated queries may appear in the Full SQL Report when using SQL client tools (e.g., DBeaver, DBVisualizer, pgAdmin), which can result in duplicate query entries.
+Role-based authentication using AWS IAM Role ARNs is not supported for Postgres over S3SQS at this time.
\ No newline at end of file
diff --git a/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/PostgresExtLogsExport.md b/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/PostgresExtLogsExport.md
new file mode 100644
index 000000000..83fefc69c
--- /dev/null
+++ b/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/PostgresExtLogsExport.md
@@ -0,0 +1,498 @@
+## [Export logs to S3 bucket](https://aws.amazon.com/blogs/database/automate-postgresql-log-exports-to-amazon-s3-using-extensions/)
+
+### Note:
+This implementation leverages PostgreSQL extensions such as `log_fdw`, `aws_s3`, and `pg_cron`, and closely follows the approach outlined in the official AWS blog post: [Automate PostgreSQL log exports to Amazon S3 using extensions](https://aws.amazon.com/blogs/database/automate-postgresql-log-exports-to-amazon-s3-using-extensions/).
+Please note that this solution is based entirely on the methodology provided by AWS. IBM does not assume responsibility for any future updates, enhancements, or fixes that may be required due to changes in the AWS implementation or related extensions.
+
+### Limitation:
+Client HostName is not available; it appears as N.A. in the Full SQL Report.
+
+### Create an IAM Role and Policy and Attach the Role to Your RDS for PostgreSQL Instance
+
+To allow Amazon RDS to export logs or data to Amazon S3, follow these steps:
+
+---
+
+## 1. Create an S3 Bucket
+
+1. Sign in to the **AWS Management Console**.
+2. Navigate to **S3** (Services → Storage → S3).
+3. Click **[Create bucket]**.
+4. Enter a unique **Bucket name** (e.g., `my-postgres-export-bucket`).
+5. Select the same **Region** as your RDS instance.
+6. Leave default settings or configure according to your requirements.
+7. Click **[Create bucket]**.
+
+---
+
+## 2. Create a Custom IAM Policy for S3 Access
+
+1. Go to **IAM** (Services → Security, Identity, & Compliance → IAM).
+2. In the left navigation, click **Policies**.
+3. Click **[Create policy]**.
+4. Select the **JSON** tab and paste the following policy:
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:GetObject",
+                "s3:AbortMultipartUpload",
+                "s3:DeleteObject",
+                "s3:ListMultipartUploadParts",
+                "s3:PutObject",
+                "s3:ListBucket"
+            ],
+            "Resource": [
+                "arn:aws:s3:::<bucket-name>",
+                "arn:aws:s3:::<bucket-name>/*"
+            ]
+        }
+    ]
+}
+```
+
+> Replace `<bucket-name>` with your actual bucket name.
+
+5. Click **[Next]**, give the policy a name (e.g., `RDSExportToS3Policy`), then click **[Create policy]**.
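+If you prefer the CLI, the same policy can be created with `aws iam create-policy`; this is a sketch that assumes the JSON above was saved locally as `policy.json`:
+
+```bash
+aws iam create-policy \
+    --policy-name RDSExportToS3Policy \
+    --policy-document file://policy.json
+```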
+---
+
+## 3. Create an IAM Role for RDS
+
+1. In the IAM console, go to **Roles**.
+2. Click **[Create role]**.
+3. Select **AWS service** as the trusted entity type.
+4. Choose **RDS** as the use case.
+5. Click **[Next]**.
+6. Skip attaching policies for now → Click **[Next]**.
+7. Enter a **Role name** (e.g., `RDSExportToS3Role`), then click **[Create role]**.
+
+---
+
+## 4. Modify the Trust Relationship
+
+1. Click on the newly created role (e.g., `RDSExportToS3Role`).
+2. Go to the **Trust relationships** tab.
+3. Click **[Edit trust policy]**.
+4. Replace the contents with the following:
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Principal": {
+                "Service": "rds.amazonaws.com"
+            },
+            "Action": "sts:AssumeRole"
+        }
+    ]
+}
+```
+
+5. Click **[Update policy]**.
+
+---
+
+## 5. Attach the Policy to the Role
+
+1. While on the role details page, go to the **Permissions** tab.
+2. Click **[Add permissions]** → **Attach policies**.
+3. Find and select the policy you created (e.g., `RDSExportToS3Policy`).
+4. Click **[Add permissions]**.
+
+---
+
+## 6. Associate the IAM Role with Your RDS Instance
+
+### Using AWS Console
+
+1. Go to **RDS** in the AWS Console.
+2. Select your **PostgreSQL DB instance**.
+3. Scroll to **Manage IAM roles**.
+4. Under **Feature name**, choose `s3Export`.
+5. Under **IAM role**, select the IAM role you created (`RDSExportToS3Role`).
+6. Click **[Continue]**, then **[Modify DB instance]**.
+7. Wait for the instance to return to the **Available** state.
+
+### Using AWS CLI
+
+```bash
+aws rds add-role-to-db-instance \
+    --db-instance-identifier <db-instance-identifier> \
+    --feature-name s3Export \
+    --role-arn arn:aws:iam::<aws-account-id>:role/RDSExportToS3Role
+```
+
+Replace `<db-instance-identifier>` with your actual RDS instance identifier and `<aws-account-id>` with your AWS account ID.
+
+To verify that the role has been associated correctly, you can use:
+
+```bash
+aws rds describe-db-instances \
+    --db-instance-identifier <db-instance-identifier> \
+    --query "DBInstances[*].AssociatedRoles"
+```
+
+---
+For more information, follow **[Step 4 in the official AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/postgresql-s3-export-access-bucket.html)**.
+
+### Import PostgreSQL logs into the table using extension log_fdw
+To use the `log_fdw` functions, we must first create the extension on the database instance. Connect to the database using psql and run the following command.
+```bash
+ postgres=> CREATE EXTENSION log_fdw;
+ CREATE EXTENSION
+```
+With the extension loaded, we can create a function that loads all the available PostgreSQL DB log files as a table within the database. The definition of the function is available on [GitHub](https://github.com/aws-samples/amazon-rds-and-amazon-aurora-logging-blog/blob/master/scripts/pg_log_fdw_management.sql).
+```bash
+-- Yaser Raja
+-- AWS Professional Services
+--
+-- This function uses log_fdw to load all the available RDS / Aurora PostgreSQL DB log files as a table.
+--
+-- Usage:
+--   1) Create this function
+--   2) Run the following to load all the log files
+--        SELECT public.load_postgres_log_files();
+--   3) Start looking at the logs
+--        SELECT * FROM logs.postgres_logs;
+--
+-- Here are the key features:
+-- - By default, a table named "postgres_logs" is created in schema "logs".
+-- - The schema name and table name can be changed via arguments.
+-- - If the table already exists, it will be DROPPED
+-- - If the schema 'logs' does not exist, it will be created.
+-- - Each log file is loaded as a foreign table and then made child of table logs.postgres_logs +-- - By default, CSV file format is preferred, it can be changed via argument v_prefer_csv +-- - Daily, hourly and minute-based log file name formats are supported for CSV and non-CSV output files +-- - postgresql.log.YYYY-MM-DD-HHMI +-- - postgresql.log.YYYY-MM-DD-HH +-- - postgresql.log.YYYY-MM-DD +-- - Supports the scenario where log files list consist of both the file name formats +-- - When CSV format is used, a check-constraint is added to the child table created for each log file +-- +CREATE OR REPLACE FUNCTION public.load_postgres_log_files(v_schema_name TEXT DEFAULT 'logs', v_table_name TEXT DEFAULT 'postgres_logs', v_prefer_csv BOOLEAN DEFAULT TRUE) +RETURNS TEXT +AS +$BODY$ +DECLARE + v_csv_supported INT := 0; + v_hour_pattern_used INT := 0; + v_filename TEXT; + v_dt timestamptz; + v_dt_max timestamptz; + v_partition_name TEXT; + v_ext_exists INT := 0; + v_server_exists INT := 0; + v_table_exists INT := 0; + v_server_name TEXT := 'log_server'; + v_filelist_sql TEXT; + v_enable_csv BOOLEAN := TRUE; +BEGIN + EXECUTE FORMAT('SELECT count(1) FROM pg_catalog.pg_extension WHERE extname=%L', 'log_fdw') INTO v_ext_exists; + IF v_ext_exists = 0 THEN + CREATE EXTENSION log_fdw; + END IF; + + EXECUTE 'SELECT count(1) FROM pg_catalog.pg_foreign_server WHERE srvname=$1' INTO v_server_exists USING v_server_name; + IF v_server_exists = 0 THEN + EXECUTE FORMAT('CREATE SERVER %s FOREIGN DATA WRAPPER log_fdw', v_server_name); + END IF; + + EXECUTE FORMAT('CREATE SCHEMA IF NOT EXISTS %I', v_schema_name); + + -- Set the search path to make sure the tables are created in dblogs schema + EXECUTE FORMAT('SELECT set_config(%L, %L, TRUE)', 'search_path', v_schema_name); + + -- The db log files are in UTC timezone so that date extracted from filename will also be UTC. + -- Setting timezone to get correct table constraints. + EXECUTE FORMAT('SELECT set_config(%L, %L, TRUE)', 'timezone', 'UTC'); + + -- Check the parent table exists + EXECUTE 'SELECT count(1) FROM information_schema.tables WHERE table_schema=$1 AND table_name=$2' INTO v_table_exists USING v_schema_name, v_table_name; + IF v_table_exists = 1 THEN + RAISE NOTICE 'Table % already exists. 
It will be dropped.', v_table_name; + EXECUTE FORMAT('SELECT set_config(%L, %L, TRUE)', 'client_min_messages', 'WARNING'); + EXECUTE FORMAT('DROP TABLE %I CASCADE', v_table_name); + EXECUTE FORMAT('SELECT set_config(%L, %L, TRUE)', 'client_min_messages', 'NOTICE'); + v_table_exists = 0; + END IF; + + -- Check the pg log format + SELECT 1 INTO v_csv_supported FROM pg_catalog.pg_settings WHERE name='log_destination' AND setting LIKE '%csvlog%'; + IF v_csv_supported = 1 AND v_prefer_csv = TRUE THEN + RAISE NOTICE 'CSV log format will be used.'; + v_filelist_sql = FORMAT('SELECT file_name FROM public.list_postgres_log_files() WHERE file_name LIKE %L ORDER BY 1 DESC', '%.csv'); + ELSE + RAISE NOTICE 'Default log format will be used.'; + v_filelist_sql = FORMAT('SELECT file_name FROM public.list_postgres_log_files() WHERE file_name NOT LIKE %L ORDER BY 1 DESC', '%.csv'); + v_enable_csv = FALSE; + END IF; + + FOR v_filename IN EXECUTE (v_filelist_sql) + LOOP + RAISE NOTICE 'Processing log file - %', v_filename; + + IF v_enable_csv = TRUE THEN + -- Dynamically checking the file name pattern so that both allowed file names patters are parsed + IF v_filename like 'postgresql.log.____-__-__-____.csv' THEN + v_dt=substring(v_filename from 'postgresql.log.#"%#"-____.csv' for '#')::timestamp + INTERVAL '1 HOUR' * (substring(v_filename from 'postgresql.log.____-__-__-#"%#"__.csv' for '#')::int); + v_dt_max = v_dt + INTERVAL '1 HOUR'; + v_dt=substring(v_filename from 'postgresql.log.#"%#"-____.csv' for '#')::timestamp + INTERVAL '1 HOUR' * (substring(v_filename from 'postgresql.log.____-__-__-#"%#"__.csv' for '#')::int) + INTERVAL '1 MINUTE' * (substring(v_filename from 'postgresql.log.____-__-__-__#"%#".csv' for '#')::int); + ELSIF v_filename like 'postgresql.log.____-__-__-__.csv' THEN + v_dt=substring(v_filename from 'postgresql.log.#"%#"-__.csv' for '#')::timestamp + INTERVAL '1 HOUR' * (substring(v_filename from 'postgresql.log.____-__-__-#"%#".csv' for '#')::int); + v_dt_max = v_dt + INTERVAL '1 HOUR'; + ELSIF v_filename like 'postgresql.log.____-__-__.csv' THEN + v_dt=substring(v_filename from 'postgresql.log.#"%#".csv' for '#')::timestamp; + v_dt_max = v_dt + INTERVAL '1 DAY'; + ELSE + RAISE NOTICE ' Skipping file'; + CONTINUE; + END IF; + ELSE + IF v_filename like 'postgresql.log.____-__-__-____' THEN + v_dt=substring(v_filename from 'postgresql.log.#"%#"-____' for '#')::timestamp + INTERVAL '1 HOUR' * (substring(v_filename from 'postgresql.log.____-__-__-#"%#"__' for '#')::int) + INTERVAL '1 MINUTE' * (substring(v_filename from 'postgresql.log.____-__-__-__#"%#"' for '#')::int); + ELSIF v_filename like 'postgresql.log.____-__-__-__' THEN + v_dt=substring(v_filename from 'postgresql.log.#"%#"-__' for '#')::timestamp + INTERVAL '1 HOUR' * (substring(v_filename from 'postgresql.log.____-__-__-#"%#"' for '#')::int); + ELSIF v_filename like 'postgresql.log.____-__-__' THEN + v_dt=substring(v_filename from 'postgresql.log.#"%#"' for '#')::timestamp; + ELSE + RAISE NOTICE ' Skipping file'; + CONTINUE; + END IF; + END IF; + v_partition_name=CONCAT(v_table_name, '_', to_char(v_dt, 'YYYYMMDD_HH24MI')); + EXECUTE FORMAT('SELECT public.create_foreign_table_for_log_file(%L, %L, %L)', v_partition_name, v_server_name, v_filename); + + IF v_table_exists = 0 THEN + EXECUTE FORMAT('CREATE TABLE %I (LIKE %I INCLUDING ALL)', v_table_name, v_partition_name); + v_table_exists = 1; + END IF; + + EXECUTE FORMAT('ALTER TABLE %I INHERIT %I', v_partition_name, v_table_name); + + IF v_enable_csv = TRUE THEN + EXECUTE 
FORMAT('ALTER TABLE %I ADD CONSTRAINT check_date_range CHECK (log_time>=%L and log_time < %L)', v_partition_name, v_dt, v_dt_max);
+        END IF;
+
+    END LOOP;
+
+    RETURN FORMAT('Postgres logs loaded to table %I.%I', v_schema_name, v_table_name);
+END;
+$BODY$
+LANGUAGE plpgsql;
+```
+With the function created, we can run it to load the PostgreSQL logs into the database. Each time we run the following command, the logs.postgres_logs table is updated with the most recent engine logs.
+```bash
+postgres=> SELECT public.load_postgres_log_files();
+```
+### Export PostgreSQL logs from the table into Amazon S3 using aws_s3
+Now that we have a function to query for new log statements, we use aws_s3 to export the retrieved logs to Amazon S3. From the prerequisites, we should already have an S3 bucket created and we should have attached an IAM role to the DB instance that allows for writing to your S3 bucket.
+Create the aws_s3 extension with the following code:
+```bash
+postgres=> CREATE EXTENSION aws_s3 CASCADE;
+CREATE EXTENSION
+```
+### Automate the log exports using extension pg_cron
+Now that we have the steps to perform log uploads to Amazon S3 using the log_fdw and aws_s3 extensions, we can automate these steps using pg_cron. With pg_cron, we can write database queries to be run on a schedule of our choosing.
+
+As part of the prerequisites, you should have pg_cron added to the shared_preload_libraries parameter in your database instance's parameter group. After pg_cron is loaded into shared_preload_libraries, you can simply run the following command to create the extension:
+
+```bash
+postgres=> CREATE EXTENSION pg_cron;
+CREATE EXTENSION
+```
+With pg_cron created, we can use the extension to perform the PostgreSQL log uploads on a cron-defined schedule. To do this, we schedule a cron job, passing in a name, a schedule, and the log export query we want to run. First, create the supporting objects, then schedule the job as shown below.
+
+Create a table logs.postgres_logs_export_tracker to track the last exported log timestamp in `last_exported_log_time`.
+```bash
+ CREATE TABLE IF NOT EXISTS logs.postgres_logs_export_tracker (
+     id SERIAL PRIMARY KEY,
+     last_exported_log_time TIMESTAMPTZ NOT NULL DEFAULT '1970-01-01 00:00:00+00'
+ );
+
+ INSERT INTO logs.postgres_logs_export_tracker (last_exported_log_time) VALUES (NOW());
+```
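+The tracker row can be inspected at any time to check the current export watermark:
+```bash
+postgres=> SELECT * FROM logs.postgres_logs_export_tracker;
+```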
+Create the function `export_postgres_logs_to_s3` to export the logs to the S3 bucket. Replace the parameters `delay_in_minutes`, `S3_bucket_name`, and `region` with actual values.
+```bash
+ CREATE OR REPLACE FUNCTION public.export_postgres_logs_to_s3()
+ RETURNS void LANGUAGE plpgsql SECURITY DEFINER
+ AS $$
+ DECLARE
+     last_exported TIMESTAMPTZ;
+     new_last_exported TIMESTAMPTZ;
+     cutoff_time TIMESTAMPTZ;
+     latest_tracker_id INT;
+     export_query TEXT;
+     export_filename TEXT;
+     delay_interval INTERVAL := INTERVAL '<delay_in_minutes> minutes'; -- Adjust the delay window, for example 5 minutes
+ BEGIN
+     -- 1. Read last exported timestamp from tracker
+     SELECT id, last_exported_log_time
+     INTO latest_tracker_id, last_exported
+     FROM logs.postgres_logs_export_tracker
+     ORDER BY id DESC
+     LIMIT 1;
+
+     IF last_exported IS NULL THEN
+         last_exported := '1970-01-01 00:00:00+00';
+     END IF;
+
+     -- 2. Compute cutoff time
+     cutoff_time := NOW() - delay_interval;
+
+     IF cutoff_time <= last_exported THEN
+         RAISE NOTICE 'Cutoff time <= last exported time (%), skipping.', last_exported;
+         RETURN;
+     END IF;
+
+     -- 3. Load logs (your implementation)
+     PERFORM public.load_postgres_log_files();
+
+     -- 4. Build export query
+     -- Note: the OR condition must be parenthesized; AND binds tighter than OR,
+     -- so without the parentheses the time window would only apply to the error
+     -- branch and AUDIT rows would be re-exported on every run.
+     export_query := format($f$
+         SELECT * FROM logs.postgres_logs
+         WHERE (message ~* '(AUDIT:)' OR sql_state_code IS DISTINCT FROM '00000')
+           AND log_time > %L
+           AND log_time <= %L
+         ORDER BY log_time, session_id, session_line_num
+     $f$, last_exported::TEXT, cutoff_time::TEXT);
+
+     export_filename := to_char(NOW(), '"postgres-log-"YYYYMMDD_HH24MI".csv"');
+
+     -- 5. Export to S3
+     PERFORM aws_s3.query_export_to_s3(
+         export_query,
+         '<S3_bucket_name>',
+         export_filename,
+         '<region>',
+         options := 'format csv, header true'
+     );
+
+     -- 6. Update tracker to cutoff time
+     UPDATE logs.postgres_logs_export_tracker
+     SET last_exported_log_time = cutoff_time
+     WHERE id = latest_tracker_id;
+
+     RAISE NOTICE 'Logs exported. Tracker updated to %', cutoff_time;
+ END;
+ $$;
+```
+Here we have scheduled a cron job that runs every minute; you can customize it by updating the cron schedule expression, i.e. `* * * * *`
+```bash
+ SELECT cron.schedule(
+     'postgres-s3-log-uploads-every-minute',
+     '* * * * *',
+     'SELECT public.export_postgres_logs_to_s3();'
+ );
+```
+If you decide at any time that you want to cancel these automated log uploads, you can unschedule the associated cron job by passing in the job name specified previously. In the following example, the job name is `postgres-s3-log-uploads-every-minute`:
+```bash
+postgres=> SELECT cron.unschedule('postgres-s3-log-uploads-every-minute');
+unschedule
+------------
+ t
+(1 row)
+```
+### Creating the SQS queue
+The SQS queue created in these steps will receive messages from the Event Notification (configured in the next section).
+These messages, generated by monitoring the S3 bucket, will contain details of the recently added S3 log files.
+
+
+#### Procedure
+1. Go to https://console.aws.amazon.com/
+2. Click **Services**
+3. Search for SQS and click on **Simple Queue Service**
+4. Click **Create Queue**.
+5. Select the type as **Standard**.
+6. Enter the name for the queue
+7. Keep the rest of the default settings
+
+### Creating a policy for the relevant IAM User
+Perform the following steps for the IAM user who is accessing the SQS logs in Guardium; a sketch of the resulting policy follows this list.
+
+#### Procedure
+1. Go to https://console.aws.amazon.com/
+2. Go to **IAM service** > **Policies** > **Create Policy**.
+3. Select **service as SQS**.
+4. Check the following checkboxes:
+   * **ListQueues**
+   * **DeleteMessage**
+   * **DeleteMessageBatch**
+   * **GetQueueAttributes**
+   * **GetQueueUrl**
+   * **ReceiveMessage**
+   * **ChangeMessageVisibility**
+   * **ChangeMessageVisibilityBatch**
+5. In the resources, specify the ARN of the queue created in the above step.
+6. Click **Review policy** and specify the policy name.
+7. Click **Create policy**.
+8. Assign the policy to the user
+   1. Log in to the IAM console as an IAM user (https://console.aws.amazon.com/iam/).
+   2. Go to **Users** on the console and select the relevant IAM user to whom you want to give permissions.
+      Click the **username**.
+   3. In the **Permissions tab**, click **Add permissions**.
+   4. Click **Attach existing policies directly**.
+   5. Search for the policy created and check the checkbox next to it.
+   6. Click **Next: Review**
+   7. Click **Add permissions**
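+For reference, the policy document produced by these selections looks roughly like this; the queue ARN is a placeholder:
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "sqs:ListQueues",
+                "sqs:DeleteMessage",
+                "sqs:DeleteMessageBatch",
+                "sqs:GetQueueAttributes",
+                "sqs:GetQueueUrl",
+                "sqs:ReceiveMessage",
+                "sqs:ChangeMessageVisibility",
+                "sqs:ChangeMessageVisibilityBatch"
+            ],
+            "Resource": "<SQS-queue-ARN>"
+        }
+    ]
+}
+```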
+### Creating the Event Notification
+The Event Notification is triggered when a new object is added to the S3 bucket and sends the events to the SQS queue.
+Follow the steps below to configure the Event Notification
+
+#### Creating Access Policy to allow Notifications
+Update the Access Policy of the SQS queue to allow the Notification Service to send messages to the Queue
+
+__*Procedure*__
+1. Go to https://console.aws.amazon.com/
+2. Go to **SQS** -> **Queues**
+3. Click on the Queue that was created in the above step
+4. Go to **Access Policy**
+5. Click on **Edit**
+6. Add the below details to the existing policy
+
+```
+{
+    "Sid": "example-statement-ID",
+    "Effect": "Allow",
+    "Principal": {
+        "Service": "s3.amazonaws.com"
+    },
+    "Action": "SQS:SendMessage",
+    "Resource": "<SQS-queue-ARN>",
+    "Condition": {
+        "StringEquals": {
+            "aws:SourceAccount": "<aws-account-id>"
+        },
+        "ArnLike": {
+            "aws:SourceArn": "<S3-bucket-ARN>"
+        }
+    }
+}
+```
+
+7. Click on **Save**
+
+
+#### Create the Event Notification
+__*Procedure*__
+1. Go to https://console.aws.amazon.com/
+2. Go to **Services**. Search for **S3**.
+3. Click on the S3 bucket to which the logs are exported.
+4. Click **Properties**
+5. Navigate to **Event Notifications**
+6. Click on **Create event notification**.
+7. Enter **Event name**
+8. Optionally, enter a **Prefix** to capture only the specific traffic.
+9. In **Event Types** Select **All object create events**.
+10. In **Destination** Select **SQS queue**.
+11. In **Specify SQS Queue**, either select the queue name from the drop-down list (**Choose from your SQS queues**) or enter the queue ARN manually (**Enter SQS queue ARN**).
+12. Click on **Save Changes**
\ No newline at end of file
diff --git a/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/logstash-filter-s3sqs_postgresql_guardium_plugin_filter.zip b/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/logstash-filter-s3sqs_postgresql_guardium_plugin_filter.zip
new file mode 100644
index 000000000..5ef498a0b
Binary files /dev/null and b/filter-plugin/logstash-filter-postgres-guardium/PostgresOverS3SQSPackage/logstash-filter-s3sqs_postgresql_guardium_plugin_filter.zip differ
diff --git a/filter-plugin/logstash-filter-postgres-guardium/VERSION b/filter-plugin/logstash-filter-postgres-guardium/VERSION
new file mode 100644
index 000000000..6d7de6e6a
--- /dev/null
+++ b/filter-plugin/logstash-filter-postgres-guardium/VERSION
@@ -0,0 +1 @@
+1.0.2
diff --git a/filter-plugin/logstash-filter-postgres-guardium/build.gradle b/filter-plugin/logstash-filter-postgres-guardium/build.gradle
new file mode 100644
index 000000000..2d0077a8b
--- /dev/null
+++ b/filter-plugin/logstash-filter-postgres-guardium/build.gradle
@@ -0,0 +1,171 @@
+import java.nio.file.Files
+import static java.nio.file.StandardCopyOption.REPLACE_EXISTING
+
+apply plugin: 'java'
+apply plugin: 'jacoco'
+apply plugin: 'com.github.johnrengelman.shadow'
+apply from: LOGSTASH_CORE_PATH + "/../rubyUtils.gradle"
+
+// ===========================================================================
+group = 'com.ibm.guardium.s3sqspostgresql'
+version = file("VERSION").text.trim()
+description = "S3SQS Postgresql-Guardium filter plugin"
+
+// ===========================================================================
+pluginInfo.licenses = ['Apache-2.0']
+pluginInfo.longDescription = "This gem is a Logstash S3SQS postgresql filter plugin required to be installed as part of IBM Security Guardium, Guardium Universal connector configuration. This gem is not a stand-alone program."
+pluginInfo.authors = ['IBM'] +pluginInfo.email = [''] +pluginInfo.homepage = "https://github.com/IBM/universal-connectors" +pluginInfo.pluginType = "filter" +pluginInfo.pluginClass = "S3SQSPostgresqlGuardiumPluginFilter" +pluginInfo.pluginName = "s3sqs_postgresql_guardium_plugin_filter" + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 + +def jacocoVersion = '0.8.4' +def minimumCoverageStr = System.getenv("MINIMUM_COVERAGE") ?: "50.0%" +if (minimumCoverageStr.endsWith("%")) { + minimumCoverageStr = minimumCoverageStr[0..-2] +} +def minimumCoverage = Float.valueOf(minimumCoverageStr) / 100 + +buildscript { + repositories { + maven { url "https://plugins.gradle.org/m2/" } + mavenCentral() + jcenter() + } + dependencies { + classpath 'com.github.jengelman.gradle.plugins:shadow:4.0.4' + classpath "org.barfuin.gradle.jacocolog:gradle-jacoco-log:3.0.0-RC2" + } +} + +repositories { + mavenCentral() +} + +dependencies { + implementation 'commons-validator:commons-validator:1.7' + implementation 'org.apache.logging.log4j:log4j-core:2.17.1' + implementation 'org.apache.commons:commons-lang3:3.7' + implementation 'com.google.code.gson:gson:2.8.9' + implementation fileTree(dir: LOGSTASH_CORE_PATH, include: "build/libs/logstash-core*.jar") + implementation fileTree(dir: GUARDIUM_UNIVERSALCONNECTOR_COMMONS_PATH, include: "guardium-universalconnector-commons*.jar") + + // ✅ JUnit 4 only + testImplementation 'junit:junit:4.13.2' + testImplementation 'org.mockito:mockito-core:5.17.0' + testImplementation 'org.jruby:jruby-complete:9.2.7.0' + testImplementation fileTree(dir: GUARDIUM_UNIVERSALCONNECTOR_COMMONS_PATH, include: "guardium-universalconnector-commons*.jar") +} + +test { + useJUnit() // ✅ Make sure JUnit 4 is used explicitly + testLogging { + events "passed", "skipped", "failed" + } +} + +tasks.register("vendor") { + dependsOn shadowJar + doLast { + String vendorPathPrefix = "vendor/jar-dependencies" + String projectGroupPath = project.group.replaceAll('\\.', '/') + File projectJarFile = file("${vendorPathPrefix}/${projectGroupPath}/${pluginInfo.pluginFullName()}/${project.version}/${pluginInfo.pluginFullName()}-${project.version}.jar") + projectJarFile.mkdirs() + Files.copy(file("$buildDir/libs/${project.name}-${project.version}.jar").toPath(), projectJarFile.toPath(), REPLACE_EXISTING) + validatePluginJar(projectJarFile, project.group) + } +} + +shadowJar { + classifier = null +} + +clean { + delete "${projectDir}/Gemfile" + delete "${projectDir}/${pluginInfo.pluginFullName()}.gemspec" + delete "${projectDir}/lib/" + delete "${projectDir}/vendor/" + new FileNameFinder().getFileNames(projectDir.toString(), "${pluginInfo.pluginFullName()}-*.*.*.gem").each { + delete it + } +} + +tasks.withType(JavaCompile) { + options.encoding = 'UTF-8' +} + +tasks.register("generateRubySupportFiles") { + doLast { + generateRubySupportFilesForPlugin(project.description, project.group, version) + } +} + +tasks.register("removeObsoleteJars") { + doLast { + new FileNameFinder().getFileNames( + projectDir.toString(), + "vendor/**/${pluginInfo.pluginFullName()}*.jar", + "vendor/**/${pluginInfo.pluginFullName()}-${version}.jar" + ).each { delete it } + } +} + +tasks.register("gem") { + dependsOn = [downloadAndInstallJRuby, removeObsoleteJars, vendor, generateRubySupportFiles] + doLast { + buildGem(projectDir, buildDir, "${pluginInfo.pluginFullName()}.gemspec") + } +} + +// ✅ JaCoCo Setup +jacoco { + toolVersion = jacocoVersion + reportsDir = file("$buildDir/reports/jacoco") +} + +jacocoTestReport { + reports { + 
html.enabled true + xml.enabled true + csv.enabled true + html.destination file("${buildDir}/reports/jacoco") + csv.destination file("${buildDir}/reports/jacoco/all.csv") + } + executionData.from fileTree(dir: "${buildDir}/jacoco/", includes: ['**/*.exec']) + + afterEvaluate { + classDirectories.setFrom(files(classDirectories.files.collect { + fileTree(dir: it, exclude: []) + })) + } + + doLast { + println "Report -> file://${buildDir}/reports/jacoco/index.html" + } +} + +jacocoTestCoverageVerification { + violationRules { + rule { + limit { + minimum = minimumCoverage + } + } + } + + executionData.from fileTree(dir: "${buildDir}/jacoco/", includes: ['**/*.exec']) + + afterEvaluate { + classDirectories.setFrom(files(classDirectories.files.collect { + fileTree(dir: it, exclude: []) + })) + } +} + +test.finalizedBy jacocoTestReport +check.dependsOn jacocoTestCoverageVerification, jacocoTestReport diff --git a/filter-plugin/logstash-filter-postgres-guardium/gradle/wrapper/gradle-wrapper.jar b/filter-plugin/logstash-filter-postgres-guardium/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 000000000..7454180f2 Binary files /dev/null and b/filter-plugin/logstash-filter-postgres-guardium/gradle/wrapper/gradle-wrapper.jar differ diff --git a/filter-plugin/logstash-filter-postgres-guardium/gradle/wrapper/gradle-wrapper.properties b/filter-plugin/logstash-filter-postgres-guardium/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 000000000..aa991fcea --- /dev/null +++ b/filter-plugin/logstash-filter-postgres-guardium/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.4.2-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/filter-plugin/logstash-filter-postgres-guardium/gradlew b/filter-plugin/logstash-filter-postgres-guardium/gradlew new file mode 100755 index 000000000..cccdd3d51 --- /dev/null +++ b/filter-plugin/logstash-filter-postgres-guardium/gradlew @@ -0,0 +1,172 @@ +#!/usr/bin/env sh + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. 
+if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ 
"$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "$@" diff --git a/filter-plugin/logstash-filter-postgres-guardium/gradlew.bat b/filter-plugin/logstash-filter-postgres-guardium/gradlew.bat new file mode 100755 index 000000000..107acd32c --- /dev/null +++ b/filter-plugin/logstash-filter-postgres-guardium/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/filter-plugin/logstash-filter-postgres-guardium/mainREADME.md b/filter-plugin/logstash-filter-postgres-guardium/mainREADME.md new file mode 100644 index 000000000..a4cc153f4 --- /dev/null +++ b/filter-plugin/logstash-filter-postgres-guardium/mainREADME.md @@ -0,0 +1,14 @@ +# Postgres Universal Connector + +## Follow this link to set up and use Postgres Universal Connector Logstash Plugin + +[Postgres](./README.md) + +## Follow this link to set up and use Aurora Postgres Universal Connector over CloudWatch Connect + +[AuroraPostgresOverConnectCloudwatch](../../docs/KafkaBasedUCs/AuroraPostgresCloudwatchKafkaConnect.md) + +## Follow this link to set up and use AWS Postgres Universal Connector over CloudWatch Connect + +[AWSPostgresOverConnectCloudwatch](../../docs/KafkaBasedUCs/PostgresqlCloudwatchKafkaConnect.md) + diff --git a/filter-plugin/logstash-filter-postgres-guardium/src/main/java/com/ibm/guardium/s3sqspostgresql/Constants.java b/filter-plugin/logstash-filter-postgres-guardium/src/main/java/com/ibm/guardium/s3sqspostgresql/Constants.java new file mode 100644 index 000000000..dad19ad93 --- /dev/null +++ b/filter-plugin/logstash-filter-postgres-guardium/src/main/java/com/ibm/guardium/s3sqspostgresql/Constants.java @@ -0,0 +1,89 @@ +/* +#Copyright 2020-2021 IBM Inc. All rights reserved +#SPDX-License-Identifier: Apache-2.0 +#*/ +package com.ibm.guardium.s3sqspostgresql; + +public interface Constants { + + public static final String CONNECTION_FROM = "connection_from"; + + public static final String APP_USER_NAME = "AWSService"; + + public static final String TIMESTAMP = "timestamp"; + + public static final String STATEMENT = "statement"; + + public static final String CLIENT_IP = "client_ip"; + + public static final String CLIENT_PORT = "port"; + + public static final String SUCCEEDED = "e_level"; + + public static final String DATABASE_NAME = "database_name"; + + public static final String PARSED_MESSAGE = "parsed_message"; + + public static final String USER_NAME = "user_name"; + + public static final String DB_USER = "db_user"; + + public static final String SESSION_ID = "session_id"; + + public static final String FULL_SQL_QUERY = "full_sql_query"; + + public static final String ERROR_MESSAGE = "error_message"; + + public static final String SQL_STATE_CODE = "sql_state_code"; + + public static final String SQL_STATE_CODE_SUCCESS = "00000"; + + public static final String DEFAULT_IP = "0.0.0.0"; + + public static final int DEFAULT_PORT = -1; + + public static final String UNKNOWN_STRING = ""; + + public static final String SERVER_TYPE_STRING = "POSTGRESQL"; + + public static final String DATA_PROTOCOL_STRING = "POSTGRESQL"; + + public static final String LANGUAGE = "PGRS"; + + public static final String SQL_ERROR = "SQL_ERROR"; + + public static final String LOGIN_FAILED = "LOGIN_FAILED"; + + public static final String COMM_PROTOCOL = "AWSApiCall"; + + public static final String MESSAGE = "message"; + + public static final String APPLICATION_NAME = "application_name"; + + public static final String NA = "N.A."; + + public static final String RECORDS = "records"; + + public static final String PREFIX = "pre_fix"; + + public static final String ACCOUNT_ID = "account_id"; + + public static final String ERROR_SEVERITY = "error_severity"; + + public static final String ERROR = "ERROR"; + + public static final String FATAL = "FATAL"; + + public static final String 
QUERY = "query"; + + public static final String LOG_GROUP = "logGroup"; + + public static final String DB_NAME = "db_name"; + + public static final String LOG_LEVEL = "log_level"; + + public static final String SERVER_HOST_NAME = "server_hostname"; + + public static final String INSTANCE_NAME = "instance_name"; + +} \ No newline at end of file diff --git a/filter-plugin/logstash-filter-postgres-guardium/src/main/java/com/ibm/guardium/s3sqspostgresql/Parser.java b/filter-plugin/logstash-filter-postgres-guardium/src/main/java/com/ibm/guardium/s3sqspostgresql/Parser.java new file mode 100644 index 000000000..0d0903125 --- /dev/null +++ b/filter-plugin/logstash-filter-postgres-guardium/src/main/java/com/ibm/guardium/s3sqspostgresql/Parser.java @@ -0,0 +1,346 @@ +/* +#Copyright 2020-2021 IBM Inc. All rights reserved +#SPDX-License-Identifier: Apache-2.0 +#*/ +package com.ibm.guardium.s3sqspostgresql; + +import co.elastic.logstash.api.Event; +import com.ibm.guardium.universalconnector.commons.structures.*; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.text.ParseException; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class Parser { + + private static Logger log = LogManager.getLogger(Parser.class); + + public static Record parseRecord(final Event e) throws ParseException { + + Record record = new Record(); + + record.setAccessor(Parser.parseAccessor(e)); + + if (e.getField(Constants.PARSED_MESSAGE) != null) { + Object parsedMessageObj = e.getField(Constants.PARSED_MESSAGE); + String dbName = Constants.UNKNOWN_STRING; + if (parsedMessageObj instanceof Map) { + Map parsedMessage = (Map) parsedMessageObj; + + String accountId = getAccountId(e); + + if (!parsedMessage.isEmpty() && parsedMessage.get(Constants.DATABASE_NAME) != null + && !accountId.isEmpty()) { + dbName = parsedMessage.get(Constants.DATABASE_NAME).toString(); + } + // Set dbName for Amazon Data Firehose method + else if (null != e.getData().get(Constants.DB_NAME) + && !e.getData().get(Constants.DB_NAME).toString().isEmpty()) { + dbName = e.getData().get(Constants.DB_NAME).toString(); + } + if (dbName.isEmpty()) { + record.setDbName(Constants.NA); + } else { + record.setDbName(accountId + ":" + getInstanceName(e) + ":" + dbName); + } + + if (parsedMessage.get(Constants.CONNECTION_FROM) != null) { + + setSeesionLocator(e, parsedMessage, record, dbName); + } + // Set SessionLocator for Amazon Data Firehose method + else if (null != e.getData().get(Constants.CLIENT_IP) + && null != e.getData().get(Constants.CLIENT_PORT) + && !e.getData().get(Constants.CLIENT_IP).toString().isEmpty() + && !e.getData().get(Constants.CLIENT_PORT).toString().isEmpty()) { + String clientIP = e.getData().get(Constants.CLIENT_IP).toString(); + String clientPort = e.getData().get(Constants.CLIENT_PORT).toString(); + record.setSessionLocator(Parser.parseSessionLocator(e, clientIP, clientPort, dbName)); + + } else { + setDefaultSessionLocator(record); + } + record.setSessionId(Constants.UNKNOWN_STRING); + setOriginalSqlCommand(e, (Map) parsedMessageObj, record); + } + } + + record.setAppUserName(Constants.APP_USER_NAME); + + record.setTime(Parser.parseTimestamp(e)); + return record; + } + + private static String getAccountId(Event e) { + String accountId = ""; + if 
(e.getField(Constants.ACCOUNT_ID) instanceof String) { + accountId = e.getField(Constants.ACCOUNT_ID).toString(); + } else if (e.getField(Constants.ACCOUNT_ID) instanceof List) { + List rawList = (List) e.getField(Constants.ACCOUNT_ID); + List arrayList = new ArrayList<>(rawList); + + if (!arrayList.isEmpty()) { + accountId = String.valueOf(arrayList.get(0)); + } + } + return accountId; + } + + public static String getInstanceName(Event e) { + String res = ""; + if (e.getField(Constants.INSTANCE_NAME) instanceof String) { + res = e.getField(Constants.INSTANCE_NAME).toString(); + } else if (e.getField(Constants.INSTANCE_NAME) instanceof List) { + List rawList = (List) e.getField(Constants.INSTANCE_NAME); + List arrayList = new ArrayList<>(rawList); + + if (!arrayList.isEmpty()) { + res = String.valueOf(arrayList.get(0)); + } + } + return res; + } + + private static void setOriginalSqlCommand(Event e, Map parsedMessageObj, Record record) { + if (parsedMessageObj.get(Constants.SQL_STATE_CODE) != null + && !parsedMessageObj.get(Constants.SQL_STATE_CODE).toString().isEmpty() + && parsedMessageObj.get(Constants.SQL_STATE_CODE).toString().equals(Constants.SQL_STATE_CODE_SUCCESS)) { + Data data = new Data(); + setSQL(e, record, data); + } + // Set SQL for Amazon Data Firehose method + else if (null != parsedMessageObj.get(Constants.LOG_GROUP) + && null != e.getData().get(Constants.LOG_LEVEL) + && null != e.getField(Constants.FULL_SQL_QUERY) + && !parsedMessageObj.get(Constants.LOG_GROUP).toString().isEmpty() + && !e.getData().get(Constants.LOG_LEVEL).toString().isEmpty() + && !e.getField(Constants.FULL_SQL_QUERY).toString().isEmpty()) { + Data data = new Data(); + setSQL(e, record, data); + } else { + // Error message for Amazon Data Firehose method + if (null != e.getData().get(Constants.LOG_LEVEL) + && null != e.getData().get(Constants.ERROR_MESSAGE) + && !e.getData().get(Constants.LOG_LEVEL).toString().isEmpty() + && (e.getData().get(Constants.LOG_LEVEL).toString().equals(Constants.FATAL) + || !e.getData().get(Constants.ERROR_MESSAGE).toString().isEmpty())) { + + ExceptionRecord exceptionRecord = new ExceptionRecord(); + exceptionRecord.setExceptionTypeId(Constants.LOGIN_FAILED); + if (null != e.getData().get(Constants.ERROR_MESSAGE) + && e.getData().get(Constants.ERROR_MESSAGE).toString().isEmpty()) { + String message = e.getData().get(Constants.ERROR_MESSAGE).toString(); + exceptionRecord.setDescription(message); + } + exceptionRecord.setSqlString(Constants.UNKNOWN_STRING); + record.setSessionLocator(setDefaultSessionLocator(record)); + record.setException(exceptionRecord); + } else if (parsedMessageObj.get(Constants.ERROR_SEVERITY) != null && + (parsedMessageObj.get(Constants.ERROR_SEVERITY).equals(Constants.ERROR))) { + + ExceptionRecord exceptionRecord = new ExceptionRecord(); + exceptionRecord.setExceptionTypeId(Constants.SQL_ERROR); + + if (null != parsedMessageObj.get(Constants.MESSAGE) && + !parsedMessageObj.get(Constants.MESSAGE).toString().isEmpty()) { + String message = parsedMessageObj.get(Constants.MESSAGE).toString(); + message = message.replaceAll("\"(\\w+)\"", "$1"); + exceptionRecord.setDescription(message); + } + exceptionRecord.setSqlString(Constants.UNKNOWN_STRING); + record.setSessionLocator(setDefaultSessionLocator(record)); + record.setException(exceptionRecord); + } else { + ExceptionRecord exceptionRecord = new ExceptionRecord(); + exceptionRecord.setExceptionTypeId(Constants.LOGIN_FAILED); + if (null != parsedMessageObj.get(Constants.MESSAGE) && + 
!parsedMessageObj.get(Constants.MESSAGE).toString().isEmpty()) { + String message = parsedMessageObj.get(Constants.MESSAGE).toString(); + message = message.replaceAll("\"(\\w+)\"", "$1"); + exceptionRecord.setDescription(message); + } + exceptionRecord.setSqlString(Constants.UNKNOWN_STRING); + record.setSessionLocator(setDefaultSessionLocator(record)); + record.setException(exceptionRecord); + } + } + } + + private static void setSQL(Event e, Record record, Data data) { + if (e.getField(Constants.FULL_SQL_QUERY) != null) { + String sqlQuery = e.getField(Constants.FULL_SQL_QUERY).toString(); + if (sqlQuery.contains("\"\"")) { + sqlQuery = sqlQuery.replaceAll("\"\"([^\"]*)\"\"", "\"$1\""); + } + data.setOriginalSqlCommand(sqlQuery); + } else { + data.setOriginalSqlCommand(Constants.NA); + } + record.setData(data); + } + + // Set Default SessionLocator in case of LOGIN_FAILED or ERROR + private static SessionLocator setDefaultSessionLocator(Record record) { + String serverIp = Constants.DEFAULT_IP; + SessionLocator sessionLocator = new SessionLocator(); + sessionLocator.setClientIp(Constants.DEFAULT_IP); + sessionLocator.setClientPort(Constants.DEFAULT_PORT); + sessionLocator.setServerIp(serverIp); + sessionLocator.setServerPort(Constants.DEFAULT_PORT); + sessionLocator.setIpv6(false); + sessionLocator.setClientIpv6(Constants.UNKNOWN_STRING); + sessionLocator.setServerIpv6(Constants.UNKNOWN_STRING); + return sessionLocator; + } + + private static void setSeesionLocator(Event e, Map parsedMessage, Record record, String dbName) { + Object connectionFrom = parsedMessage.get(Constants.CONNECTION_FROM); + String[] connectionArr = connectionFrom.toString().split(":"); + + String clientIP = Constants.UNKNOWN_STRING; + String clientPort = Constants.UNKNOWN_STRING; + + if (connectionArr[0] != null && !connectionArr[0].isEmpty() && + connectionArr[1] != null && !connectionArr[1].isEmpty()) { + clientIP = connectionArr[0]; + clientPort = connectionArr[1]; + } + + record.setSessionLocator(Parser.parseSessionLocator(e, clientIP, clientPort, dbName)); + } + + public static Time parseTimestamp(final Event e) { + long millis = 0; + try { + Object field = e.getField(Constants.TIMESTAMP); + + String dateString; + + if (field instanceof Object[]) { + Object[] arr = (Object[]) field; + if (arr.length > 0) { + dateString = arr[0].toString(); + } else { + dateString = ""; + } + } else { + dateString = field.toString(); + // Handle case where string looks like "[ts1, ts2]" + if (dateString.startsWith("[") && dateString.endsWith("]")) { + String[] parts = dateString.substring(1, dateString.length() - 1).split(","); + if (parts.length > 0) { + dateString = parts[0].trim(); + } + } + } + // Try ISO first + try { + ZonedDateTime parsedTime = ZonedDateTime.parse(dateString, DateTimeFormatter.ISO_DATE_TIME); + millis = parsedTime.toInstant().toEpochMilli(); + } catch (DateTimeParseException isoEx) { + // Fallback to local date time format + log.debug("ISO parse failed, trying fallback format", isoEx); + LocalDateTime localDateTime = LocalDateTime.parse(dateString, DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")); + ZonedDateTime zonedDateTime = localDateTime.atZone(ZoneId.systemDefault()); + millis = zonedDateTime.toInstant().toEpochMilli(); + } + + } catch (Exception exe) { + log.error("parseTimestamp final failure: {}", exe); + } + return new Time(millis, 0, 0); + } + + public static SessionLocator parseSessionLocator(Event e, String clientIP, String clientPort, String dbName) { + + String accountId = 
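+                // accountId:dbName stands in for a server IP here, since the
+                // parsed log events do not carry one.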
getAccountId(e); + String serverIp = accountId + ":" + dbName; + + SessionLocator sessionLocator = new SessionLocator(); + sessionLocator.setClientIp(clientIP); + sessionLocator.setClientPort(Integer.parseInt(clientPort)); + sessionLocator.setServerIp(serverIp); + sessionLocator.setServerPort(Constants.DEFAULT_PORT); + sessionLocator.setIpv6(false); + sessionLocator.setClientIpv6(Constants.UNKNOWN_STRING); + sessionLocator.setServerIpv6(Constants.UNKNOWN_STRING); + return sessionLocator; + } + + + public static Accessor parseAccessor(final Event e) { + Accessor accessor = new Accessor(); + accessor.setDataType(Accessor.DATA_TYPE_GUARDIUM_SHOULD_PARSE_SQL); + accessor.setLanguage(Constants.LANGUAGE); + accessor.setClientHostName(Constants.NA); + accessor.setClientOs(Constants.UNKNOWN_STRING); + accessor.setServerOs(Constants.UNKNOWN_STRING); + + String accId = getAccountId(e); + String instanceName = getInstanceName(e); + + if (e.getField(Constants.PARSED_MESSAGE) != null) { + Object parsedMessageObj = e.getField(Constants.PARSED_MESSAGE); + if (parsedMessageObj instanceof Map) { + Map parsedMessage = (Map) parsedMessageObj; + + String dbUser = Constants.UNKNOWN_STRING; + if (!parsedMessage.isEmpty() && parsedMessage.get(Constants.USER_NAME) != null) { + dbUser = parsedMessage.get(Constants.USER_NAME).toString(); + } + // Set DBUser for Amazon Data Firehose method + else if (null != e.getData() + && null != e.getData().get(Constants.DB_USER) + && !e.getData().get(Constants.DB_USER).toString().isEmpty()) { + dbUser = e.getData().get(Constants.DB_USER).toString(); + } + if (dbUser.isEmpty()) { + accessor.setDbUser(Constants.NA); + } else { + accessor.setDbUser(dbUser); + } + + accessor.setOsUser(Constants.UNKNOWN_STRING); + String accountId = getAccountId(e); + if (parsedMessage.get(Constants.DATABASE_NAME) != null + && !accountId.isEmpty()) { + accessor.setServiceName(accountId + ":" + instanceName + ":" + parsedMessage.get(Constants.DATABASE_NAME).toString()); + } + // Set ServiceName for Amazon Data Firehose method + else if (null != e.getData() + && null != e.getData().get(Constants.DB_NAME) + && !e.getData().get(Constants.DB_NAME).toString().isEmpty()) { + accessor.setServiceName(accountId + ":" + instanceName + ":" + e.getData().get(Constants.DB_NAME).toString()); + } else { + accessor.setServiceName(Constants.NA); + } + + if (parsedMessage.get(Constants.APPLICATION_NAME) != null) { + accessor.setSourceProgram(parsedMessage.get(Constants.APPLICATION_NAME).toString()); + } else { + accessor.setSourceProgram(Constants.UNKNOWN_STRING); + } + } + } + + accessor.setServerType(Constants.SERVER_TYPE_STRING); + accessor.setCommProtocol(Constants.COMM_PROTOCOL); + accessor.setDbProtocol(Constants.DATA_PROTOCOL_STRING); + accessor.setDbProtocolVersion(Constants.UNKNOWN_STRING); + accessor.setClient_mac(Constants.UNKNOWN_STRING); + accessor.setServerDescription(Constants.UNKNOWN_STRING); + accessor.setServerHostName(accId + ":" + instanceName); + + return accessor; + } + + +} \ No newline at end of file diff --git a/filter-plugin/logstash-filter-postgres-guardium/src/main/java/com/ibm/guardium/s3sqspostgresql/S3SQSPostgresqlGuardiumPluginFilter.java b/filter-plugin/logstash-filter-postgres-guardium/src/main/java/com/ibm/guardium/s3sqspostgresql/S3SQSPostgresqlGuardiumPluginFilter.java new file mode 100644 index 000000000..55cae08bb --- /dev/null +++ b/filter-plugin/logstash-filter-postgres-guardium/src/main/java/com/ibm/guardium/s3sqspostgresql/S3SQSPostgresqlGuardiumPluginFilter.java @@ -0,0 
+1,111 @@
+
+/*
+#Copyright 2020-2021 IBM Inc. All rights reserved
+#SPDX-License-Identifier: Apache-2.0
+#*/
+package com.ibm.guardium.s3sqspostgresql;
+
+import java.io.File;
+import java.util.Collection;
+import java.util.Collections;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LoggerContext;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.ibm.guardium.universalconnector.commons.GuardConstants;
+import com.ibm.guardium.universalconnector.commons.structures.Record;
+
+import co.elastic.logstash.api.Configuration;
+import co.elastic.logstash.api.Context;
+import co.elastic.logstash.api.Event;
+import co.elastic.logstash.api.Filter;
+import co.elastic.logstash.api.FilterMatchListener;
+import co.elastic.logstash.api.LogstashPlugin;
+import co.elastic.logstash.api.PluginConfigSpec;
+
+// class name must match plugin name
+@LogstashPlugin(name = "s3sqs_postgresql_guardium_plugin_filter")
+public class S3SQSPostgresqlGuardiumPluginFilter implements Filter {
+
+    public static final String LOG42_CONF = "log4j2uc.properties";
+
+    static {
+        try {
+            String uc_etc = System.getenv("UC_ETC");
+
+            LoggerContext context = (LoggerContext) LogManager.getContext(false);
+
+            File file = new File(uc_etc + File.separator + LOG42_CONF);
+
+            context.setConfigLocation(file.toURI());
+
+        } catch (Exception e) {
+            System.err.println("Failed to load log4j configuration " + e.getMessage());
+            e.printStackTrace();
+        }
+    }
+
+    private String id;
+
+    public static final PluginConfigSpec<String> SOURCE_CONFIG = PluginConfigSpec.stringSetting("source", "message");
+
+    private static Logger log = LogManager.getLogger(S3SQSPostgresqlGuardiumPluginFilter.class);
+
+    public S3SQSPostgresqlGuardiumPluginFilter(String id, Configuration config, Context context) {
+        // constructors should validate configuration options
+        this.id = id;
+    }
+
+    @Override
+    public Collection<Event> filter(Collection<Event> events, FilterMatchListener matchListener) {
+
+        for (Event e : events) {
+            if (null != e && null != e.getData()) {
+                try {
+                    log.debug("Event Now: {}", e.getData());
+
+                    Record record = Parser.parseRecord(e);
+
+                    final GsonBuilder builder = new GsonBuilder();
+                    builder.serializeNulls();
+                    final Gson gson = builder.create();
+
+                    e.setField(GuardConstants.GUARDIUM_RECORD_FIELD_NAME, gson.toJson(record));
+
+                    matchListener.filterMatched(e);
+
+                } catch (Exception exception) {
+                    log.warn("Failed event {}", e.getData(), exception);
+                }
+            }
+        }
+        return events;
+    }
+
+    @Override
+    public Collection<PluginConfigSpec<?>> configSchema() {
+        // should return a list of all configuration options for this plugin
+        return Collections.singletonList(SOURCE_CONFIG);
+    }
+
+    @Override
+    public String getId() {
+        return this.id;
+    }
+}
diff --git a/filter-plugin/logstash-filter-postgres-guardium/src/test/java/com/ibm/guardium/test/s3sqspostgresql/S3SQSPostgresqlGuardiumPluginFilterTest.java b/filter-plugin/logstash-filter-postgres-guardium/src/test/java/com/ibm/guardium/test/s3sqspostgresql/S3SQSPostgresqlGuardiumPluginFilterTest.java
new file mode 100644
index 000000000..4643dc820
--- /dev/null
+++ b/filter-plugin/logstash-filter-postgres-guardium/src/test/java/com/ibm/guardium/test/s3sqspostgresql/S3SQSPostgresqlGuardiumPluginFilterTest.java
@@ -0,0 +1,387 @@
+package com.ibm.guardium.test.s3sqspostgresql;
+
+import co.elastic.logstash.api.Event;
+import com.ibm.guardium.s3sqspostgresql.Constants;
+import com.ibm.guardium.s3sqspostgresql.Parser;
+import com.ibm.guardium.universalconnector.commons.structures.*;
+import org.junit.Test;
+
+import java.text.ParseException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static com.ibm.guardium.s3sqspostgresql.Parser.getInstanceName;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+public class S3SQSPostgresqlGuardiumPluginFilterTest {
+
+    @Test
+    public void testParseRecordWithSuccessSql() throws ParseException {
+        Event mockEvent = mock(Event.class);
+
+        Map parsedMsg = new HashMap<>();
+        parsedMsg.put(Constants.DATABASE_NAME, "testdb");
+        parsedMsg.put(Constants.USER_NAME, "user1");
+        parsedMsg.put(Constants.CONNECTION_FROM, "10.0.0.1:1234");
+        parsedMsg.put(Constants.SQL_STATE_CODE, Constants.SQL_STATE_CODE_SUCCESS);
+
+        when(mockEvent.getField(Constants.PARSED_MESSAGE)).thenReturn(parsedMsg);
+        when(mockEvent.getField(Constants.SESSION_ID)).thenReturn("11");
+        when(mockEvent.getField(Constants.FULL_SQL_QUERY)).thenReturn("SELECT * FROM table;");
+        when(mockEvent.getField(Constants.TIMESTAMP)).thenReturn("2023-11-10T10:15:30Z");
+        when(mockEvent.getField(Constants.ACCOUNT_ID)).thenReturn("123456");
+        when(mockEvent.getField(Constants.INSTANCE_NAME)).thenReturn("postgres");
+        when(mockEvent.getField(Constants.SERVER_HOST_NAME)).thenReturn("123456:postgres");
+
+        Record record = Parser.parseRecord(mockEvent);
+
+        assertNotNull(record.getData());
+        assertEquals("SELECT * FROM table;", record.getData().getOriginalSqlCommand());
+        assertEquals("user1", record.getAccessor().getDbUser());
+        assertEquals("10.0.0.1", record.getSessionLocator().getClientIp());
+        assertEquals("123456:postgres", record.getAccessor().getServerHostName());
+        assertEquals("123456:postgres:testdb", record.getAccessor().getServiceName());
+        assertEquals("123456:postgres:testdb", record.getDbName());
+    }
+
+    @Test
+    public void testSQLQueryWithDoubleQuote() throws ParseException {
+        Event mockEvent = mock(Event.class);
+
+        Map parsedMsg = new HashMap<>();
+        parsedMsg.put(Constants.DATABASE_NAME, "testdb");
+        parsedMsg.put(Constants.USER_NAME, "user1");
+        parsedMsg.put(Constants.CONNECTION_FROM, "10.0.0.1:1234");
+        parsedMsg.put(Constants.SQL_STATE_CODE, Constants.SQL_STATE_CODE_SUCCESS);
+
+        when(mockEvent.getField(Constants.PARSED_MESSAGE)).thenReturn(parsedMsg);
+        when(mockEvent.getField(Constants.SESSION_ID)).thenReturn("11");
+        when(mockEvent.getField(Constants.FULL_SQL_QUERY)).thenReturn("SELECT COUNT(*) AS \"\"RECORDS\"\" from \"table\";");
+        when(mockEvent.getField(Constants.TIMESTAMP)).thenReturn("2023-11-10T10:15:30Z");
+        when(mockEvent.getField(Constants.ACCOUNT_ID)).thenReturn("123456");
+        when(mockEvent.getField(Constants.INSTANCE_NAME)).thenReturn("postgres");
+        when(mockEvent.getField(Constants.SERVER_HOST_NAME)).thenReturn("123456:postgres");
+
+        Record record = Parser.parseRecord(mockEvent);
+
+        assertNotNull(record.getData());
+        assertEquals("SELECT COUNT(*) AS \"RECORDS\" from \"table\";", record.getData().getOriginalSqlCommand());
+        assertEquals("user1", record.getAccessor().getDbUser());
+        assertEquals("10.0.0.1", record.getSessionLocator().getClientIp());
+        assertEquals("123456:postgres", record.getAccessor().getServerHostName());
+        assertEquals("123456:postgres:testdb", record.getAccessor().getServiceName());
+        assertEquals("123456:postgres:testdb", record.getDbName());
+    }
+
+    @Test
+    public void testParseTimestampValid() {
+        Event mockEvent = mock(Event.class);
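+        // An ISO-8601 timestamp should be handled by parseTimestamp's primary
+        // DateTimeFormatter.ISO_DATE_TIME path.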
when(mockEvent.getField(Constants.TIMESTAMP)).thenReturn("2023-11-10T10:15:30Z"); + + Time time = Parser.parseTimestamp(mockEvent); + + assertTrue(time.getTimstamp() > 0); + } + + @Test + public void testParseTimestampInvalid() { + Event mockEvent = mock(Event.class); + when(mockEvent.getField(Constants.TIMESTAMP)).thenReturn("invalid-date"); + + Time time = Parser.parseTimestamp(mockEvent); + + assertEquals(0, time.getTimstamp()); + } + + @Test + public void testParseAccessorWithDefaults() { + Event mockEvent = mock(Event.class); + when(mockEvent.getField(Constants.PARSED_MESSAGE)).thenReturn(null); + when(mockEvent.getField(Constants.ACCOUNT_ID)).thenReturn("123456"); + when(mockEvent.getField(Constants.INSTANCE_NAME)).thenReturn("postgres"); + when(mockEvent.getField(Constants.SERVER_HOST_NAME)).thenReturn("123456:postgres"); + + Accessor accessor = Parser.parseAccessor(mockEvent); + + assertEquals(Constants.NA, accessor.getClientHostName()); + assertEquals(Constants.SERVER_TYPE_STRING, accessor.getServerType()); + assertEquals("123456:postgres", accessor.getServerHostName()); + } + + @Test + public void testParseAccessorWithParsedMessage() { + Event mockEvent = mock(Event.class); + Map parsedMsg = new HashMap<>(); + parsedMsg.put(Constants.USER_NAME, "dbuser"); + parsedMsg.put(Constants.DATABASE_NAME, "service_db"); + parsedMsg.put(Constants.APPLICATION_NAME, "app1"); + + when(mockEvent.getField(Constants.PARSED_MESSAGE)).thenReturn(parsedMsg); + when(mockEvent.getField(Constants.ACCOUNT_ID)).thenReturn("123456"); + when(mockEvent.getField(Constants.INSTANCE_NAME)).thenReturn("postgres"); + when(mockEvent.getField(Constants.SERVER_HOST_NAME)).thenReturn("123456:postgres"); + + Accessor accessor = Parser.parseAccessor(mockEvent); + + assertEquals("dbuser", accessor.getDbUser()); + //assertEquals("service_db", accessor.getServiceName()); + assertEquals("app1", accessor.getSourceProgram()); + assertEquals("123456:postgres", accessor.getServerHostName()); + + } + + @Test + public void ErrorTestCaseCheck() throws Exception { + Event event = mock(Event.class); + + Map parsedMessage = new HashMap<>(); + parsedMessage.put("backend_type", "client backend"); + parsedMessage.put("database_name", "mydb"); + parsedMessage.put("internal_query", ""); + parsedMessage.put("detail", ""); + parsedMessage.put("log_time", "2025-07-21 08:22:21.726+00"); + parsedMessage.put("query_id", "0"); + parsedMessage.put("virtual_transaction_id", "5/633"); + parsedMessage.put("session_start_time", "2025-07-21 05:03:18+00"); + parsedMessage.put("process_id", "1510"); + parsedMessage.put("application_name", "DBeaver 25.0.3 - SQLEditor "); + parsedMessage.put("leader_pid", ""); + parsedMessage.put("hint", ""); + parsedMessage.put("query_pos", "1"); + parsedMessage.put("message", "syntax error at or near \"drogp\""); + parsedMessage.put("connection_from", "223.233.87.228:22408"); + parsedMessage.put("session_line_num", "458"); + parsedMessage.put("location", ""); + parsedMessage.put("transaction_id", "0"); + parsedMessage.put("user_name", "Admin123"); + parsedMessage.put("session_id", "687dca16.5e6"); + parsedMessage.put("sql_state_code", "42601"); + parsedMessage.put("error_severity", "ERROR"); + parsedMessage.put("internal_query_pos", ""); + parsedMessage.put("context", ""); + parsedMessage.put("query", "drogp table PANY"); + parsedMessage.put("command_tag", "PARSE"); + + when(event.getField("parsed_message")).thenReturn(parsedMessage); + when(event.getField("account_id")).thenReturn("123456"); + 
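+        // sql_state_code 42601 is PostgreSQL's syntax_error class, so the parser
+        // is expected to emit an ExceptionRecord instead of a Data/SQL record.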
when(event.getField("timestamp")).thenReturn("2025-07-21T08:23:01.903324202Z"); + when(event.getField("session_id")).thenReturn("687dca16.5e6"); + when(event.getField("message")).thenReturn("syntax error at or near \"drogp\""); + when(event.getField("succeeded")).thenReturn("ERROR"); + when(event.getField("prefix")).thenReturn("42601"); + when(event.getField("instance_name")).thenReturn("postgres"); + when(event.getField("server_hostname")).thenReturn("123456:postgres"); + + Record record = Parser.parseRecord(event); + + assertNotNull(record); + assertEquals("123456:postgres:mydb", record.getDbName()); + + assertNotNull(record.getAccessor()); + assertEquals("Admin123", record.getAccessor().getDbUser()); + assertEquals("123456:postgres:mydb", record.getAccessor().getServiceName()); + assertEquals("DBeaver 25.0.3 - SQLEditor ", record.getAccessor().getSourceProgram()); + + assertNotNull(record.getSessionLocator()); + assertEquals(Constants.DEFAULT_IP, record.getSessionLocator().getClientIp()); + assertEquals(Constants.DEFAULT_PORT, record.getSessionLocator().getClientPort()); + assertEquals("123456:postgres", record.getAccessor().getServerHostName()); + + assertNotNull(record.getTime()); + assertTrue(record.getTime().getTimstamp() > 0); + + assertEquals("", record.getSessionId()); + + assertNotNull(record.getException()); + } + + @Test + public void testParseRecordFromSampleEventNow() throws Exception { + Event event = mock(Event.class); + + Map parsedMessage = new HashMap<>(); + parsedMessage.put(Constants.DATABASE_NAME, "mypgdb"); + parsedMessage.put(Constants.USER_NAME, "postgresadmin"); + parsedMessage.put(Constants.CONNECTION_FROM, "223.233.87.243:31637"); + parsedMessage.put(Constants.SQL_STATE_CODE, Constants.SQL_STATE_CODE_SUCCESS); + parsedMessage.put(Constants.APPLICATION_NAME, "NA"); + parsedMessage.put(Constants.MESSAGE, "INSERT INTO test123 (first_name, last_name, department, salary) VALUES ('Alice', 'Johnson', 'HR', 55000.00)"); + + when(event.getField(Constants.PARSED_MESSAGE)).thenReturn(parsedMessage); + when(event.getField(Constants.SESSION_ID)).thenReturn("1433"); + when(event.getField(Constants.FULL_SQL_QUERY)).thenReturn("INSERT INTO test123 (first_name, last_name, department, salary) VALUES ('Alice', 'Johnson', 'HR', 55000.00)"); + when(event.getField(Constants.TIMESTAMP)).thenReturn("2025-08-22T09:57:21Z"); + when(event.getField(Constants.ACCOUNT_ID)).thenReturn("123456"); + when(event.getField(Constants.INSTANCE_NAME)).thenReturn("postgres"); + when(event.getField(Constants.SERVER_HOST_NAME)).thenReturn("123456:postgres"); + + Record record = Parser.parseRecord(event); + + assertNotNull(record); + assertEquals("123456:postgres:mypgdb", record.getDbName()); + + assertNotNull(record.getAccessor()); + assertEquals("postgresadmin", record.getAccessor().getDbUser()); + assertEquals("123456:postgres:mypgdb", record.getAccessor().getServiceName()); + + assertNotNull(record.getSessionLocator()); + assertEquals("223.233.87.243", record.getSessionLocator().getClientIp()); + assertEquals(31637, record.getSessionLocator().getClientPort()); + assertEquals("123456:mypgdb", record.getSessionLocator().getServerIp()); + assertEquals("123456:postgres", record.getAccessor().getServerHostName()); + + assertNotNull(record.getTime()); + assertTrue(record.getTime().getTimstamp() > 0); + + assertEquals("", record.getSessionId()); + + assertNotNull(record.getData()); + assertEquals("INSERT INTO test123 (first_name, last_name, department, salary) VALUES ('Alice', 'Johnson', 'HR', 55000.00)", + 
record.getData().getOriginalSqlCommand()); + } + + @Test + public void testParseRecordFromEventNowEFG789() throws Exception { + Event event = mock(Event.class); + + Map parsedMessage = new HashMap<>(); + parsedMessage.put(Constants.DATABASE_NAME, "mypgdb"); + parsedMessage.put(Constants.USER_NAME, "postgresadmin"); + parsedMessage.put(Constants.CONNECTION_FROM, "223.233.87.243:27930"); + parsedMessage.put(Constants.SQL_STATE_CODE, Constants.SQL_STATE_CODE_SUCCESS); + parsedMessage.put(Constants.APPLICATION_NAME, "NA"); + parsedMessage.put(Constants.MESSAGE, "INSERT INTO EFG789 (first_name, last_name, department, salary) VALUES ('Bob', 'Smith', 'IT', 65000.00)"); + + when(event.getField(Constants.PARSED_MESSAGE)).thenReturn(parsedMessage); + when(event.getField(Constants.SESSION_ID)).thenReturn("23"); + when(event.getField(Constants.FULL_SQL_QUERY)).thenReturn("INSERT INTO EFG789 (first_name, last_name, department, salary) VALUES ('Bob', 'Smith', 'IT', 65000.00)"); + when(event.getField(Constants.TIMESTAMP)).thenReturn("2025-08-23T07:30:48.706Z"); + when(event.getField(Constants.ACCOUNT_ID)).thenReturn("123456"); + when(event.getField(Constants.INSTANCE_NAME)).thenReturn("postgres"); + when(event.getField(Constants.SERVER_HOST_NAME)).thenReturn("123456:postgres"); + + Record record = Parser.parseRecord(event); + + // Validate main record fields + assertNotNull(record); + assertEquals("123456:postgres:mypgdb", record.getDbName()); + assertEquals("", record.getSessionId()); + + // Validate accessor + assertNotNull(record.getAccessor()); + assertEquals("postgresadmin", record.getAccessor().getDbUser()); + assertEquals("123456:postgres:mypgdb", record.getAccessor().getServiceName()); + assertEquals("NA", record.getAccessor().getSourceProgram()); + assertEquals("123456:postgres", record.getAccessor().getServerHostName()); + + // Validate session locator + assertNotNull(record.getSessionLocator()); + assertEquals("223.233.87.243", record.getSessionLocator().getClientIp()); + assertEquals(27930, record.getSessionLocator().getClientPort()); + assertEquals("123456:mypgdb", record.getSessionLocator().getServerIp()); + + // Validate time + assertNotNull(record.getTime()); + assertTrue(record.getTime().getTimstamp() > 0); + + // Validate SQL command + assertNotNull(record.getData()); + assertEquals("INSERT INTO EFG789 (first_name, last_name, department, salary) VALUES ('Bob', 'Smith', 'IT', 65000.00)", + record.getData().getOriginalSqlCommand()); + } + + @Test + public void testParseRecordFromJHTV123InsertEvent() throws Exception { + Event event = mock(Event.class); + + // Build parsed message as per the event log + Map parsedMessage = new HashMap<>(); + parsedMessage.put(Constants.DATABASE_NAME, "mypgdb"); + parsedMessage.put(Constants.USER_NAME, "postgresadmin"); + parsedMessage.put(Constants.CONNECTION_FROM, "223.233.87.243:29370"); + parsedMessage.put(Constants.SQL_STATE_CODE, Constants.SQL_STATE_CODE_SUCCESS); + parsedMessage.put(Constants.APPLICATION_NAME, "NA"); + parsedMessage.put(Constants.MESSAGE, "INSERT INTO JHTV123 (first_name, last_name, department, salary) VALUES ('Bob', 'Smith', 'IT', 65000.00)"); + + // Mock the event fields + when(event.getField(Constants.PARSED_MESSAGE)).thenReturn(parsedMessage); + when(event.getField(Constants.SESSION_ID)).thenReturn(""); + when(event.getField(Constants.FULL_SQL_QUERY)).thenReturn("INSERT INTO JHTV123 (first_name, last_name, department, salary) VALUES ('Bob', 'Smith', 'IT', 65000.00)"); + 
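+        // account_id and instance_name combine into the serverHostName
+        // ("123456:postgres") asserted below.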
when(event.getField(Constants.TIMESTAMP)).thenReturn("2025-08-23T08:55:48Z"); + when(event.getField(Constants.ACCOUNT_ID)).thenReturn("123456"); + when(event.getField(Constants.INSTANCE_NAME)).thenReturn("postgres"); + when(event.getField(Constants.SERVER_HOST_NAME)).thenReturn("123456:postgres"); + + // Parse the record + Record record = Parser.parseRecord(event); + + // Validate main record fields + assertNotNull(record); + assertEquals("123456:postgres:mypgdb", record.getDbName()); + assertEquals("", record.getSessionId()); + + // Validate accessor + assertNotNull(record.getAccessor()); + assertEquals("postgresadmin", record.getAccessor().getDbUser()); + assertEquals("123456:postgres:mypgdb", record.getAccessor().getServiceName()); + assertEquals("NA", record.getAccessor().getSourceProgram()); + assertEquals("123456:postgres", record.getAccessor().getServerHostName()); + + // Validate session locator + assertNotNull(record.getSessionLocator()); + assertEquals("223.233.87.243", record.getSessionLocator().getClientIp()); + assertEquals(29370, record.getSessionLocator().getClientPort()); + assertEquals("123456:mypgdb", record.getSessionLocator().getServerIp()); + + // Validate time + assertNotNull(record.getTime()); + assertTrue(record.getTime().getTimstamp() > 0); + + // Validate SQL command + assertNotNull(record.getData()); + assertEquals( + "INSERT INTO JHTV123 (first_name, last_name, department, salary) VALUES ('Bob', 'Smith', 'IT', 65000.00)", + record.getData().getOriginalSqlCommand() + ); + } + @Test + public void testParseServerHostName() throws ParseException { + Event mockEvent = mock(Event.class); + + Map parsedMsg = new HashMap<>(); + parsedMsg.put(Constants.DATABASE_NAME, "testdb"); + parsedMsg.put(Constants.USER_NAME, "user1"); + parsedMsg.put(Constants.CONNECTION_FROM, "10.0.0.1:1234"); + parsedMsg.put(Constants.SQL_STATE_CODE, Constants.SQL_STATE_CODE_SUCCESS); + + when(mockEvent.getField(Constants.PARSED_MESSAGE)).thenReturn(parsedMsg); + when(mockEvent.getField(Constants.ACCOUNT_ID)).thenReturn("123456"); + when(mockEvent.getField(Constants.INSTANCE_NAME)).thenReturn("postgres"); + + Record record = Parser.parseRecord(mockEvent); + assertEquals("123456:postgres", record.getAccessor().getServerHostName()); + } + + @Test + public void testGetInstanceName_withString() throws Exception{ + Event mockEvent = mock(Event.class); + when(mockEvent.getField(Constants.INSTANCE_NAME)).thenReturn("Postgres"); + + String result = Parser.getInstanceName(mockEvent); + assertEquals("Postgres", result); + } + + @Test + public void testGetInstanceName_withListContainingValue() throws Exception{ + Event mockEvent = mock(Event.class); + List instanceList = Arrays.asList("Postgres", "OtherValue"); + when(mockEvent.getField(Constants.INSTANCE_NAME)).thenReturn(instanceList); + + String result = Parser.getInstanceName(mockEvent); + assertEquals("Postgres", result); + } +} diff --git a/filter-plugin/logstash-filter-postgres-guardium/versions.yml b/filter-plugin/logstash-filter-postgres-guardium/versions.yml new file mode 100644 index 000000000..a23c8e1db --- /dev/null +++ b/filter-plugin/logstash-filter-postgres-guardium/versions.yml @@ -0,0 +1,18 @@ +--- +dependencies: + commonsValidator: 1.7 + log4jCore: 2.22.0 + log4jApi: 2.17.2 + commonsLang: 3.7 + gson: 2.8.9 + junit: 4.12 + jrubyComplete: 9.2.7.0 + junitJupiter: 5.7.1 + mockitoAll: 2.0.2-beta + json: 20231013 + parboiledJava: 1.1.8 + javaxJson: 1.1.4 + guava: 32.1.3-jre + commonsText: 1.10.0 + tinkergraphGremlin: 3.6.4 + 
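+  # These pins appear to follow the repo-wide versions.yml convention; other
+  # plugins' build scripts read them as versions.dependencies.<name>.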
rdf4jQueryparserSparql: 4.2.4 \ No newline at end of file diff --git a/filter-plugin/logstash-filter-yugabyte-guardium/src/main/java/com/ibm/guardium/yugabytedb/Constants.java b/filter-plugin/logstash-filter-yugabyte-guardium/src/main/java/com/ibm/guardium/yugabytedb/Constants.java index 1de46ff5a..0a2b15208 100644 --- a/filter-plugin/logstash-filter-yugabyte-guardium/src/main/java/com/ibm/guardium/yugabytedb/Constants.java +++ b/filter-plugin/logstash-filter-yugabyte-guardium/src/main/java/com/ibm/guardium/yugabytedb/Constants.java @@ -50,9 +50,10 @@ public class Constants { public static final String LANGUAGE_CASSANDRA = "CASS"; - public static final String DB_PROTOCOL_PG = "POSTGRESQL"; + public static final String DB_PROTOCOL_PG = "YUGABYTE"; + + public static final String DB_PROTOCOL_CASSANDRA = "YUGABYTE_CASS"; - public static final String DB_PROTOCOL_CASSANDRA = "CASSANDRA"; public static final String MESSAGE = "message"; public static final String TYPE = "log_type"; diff --git a/input-plugin/logstash-input-beats/FilebeatInputPackage/Filebeat/input.conf b/input-plugin/logstash-input-beats/FilebeatInputPackage/Filebeat/input.conf index 70952721e..97fe2b759 100644 --- a/input-plugin/logstash-input-beats/FilebeatInputPackage/Filebeat/input.conf +++ b/input-plugin/logstash-input-beats/FilebeatInputPackage/Filebeat/input.conf @@ -1,11 +1,11 @@ input{ beats { - port => guc_input_param_port - ssl => guc_input_param_is_ssl - ssl_certificate_authorities => SSL_CERT_AUTH - ssl_certificate => "/service/certs/external/tls.crt" - ssl_key => "/service/certs/external/tls.key" - include_codec_tag => guc_input_param_include_codec_tag - type => "filebeat" + port => guc_input_param_port + ssl_enabled => guc_input_param_is_ssl + ssl_certificate_authorities => SSL_CERT_AUTH + ssl_certificate => "/service/certs/external/tls.crt" + ssl_key => "/service/certs/external/tls.key" + include_codec_tag => guc_input_param_include_codec_tag + type => "filebeat" } } \ No newline at end of file diff --git a/input-plugin/logstash-input-s3sqs/build.gradle b/input-plugin/logstash-input-s3sqs/build.gradle index 00b029bb9..213653206 100644 --- a/input-plugin/logstash-input-s3sqs/build.gradle +++ b/input-plugin/logstash-input-s3sqs/build.gradle @@ -20,8 +20,10 @@ pluginInfo.pluginClass = "S3SQS" pluginInfo.pluginName = "s3_sqs" // must match the @LogstashPlugin annotation in the main plugin class // =========================================================================== -sourceCompatibility = 1.11 -targetCompatibility = 1.11 +java { + sourceCompatibility = JavaVersion.VERSION_11 + targetCompatibility = JavaVersion.VERSION_11 +} buildscript { repositories { @@ -63,8 +65,6 @@ dependencies { implementation group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.15.2' implementation("org.apache.commons:commons-csv:1.14.0") - implementation group: 'commons-beanutils', name: 'commons-beanutils', version: versions.dependencies.commonsBeanutils - implementation fileTree(dir: LOGSTASH_CORE_PATH, include: "**/logstash-core*.jar") implementation fileTree(dir: GUARDIUM_UNIVERSALCONNECTOR_COMMONS_PATH, include: "guardium-universalconnector-commons*.jar") diff --git a/input-plugin/logstash-input-s3sqs/gradle.properties b/input-plugin/logstash-input-s3sqs/gradle.properties index eb30e44e2..244142548 100644 --- a/input-plugin/logstash-input-s3sqs/gradle.properties +++ b/input-plugin/logstash-input-s3sqs/gradle.properties @@ -1,5 +1,3 @@ 
-LOGSTASH_CORE_PATH=/Users/piyushdesai/work/plugin-developement/logstash/logstash-core -GUARDIUM_UNIVERSALCONNECTOR_COMMONS_PATH=/Users/piyushdesai/work/plugin-developement/guardium-universalconnector-commons/build/libs -org.gradle.java.home=/Users/piyushdesai/.sdkman/candidates/java/11.0.21-zulu/zulu-11.jdk/Contents/Home -org.gradle.jvmargs=-Xmx4G -XX:MaxMetaspaceSize=1G - +org.gradle.java.home=/Users/andy.chen/Downloads/jdk-11.0.22+7/Contents/Home +LOGSTASH_CORE_PATH=/Users/andy.chen/Documents/workspace/nexus/guardium/logstash/logstash-core +GUARDIUM_UNIVERSALCONNECTOR_COMMONS_PATH=/Users/andy.chen/Documents/workspace/nexus/guardium/guardium-universalconnector-commons/build/libs \ No newline at end of file