[{"id":"tavjNS3oTkswrNJz46de","number":"12278382404325644697","begin":"2024-11-26T00:30:24+00:00","created":"2024-11-26T01:05:50+00:00","end":"2024-11-26T03:49:09+00:00","modified":"2024-11-26T03:49:10+00:00","external_desc":"Looker Studio customers may experience high latency on operations that involve Spanner read / write.","updates":[{"created":"2024-11-26T03:49:09+00:00","modified":"2024-11-26T03:49:11+00:00","when":"2024-11-26T03:49:09+00:00","text":"The issue with Looker Studio has been resolved for all affected users as of Monday, 2024-11-25 19:48 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-26T02:27:58+00:00","modified":"2024-11-26T03:49:10+00:00","when":"2024-11-26T02:27:58+00:00","text":"Summary: Looker Studio customers may experience high latency on operations that involve Spanner read / write.\nDescription: Our engineering team continues to work on the mitigation while also identifying any measures that can be taken to reduce the overall time of the rollout.\nWe currently do not have an ETA for complete mitigation.\nWe will provide more information by Monday, 2024-11-25 22:30 US/Pacific.\nWe apologize to all who are affected by this disruption.\nDiagnosis: Customers impacted by this issue may intermittently encounter timeouts, in addition to the latency observed on the affected operations.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-11-26T01:29:31+00:00","modified":"2024-11-26T02:27:58+00:00","when":"2024-11-26T01:29:31+00:00","text":"Summary: Looker Studio customers may experience high latency on operations that involve Spanner read / write.\nDescription: Our engineering team has identified the steps required to mitigate the issue and is currently in the process of the rollout.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2024-11-25 18:30 US/Pacific.\nDiagnosis: Customers impacted by this issue may encounter timeouts, in addition to the latency observed on the affected operations.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-11-26T01:05:44+00:00","modified":"2024-11-26T01:29:34+00:00","when":"2024-11-26T01:05:44+00:00","text":"Summary: Looker Studio customers may experience high latency on operations that involve Spanner read / write.\nDescription: Our engineering team has identified the steps required to mitigate the issue and are currently in the process of the rollout.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2024-11-25 18:15 US/Pacific.\nDiagnosis: Customers impacted by this issue may encounter timeouts, in addition to the latency observed on the affected operations.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]}],"most_recent_update":{"created":"2024-11-26T03:49:09+00:00","modified":"2024-11-26T03:49:11+00:00","when":"2024-11-26T03:49:09+00:00","text":"The issue with Looker Studio has been resolved for all affected users as of Monday, 2024-11-25 19:48 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"kEYNqRYFXXHxP9QeFJ1d","service_name":"Looker 
Studio","affected_products":[{"title":"Looker Studio","id":"kEYNqRYFXXHxP9QeFJ1d"}],"uri":"incidents/tavjNS3oTkswrNJz46de","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"Fc17BgFxqDgm1BwKBySX","number":"13588014291395814457","begin":"2024-11-21T20:27:01+00:00","created":"2024-11-21T20:39:56+00:00","end":"2024-11-21T22:03:59+00:00","modified":"2024-11-21T22:04:00+00:00","external_desc":"AppSheet customers using Looker connectors will be unable to connect.","updates":[{"created":"2024-11-21T22:03:59+00:00","modified":"2024-11-21T22:04:02+00:00","when":"2024-11-21T22:03:59+00:00","text":"The issue with AppSheet has been resolved for all affected users as of Thursday, 2024-11-21 13:16 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-21T21:00:12+00:00","modified":"2024-11-21T22:04:00+00:00","when":"2024-11-21T21:00:12+00:00","text":"Summary: AppSheet customers using Looker connectors will be unable to connect\nDescription: We are experiencing an issue with AppSheet beginning at Thursday, 2024-11-21 11:10 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-11-21 14:15 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Existing Looker connectors will be unable to connect\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-11-21T20:39:53+00:00","modified":"2024-11-21T21:00:12+00:00","when":"2024-11-21T20:39:53+00:00","text":"Summary: AppSheet customers using Looker connectors will be unable to connect\nDescription: We are experiencing an issue with AppSheet beginning at Thursday, 2024-11-21 11:10 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-11-21 13:15 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Existing Looker connectors will be unable to connect\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-11-21T22:03:59+00:00","modified":"2024-11-21T22:04:02+00:00","when":"2024-11-21T22:03:59+00:00","text":"The issue with AppSheet has been resolved for all affected users as of Thursday, 2024-11-21 13:16 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FWjKi5U7KX4FUUPThHAJ","service_name":"AppSheet","affected_products":[{"title":"AppSheet","id":"FWjKi5U7KX4FUUPThHAJ"}],"uri":"incidents/Fc17BgFxqDgm1BwKBySX","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"goRTDnNfbzZyhP9b9qGi","number":"10441352759131669131","begin":"2024-11-18T16:54:45+00:00","created":"2024-11-19T21:09:24+00:00","end":"2024-11-22T06:18:04+00:00","modified":"2024-11-22T06:18:05+00:00","external_desc":"Some ASM collection scan results may be delayed","updates":[{"created":"2024-11-22T06:18:04+00:00","modified":"2024-11-22T06:18:06+00:00","when":"2024-11-22T06:18:04+00:00","text":"The issue with Mandiant Attack Surface Mangement has been resolved for all affected users as of Thursday, 2024-11-21 20:15 US/Pacific.\nWe thank you for your patience 
while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-22T00:53:22+00:00","modified":"2024-11-22T06:18:05+00:00","when":"2024-11-22T00:53:22+00:00","text":"Summary: Some ASM collection scan results may be delayed\nDescription: We are experiencing an issue with Mandiant Attack Surface Management.\nOur engineering team is aware of an issue affecting some customers' collection scan results. These results may be delayed by up to 24 hours, and customers may also notice a temporary change in the reported entity count.\nEngineers have identified the root cause and are currently implementing a solution. We expect all affected collections to be fully processed within 48 hours.\nWe will provide an update by Friday, 2024-11-22 08:00 US/Pacific with current details.\nDiagnosis: Customers may notice delayed collection scan results and a temporary change in the reported entity count.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-11-19T21:09:16+00:00","modified":"2024-11-22T00:53:22+00:00","when":"2024-11-19T21:09:16+00:00","text":"Summary: Some ASM collection scan results may be delayed\nDescription: We are experiencing an issue with Mandiant Attack Surface Management.\nOur engineering team is aware of an issue affecting some customers' collection scan results. These results may be delayed by up to 24 hours, and customers may also notice a temporary change in the reported entity count.\nEngineers have identified the root cause and are currently implementing a solution. We expect all affected collections to be fully processed within 48 hours.\nWe will provide an update by Thursday, 2024-11-21 17:00 US/Pacific with current details.\nDiagnosis: Customers may notice delayed collection scan results and a temporary change in the reported entity count.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-11-22T06:18:04+00:00","modified":"2024-11-22T06:18:06+00:00","when":"2024-11-22T06:18:04+00:00","text":"The issue with Mandiant Attack Surface Management has been resolved for all affected users as of Thursday, 2024-11-21 20:15 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"v7DL2fMFnZpCxNwd8KE7","service_name":"Mandiant Attack Surface Management","affected_products":[{"title":"Mandiant Attack Surface Management","id":"v7DL2fMFnZpCxNwd8KE7"}],"uri":"incidents/goRTDnNfbzZyhP9b9qGi","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"kDBRnSgQCPw93E8vKKat","number":"9761556975820979069","begin":"2024-11-16T08:52:00+00:00","created":"2024-11-16T10:13:35+00:00","end":"2024-11-16T12:14:08+00:00","modified":"2024-11-23T01:29:24+00:00","external_desc":"Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region","updates":[{"created":"2024-11-23T01:29:24+00:00","modified":"2024-11-23T01:29:26+00:00","when":"2024-11-23T01:29:24+00:00","text":"# Incident Report\n## Summary\nOn 16 November 2024 at 00:47 US/Pacific, a combination of fiber failures and a network equipment fault led to reduced network capacity between the asia-southeast2 region and other GCP regions. 
The failures were corrected and minimum required capacity recovered by 02:13 US/Pacific.\nTo our GCP customers whose businesses were impacted during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause and Impact\nGoogle’s global network is designed and built to ensure that any occasional capacity loss events are not noticeable and/or have minimal disruption to customers. We provision several diverse network paths to each region and maintain sufficient capacity buffers based on the measured reliability of capacity in each region.\nBetween 12 November and 16 November, two separate fiber failures occurred near the asia-southeast2 region. These failures temporarily reduced the available network capacity between the asia-southeast2 region and other GCP regions, but did not impact the availability of GCP services in the region. Google engineers were alerted to these failures as soon as they occurred and were working with urgency on remediating these fiber failures but had not yet completed full recovery.\nOn 16 November 2024 at 00:47 US/Pacific, a latent software defect impacted a backbone networking router in the asia-southeast2 region, resulting in further reduction of available inter-region capacity, and exhausted our reserve network capacity buffers, causing multiple Google Cloud services in the region to experience high latency and/or elevated error rates for operations requiring inter-region connectivity. During this time, customers in asia-southeast2 would have experienced issues with managing and monitoring existing resources, creating new resources, and data replication to other regions.\nTo mitigate the impact, Google engineers re-routed Internet traffic away from the asia-southeast2 region to be served from other GCP regions, primarily asia-southeast1 while working in parallel to recover the lost capacity. The faulty backbone networking router was recovered on 16 November 2024 at 02:13 US/Pacific. This ended the elevated network latency and error rates for most of the impacted GCP services’ operations. Recovery of the first failed fiber was completed on 18 November 08:45 US/Pacific and the second failed fiber was restored at 09:00 US/Pacific on the same day.\n## Remediation and Prevention\nWe’re taking the following actions to reduce the likelihood of recurrence and time to mitigate impact of this type of incident in the future:\n- During the incident, our actions to reroute traffic away from the asia-southeast2 region and recover the faulty backbone networking router took longer than expected as the loss of capacity hindered our visibility of required networking telemetry and functionality of emergency tooling. We’re reviewing these gaps to implement the required improvements to our network observability, emergency tools and incident response playbooks.\n- Work with our fiber partners in the asia-southeast2 region to ensure our fiber paths between facilities in the region and to submarine cable landing stations are on the most reliable routes available, and have adequate preventative maintenance and repair processes.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-18T20:03:25+00:00","modified":"2024-11-23T01:29:24+00:00","when":"2024-11-18T20:03:25+00:00","text":"### Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. 
We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using [***https://cloud.google.com/support***](https://cloud.google.com/support).\n(All Times US/Pacific)\n**Incident Start:** 16 November, 2024 00:52\n**Incident End:** 16 November, 2024 03:36\n**Duration:** 2 hours, 44 minutes\n**Affected Services and Features:**\nGoogle Cloud Networking\nVirtual Private Cloud (VPC)\nCloud Load Balancing\nCloud SQL\nCloud Spanner\nCloud Logging\nCloud Firestore\nBigQuery\nCloud VPN\nMemorystore for Redis\nArtifact Registry\nCloud Dataflow\nCloud Data Loss Prevention\nCloud Deploy\nCloud Healthcare\nDataplex\nGKE fleet management (GKE Connect)\nGoogle Kubernetes Engine (GKE)\nIdentity and Access Management (IAM)\n**Regions/Zones:** asia-southeast2\n**Description:**\nMultiple Google Cloud services in asia-southeast2 were degraded for 2 hours, 44 minutes. From preliminary analysis, the root cause of the issue was a significant loss of inter-region capacity due to several simultaneous fiber cuts, combined with the malfunction of a backbone networking router which had to be removed from service.\nGoogle will complete a full Incident Report in the following days that will provide a full root cause.\n**Customer Impact:**\n* **Virtual Private Cloud (VPC)** \\- Cross-region and external connectivity issues.\n* **Cloud Load Balancing** \\- Increased latency for GCLB requests ingressing in asia-southeast2.\n* **Cloud SQL** \\- Elevated latency and error rates for the Cloud SQL Admin API.\n* **Cloud Spanner** \\- Customers using asia-southeast2 may have seen higher latency.\n* **Cloud Logging** \\- Log ingestion and log routing in asia-southeast2 experienced high latency. Affected customers may have observed issues when doing operations (e.g. creating buckets) for this region.\n* **Cloud Firestore** \\- Affected customers may have experienced elevated error rates and latencies for databases in asia-southeast2.\n* **BigQuery \\-** Affected customers may have experienced increased latency/errors for import/export jobs and cross-region copy. 
The impact was mitigated at 02:30 US/Pacific.\n* **Cloud VPN** \\- Affected customers may have observed partial packet loss in asia-southeast2.\n* **Memorystore for Redis \\-** A small number of customers in asia-southeast2 may have experienced elevated errors when creating instances from 00:35 to 01:10 US/Pacific.\n* **Artifact Registry** \\- Affected customers may have observed API timeouts or server errors.\n* **Cloud Dataflow \\-** Affected customers may have experienced slow Dataflow jobs or job creation failures.\n* **Cloud Data Loss Prevention \\-** Affected customers may have observed server unavailable errors.\n* **Cloud Deploy \\-** Affected customers may have encountered deployment failures.\n* **Cloud Interconnect** \\- Affected customers may have observed partial packet loss in asia-southeast2.\n* **Cloud Healthcare** \\- Affected customers may have observed API timeouts or server errors.\n* **Dataplex** \\- Affected customers may have observed API timeouts or server errors.\n* **GKE fleet management (GKE Connect) \\-** Affected customers may have observed API timeouts or server errors when sending requests to clusters registered to asia-southeast2.\n* **Google Kubernetes Engine (GKE)** \\- Some customers may have experienced control plane availability issues and/or GKE Cluster or Node Pool creation or deletion failures.\n* **Identity and Access Management (IAM)** \\- Affected customers may have observed increased latency and timeouts for IAM Control Plane operations on global resources and retry traffic for approximately 90 minutes.\n---","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-16T12:14:08+00:00","modified":"2024-11-18T20:03:25+00:00","when":"2024-11-16T12:14:08+00:00","text":"The issue with Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing, Hybrid Connectivity, Cloud Logging, Cloud Healthcare, GKE fleet management, Dataplex, Artifact Registry, Google Kubernetes Engine, Cloud Data Loss Prevention, Google Cloud Dataflow, Cloud Spanner, Memorystore for Redis, Cloud Monitoring, Pub/Sub Lite, Identity and Access Management, Google BigQuery has been resolved for all affected users as of Saturday, 2024-11-16 03:50 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-16T12:09:38+00:00","modified":"2024-11-16T12:14:13+00:00","when":"2024-11-16T12:09:38+00:00","text":"Summary: Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region\nDescription: We are experiencing issues with Multiple GCP products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing, Cloud Logging, Cloud Firestore, BigQuery, Cloud VPN, Hybrid Connectivity, Cloud Healthcare, Dataplex, GKE fleet management, Artifact Registry, Google Kubernetes Engine (GKE), Identity and Access Management, Cloud Data Loss Prevention, Cloud Deploy etc in asia-southeast2 region.\nWe believe that issues with many impacted GCP products have been mitigated. 
Our engineering team continues to validate this.\nWe will provide an update by Saturday, 2024-11-16 04:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see the following symptoms:\nVirtual Private Cloud: Cross-region and external connectivity issues.\nCloud Load Balancing: Increased latency for GCLB requests ingressing in asia-southeast2.\nCloud SQL: Elevated latency and error rate for the Cloud SQL Admin API.\nCloud Spanner: Customers using asia-southeast2 would have seen higher latency.\nCloud Logging: Log ingestion and log routing in asia-southeast2 sees high latency. The users might also observe issues when doing operations (e.g. creating bucket) for this region.\nCloud Dataflow: Dataflow jobs are slow or failed to create.\nCloud Data Loss Prevention: Users may observe server unavailable errors\nCloud Firestore: Users experienced elevated error rates and latencies for databases in asia-southeast2.\nCloud VPN: Impacted users may observe partial packet loss in the impacted region\nMemorystore for Redis: A small number of customers in the impacted region (asia-southeast2) may have experienced elevated errors when creating instances from 00:35 to 01:10. The issue has been mitigated as of 2:45 US/Pacific on 16 Nov, 2024\nArtifact Registry: Artifact Registry users may observe API timeouts or server errors.\nBigQuery: Users would have experienced increased latency/errors for import/export jobs and cross-region copy. The impact was mitigated at 02:30 US/Pacific on 16 November 2024\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]},{"created":"2024-11-16T12:02:05+00:00","modified":"2024-11-16T12:09:42+00:00","when":"2024-11-16T12:02:05+00:00","text":"Summary: Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region\nDescription: We are experiencing issues with Multiple GCP products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing, Cloud Logging, Cloud Firestore, BigQuery, Cloud VPN, Hybrid Connectivity, Cloud Healthcare, Dataplex, GKE fleet management, Artifact Registry, Google Kubernetes Engine (GKE), Identity and Access Management, Cloud Data Loss Prevention, Cloud Deploy etc in asia-southeast2 region.\nWe believe that issues with multiple GCP products have been mitigated. Our engineering team is currently validating this.\nWe will provide an update by Saturday, 2024-11-16 04:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see the following symptoms:\nVirtual Private Cloud: Cross-region and external connectivity issues.\nCloud Load Balancing: Increased latency for GCLB requests ingressing in asia-southeast2.\nCloud SQL: Elevated latency and error rate for the Cloud SQL Admin API.\nCloud Spanner: Customers using asia-southeast2 would have seen higher latency.\nCloud Logging: Log ingestion and log routing in asia-southeast2 sees high latency. The users might also observe issues when doing operations (e.g. 
creating bucket) for this region.\nCloud Dataflow: Dataflow jobs are slow or failed to create.\nCloud Data Loss Prevention: Users may observe server unavailable errors\nCloud Firestore: Users experienced elevated error rates and latencies for databases in asia-southeast2.\nCloud VPN: Impacted users may observe partial packet loss in the impacted region\nMemorystore for Redis: A small number of customers in the impacted region (asia-southeast2) may have experienced elevated errors when creating instances from 00:35 to 01:10. The issue has been mitigated as of 2:45 US/Pacific on 16 Nov, 2024\nArtifact Registry: Artifact Registry users may observe API timeouts or server errors.\nBigQuery: Users would have experienced increased latency/errors for import/export jobs and cross-region copy. The impact was mitigated at 02:30 US/Pacific on 16 November 2024\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]},{"created":"2024-11-16T11:40:43+00:00","modified":"2024-11-16T12:02:05+00:00","when":"2024-11-16T11:40:43+00:00","text":"Summary: Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region\nDescription: We were experiencing an issue with Multiple GCP products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing, Cloud Logging, Cloud Firestore, BigQuery, Cloud VPN, Hybrid Connectivity, Cloud Healthcare, Dataplex, GKE fleet management, Artifact Registry, Google Kubernetes Engine (GKE), Identity and Access Management, Cloud Data Loss Prevention etc in asia-southeast2 region.\nNote that the issues with Virtual Private Cloud (VPC), Cloud Dataflow, Cloud Spanner, Cloud Load Balancing, Cloud Logging, Cloud VPN, Memorystore for Redis have been mitigated.\nOur engineering team continues to investigate the issues with remaining products impacted in an effort to resolve the issues for all customers.\nWe will provide an update by Saturday, 2024-11-16 04:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see the following symptoms:\nVirtual Private Cloud: Cross-region and external connectivity issues.\nCloud Load Balancing: Increased latency for GCLB requests ingressing in asia-southeast2.\nCloud SQL: Elevated latency and error rate for the Cloud SQL Admin API.\nCloud Spanner: Customers using asia-southeast2 would have seen higher latency.\nCloud Logging: Log ingestion and log routing in asia-southeast2 sees high latency. The users might also observe issues when doing operations (e.g. creating bucket) for this region.\nCloud Dataflow: Dataflow jobs are slow or failed to create.\nCloud Data Loss Prevention: Users may observe server unavailable errors\nCloud Firestore: Users experiencing elevated error rates and latencies for databases in asia-southeast2.\nCloud VPN: Impacted users may observe partial packet loss in the impacted region\nMemorystore for Redis: A small number of customers in the impacted region (asia-southeast2) may have experienced elevated errors when creating instances from 00:35 to 01:10. 
The issue has been mitigated as of 2:45 US/Pacific on 16 Nov, 2024\nArtifact Registry: Artifact Registry users may observe API timeouts or server errors.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]},{"created":"2024-11-16T11:12:06+00:00","modified":"2024-11-16T11:40:43+00:00","when":"2024-11-16T11:12:06+00:00","text":"Summary: Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region\nDescription: We are experiencing an issue with Multiple GCP products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing, Cloud Logging, Cloud Firestore, BigQuery, Cloud VPN, Hybrid Connectivity, Cloud Healthcare, Dataplex, GKE fleet management, Artifact Registry, Google Kubernetes Engine (GKE), Identity and Access Management, Cloud Data Loss Prevention etc in asia-southeast2 region\nOur engineering team continues to investigate the issue in an effort to resolve it for all customers.\nWe will provide an update by Saturday, 2024-11-16 03:45 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see the following symptoms:\nVirtual Private Cloud: Cross-region and external connectivity issues.\nCloud Load Balancing: Increased latency for GCLB requests ingressing in asia-southeast2.\nCloud SQL: Elevated latency and error rate for the Cloud SQL Admin API.\nCloud Spanner: Customers using asia-southeast2 would have seen higher latency.\nCloud Logging: Log ingestion and log routing in asia-southeast2 sees high latency. The users might also observe issues when doing operations (e.g. 
creating bucket) for this region.\nCloud Dataflow: Dataflow jobs are slow or failed to create.\nCloud Data Loss Prevention: Users may observe server unavailable errors\nCloud Firestore: Users experiencing elevated error rates for database get, delete, and creation operations in asia-southeast2.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]},{"created":"2024-11-16T11:07:27+00:00","modified":"2024-11-16T11:12:10+00:00","when":"2024-11-16T11:07:27+00:00","text":"Summary: Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region\nDescription: We are experiencing an issue with Multiple GCP products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing, Cloud Logging, Cloud Firestore, BigQuery, Cloud VPN, Hybrid Connectivity, Cloud Healthcare, Dataplex, GKE fleet management, Artifact Registry, Google Kubernetes Engine (GKE), Identity and Access Management, Cloud Data Loss Prevention etc in asia-southeast2 region\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2024-11-16 03:45 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see the following symptoms:\nVirtual Private Cloud: Cross-region and external connectivity issues.\nCloud Load Balancing: Increased latency for GCLB requests ingressing in asia-southeast2.\nCloud SQL: Elevated latency and error rate for the Cloud SQL Admin API.\nCloud Spanner: Customers using asia-southeast2 would have seen higher latency.\nCloud Logging: Log ingestion and log routing in asia-southeast2 sees high latency. The users might also observe issues when doing operations (e.g. 
creating bucket) for this region.\nCloud Dataflow: Dataflow jobs are slow or failed to create.\nCloud Data Loss Prevention: Users may observe server unavailable errors\nCloud Firestore: Users experiencing elevated error rates for database get, delete, and creation operations in asia-southeast2.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]},{"created":"2024-11-16T11:01:09+00:00","modified":"2024-11-16T11:07:32+00:00","when":"2024-11-16T11:01:09+00:00","text":"Summary: Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region\nDescription: We are experiencing an issue with Multiple GCP products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing, Cloud Logging, Cloud Firestore, BigQuery, Cloud VPN, Hybrid Connectivity, Cloud Healthcare, Dataplex, GKE fleet management, Artifact Registry, Google Kubernetes Engine (GKE), Identity and Access Management, Cloud Data Loss Prevention etc in asia-southeast2 region\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2024-11-16 03:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see the following symptoms:\nVirtual Private Cloud: Cross-region and external connectivity issues.\nCloud Load Balancing: Increased latency for GCLB requests ingressing in asia-southeast2.\nCloud SQL: Elevated latency and error rate for the Cloud SQL Admin API.\nCloud Spanner: Customers using asia-southeast2 would have seen higher latency.\nCloud Logging: Log ingestion and log routing in asia-southeast2 sees high latency. The users might also observe issues when doing operations (e.g. 
creating bucket) for this region.\nCloud Dataflow: Dataflow jobs are slow or failed to create.\nCloud Data Loss Prevention: Users may observe server unavailable errors\nCloud Firestore: Users experiencing elevated error rates for database get, delete, and creation operations in asia-southeast2.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]},{"created":"2024-11-16T10:51:34+00:00","modified":"2024-11-16T11:01:09+00:00","when":"2024-11-16T10:51:34+00:00","text":"Summary: Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region\nDescription: We are experiencing an issue with Multiple GCP products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing, Cloud Logging, Cloud Firestore, BigQuery, Cloud VPN, Hybrid Connectivity, Cloud Healthcare, Dataplex, GKE fleet management, Artifact Registry, Google Kubernetes Engine (GKE), Identity and Access Management, Cloud Data Loss Prevention etc in asia-southeast2 region\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2024-11-16 03:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see the following symptoms:\nVirtual Private Cloud: Cross-region and external connectivity issues\nCloud Load Balancing: Increased latency for GCLB requests ingressing in asia-southeast2\nCloud SQL: Elevated latency and error rate for the Cloud SQL Admin API\nCloud Spanner: Customers using asia-southeast2 would have seen higher latency\nCloud Logging: Log ingestion and log routing in asia-southeast2 sees high latency. The users might also observe issues when doing operations (e.g. 
creating bucket) for this region.\nCloud Dataflow: Dataflow jobs are slow or failed to create.\nCloud Data Loss Prevention: Users may observe server unavailable errors\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]},{"created":"2024-11-16T10:43:31+00:00","modified":"2024-11-16T10:51:37+00:00","when":"2024-11-16T10:43:31+00:00","text":"Summary: Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region\nDescription: We are experiencing an issue with Multiple GCP products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing, Cloud Logging, Cloud Firestore, BigQuery, Cloud VPN, Hybrid Connectivity, Cloud Healthcare, Dataplex, GKE fleet management, Artifact Registry, Google Kubernetes Engine (GKE), Identity and Access Management, Cloud Data Loss Prevention etc in asia-southeast2 region\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2024-11-16 03:15 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see the following symptoms:\nVirtual Private Cloud: Cross-region and external connectivity issues in asia-southeast2\nCloud Load Balancing: Increased latency for GCLB requests ingressing in asia-southeast2\nCloud SQL: Elevated latency and error rate for the Cloud SQL Admin API\nCloud Spanner: Customers using asia-southeast2 would have seen higher latency\nCloud Logging: Log ingestion and log routing in asia-southeast2 sees high latency. The users might also observe issues when doing operations (e.g. creating bucket) for this region.\nCloud Dataflow: Dataflow jobs are slow or failed to create.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]},{"created":"2024-11-16T10:39:35+00:00","modified":"2024-11-16T10:43:31+00:00","when":"2024-11-16T10:39:35+00:00","text":"Summary: Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region\nDescription: We are experiencing an issue with Multiple GCP products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing, Cloud Logging, Cloud Firestore, BigQuery, Cloud VPN, Hybrid Connectivity, Cloud Healthcare, Dataplex, GKE fleet management, Artifact Registry, Google Kubernetes Engine (GKE), Identity and Access Management, Cloud Data Loss Prevention etc in asia-southeast2 region\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2024-11-16 03:15 US/Pacific with current details.\nDiagnosis: Cloud Load Balancing: Increased latency for GCLB requests ingressing in asia-southeast2\nCloud SQL: Elevated latency and error rate for the Cloud SQL Admin API\nCloud Spanner: Customers using asia-southeast2 would have seen higher latency\nCloud Logging: Log ingestion and log routing in asia-southeast2 sees high latency. The users might also observe issues when doing operations (e.g. 
creating bucket) for this region.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]},{"created":"2024-11-16T10:28:26+00:00","modified":"2024-11-16T10:55:34+00:00","when":"2024-11-16T10:28:26+00:00","text":"Summary: Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region\nDescription: We are experiencing an issue with Multiple GCP products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing, Cloud Logging, Cloud Firestore, BigQuery, Cloud VPN etc in asia-southeast2 region\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2024-11-16 03:00 US/Pacific with current details.\nDiagnosis: Increased latency for GCLB requests ingressing in asia-southeast2\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]},{"created":"2024-11-16T10:13:32+00:00","modified":"2024-11-16T10:55:54+00:00","when":"2024-11-16T10:13:32+00:00","text":"Summary: Multiple GCP Products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing are experiencing issues in the asia-southeast2 region\nDescription: We are experiencing an issue with Multiple GCP products including Google Cloud Networking, Virtual Private Cloud (VPC), Cloud Load Balancing etc. in asia-southeast2 region\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2024-11-16 03:00 US/Pacific with current details.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]}],"most_recent_update":{"created":"2024-11-23T01:29:24+00:00","modified":"2024-11-23T01:29:26+00:00","when":"2024-11-23T01:29:24+00:00","text":"# Incident Report\n## Summary\nOn 16 November 2024 at 00:47 US/Pacific, a combination of fiber failures and a network equipment fault led to reduced network capacity between the asia-southeast2 region and other GCP regions. The failures were corrected and minimum required capacity recovered by 02:13 US/Pacific.\nTo our GCP customers whose businesses were impacted during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause and Impact\nGoogle’s global network is designed and built to ensure that any occasional capacity loss events are not noticeable and/or have minimal disruption to customers. We provision several diverse network paths to each region and maintain sufficient capacity buffers based on the measured reliability of capacity in each region.\nBetween 12 November and 16 November, two separate fiber failures occurred near the asia-southeast2 region. These failures temporarily reduced the available network capacity between the asia-southeast2 region and other GCP regions, but did not impact the availability of GCP services in the region. 
Google engineers were alerted to these failures as soon as they occurred and were working with urgency on remediating these fiber failures but had not yet completed full recovery.\nOn 16 November 2024 at 00:47 US/Pacific, a latent software defect impacted a backbone networking router in the asia-southeast2 region, resulting in further reduction of available inter-region capacity, and exhausted our reserve network capacity buffers, causing multiple Google Cloud services in the region to experience high latency and/or elevated error rates for operations requiring inter-region connectivity. During this time, customers in asia-southeast2 would have experienced issues with managing and monitoring existing resources, creating new resources, and data replication to other regions.\nTo mitigate the impact, Google engineers re-routed Internet traffic away from the asia-southeast2 region to be served from other GCP regions, primarily asia-southeast1 while working in parallel to recover the lost capacity. The faulty backbone networking router was recovered on 16 November 2024 at 02:13 US/Pacific. This ended the elevated network latency and error rates for most of the impacted GCP services’ operations. Recovery of the first failed fiber was completed on 18 November 08:45 US/Pacific and the second failed fiber was restored at 09:00 US/Pacific on the same day.\n## Remediation and Prevention\nWe’re taking the following actions to reduce the likelihood of recurrence and time to mitigate impact of this type of incident in the future:\n- During the incident, our actions to reroute traffic away from the asia-southeast2 region and recover the faulty backbone networking router took longer than expected as the loss of capacity hindered our visibility of required networking telemetry and functionality of emergency tooling. 
We’re reviewing these gaps to implement the required improvements to our network observability, emergency tools and incident response playbooks.\n- Work with our fiber partners in the asia-southeast2 region to ensure our fiber paths between facilities in the region and to submarine cable landing stations are on the most reliable routes available, and have adequate preventative maintenance and repair processes.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Artifact Registry","id":"QbBuuiRdsLpMr9WmGwm5"},{"title":"Cloud Data Loss Prevention","id":"xeLtpZYNSw1WSYhkuxHL"},{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"},{"title":"Cloud Load Balancing","id":"ix7u9beT8ivBdjApTif3"},{"title":"Cloud Logging","id":"PuCJ6W2ovoDhLcyvZ1xa"},{"title":"Cloud Memorystore","id":"LGPLu3M5pcUAKU1z6eP3"},{"title":"Cloud Monitoring","id":"3zaaDb7antc73BM1UAVT"},{"title":"Cloud Spanner","id":"EcNGGUgBtBLrtm4mWvqC"},{"title":"Dataplex","id":"Xx5qm9U2ovrN11z2Gd9Q"},{"title":"GKE fleet management","id":"4osgZCUJuuh3whY4B8tt"},{"title":"Google BigQuery","id":"9CcrhHUcFevXPSVaSxkf"},{"title":"Google Cloud Dataflow","id":"T9bFoXPqG8w8g1YbWTKY"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Google Cloud Pub/Sub","id":"dFjdLh2v6zuES6t9ADCB"},{"title":"Google Cloud SQL","id":"hV87iK5DcEXKgWU2kDri"},{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"},{"title":"Healthcare and Life Sciences","id":"zgodfdJcHiKkGxQYixiZ"},{"title":"Hybrid Connectivity","id":"5x6CGnZvSHQZ26KtxpK1"},{"title":"Identity and Access Management","id":"adnGEDEt9zWzs8uF1oKA"},{"title":"Memorystore for Redis","id":"3yFciKa9NQH7pmbnUYUs"},{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"},{"title":"Pub/Sub Lite","id":"5DWkcStmv4dFHRHLaRXb"},{"title":"Virtual Private Cloud (VPC)","id":"BSGtCUnz6ZmyajsjgTKv"}],"uri":"incidents/kDBRnSgQCPw93E8vKKat","currently_affected_locations":[],"previously_affected_locations":[{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"}]},{"id":"kq8vzGmgzfjMfASCdmoV","number":"5834988663039889887","begin":"2024-11-15T23:02:37+00:00","created":"2024-11-16T00:32:48+00:00","end":"2024-11-16T00:52:48+00:00","modified":"2024-11-16T00:52:49+00:00","external_desc":"We are investigating reports of issues with Google Cloud Support.","updates":[{"created":"2024-11-16T00:52:48+00:00","modified":"2024-11-16T00:52:50+00:00","when":"2024-11-16T00:52:48+00:00","text":"The issue with Google Cloud Support has been resolved for all affected customers as of Friday, 2024-11-15 16:35 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-16T00:32:46+00:00","modified":"2024-11-16T00:52:49+00:00","when":"2024-11-16T00:32:46+00:00","text":"Summary: We are investigating reports of issues with Google Cloud Support.\nDescription: We are investigating reports of issues with Google Cloud Support beginning on Friday, 2024-11-15 13:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-11-15 17:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience issues with creating and managing the vector cases. 
Operations like case creation, reads, and updates may have been impacted.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-11-16T00:52:48+00:00","modified":"2024-11-16T00:52:50+00:00","when":"2024-11-16T00:52:48+00:00","text":"The issue with Google Cloud Support has been resolved for all affected customers as of Friday, 2024-11-15 16:35 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"bGThzF7oEGP5jcuDdMuk","service_name":"Google Cloud Support","affected_products":[{"title":"Google Cloud Support","id":"bGThzF7oEGP5jcuDdMuk"}],"uri":"incidents/kq8vzGmgzfjMfASCdmoV","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"NEtbe4S7pMgACeuopMaZ","number":"5871883127490818711","begin":"2024-11-15T05:57:00+00:00","created":"2024-11-15T06:45:21+00:00","end":"2024-11-15T12:09:00+00:00","modified":"2024-11-21T03:28:37+00:00","external_desc":"Issues opening cases","updates":[{"created":"2024-11-20T12:13:00+00:00","modified":"2024-11-21T03:28:37+00:00","when":"2024-11-20T12:13:00+00:00","text":"# Incident Report\n## Summary\nOn 14 November 2024 at 21:57 US/Pacific, our support ticketing system that handles Google Cloud, Billing, and Workspace customer support experienced an unexpected issue during a vendor-planned maintenance event, causing the system to become unavailable. Throughout the incident duration of 6 hours and 12 minutes, customers were unable to update existing chat, portal or email cases. Customers who attempted to create a support case were presented with our backup contact method and were able to receive support through this method which remained available throughout the outage.\n## Root Cause\nThe outage was triggered by a vendor-initiated change that impacted the performance of our support ticket persistence layer. This update inadvertently caused unavailability, specifically to the query subsystem of our support case management tool. After this configuration change was applied, the subsystem became unresponsive, preventing the processing of any read or write commands. As a result, customers and Google Support were unable to access or update support ticket data, leading to service disruption.\n## Remediation and Prevention\nOur monitoring systems detected elevated error rates and, at 22:09 US/Pacific, alerted our engineering team, who immediately started an investigation with the vendor. The vendor's incident team concluded that the query subsystem state would not be resolved by a configuration rollback. 
The vendor’s engineering team prepared a new update, validated it in a test environment, and applied the update to production, returning the system to service on 15 November 2024 at 04:09 US/Pacific.\nWe are taking immediate steps with the vendor to prevent a recurrence and improve reliability in the future:\n- A production change freeze for the vendor's query subsystem is in place until rollout safeguards are sufficient to prevent further impact.\n- We are working with the vendor to improve their change management process to ensure safer rollouts that avoid unexpected issues while also providing earlier detection of rollout change.\n- We will perform a review of rollback safety for configuration changes with the vendor to ensure rollback is always possible, reducing recovery time.\n## Detailed Description of Impact\nStarting on 14 November 2024 at 21:57 US/Pacific,\n- Customers observed increased latency and required multiple attempts when opening support cases. Customers were able to use the backup case creation process to receive support.\n- Customers were able to send and receive updates to existing support cases by email, but were not able to update cases using the support portal. Support agents were able to send update emails for cases, create pro-active bugs and fill Contact-Us-Forms (CUFs) on behalf of their customers. However, responding via the support portal was unavailable.\n- Customers with active chat support cases were unable to continue their conversation. Error messages received by customers included options for continuing support via the Contact-Us-Forms (CUFs).\n- All contractual obligations for support requests submitted through the Contact-Us-Forms (CUFs) were fulfilled.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-15T22:31:25+00:00","modified":"2024-11-20T12:13:00+00:00","when":"2024-11-15T22:31:25+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 14 November, 2024 21:59\n**Incident End:** 15 November, 2024 04:09\n**Duration:** 6 hours, 10 mins\n**Affected Services and Features:**\nGoogle Cloud Support and Google Workspace Support\n**Regions/Zones:** Global\n**Description:**\nGoogle's Ticketing system, supporting Google Cloud, Billing, and Workspace, experienced an unplanned maintenance event, causing the system to become unavailable. Throughout the 6 hours and 10 minutes, customers were unable to update existing chat, portal, or email cases. Customers who attempted to create a support case were presented with our backup contact method and were able to receive support through the backup method.\nPreliminary analysis finds that a planned change to Google’s support ticket persistence layer caused the primary system for creating and updating support sessions to return errors. 
Customers attempting to create a support case were directed to retry, then were provided access to our backup support channel, which worked throughout the outage.\nNo other Google Cloud services were impacted.\nThe engineering on-call team rolled back the change to the persistence layer, restoring service.\nWe sincerely apologize to our Google Cloud customers for this recent service disruption.\n**Customer Impact:**\nCustomers saw increased latency and required multiple attempts when opening support cases. Customers were able to use the backup case creation method to receive support.\nCustomers were able to send updates to existing support cases by email, but may not have been able to update using the support portal. Support agents were unable to respond to already-existing cases.\nCustomers with active chat support cases were unable to continue their conversation.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-15T13:07:37+00:00","modified":"2024-11-20T12:13:50+00:00","when":"2024-11-15T13:07:37+00:00","text":"The issue with Google Cloud Support has been resolved for all affected users as of Friday, 2024-11-15 04:09 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-15T12:46:39+00:00","modified":"2024-11-15T13:07:41+00:00","when":"2024-11-15T12:46:39+00:00","text":"Summary: Issues opening cases\nDescription: Mitigation work is still underway by our engineering team.\nCurrent data indicates that the current mitigation strategy is working and the team continues to see positive results.\nWe will provide more information by Friday, 2024-11-15 05:30 US/Pacific.\nDiagnosis: Customers will have issues opening cases.\nWorkaround: Customers can use the Contact Us Form (CUF) which will be automatically generated when following the normal case creation process to create cases as a backup mechanism.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-11-15T10:45:54+00:00","modified":"2024-11-15T12:46:39+00:00","when":"2024-11-15T10:45:54+00:00","text":"Summary: Issues opening cases\nDescription: We are experiencing an issue with Google Cloud Support.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-11-15 05:00 US/Pacific with current details.\nDiagnosis: Customers will have issues opening cases.\nWorkaround: Customers can use the Contact Us Form (CUF) which will be automatically generated when following the normal case creation process to create cases as a backup mechanism.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-11-15T08:19:40+00:00","modified":"2024-11-15T10:45:54+00:00","when":"2024-11-15T08:19:40+00:00","text":"Summary: Issues opening cases\nDescription: We are experiencing an issue with Google Cloud Support.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-11-15 03:00 US/Pacific with current details.\nDiagnosis: Customers will have issues opening cases.\nWorkaround: Customers can use the Contact Us Form (CUF) which will be automatically generated when following the normal case creation process to create cases as a backup mechanism.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-11-15T07:15:25+00:00","modified":"2024-11-15T08:19:40+00:00","when":"2024-11-15T07:15:25+00:00","text":"Summary: Issues opening 
cases\nDescription: We are experiencing an issue with Google Cloud Support.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-11-15 01:00 US/Pacific with current details.\nDiagnosis: Customers will have issues opening cases.\nWorkaround: Customers can use the Contact Us Form (CUF) which will be automatically generated when following the normal case creation process to create cases as a backup mechanism.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-11-15T06:45:18+00:00","modified":"2024-11-15T07:15:27+00:00","when":"2024-11-15T06:45:18+00:00","text":"Summary: Issues opening cases\nDescription: We are experiencing an issue with Google Cloud Support.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-11-15 01:00 US/Pacific with current details.\nDiagnosis: Customers will have issues opening cases.\nWorkaround: Customers can use the CUF to create cases as a backup mechanism.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-11-20T12:13:00+00:00","modified":"2024-11-21T03:28:37+00:00","when":"2024-11-20T12:13:00+00:00","text":"# Incident Report\n## Summary\nOn 14 November 2024 at 21:57 US/Pacific, our support ticketing system that handles Google Cloud, Billing, and Workspace customer support experienced an unexpected issue during a vendor-planned maintenance event, causing the system to become unavailable. Throughout the incident duration of 6 hours and 12 minutes, customers were unable to update existing chat, portal or email cases. Customers who attempted to create a support case were presented with our backup contact method and were able to receive support through this method which remained available throughout the outage.\n## Root Cause\nThe outage was triggered by a vendor-initiated change that impacted the performance of our support ticket persistence layer. This update inadvertently caused unavailability, specifically to the query subsystem of our support case management tool. After this configuration change was applied, the subsystem became unresponsive, preventing the processing of any read or write commands. As a result, customers and Google Support were unable to access or update support ticket data, leading to service disruption.\n## Remediation and Prevention\nOur monitoring systems detected elevated error rates and, at 22:09 US/Pacific, alerted our engineering team, who immediately started an investigation with the vendor. The vendor's incident team concluded that the query subsystem state would not be resolved by a configuration rollback. 
The vendor’s engineering team prepared a new update, validated it in a test environment, and applied the update to production, returning the system to service on 15 November 2024 at 04:09 US/Pacific.\nWe are taking immediate steps with the vendor to prevent a recurrence and improve reliability in the future:\n- A production change freeze for the vendor's query subsystem is in place until rollout safeguards are sufficient to prevent further impact.\n- We are working with the vendor to improve their change management process to ensure safer rollouts that avoid unexpected issues while also providing earlier detection of rollout issues.\n- We will perform a review of rollback safety for configuration changes with the vendor to ensure rollback is always possible, reducing recovery time.\n## Detailed Description of Impact\nStarting on 14 November 2024 at 21:57 US/Pacific:\n- Customers observed increased latency and required multiple attempts when opening support cases. Customers were able to use the backup case creation process to receive support.\n- Customers were able to send and receive updates to existing support cases by email, but were not able to update cases using the support portal. Support agents were able to send update emails for cases, create proactive bugs, and file Contact-Us-Forms (CUFs) on behalf of their customers. However, responding via the support portal was unavailable.\n- Customers with active chat support cases were unable to continue their conversation. Error messages received by customers included options for continuing support via the Contact-Us-Forms (CUFs).\n- All contractual obligations for support requests submitted through the Contact-Us-Forms (CUFs) were fulfilled.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"bGThzF7oEGP5jcuDdMuk","service_name":"Google Cloud Support","affected_products":[{"title":"Google Cloud Support","id":"bGThzF7oEGP5jcuDdMuk"}],"uri":"incidents/NEtbe4S7pMgACeuopMaZ","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"db3E4dVfQzxfTrhsdZLw","number":"14934870579306172809","begin":"2024-11-13T15:51:14+00:00","created":"2024-11-15T07:39:02+00:00","end":"2024-11-18T10:29:37+00:00","modified":"2024-11-18T10:29:43+00:00","external_desc":"An issue with Vertex Gemini API in multiple regions of Asia and America","updates":[{"created":"2024-11-18T10:29:37+00:00","modified":"2024-11-18T10:29:48+00:00","when":"2024-11-18T10:29:37+00:00","text":"The issue with Vertex Gemini API has been resolved for all affected users as of Monday, 2024-11-18 02:29 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-16T01:21:05+00:00","modified":"2024-11-18T10:29:43+00:00","when":"2024-11-16T01:21:05+00:00","text":"Summary: An issue with Vertex Gemini API in multiple regions of Asia and America\nDescription: Mitigation work is still underway by our engineering team, and error rates and latency are showing continued improvement.\nWe do not have an ETA for full mitigation at this point.\nWe will provide more information by Monday, 2024-11-18 11:00 US/Pacific.\nDiagnosis: Customers impacted by this issue may observe long context requests to Gemini 1.5 Pro 002 are served with errors or extremely long latency in some clusters in the impacted regions.\nWorkaround: The impacted users may try using the Europe based regions to use the Vertex Gemini API
configurations or operations.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-11-15T19:59:38+00:00","modified":"2024-11-16T01:21:05+00:00","when":"2024-11-15T19:59:38+00:00","text":"Summary: An issue with Vertex Gemini API in multiple regions of Asia and America\nDescription: Mitigation work is still underway by our engineering team, and error rates and latency are showing continued improvement.\nWe do not have an ETA for full mitigation at this point.\nWe will provide more information by Friday, 2024-11-15 17:30 US/Pacific.\nDiagnosis: Customers impacted by this issue may observe long context requests to Gemini 1.5 Pro 002 are served with errors or extremely long latency in some clusters in the impacted regions.\nWorkaround: The impacted users may try using the Europe based regions to use the Vertex Gemini API configurations or operations.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-11-15T17:51:28+00:00","modified":"2024-11-15T19:59:38+00:00","when":"2024-11-15T17:51:28+00:00","text":"Summary: An issue with Vertex Gemini API in multiple regions of Asia and America\nDescription: Mitigation work is still underway by our engineering team, and error rates and latency are showing improvement.\nWe will provide more information by Friday, 2024-11-15 12:30 US/Pacific.\nDiagnosis: Customers impacted by this issue may observe long context requests to Gemini 1.5 Pro 002 are served with errors or extremely long latency in some clusters in the impacted regions.\nWorkaround: The impacted users may try using the Europe based regions to use the Vertex Gemini API configurations or operations.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-11-15T14:34:03+00:00","modified":"2024-11-15T17:51:28+00:00","when":"2024-11-15T14:34:03+00:00","text":"Summary: An issue with Vertex Gemini API in multiple regions of Asia and America\nDescription: Mitigation work is currently underway by our engineering team.\nMitigation activities are still ongoing at 
this point.\nWe will provide more information by Friday, 2024-11-15 10:00 US/Pacific.\nDiagnosis: Customers impacted by this issue may observe long context requests to Gemini 1.5 Pro 002 are served with errors or extremely long latency in some clusters in the impacted regions.\nWorkaround: The impacted users may try using the Europe based regions to use the Vertex Gemini API configurations or operations.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-11-15T13:08:47+00:00","modified":"2024-11-15T14:34:03+00:00","when":"2024-11-15T13:08:47+00:00","text":"Summary: An issue with Vertex Gemini API in multiple regions of Asia and America\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Friday, 2024-11-15 07:00 US/Pacific.\nDiagnosis: Customers impacted by this issue may observe long context requests to Gemini 1.5 Pro 002 are served with errors or extremely long latency in some clusters in the impacted regions.\nWorkaround: The impacted users may try using the Europe based regions to use the Vertex Gemini API configurations or operations.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-11-15T12:48:46+00:00","modified":"2024-11-15T13:09:01+00:00","when":"2024-11-15T12:48:46+00:00","text":"Summary: An issue with Vertex Gemini API in multiple regions of Asia and America\nDescription: We are experiencing an intermittent issue with Vertex Gemini API beginning at Wednesday, 2024-11-13 03:21 US/Pacific.\nWhile our engineering team continues to investigate the issue, please reach out to Google Cloud Support if you need assistance.\nWe will provide an update by Friday, 2024-11-15 06:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers impacted by this issue may observe long context requests to Gemini 1.5 Pro 002 are served with errors or extremely long latency in some clusters in the impacted regions.\nWorkaround: The impacted users may try using the Europe based regions to use the Vertex Gemini API configurations or operations.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore 
(asia-southeast1)","id":"asia-southeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-11-15T11:42:36+00:00","modified":"2024-11-15T12:48:53+00:00","when":"2024-11-15T11:42:36+00:00","text":"Summary: An issue with Vertex Gemini API in multiple regions of Asia and America\nDescription: We are experiencing an intermittent issue with Vertex Gemini API beginning at Wednesday, 2024-11-13 03:21 US/Pacific.\nWhile our engineering team continues to investigate the issue, please reach out to Google Cloud Support if you need assistance.\nWe will provide an update by Friday, 2024-11-15 06:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers impacted by this issue may observe long context requests to Gemini 1.5 Pro 002 are served with errors or extremely long latency in some clusters in the impacted regions.\nWorkaround: The impacted users may try using the Europe based regions to use the Vertex Gemini API configurations or operations.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-11-15T11:38:11+00:00","modified":"2024-11-15T11:42:36+00:00","when":"2024-11-15T11:38:11+00:00","text":"Summary: An issue with Vertex Gemini API in multiple regions of Asia and America\nDescription: We are experiencing an intermittent issue with Vertex Gemini API beginning at Wednesday, 2024-11-13 03:21 US/Pacific.\nWhile our engineering team continues to investigate the issue, please reach out to Google Cloud Support if you need assistance.\nWe will provide an update by Friday, 2024-11-15 06:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Users impacted by this issue may observe long context requests to Gemini 1.5 Pro 002 are served with either 100% errors or extremely long latency in some clusters in the impacted regions.\nWorkaround: The impacted users may try using the Europe based regions to use the Vertex Gemini API configurations or operations.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia 
(us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-11-15T07:39:00+00:00","modified":"2024-11-15T11:38:20+00:00","when":"2024-11-15T07:39:00+00:00","text":"Summary: An issue with Vertex Gemini API in multiple regions of Asia and America\nDescription: We are experiencing an intermittent issue with Vertex Gemini API beginning at Wednesday, 2024-11-13 03:21 US/Pacific.\nWhile our engineering team continues to investigate the issue, please reach out to Google Cloud Support if you need assistance.\nWe will provide an update by Friday, 2024-11-15 04:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Users impacted by this issue may observe long context requests to Gemini 1.5 Pro 002 are served with either 100% errors or extremely long latency in some clusters in the impacted regions.\nWorkaround: The impacted users may try using the Europe based regions to use the Vertex Gemini API configurations or operations.","status":"SERVICE_DISRUPTION","affected_locations":[]}],"most_recent_update":{"created":"2024-11-18T10:29:37+00:00","modified":"2024-11-18T10:29:48+00:00","when":"2024-11-18T10:29:37+00:00","text":"The issue with Vertex Gemini API has been resolved for all affected users as of Monday, 2024-11-18 02:29 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"Z0FZJAMvEB4j3NbCJs6B","service_name":"Vertex Gemini API","affected_products":[{"title":"Vertex Gemini API","id":"Z0FZJAMvEB4j3NbCJs6B"}],"uri":"incidents/db3E4dVfQzxfTrhsdZLw","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"vz4bRXVxs45x33Bttboj","number":"15909649978535456609","begin":"2024-11-11T16:45:51+00:00","created":"2024-11-11T18:12:15+00:00","end":"2024-11-11T18:50:40+00:00","modified":"2024-11-11T18:50:42+00:00","external_desc":"Issues creating new Notification Configs for Cloud Security Command Center","updates":[{"created":"2024-11-11T18:50:40+00:00","modified":"2024-11-11T18:50:43+00:00","when":"2024-11-11T18:50:40+00:00","text":"The issue with Cloud Security Command Center is believed to be affecting a very small number of projects and our Engineering Team is working on it.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nNo further updates will be provided here.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-11T18:12:13+00:00","modified":"2024-11-11T18:50:42+00:00","when":"2024-11-11T18:12:13+00:00","text":"Summary: Issues creating new Notification Configs for Cloud Security Command Center\nDescription: We are experiencing an issue with 
Cloud Security Command Center beginning at Saturday, 2024-11-9 08:45 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-11-11 11:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers may be unable to create new Notification Configs for Cloud Security Command Center\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-11-11T18:50:40+00:00","modified":"2024-11-11T18:50:43+00:00","when":"2024-11-11T18:50:40+00:00","text":"The issue with Cloud Security Command Center is believed to be affecting a very small number of projects and our Engineering Team is working on it.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nNo further updates will be provided here.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"csyyfUYy88hkeqbv23Mc","service_name":"Cloud Security Command Center","affected_products":[{"title":"Cloud Security Command Center","id":"csyyfUYy88hkeqbv23Mc"}],"uri":"incidents/vz4bRXVxs45x33Bttboj","currently_affected_locations":[],"previously_affected_locations":[]},{"id":"jEg8HrM9CUrFvWbGvDEg","number":"9147874926105824090","begin":"2024-11-09T01:49:40+00:00","created":"2024-11-09T04:17:58+00:00","end":"2024-11-09T06:33:01+00:00","modified":"2024-11-09T06:33:08+00:00","external_desc":"Issue with Vertex Gemini API","updates":[{"created":"2024-11-09T06:33:01+00:00","modified":"2024-11-09T06:33:09+00:00","when":"2024-11-09T06:33:01+00:00","text":"The issue with Vertex Gemini API has been resolved for all affected users as of Friday, 2024-11-08 22:32 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-09T05:48:22+00:00","modified":"2024-11-09T06:33:08+00:00","when":"2024-11-09T05:48:22+00:00","text":"Summary: Issue with Vertex Gemini API\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Saturday, 2024-11-09 01:00 US/Pacific.\nDiagnosis: Customers impacted by this issue may see HTTP 500 errors for requests in the affected regions.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-11-09T04:17:55+00:00","modified":"2024-11-09T05:48:22+00:00","when":"2024-11-09T04:17:55+00:00","text":"Summary: Issue with Vertex Gemini API\nDescription: We are experiencing an issue with Vertex Gemini API.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-11-08 22:00 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see HTTP 500 errors for requests in the affected regions.\nWorkaround: None at this 
time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2024-11-09T06:33:01+00:00","modified":"2024-11-09T06:33:09+00:00","when":"2024-11-09T06:33:01+00:00","text":"The issue with Vertex Gemini API has been resolved for all affected users as of Friday, 2024-11-08 22:32 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"Z0FZJAMvEB4j3NbCJs6B","service_name":"Vertex Gemini API","affected_products":[{"title":"Vertex Gemini API","id":"Z0FZJAMvEB4j3NbCJs6B"}],"uri":"incidents/jEg8HrM9CUrFvWbGvDEg","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"QKjWxuxQGHHmNN1rnrp9","number":"476222755671941118","begin":"2024-11-08T16:17:45+00:00","created":"2024-11-08T17:05:20+00:00","end":"2024-11-08T17:52:31+00:00","modified":"2024-11-08T17:52:33+00:00","external_desc":"Intermittent failure to create support cases","updates":[{"created":"2024-11-08T17:52:31+00:00","modified":"2024-11-08T17:52:34+00:00","when":"2024-11-08T17:52:31+00:00","text":"The issue with Google Cloud Support has been resolved for all affected users as of Friday, 2024-11-08 09:08 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-08T17:05:18+00:00","modified":"2024-11-08T17:52:33+00:00","when":"2024-11-08T17:05:18+00:00","text":"Summary: Intermittent failure to create support cases\nDescription: We are experiencing an intermittent issue with Google Cloud Support beginning at Friday, 2024-11-08 08:05 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-11-08 10:15 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Users are facing issues intermittently while creating the support cases\nWorkaround: Users are requested to retry the request as the issue is intermittent","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-11-08T17:52:31+00:00","modified":"2024-11-08T17:52:34+00:00","when":"2024-11-08T17:52:31+00:00","text":"The issue with Google Cloud Support has been resolved for all affected users as of Friday, 2024-11-08 09:08 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"bGThzF7oEGP5jcuDdMuk","service_name":"Google Cloud Support","affected_products":[{"title":"Google Cloud 
Support","id":"bGThzF7oEGP5jcuDdMuk"}],"uri":"incidents/QKjWxuxQGHHmNN1rnrp9","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"KsnPoZyRWGjzePS1YHYL","number":"11412206907820838620","begin":"2024-11-07T23:53:00+00:00","created":"2024-11-08T03:10:47+00:00","end":"2024-11-08T08:07:41+00:00","modified":"2024-11-08T08:12:57+00:00","external_desc":"Issue with Mandiant Threat Intelligence","updates":[{"created":"2024-11-08T08:07:41+00:00","modified":"2024-11-08T08:07:44+00:00","when":"2024-11-08T08:07:41+00:00","text":"The issue with Mandiant Threat Intelligence has been resolved for all affected users as of Thursday, 2024-11-07 22:07 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-08T03:10:45+00:00","modified":"2024-11-08T08:07:43+00:00","when":"2024-11-08T03:10:45+00:00","text":"Summary: We are investigating an issue related to Mandiant Threat Intelligence\nDescription: We are experiencing an issue with a subset of our functions within Mandiant Threat Intelligence beginning on Thursday, 2024-11-07 15:53 US/Pacific.\nOur engineering team is currently investigating an issue specific to the Digital Threat Monitoring function.\nWe will provide an update by Friday, 2024-11-08 05:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: This issue does not impact the overall availability or core function of the product.\nFor the duration of the impact, affected customers will receive a decreased number of alerts.\nAPI login for select Mandiant products was degraded; this particular impact has been mitigated.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-11-08T08:07:41+00:00","modified":"2024-11-08T08:07:44+00:00","when":"2024-11-08T08:07:41+00:00","text":"The issue with Mandiant Threat Intelligence has been resolved for all affected users as of Thursday, 2024-11-07 22:07 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"Dvtt8cwUdpUYoGMpt2Fc","service_name":"Mandiant Threat Intelligence","affected_products":[{"title":"Mandiant Threat Intelligence","id":"Dvtt8cwUdpUYoGMpt2Fc"}],"uri":"incidents/KsnPoZyRWGjzePS1YHYL","currently_affected_locations":[],"previously_affected_locations":[]},{"id":"oMxsNayyCDFD3fm6FgNE","number":"11283748343930355101","begin":"2024-11-07T14:55:27+00:00","created":"2024-11-07T15:21:50+00:00","end":"2024-11-07T16:06:12+00:00","modified":"2024-11-07T16:06:16+00:00","external_desc":"Chronicle Security User Interface Issue","updates":[{"created":"2024-11-07T16:06:12+00:00","modified":"2024-11-07T16:06:17+00:00","when":"2024-11-07T16:06:12+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Thursday, 2024-11-07 08:00 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-07T15:48:38+00:00","modified":"2024-11-07T16:06:16+00:00","when":"2024-11-07T15:48:38+00:00","text":"Summary: Chronicle Security User Interface Issue\nDescription: We are experiencing an issue with Chronicle Security.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-11-07 08:45 US/Pacific 
with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may see missing and malformed user interface elements\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-11-07T15:21:36+00:00","modified":"2024-11-07T15:48:51+00:00","when":"2024-11-07T15:21:36+00:00","text":"Summary: Chronicle Security User Interface Issue\nDescription: We are experiencing an issue with Chronicle Security.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-11-07 08:40 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may see missing and malformed user interface elements\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-11-07T16:06:12+00:00","modified":"2024-11-07T16:06:17+00:00","when":"2024-11-07T16:06:12+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Thursday, 2024-11-07 08:00 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/oMxsNayyCDFD3fm6FgNE","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Multi-region: us","id":"us"}]},{"id":"DsEwiPxaXU2QSkthu9WC","number":"12960599015176621765","begin":"2024-11-06T09:30:00+00:00","created":"2024-11-06T12:44:17+00:00","end":"2024-11-06T14:21:35+00:00","modified":"2024-11-06T14:21:39+00:00","external_desc":"Looker Studio users may observe increased latency","updates":[{"created":"2024-11-06T14:21:35+00:00","modified":"2024-11-06T14:21:44+00:00","when":"2024-11-06T14:21:35+00:00","text":"The issue with Looker
Studio has been resolved for all affected users as of Wednesday, 2024-11-06 05:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-06T13:45:04+00:00","modified":"2024-11-06T14:21:39+00:00","when":"2024-11-06T13:45:04+00:00","text":"Summary: Looker Studio users may observe increased latency\nDescription: We are experiencing an issue with Looker Studio beginning on Wednesday, 2024-11-06 01:30 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-11-06 07:00 US/Pacific with current details.\nDiagnosis: Looker Studio users may observe issues with their dashboards: components may load slower than usual or become stuck in an endless loading state.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-11-06T12:43:59+00:00","modified":"2024-11-06T13:45:04+00:00","when":"2024-11-06T12:43:59+00:00","text":"Summary: Looker Studio users may observe increased latency\nDescription: We are experiencing an issue with Looker Studio beginning on Wednesday, 2024-11-06 01:30 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-11-06 06:00 US/Pacific with current details.\nDiagnosis: Looker Studio users may observe issues with their dashboards: components may load slower than usual or become stuck in an endless loading state.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-11-06T14:21:35+00:00","modified":"2024-11-06T14:21:44+00:00","when":"2024-11-06T14:21:35+00:00","text":"The issue with Looker Studio has been resolved for all affected users as of Wednesday, 2024-11-06 05:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"kEYNqRYFXXHxP9QeFJ1d","service_name":"Looker Studio","affected_products":[{"title":"Looker Studio","id":"kEYNqRYFXXHxP9QeFJ1d"}],"uri":"incidents/DsEwiPxaXU2QSkthu9WC","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"Pvq1RGrACDwogEE8hcrm","number":"11402175160313377268","begin":"2024-10-29T23:21:00+00:00","created":"2024-10-30T00:17:56+00:00","end":"2024-10-29T23:43:00+00:00","modified":"2024-11-11T15:43:47+00:00","external_desc":"All the impacted GCP products in australia-southeast2 have recovered.","updates":[{"created":"2024-11-08T15:14:10+00:00","modified":"2024-11-11T15:43:47+00:00","when":"2024-11-08T15:14:10+00:00","text":"# Incident Report\n## Summary\nOn Tuesday, 29 October 2024 at 16:21 US/Pacific, a power voltage swell event impacted the Network Point of Presence (PoP) infrastructure in one of the campuses supporting the australia-southeast2 region, causing networking devices to unexpectedly reboot. The impacted network paths were recovered post-reboot and connectivity was restored at 16:43 US/Pacific.\nTo our Google Cloud customers whose businesses were impacted during this outage, we sincerely apologize.
This is not the level of quality and reliability we strive to offer, and we are taking immediate steps to improve the platform’s performance and resilience.\n## Impact\nGCP operates multiple PoPs in the australia-southeast2 region to connect Cloud zones to each other and to Google’s Global Network. Google maintains sufficient capacity to ensure that occasional failures of network capacity are not noticeable or cause only minimal disruption to customers.\nOn Tuesday, 29 October 2024, two fiber failures occurred near this region. These failures reduced the available inter-region network capacity, but did not impact the availability of GCP services in the region.\nLater that same day, the PoP hosting 50% of the network capacity for the region experienced a power voltage swell event causing the networking devices in the PoP to reboot. When the networking devices in this datacenter rebooted, this networking capacity was temporarily unavailable.\nThis rare triple failure resulted in reduced connectivity across zones in the region and caused multiple Google Cloud services to lose connectivity for 21 minutes. Additionally, customers using zones A and C experienced up to 15% increased latency and error rates for 16 minutes due to degraded inter-zone network connectivity while the networking devices recovered from the reboot.\nGoogle engineers were already working on remediating the two fiber failures when they were alerted to the network disruption caused by the voltage swell via an internal monitoring alert on Tuesday, 29 October 2024 at 16:29 US/Pacific, and immediately started an investigation.\nAfter the devices had completed rebooting, the impacted network paths were recovered and all affected Cloud zones regained full connectivity to the network at 16:43 US/Pacific.\nThe majority of GCP services impacted by the issue recovered shortly thereafter. A few Cloud services experienced longer restoration times as manual actions were required in some cases to complete full recovery.\n## Root cause of device reboots\nGoogle’s investigation into the cause of the networking device reboots is ongoing; we will provide additional information once analysis is completed.\n## Remediation and Prevention\nWe are taking the following actions to prevent a recurrence and improve reliability in the future:\n- Review our datacenter power distribution design in the region and implement any recommended additional protections for critical networking devices against voltage swells and sags. This includes ensuring there are two additional static UPS systems in the affected datacenter's power distribution design.\n- Implement changes to network device configuration that reduce time to recover full network connectivity after a failure.\n- Root cause and determine corrective actions for GCP Services that did not recover quickly from the incident after the network connectivity was restored.\n---","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-30T19:30:45+00:00","modified":"2024-11-08T15:14:10+00:00","when":"2024-10-30T19:30:45+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues.
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support .\n(All Times US/Pacific)\n**Incident Start:** 29 October 2024 16:21\n**Incident End:** 29 October 2024 19:34\n**Duration:** 3 hours, 13 minutes\n**Affected Services and Features:**\nApigee, Apigee Edge Public Cloud, Batch, Cloud Filestore, Cloud Firestore, Cloud Key Management Service, Cloud NAT, Cloud Run, Google BigQuery, Google Cloud Dataflow, Google Cloud Dataproc, Google Cloud Networking, Google Cloud Pub/Sub, Google Cloud SQL, Google Cloud Storage, Google Compute Engine, Google Kubernetes Engine, Identity and Access Management, Persistent Disk, Resource Manager API, Virtual Private Cloud (VPC)\n**Regions/Zones:** australia-southeast2\n**Description:**\nMultiple Google Cloud products experienced service disruptions of varying impact and duration, with the longest lasting 2 hours, 3 minutes in the australia-southeast2 region.\nFrom preliminary analysis, the root cause of the issue was a power interruption causing network and optical infrastructure to reboot in a subset of the sites in the australia-southeast2 region.\nGoogle will complete a full Incident Report in the following days that will provide a detailed root cause.\n**Customer Impact:**\n**Apigee** - Impacted users experienced issues with pod scheduling from 16:40 to 19:34.\n**Apigee Edge Public Cloud** - Impacted users observed increased runtime latency and an increase in 5XX errors for runtime from 16:21 to 17:46.\n**Batch** - New batch jobs that were created in the australia-southeast2 region remained in SCHEDULED status and did not progress. Customers had the option to switch to different regions.\n**Cloud Filestore** - Instance creation/deletion operations failed in the region. Instances with virtual machines in these clusters were not reachable from other regions. Regional instances, depending on the placement of the majority of the virtual machines, went into lockdown.\n**Cloud Firestore** - Customers in the australia-southeast2 region experienced limited to no availability from 16:21 to 16:44.\n**Cloud Key Management Service** - KMS was not reachable in the australia-southeast2 region.
Approximately 60% of the traffic was lost during the impacted period.\n**Cloud NAT** - Customers experienced loss of connectivity in the australia-southeast2 region.\n**Cloud Run** - Impacted users experienced dropped requests and high error rates from 16:20 to 17:14.\n**Google BigQuery** - Impacted users observed delays/errors in handling requests from 16:20 to 16:55.\n**Google Cloud Dataflow** - Impacted users had limited to no availability in the australia-southeast2 region between 16:21 and 17:40.\n**Google Cloud Dataproc** - Impacted users observed that all Dataproc services were down from 17:13 to 19:10.\n**Google Cloud Networking** - Impacted users experienced up to 100% of the requests being dropped from 16:21 to 16:43.\n**Google Cloud Pub/Sub** - Cloud Pub/Sub had limited to no availability in the australia-southeast2 region between 16:21 and 17:06.\n**Google Cloud SQL** - Multiple instances of Cloud SQL were unavailable between 16:23 and 17:52.\n**Google Cloud Storage** - Impacted users/projects experienced request timeout or unavailable errors between 16:21 and 17:12.\n**Google Compute Engine** - Impacted users observed GCE operations such as compute.instances.insert fail from 16:40 to 19:34.\n**Google Kubernetes Engine** - Impacted users were unable to make changes to their workloads on GKE clusters from 16:20 to 17:30.\n**Identity and Access Management** - Impacted users experienced loss of traffic and visible latency/unavailability within the region from 16:28 to 17:14.\n**Persistent Disk** - Customers experienced high latency of up to several minutes for I/O operations in the australia-southeast2 region. As a workaround, customers could restore snapshots in another region or use asynchronous failover replicas outside the impacted region.\n**Virtual Private Cloud (VPC)** - Impacted users experienced a network outage in and out of the impacted region from 16:35 to 18:41.\n**Resource Manager API** - Impacted users experienced loss of traffic and visible latency/unavailability within the region from 16:28 to 17:14.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-30T02:29:37+00:00","modified":"2024-10-30T19:30:45+00:00","when":"2024-10-30T02:29:37+00:00","text":"The issue with Google Cloud Networking, Cloud Run, Identity and Access Management, Resource Manager API, Persistent Disk, Virtual Private Cloud (VPC), Google Cloud Dataflow, Apigee, Google Compute Engine, Google Cloud SQL, Apigee Edge Public Cloud, Google Cloud Storage, Google BigQuery, Google Kubernetes Engine, Cloud Dataproc has been resolved for all affected users as of Tuesday, 2024-10-29 18:24 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-30T01:47:23+00:00","modified":"2024-10-30T02:29:42+00:00","when":"2024-10-30T01:47:23+00:00","text":"Summary: Some GCP products in australia-southeast2 may have intermittent network connectivity.\nDescription: We are experiencing an issue with Google Cloud Networking, Cloud Run, Resource Manager API, Google Cloud Dataflow, Google Cloud SQL, Google Cloud Storage, Google Kubernetes Engine, Google Compute Engine beginning at Tuesday, 2024-10-29 16:28 US/Pacific.\nBased on internal metrics, the following products have seen recovery:\n- Apigee and Apigee Edge Public Cloud\n- Cloud IAM\n- Cloud KMS\n- Persistent disk\n- Google BigQuery\n- Virtual Private Cloud (VPC)\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday,
2024-10-29 19:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this may see issues with new deployments.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"}]},{"created":"2024-10-30T00:59:39+00:00","modified":"2024-10-30T01:47:27+00:00","when":"2024-10-30T00:59:39+00:00","text":"Summary: Multiple GCP products impacted in australia-southeast2 with intermittent network connectivity.\nDescription: We are experiencing an issue with Google Cloud Networking, Cloud Run, Resource Manager API, Virtual Private Cloud (VPC), Google Cloud Dataflow, Google Compute Engine, Google Cloud SQL, Google Cloud Storage, Google BigQuery beginning at Tuesday, 2024-10-29 16:28 US/Pacific.\nBased on internal metrics, the following products have seen recovery:\n- Apigee and Apigee Edge Public Cloud\n- Cloud IAM\n- Cloud KMS\n- Persistent disk\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-10-30 00:41 US/Pacific with current details.\nDiagnosis: Customers impacted by this may see issues with new deployments.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"}]},{"created":"2024-10-30T00:29:08+00:00","modified":"2024-10-30T00:59:43+00:00","when":"2024-10-30T00:29:08+00:00","text":"Summary: Multiple GCP products impacted in australia-southeast2\nDescription: We are experiencing an issue with Google Cloud Networking, Cloud Run, Identity and Access Management, Resource Manager API, Persistent Disk, Virtual Private Cloud (VPC), Google Cloud Dataflow, Google Compute Engine beginning at Tuesday, 2024-10-29 16:21 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-10-29 18:00 US/Pacific with current details.\nDiagnosis: Customers impacted by this may see issues with new deployments.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"}]},{"created":"2024-10-30T00:17:53+00:00","modified":"2024-10-30T00:29:12+00:00","when":"2024-10-30T00:17:53+00:00","text":"Summary: Multiple GCP products impacted in australia-southeast2\nDescription: We are experiencing an issue with multiple GCP products beginning at Tuesday, 2024-10-29 16:21 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-10-29 18:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers impacted by this may see issues with new deployments.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"}]}],"most_recent_update":{"created":"2024-11-08T15:14:10+00:00","modified":"2024-11-11T15:43:47+00:00","when":"2024-11-08T15:14:10+00:00","text":"# Incident Report\n## Summary\nOn Tuesday, 29 October 2024 at 16:21 US/Pacific, a power voltage swell event impacted the Network Point of Presence (PoP) infrastructure in one of the campuses supporting the australia-southeast2 region, causing networking devices to unexpectedly reboot. The impacted network paths were recovered post-reboot and connectivity was restored at 16:43 US/Pacific.\nTo our Google Cloud customers whose businesses were impacted during this outage, we sincerely apologize.
This is not the level of quality and reliability we strive to offer, and we are taking immediate steps to improve the platform’s performance and resilience.\n## Impact\nGCP operates multiple PoPs in the australia-southeast2 region to connect Cloud zones to each other and to Google’s Global Network. Google maintains sufficient capacity to ensure that occasional failures of network capacity are not noticeable or cause only minimal disruption to customers.\nOn Tuesday, 29 October 2024, two fiber failures occurred near this region. These failures reduced the available inter-region network capacity, but did not impact the availability of GCP services in the region.\nLater that same day, the PoP hosting 50% of the network capacity for the region experienced a power voltage swell event causing the networking devices in the PoP to reboot. When the networking devices in this datacenter rebooted, this networking capacity was temporarily unavailable.\nThis rare triple failure resulted in reduced connectivity across zones in the region and caused multiple Google Cloud services to lose connectivity for 21 minutes. Additionally, customers using zones A and C experienced up to 15% increased latency and error rates for 16 minutes due to degraded inter-zone network connectivity while the networking devices recovered from the reboot.\nGoogle engineers were already working on remediating the two fiber failures when they were alerted to the network disruption caused by the voltage swell via an internal monitoring alert on Tuesday, 29 October 2024 at 16:29 US/Pacific, and immediately started an investigation.\nAfter the devices had completed rebooting, the impacted network paths were recovered and all affected Cloud zones regained full connectivity to the network at 16:43 US/Pacific.\nThe majority of GCP services impacted by the issue recovered shortly thereafter. A few Cloud services experienced longer restoration times as manual actions were required in some cases to complete full recovery.\n## Root cause of device reboots\nGoogle’s investigation into the cause of the networking device reboots is ongoing; we will provide additional information once analysis is completed.\n## Remediation and Prevention\nWe are taking the following actions to prevent a recurrence and improve reliability in the future:\n- Review our datacenter power distribution design in the region and implement any recommended additional protections for critical networking devices against voltage swells and sags.
This includes ensuring there are two additional static UPS systems in the affected datacenter's power distribution design.\n- Implement changes to network device configuration that reduce time to recover full network connectivity after a failure.\n- Root cause and determine corrective actions for GCP Services that did not recover quickly from the incident after the network connectivity was restored.\n---","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Apigee","id":"9Y13BNFy4fJydvjdsN3X"},{"title":"Apigee Edge Public Cloud","id":"SumcdgBT6GQBzp1vmdXu"},{"title":"Batch","id":"8XjnU88URVtZrAL8KRvA"},{"title":"Cloud Filestore","id":"jog4nyYkquiLeSK5s26q"},{"title":"Cloud Firestore","id":"CETSkT92V21G6A1x28me"},{"title":"Cloud Key Management Service","id":"67cSySTL7dwJZo9JWUGU"},{"title":"Cloud NAT","id":"hCNpnTQHkUCCGxJy35Yq"},{"title":"Cloud Run","id":"9D7d2iNBQWN24zc1VamE"},{"title":"Google BigQuery","id":"9CcrhHUcFevXPSVaSxkf"},{"title":"Google Cloud Dataflow","id":"T9bFoXPqG8w8g1YbWTKY"},{"title":"Google Cloud Dataproc","id":"yjXrEg3Yvy26BauMwr69"},{"title":"Google Cloud Deploy","id":"6z5SnvJrJMJQSdJmUQjH"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Google Cloud Pub/Sub","id":"dFjdLh2v6zuES6t9ADCB"},{"title":"Google Cloud SQL","id":"hV87iK5DcEXKgWU2kDri"},{"title":"Google Cloud Storage","id":"UwaYoXQ5bHYHG6EdiPB8"},{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"},{"title":"Identity and Access Management","id":"adnGEDEt9zWzs8uF1oKA"},{"title":"Persistent Disk","id":"SzESm2Ux129pjDGKWD68"},{"title":"Resource Manager API","id":"MRJK8bmwYLMv6Dtg16kU"},{"title":"Virtual Private Cloud (VPC)","id":"BSGtCUnz6ZmyajsjgTKv"}],"uri":"incidents/Pvq1RGrACDwogEE8hcrm","currently_affected_locations":[],"previously_affected_locations":[{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"}]},{"id":"gtxYCj2223df49HggFgK","number":"8993750502106852141","begin":"2024-10-28T21:24:00+00:00","created":"2024-10-28T23:10:34+00:00","end":"2024-10-28T22:38:00+00:00","modified":"2024-10-29T22:17:36+00:00","external_desc":"[Mandiant.com](http://Mandiant.com) was inaccessible globally.","updates":[{"created":"2024-10-29T22:11:36+00:00","modified":"2024-10-29T22:17:36+00:00","when":"2024-10-29T22:11:36+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support .\n(All Times US/Pacific)\n**Incident Start:** 28 October 2024 14:24\n**Incident End:** 28 October 2024 15:38\n**Duration:** 1 hour and 14 minutes\n**Affected Services and Features:**\n* Mandiant Attack Surface Management\n* Mandiant Hunt for Chronicle\n* Mandiant Threat Intelligence\n* Mandiant Managed Defense\n* Mandiant Security Validation\n**Regions/Zones:** Global\n**Description:**\nMandiant experienced a service outage for a duration of 1 hour and 14 minutes, beginning at 14:24 US/Pacific on Monday, 28 October 2024, during which the users of mandiant.com were inadvertently redirected to mandiant.exceedlms.com.\nFrom our analysis, the root cause of the issue was an inappropriate redirect rule, which was implemented as part of an ongoing procedure to assess the effectiveness of an upcoming transition.\nGoogle engineers were alerted to the incident by our internal monitoring alerts at 14:29 US/Pacific on Monday, 28 October 2024, and immediately started an investigation. Once the nature and scope of the incident were identified, Google engineers rolled back the change to remove the incorrect redirect.\n**Customer Impact:**\nAll users were unable to access mandiant.com and were redirected to mandiant.exceedlms.com for a total duration of 14 minutes. Once the change was rolled back, users continued to see impact for up to 1 hour due to caching.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-28T23:22:50+00:00","modified":"2024-10-29T22:11:36+00:00","when":"2024-10-28T23:22:50+00:00","text":"The issue with Mandiant Security Validation, Mandiant Attack Surface Management, Mandiant Hunt for Chronicle, Mandiant Managed Defense, Mandiant Threat Intelligence has been resolved for all affected users as of Monday, 2024-10-28 14:38 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-28T23:10:30+00:00","modified":"2024-10-28T23:22:54+00:00","when":"2024-10-28T23:10:30+00:00","text":"Summary: [Mandiant.com](http://Mandiant.com) was inaccessible globally.\nDescription: We experienced an issue with mandiant.com beginning on Monday, 2024-10-28 at 14:25 US/Pacific.\nOur engineers mitigated the issue on Monday, 2024-10-28 at 14:38 US/Pacific.\nWe will provide an update by Monday, 2024-10-28 16:30 US/Pacific with current details.\nWe apologize to all who were affected by the disruption.\nDiagnosis: Customers impacted by this issue were unable to access the webpage.\nWorkaround: Clear browser cache or do a hard refresh (Shift+F5)","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-10-29T22:11:36+00:00","modified":"2024-10-29T22:17:36+00:00","when":"2024-10-29T22:11:36+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues.
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support .\n(All Times US/Pacific)\n**Incident Start:** 28 October 2024 14:24\n**Incident End:** 28 October 2024 15:38\n**Duration:** 1 hour and 14 minutes\n**Affected Services and Features:**\n* Mandiant Attack Surface Management\n* Mandiant Hunt for Chronicle\n* Mandiant Threat Intelligence\n* Mandiant Managed Defense\n* Mandiant Security Validation\n**Regions/Zones:** Global\n**Description:**\nMandiant experienced a service outage for a duration of 1 hour and 14 minutes, beginning at 14:24 US/Pacific on Monday, 28 October 2024, during which the users of mandiant.com were inadvertently redirected to mandiant.exceedlms.com.\nFrom our analysis, the root cause of the issue was an inappropriate redirect rule, which was implemented as part of an ongoing procedure to assess the effectiveness of an upcoming transition.\nGoogle engineers were alerted to the incident by our internal monitoring alerts at 14:29 US/Pacific on Monday, 28 October 2024, and immediately started an investigation. Once the nature and scope of the incident were identified, Google engineers rolled back the change to remove the incorrect redirect.\n**Customer Impact:**\nAll users were unable to access mandiant.com and were redirected to mandiant.exceedlms.com for a total duration of 14 minutes. Once the change was rolled back, users continued to see impact for up to 1 hour due to caching.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Mandiant Attack Surface Management","id":"v7DL2fMFnZpCxNwd8KE7"},{"title":"Mandiant Hunt for Chronicle","id":"uomxopvCRv8y7bvrA1tK"},{"title":"Mandiant Managed Defense","id":"9aKw9s8p43AYeBmo4Gvx"},{"title":"Mandiant Security Validation","id":"we27wL2FBSvodeP7GiRS"},{"title":"Mandiant Threat Intelligence","id":"Dvtt8cwUdpUYoGMpt2Fc"}],"uri":"incidents/gtxYCj2223df49HggFgK","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"pDtW3Wu1cPpmSxEBK9jp","number":"11584365380803151747","begin":"2024-10-28T21:23:26+00:00","created":"2024-10-28T21:59:25+00:00","end":"2024-10-28T23:09:40+00:00","modified":"2024-10-28T23:09:43+00:00","external_desc":"We are investigating an issue where users may be unable to connect to Colab Enterprise runtimes.","updates":[{"created":"2024-10-28T23:09:40+00:00","modified":"2024-10-28T23:09:44+00:00","when":"2024-10-28T23:09:40+00:00","text":"The issue with Colab Enterprise has been resolved for all affected users as of Monday, 2024-10-28 15:43 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-28T21:59:21+00:00","modified":"2024-10-28T23:09:43+00:00","when":"2024-10-28T21:59:21+00:00","text":"Summary: We are investigating an issue where users may be unable to connect to Colab Enterprise runtimes\nDescription: We are experiencing an issue with Colab Enterprise beginning at Monday, 2024-10-28 14:23 US/Pacific.\nOur engineering team has identified a potential fix and has initiated mitigation efforts.\nWe will provide an update by Monday, 2024-10-28 16:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Affected users may encounter a message stating: ‘Failed to execute
cell’\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-10-28T23:09:40+00:00","modified":"2024-10-28T23:09:44+00:00","when":"2024-10-28T23:09:40+00:00","text":"The issue with Colab Enterprise has been resolved for all affected users as of Monday, 2024-10-28 15:43 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"7Nbc1kZUvPLiihodettN","service_name":"Colab Enterprise","affected_products":[{"title":"Colab Enterprise","id":"7Nbc1kZUvPLiihodettN"}],"uri":"incidents/pDtW3Wu1cPpmSxEBK9jp","currently_affected_locations":[],"previously_affected_locations":[{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"fmTFD58FDR7Qy6qq6H6U","number":"9313833927984552571","begin":"2024-10-25T09:15:00+00:00","created":"2024-10-25T11:13:37+00:00","end":"2024-10-25T15:03:00+00:00","modified":"2024-11-01T02:41:47+00:00","external_desc":"Cloud Build - build execution is degraded in us-central1","updates":[{"created":"2024-10-31T17:26:55+00:00","modified":"2024-11-01T02:41:47+00:00","when":"2024-10-31T17:26:55+00:00","text":"# Incident Report\n## Summary\nCloud Build in the us-central1 region experienced an outage for 4 hours 30 minutes starting from 02:15 US/Pacific on Friday, 25 October 2024, resulting in builds being stuck and subsequently expiring. Beginning 06:45 US/Pacific, Cloud Build started processing requests in the us-central1 region, but experienced significant execution delays for a duration of 2 hours and 2 minutes. To our Google Cloud customers who were impacted during this outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nCloud Build uses an internal component that manages the execution of the builds by using transient worker Google Compute Engine (GCE) virtual machine (VM) instances. Each machine is used for the execution of only one build and is deleted afterwards.\nThe root cause of the issue is Cloud Build's failure to degrade gracefully upon being throttled by its GCE API quota in the us-central1 region. As a result of being throttled, the Cloud Build component responsible for managing worker pools initiated multiple retries due to an incorrect configuration, further exacerbating the quota exhaustion and completely preventing the creation of new worker instances in the region. This effectively halted build processing in the us-central1 region, leading to a regional outage.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage from an internal monitoring alert at 02:31 US/Pacific on Friday, 25 October 2024 and immediately started an investigation.\nOnce the nature and scope of the issue became clear, Google engineers tuned retry settings by 06:40 US/Pacific to bring calls to the GCE API to sustainable levels. This ensured the requests were no longer throttled and the internal system was able to create worker VM instances again to execute new builds and process the backlog.
Subsequently, at 08:47 US/Pacific, the backlog queue dropped to normal levels.\nGoogle engineers proactively increased the quota at 09:43 US/Pacific to ensure this incident does not immediately reoccur.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n- We are improving the mechanism by which Cloud Build creates and deletes GCE worker VMs to ensure graceful degradation of service in case of errors.\n- We are conducting a thorough investigation of internal quotas in all regions to ensure that we have enough capacity to execute builds from all customers at peak traffic.\nGoogle is committed to quickly and continually improving our technology and operations to prevent service disruptions. We appreciate your patience and apologize again for the impact to your organization. We thank you for your business.\n## Detailed Description of Impact\n### Cloud Build\nDuring the period of impact, customers would have noticed that all the builds (both created manually and scheduled by build triggers) in the us-central1 region were being queued but not executed, and appeared as stuck.\nOnce the incident was resolved, all builds that had exceeded the amount of time they could be queued were marked as expired. The remaining ones were eventually executed but with a delay.\n### Google Cloud Deploy\nCustomers were able to create new releases and rollouts (whether initiated manually or through automation), however, the resources became stuck in an 'in-progress' state. Essentially, the Cloud Deploy service was not operational in the us-central1 region, as no deployments were being executed.\n### Google App Engine (GAE)\nGAE version deployments in the us-central1 region saw elevated latency in their execution or failed with an “INTERNAL” error. Request traffic to existing GAE versions was not impacted by this incident.\n### Cloud Run functions\nCustomers would have seen elevated “RESOURCE_EXHAUSTED” errors or elevated latency for the create and update operations in the us-central1 region for Cloud Run functions. Request traffic to existing Cloud Run functions (1st and 2nd Gen) was not impacted by this incident.\n### Google Cloud Run\nA few customers deploying source code to Cloud Run in the us-central1 region experienced increased deployment latency or deployment failures with an “INTERNAL” error.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-25T17:09:42+00:00","modified":"2024-10-31T17:26:55+00:00","when":"2024-10-25T17:09:42+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 25 October 2024 02:15\n**Incident End:** 25 October 2024 08:03\n**Duration:** 5 hours, 48 minutes\n**Affected Services and Features:**\n* Cloud Build\n* Cloud Deploy\n**Regions/Zones:** us-central1\n**Description:**\nCloud Build and Cloud Deploy customers in the us-central1 region experienced a service disruption lasting 5 hours and 48 minutes. During this time, 100% of builds were either stuck in the queued phase and subsequently expired, or faced significant execution delays.
The root cause was identified as a bug in how Cloud Build interacts with Google Compute Engine (GCE) to provision compute resources. Due to organic traffic growth, Cloud Build exceeded its GCE quota, triggering a retry-storm instead of graceful degradation. This prevented the creation of new worker instances, leading to a regional outage as existing builds could not be processed.\nGoogle will complete a full incident report in the following days that will provide a full root cause analysis.\n**Customer Impact:**\n* Cloud Build: During the incident, builds were queued for execution but not actually executed and would have appeared as stuck. This applied to builds created both manually and via triggers. Upon this incident’s resolution, most of the builds would have exceeded the amount of time a build can remain queued and were marked as expired. These builds will not be executed anymore and must be re-created.\n* Google Cloud Deploy: New releases and rollouts (whether initiated manually or through automation) could be created, but the resources became stuck in an 'in-progress' state. Essentially, the Cloud Deploy service was not operational in the us-central1 region, as no deployments were being executed.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-25T15:06:56+00:00","modified":"2024-10-25T17:09:42+00:00","when":"2024-10-25T15:06:56+00:00","text":"The issue with Cloud Build, Google Cloud Deploy has been resolved for all affected projects as of Friday, 2024-10-25 08:06 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-25T14:18:14+00:00","modified":"2024-10-25T15:07:03+00:00","when":"2024-10-25T14:18:14+00:00","text":"Summary: Cloud Build - build execution is degraded in us-central1\nDescription: We believe the issue with Cloud Build, Google Cloud Deploy is partially resolved. Builds should now be getting executed; however, delays are possible.\nWe are still working on a full resolution.
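The root cause above is a retry storm: once GCE API quota throttling began, misconfigured retries amplified the load instead of backing off. As a minimal sketch of the alternative behavior the report calls graceful degradation, the Python below shows capped, jittered exponential backoff with a bounded retry budget. It is illustrative only: `call_with_backoff`, `QuotaExceeded`, and all parameters are hypothetical and do not reflect Cloud Build's actual implementation.

```python
# Minimal sketch of jittered exponential backoff with a bounded retry budget.
# An uncapped, un-jittered retry loop against an exhausted quota is the
# "retry-storm" failure mode named in the incident report.
import random
import time

class QuotaExceeded(Exception):
    """Hypothetical error raised when an API call is throttled."""

def call_with_backoff(api_call, max_attempts=5, base=1.0, cap=60.0):
    for attempt in range(max_attempts):
        try:
            return api_call()
        except QuotaExceeded:
            if attempt == max_attempts - 1:
                raise  # budget exhausted: fail visibly instead of retrying forever
            # Full jitter spreads callers out so retries do not synchronize.
            time.sleep(random.uniform(0, min(cap, base * 2 ** attempt)))
```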
We do not have an ETA for full resolution at this point.\nWe will provide an update by Friday, 2024-10-25 09:30 US/Pacific with current details.\nDiagnosis: Customers might still observe scheduled builds timing out, being stuck in the queued phase, or being executed with delays.\nWorkaround: Customers may try to use another Cloud region.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-10-25T14:03:13+00:00","modified":"2024-10-25T14:18:24+00:00","when":"2024-10-25T14:03:13+00:00","text":"Summary: Cloud Build - no builds are being executed in us-central1\nDescription: Mitigation work is still underway by our engineering team.\nWe will provide more information by Friday, 2024-10-25 09:00 US/Pacific.\nDiagnosis: Customers would observe scheduled builds timing out or being stuck in the queued phase.\nWorkaround: Customers may try to use another Cloud region.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-10-25T12:33:49+00:00","modified":"2024-10-25T14:03:13+00:00","when":"2024-10-25T12:33:49+00:00","text":"Summary: Cloud Build - no builds are being executed in us-central1\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Friday, 2024-10-25 07:00 US/Pacific.\nDiagnosis: Customers would observe scheduled builds timing out or being stuck in the queued phase.\nWorkaround: Customers may try to use another Cloud region.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-10-25T12:01:45+00:00","modified":"2024-10-25T12:33:49+00:00","when":"2024-10-25T12:01:45+00:00","text":"Summary: Cloud Build - no builds are being executed in us-central1\nDescription: Our resolving teams are still investigating the issue and possible mitigation plan.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Friday, 2024-10-25 06:00 US/Pacific.\nDiagnosis: Customers would observe scheduled builds timing out or being stuck in the queued phase.\nWorkaround: Customers may try to use another Cloud region.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-10-25T11:13:24+00:00","modified":"2024-10-25T12:01:45+00:00","when":"2024-10-25T11:13:24+00:00","text":"Summary: Cloud Build - no builds are being executed in us-central1\nDescription: Investigation is currently still ongoing.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Friday, 2024-10-25 05:02 US/Pacific.\nDiagnosis: Customers would observe scheduled builds timing out or being stuck in the queued phase.\nWorkaround: Customers may try to use another Cloud region.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-10-31T17:26:55+00:00","modified":"2024-11-01T02:41:47+00:00","when":"2024-10-31T17:26:55+00:00","text":"# Incident Report\n## Summary\nCloud Build in the us-central1 region experienced an outage for 4 hours 30 minutes starting from 02:15 US/Pacific on Friday, 25 October 2024, resulting in builds being stuck and subsequently expiring. Beginning 06:45 US/Pacific, Cloud Build started processing requests in the us-central1 region, but experienced significant execution delays for a duration of 2 hours and 2 minutes.
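The workaround repeated in these updates, "use another Cloud region", can be exercised at build-submission time. A hedged illustration follows, assuming the gcloud CLI is installed and the project has Cloud Build available in the fallback region; the project ID, region, and config path are placeholders:

```python
# Illustrative only: submit a build to an unaffected Cloud Build region.
import subprocess

subprocess.run(
    [
        "gcloud", "builds", "submit", ".",
        "--project=PROJECT_ID",      # placeholder project
        "--region=us-east1",         # any region other than the impacted one
        "--config=cloudbuild.yaml",  # placeholder build config
    ],
    check=True,  # raise CalledProcessError if the submission fails
)
```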
To our Google Cloud customers who were impacted during this outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nCloud Build uses an internal component that manages the execution of the builds by using transient worker Google Compute Engine (GCE) virtual machine (VM) instances. Each machine is used for the execution of only one build and is deleted afterwards.\nThe root cause of the issue is Cloud Build's failure to degrade gracefully upon being throttled by its GCE API quota in the us-central1 region. As a result of being throttled, the Cloud Build component responsible for managing worker pools initiated multiple retries due to an incorrect configuration, further exacerbating the quota exhaustion and completely preventing the creation of new worker instances in the region. This effectively halted build processing in the us-central1 region, leading to a regional outage.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage from an internal monitoring alert at 02:31 US/Pacific on Friday, 25 October 2024 and immediately started an investigation.\nOnce the nature and scope of the issue became clear, Google engineers tuned retry settings by 06:40 US/Pacific to bring calls to the GCE API to sustainable levels. This ensured the requests were no longer throttled and the internal system was able to create worker VM instances again to execute new builds and process the backlog. Subsequently, at 08:47 US/Pacific, the backlog queue dropped to normal levels.\nGoogle engineers proactively increased the quota at 09:43 US/Pacific to ensure this incident does not immediately reoccur.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n- We are improving the mechanism by which Cloud Build creates and deletes GCE worker VMs to ensure graceful degradation of service in case of errors.\n- We are conducting a thorough investigation of internal quotas in all regions to ensure that we have enough capacity to execute builds from all customers at peak traffic.\nGoogle is committed to quickly and continually improving our technology and operations to prevent service disruptions. We appreciate your patience and apologize again for the impact to your organization. We thank you for your business.\n## Detailed Description of Impact\n### Cloud Build\nDuring the period of impact, customers would have noticed that all the builds (both created manually and scheduled by build triggers) in the us-central1 region were being queued but not executed, and appeared as stuck.\nOnce the incident was resolved, all builds that had exceeded the amount of time they could be queued were marked as expired. The remaining ones were eventually executed but with a delay.\n### Google Cloud Deploy\nCustomers were able to create new releases and rollouts (whether initiated manually or through automation), however, the resources became stuck in an 'in-progress' state. Essentially, the Cloud Deploy service was not operational in the us-central1 region, as no deployments were being executed.\n### Google App Engine (GAE)\nGAE version deployments in the us-central1 region saw elevated latency in their execution or failed with an “INTERNAL” error.
Request traffic to existing GAE versions was not impacted by this incident.\n### Cloud Run functions\nCustomers would have seen elevated “RESOURCE_EXHAUSTED” errors or elevated latency for the create and update operations in the us-central1 region for Cloud Run functions. Request traffic to existing Cloud Run functions (1st and 2nd Gen) was not impacted by this incident.\n### Google Cloud Run\nA few customers deploying source code to Cloud Run in the us-central1 region experienced increased deployment latency or deployment failures with an “INTERNAL” error.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Build","id":"fw8GzBdZdqy4THau7e1y"},{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"},{"title":"Google Cloud Deploy","id":"6z5SnvJrJMJQSdJmUQjH"}],"uri":"incidents/fmTFD58FDR7Qy6qq6H6U","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"f3u3SddnPVJAY1HH7GZp","number":"7117642666590962253","begin":"2024-10-25T05:32:11+00:00","created":"2024-10-25T06:17:15+00:00","end":"2024-10-25T09:23:50+00:00","modified":"2024-10-25T09:23:59+00:00","external_desc":"Looker Studio experiencing data request slowness or timeouts","updates":[{"created":"2024-10-25T09:23:50+00:00","modified":"2024-10-25T09:24:04+00:00","when":"2024-10-25T09:23:50+00:00","text":"The issue with Looker Studio has been resolved for all affected projects as of Friday, 2024-10-25 02:23 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-25T08:19:21+00:00","modified":"2024-10-25T09:23:59+00:00","when":"2024-10-25T08:19:21+00:00","text":"Summary: Looker Studio experiencing data request slowness or timeouts\nDescription: We are experiencing an issue with Looker Studio beginning at Thursday, 2024-10-24 19:30 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-10-25 03:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted users may observe batched data requests experiencing slowness or timeouts.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-10-25T06:17:11+00:00","modified":"2024-10-25T08:19:21+00:00","when":"2024-10-25T06:17:11+00:00","text":"Summary: Looker Studio experiencing data request slowness or timeouts\nDescription: We are experiencing an issue with Looker Studio beginning at Thursday, 2024-10-24 19:30 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-10-25 01:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted users may observe batched data requests experiencing slowness or timeouts.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-10-25T09:23:50+00:00","modified":"2024-10-25T09:24:04+00:00","when":"2024-10-25T09:23:50+00:00","text":"The issue with Looker Studio has been resolved for all affected projects as of Friday, 2024-10-25 02:23 US/Pacific.\nWe thank you for your patience while we worked on resolving the
issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"kEYNqRYFXXHxP9QeFJ1d","service_name":"Looker Studio","affected_products":[{"title":"Looker Studio","id":"kEYNqRYFXXHxP9QeFJ1d"}],"uri":"incidents/f3u3SddnPVJAY1HH7GZp","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"e3yQSE1ysCGjCVEn2q1h","number":"1637890976653413021","begin":"2024-10-24T01:22:00+00:00","created":"2024-10-24T02:56:31+00:00","end":"2024-10-24T09:01:00+00:00","modified":"2024-10-31T15:58:49+00:00","external_desc":"Multiple GCP services impacted in the europe-west3-c zone","updates":[{"created":"2024-10-31T15:50:31+00:00","modified":"2024-10-31T15:58:49+00:00","when":"2024-10-31T15:50:31+00:00","text":"## **Incident Report**\n## **Summary**\nOn Wednesday, 23 October 2024, a power failure occurred in a single data center within the europe-west3 region. This failure degraded the building’s cooling infrastructure, leading to a partial shutdown of the europe-west-c zone to avoid thermal damage and causing Virtual Machines (VMs) to go offline. The event duration was 7 hours and 39 minutes and impacted various Google Cloud services in the affected zone.\nTo our Google Cloud customers whose businesses were impacted during this outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer, and we are taking immediate steps to improve the platform’s performance and resilience.\n##\n## **Root Cause**\nOn 23 October 2024, at 18:22 US/Pacific time, an electrical arc flash occurred in one of the europe-west3-c zone's power distribution units, resulting in a partial power outage. This incident also affected the cooling infrastructure, leading to a rise in ambient temperature. To prevent damage, some IT equipment at the facility was shut down, causing Virtual Machines (VMs) in the datacenter to go offline and impacting multiple cloud services in the zone.\n##\n## **Remediation and Prevention**\nGoogle engineers were alerted to VM failures in europe-west3-c zone at 18:39 US/Pacific on 23 October 2024, and immediately launched an investigation. Upon understanding the issue's nature and scope, engineers took precautionary measures to ensure equipment safety by shutting it down and diverting traffic away from the affected infrastructure at 20:43 US/Pacific. Power was manually restored at 21:44 US/Pacific by transferring the load away from the failed components. Cloud traffic was gradually reintroduced to the datacenter at 00:30 US/Pacific on 24 October, 2024\\. Full restoration of all cloud services in the affected zone was completed by 2:09 US/Pacific.\nWe apologize for the length and severity of this incident. We are taking immediate steps to prevent a recurrence and improve reliability in the future. To ensure continued high availability in the future, Google are pursuing the following actions:\n* Complete root cause investigation on the arc flash and complete repairs of the affected power distribution unit.\n* Ensure that the underlying root cause(s) of the arc flash are not present in any other data centers, and remediating any risks which are discovered in the analysis of the event.\n* Further hardening GCP’s Persistent Disk services to prevent any regional impact during single-zone issues. 
This work is anticipated to be fully rolled out in the coming weeks.\n## **Detailed Description of Impact**\n**Google Compute Engine (GCE) and Persistent Disk (PD):**\nCustomers experienced increased latency and errors when creating new GCE instances and attaching Persistent Disk volumes to existing instances in all europe-west3 zones from 23 October 2024 19:12 to 23:34 US/Pacific. The elevated error rates resulted from problems in access control services for multiple zones that were caused by a misconfiguration and triggered by the outage in europe-west3-c. To prevent a recurrence of this issue, engineers have hardened this component against zonal outages in europe-west3.\nAdditionally, a small percentage of Persistent Disk volumes in europe-west3-c were unavailable from 23 October 2024 18:24 to 24 October 2024 00:58 US/Pacific.\n**Google Cloud Pub/Sub:**\nA small percentage of customers may have experienced errors in API calls from 23 October 2024 18:25 to 18:41 US/Pacific and elevated latency for push subscriptions from 23 October 2024 22:57 to 24 October 2024 00:11 US/Pacific in europe-west3-c.\n**Google Cloud Dataflow:**\nA small percentage of customers may have experienced higher latencies for both batch and streaming jobs from 23 October 2024 19:05 to 23:55 US/Pacific in europe-west3.\n**Dataproc:**\nA small percentage of customers may have experienced errors in all API calls from 23 October 2024 19:12 to 22:00 US/Pacific in europe-west3 and a few customers might have experienced errors in all API calls except the create cluster API call through 24 October 2024 23:47 US/Pacific.\n**Cloud Build:**\nA small percentage of customers may have experienced errors in API calls from 23 October 2024 20:52 to 24 October 2024 01:11 US/Pacific in europe-west3.\n**Google Kubernetes Engine (GKE):**\nDuring the period of the outage, a small percentage of customers may have experienced errors in API calls to europe-west3. A small percentage of customers may have additionally experienced unavailability of their Kubernetes Control Planes in europe-west3 or europe-west3-c and/or Kubernetes nodes in europe-west3-c.\n**Vertex AI Batch Prediction:**\nA small percentage of customers may have experienced errors in batch prediction jobs from 23 October 2024 19:45 to 23:30 US/Pacific in europe-west3.\n**Cloud SQL:**\nCustomers creating or updating instances in europe-west3 may have experienced errors from 23 October 2024 18:35 US/Pacific to 24 October 2024 23:28 US/Pacific. Users with self-service or scheduled maintenance on instances in europe-west3 during this period may have experienced a failed update causing downtime.\n----------","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-24T17:49:54+00:00","modified":"2024-10-31T15:50:31+00:00","when":"2024-10-24T17:49:54+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 23 October 2024 18:30\n**Incident End:** 24 October 2024 02:09\n**Duration:** 7 hours, 39 minutes\n**Affected Services and Features:**\n* Persistent Disk (PD)\n* Google Compute Engine\n* Google Cloud Pub/Sub\n* Google Cloud Dataflow\n* Dataproc\n* Cloud Build\n* Google Kubernetes Engine (GKE)\n* Vertex AI Batch Prediction\n**Regions/Zones:** Region europe-west3 / Zone europe-west3-c\n**Description:**\nMultiple Google Cloud products were impacted in the europe-west3-c zone for a duration of 7 hours, 39 minutes. From preliminary analysis, the root cause of the issue was a power failure and cooling issue that led to a fraction of a zone being powered down, causing services to be degraded.\nGoogle engineers implemented a fix to return the datacenter to full operation and this mitigated the issue.\nGoogle will complete a full incident report in the following days that will provide a full root cause analysis.\n**Customer Impact:**\n* Persistent Disk (PD): Customers may have observed stuck VM creation and PD control plane errors.\n* Google Compute Engine: Customers lost access to several VMs and disks in the europe-west3-c zone. For the other two zones in the same region, less than 1% of the operations that touch instance and disk resources experienced internal errors.\n* Google Cloud Pub/Sub: Customers may have experienced higher latency for push subscriptions as well as a brief period of elevated Publish unavailability.\n* Google Cloud Dataflow: Batch jobs: some existing jobs experienced delays when scaling workers. Streaming jobs: jobs may not have progressed or scaled up workers.\n* Dataproc: Dataproc cluster operations failed.
There was an uptick in cluster creation and deletion errors during the initial stages of the incident, which was mitigated on the Dataproc side by quickly blocking the europe-west3-c zone.\n* Cloud Build: Builds in Custom Worker pools took a long time to start.\n* Google Kubernetes Engine (GKE): Customers were unable to create new Google Kubernetes Engine (GKE) cluster nodes in the europe-west3-c zone.\n* Vertex AI Batch Prediction: Vertex batch prediction jobs failed with an \"Unable to prepare an infrastructure for serving within time\" error.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-24T09:14:57+00:00","modified":"2024-10-24T17:49:54+00:00","when":"2024-10-24T09:14:57+00:00","text":"The issue with Google Cloud Pub/Sub, Google Compute Engine, Persistent Disk, Google Cloud Dataflow, Google Cloud Dataproc, Google Kubernetes Engine, Cloud Build, Vertex AI Batch Prediction has been resolved for all affected users as of Thursday, 2024-10-24 02:09 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-24T08:25:13+00:00","modified":"2024-10-24T09:15:16+00:00","when":"2024-10-24T08:25:13+00:00","text":"Summary: Multiple GCP services impacted in the europe-west3-c zone\nDescription: We are experiencing an issue with multiple GCP services including Google Compute Engine, Persistent Disk, Google Cloud Dataflow in the europe-west3-c zone due to a power and cooling issue.\nMitigation work is still underway by our engineering team and we do not have an ETA at the moment.\nWe will provide more information by Thursday, 2024-10-24 02:30 US/Pacific.\nDiagnosis: The majority of the services impact is now limited to the zonal level. Vertex AI Batch Prediction continues to be impacted at the regional level.\nServices impacted include:\n**Google Compute Engine:**\nThe loss of power has led to capacity failure in the region. Customers may experience:\nA percentage of Virtual Machines (VMs) being terminated and not available until power is restored.\nA percentage of VMs may have lost access to their Persistent Disk and may be crashlooping.\nA percentage of regional Persistent Disks may be running in a degraded state.\nThe incident is affecting the Compute API in the following ways:\nCreation of new VMs or disks in europe-west3-c may fail.\nA percentage of customers attempting to consume VM reservations will be unable to do so.\nA percentage of customers who would like to delete their previously running VMs in europe-west3 can delete VMs via the console or GCE APIs. However, there may be a delay in processing these deletions. All deletions will be fully processed when issues in europe-west3-c are resolved.\n**Google Kubernetes Engine:**\nThe Google Kubernetes Engine nodes in the impacted location may be inaccessible and creation of new nodes may fail.\n**Google Cloud Dataflow:**\nSome existing batch jobs may experience delays when scaling workers.
In addition streaming jobs may not be progressing or scaling up workers.\n**Google Cloud Dataproc:**\nWhile the existing clusters are not impacted, creation of new clusters may fail.\n**Cloud Build:**\nBuilds in Custom Worker pools take a long time to start.\n**Vertex AI Batch Prediction:**\nA Vertex batch prediction job may fail with an error, \"Unable to prepare an infrastructure for serving within time\".\n**Google Cloud Pub/Sub:**\nThere is no ongoing impact for the users at the moment.\n**Workaround:**\n1. If you are impacted, please migrate the workload or operations from the europe-west3-c zone to other available zones or regions.\n2. For the customers with a degraded Regional Persistent Disk, we recommend you take regular snapshots of your disk.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]},{"created":"2024-10-24T07:26:35+00:00","modified":"2024-10-24T08:25:13+00:00","when":"2024-10-24T07:26:35+00:00","text":"Summary: Multiple GCP services impacted in the europe-west3-c zone\nDescription: We are experiencing an issue with multiple GCP services including Google Compute Engine, Persistent Disk, Google Cloud Dataflow in the europe-west3-c zone due to power and a cooling issue.\nMitigation work is still underway by our engineering team and we do not have an ETA at the moment.\nWe will provide more information by Thursday, 2024-10-24 01:30 US/Pacific.\nDiagnosis: The majority of the services impact is now limited to the zonal level. Vertex AI Batch Prediction continues to be impacted at the regional level.\nServices impacted include:\n**Google Compute Engine:**\nThe loss of power has led to capacity failure in the region. Customers may experience:\nA percentage of Virtual Machines (VMs) being terminated and not available until power is restored.\nA percentage of VMs may have lost access to their Persistent Disk and may be crashlooping.\nA percentage of regional Persistent Disks may be running in a degraded state.\nThe incident is affecting the Compute API in the following ways:\nCreation of new VMs or disks in europe-west3-c may fail.\nA percentage of customers attempting to consume VM reservations will be unable to do so.\nA percentage of customers who would like to delete their previously running VMs in europe-west3, can delete VMs via the console or GCE APIs. However, there may be a delay in processing these deletions. All deletions will be fully processed when issues in europe-west3-c are resolved.\n**Google Kubernetes Engine:**\nThe Google Kubernetes Engine nodes in the impacted location may be inaccessible and creation of new nodes may fail.\n**Google Cloud Dataflow:**\nSome existing batch jobs may experience delays when scaling workers. In addition streaming jobs may not be progressing or scaling up workers.\n**Google Cloud Dataproc:**\nWhile the existing clusters are not impacted, creation of new clusters may fail.\n**Cloud Build:**\nBuilds in Custom Worker pools take a long time to start.\n**Vertex AI Batch Prediction:**\nA Vertex batch prediction job may fail with an error, \"Unable to prepare an infrastructure for serving within time\".\n**Google Cloud Pub/Sub:**\nThere is no ongoing impact for the users at the moment.\nWorkaround:\n1. If you are impacted, please migrate the workload or operations from the europe-west3-c zone to other available zones or regions.\n2. 
For the customers with a degraded Regional Persistent Disk, we recommend you take regular snapshots of your disk.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]},{"created":"2024-10-24T06:55:16+00:00","modified":"2024-10-24T07:26:41+00:00","when":"2024-10-24T06:55:16+00:00","text":"Summary: Multiple GCP services impacted in the europe-west3-c zone\nDescription: We are experiencing an issue with multiple GCP services including Google Compute Engine, Persistent Disk, Google Cloud Dataflow in the europe-west3-c zone due to power and a cooling issue.\nMitigation work is still underway by our engineering team and we do not have an ETA at the moment.\nWe will provide more information by Thursday, 2024-10-24 01:30 US/Pacific.\nDiagnosis: The impact is now determined to be back at zonal level. Regional level impact is mitigated at the moment.\nMultiple services are impacted in the europe-west3-c zone:\nGoogle Compute Engine:\nThe loss of power has led to capacity failure in the region. Customers may experience:\nA percentage of Virtual Machines (VMs) being terminated and not available until power is restored.\nA percentage of VMs may have lost access to their Persistent Disk and may be crashlooping.\nA percentage of regional Persistent Disks may be running in a degraded state.\nThe incident is affecting the Compute API in the following ways:\nCreation of new VMs or disks in europe-west3-c may fail.\nA percentage of customers attempting to consume VM reservations will be unable to do so.\nA percentage of customers who would like to delete their previously running VMs in europe-west3, can delete VMs via the console or GCE APIs. However, there may be a delay in processing these deletions. All deletions will be fully processed when issues in europe-west3-c are resolved.\nGoogle Kubernetes Engine:\nThe Google Kubernetes Engine nodes in the impacted location may be inaccessible and creation of new nodes may fail.\nGoogle Cloud Dataflow:\nSome existing batch jobs may experience delays when scaling workers. In addition streaming jobs may not be progressing or scaling up workers.\nGoogle Cloud Dataproc:\nWhile the existing clusters are not impacted, creation of new clusters may fail.\nCloud Build:\nBuilds in Custom Worker pools take a long time to start.\nGoogle Cloud Pub/Sub:\nThere is no ongoing impact for the users at the moment.\nWorkaround: 1. If you are impacted, please migrate the workload or operations from the europe-west3-c zone to other available zones or regions.\n2. For the customers with a degraded regional Persistent Disk, we recommend you take regular snapshots of your disk.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]},{"created":"2024-10-24T06:14:36+00:00","modified":"2024-10-24T06:55:21+00:00","when":"2024-10-24T06:14:36+00:00","text":"Summary: Multiple GCP services impacted in the europe-west3 region\nDescription: We are experiencing an issue with multiple GCP services including Google Compute Engine, Persistent Disk, Google Cloud Dataflow in the europe-west3 region due to power and a cooling issue.\nMitigation work is still underway by our engineering team and we do not have an ETA at the moment.\nWe will provide more information by Thursday, 2024-10-24 01:00 US/Pacific.\nDiagnosis: Multiple services are impacted in the europe-west3 region:\n**Google Compute Engine:**\nThe loss of power has led to capacity failure in the region. 
Customers may experience:\nA percentage of Virtual Machines (VMs) being terminated and not available until power is restored.\nA percentage of VMs may have lost access to their Persistent Disk and may be crashlooping.\nA percentage of regional Persistent Disks may be running in a degraded state.\nThe incident is affecting the Compute API in the following ways:\nCreation of new VMs or disks in europe-west3 may fail.\nA percentage of customers attempting to consume VM reservations will be unable to do so.\nA percentage of customers who would like to delete their previously running VMs in europe-west3 can delete VMs via the console or GCE APIs. However, there may be a delay in processing these deletions. All deletions will be fully processed when issues in europe-west3 are resolved.\n**Google Kubernetes Engine:**\nThe Google Kubernetes Engine nodes in the impacted location may be inaccessible and creation of new nodes may fail.\n**Google Cloud Dataflow:**\nSome existing batch jobs may experience delays when scaling workers. In addition, streaming jobs may not be progressing or scaling up workers.\n**Google Cloud Dataproc:**\nWhile the existing clusters are not impacted, creation of new clusters may fail.\n**Google Cloud Pub/Sub:**\nThere is no ongoing impact for the users at the moment.\nWorkaround:\n1. If you are impacted, please migrate the workload or operations from the europe-west3 region to other available regions.\n2. For the customers with a degraded regional Persistent Disk, we recommend you take regular snapshots of your disk.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]},{"created":"2024-10-24T05:59:22+00:00","modified":"2024-10-24T06:14:36+00:00","when":"2024-10-24T05:59:22+00:00","text":"Summary: Multiple GCP services impacted in europe-west3-c zone\nDescription: We are experiencing an issue with multiple GCP services including Google Compute Engine, Persistent Disk, Google Cloud Dataflow in the europe-west3-c zone due to a power and cooling issue.\nMitigation work is still underway by our engineering team and we do not have an ETA at the moment.\nWe will provide more information by Thursday, 2024-10-24 01:00 US/Pacific.\nDiagnosis: Multiple services are impacted in europe-west3-c:\n**Google Compute Engine:**\nThe loss of power has led to capacity failure in the zone. Customers may experience:\nA percentage of Virtual Machines (VMs) being terminated and not available until power is restored.\nA percentage of VMs may have lost access to their Persistent Disk and may be crashlooping.\nA percentage of regional Persistent Disks may be running in a degraded state.\nThe incident is affecting the Compute API in the following ways:\nCreation of new VMs or disks in europe-west3-c may fail.\nA percentage of customers attempting to consume VM reservations will be unable to do so.\nA percentage of customers who would like to delete their previously running VMs in europe-west3-c can delete VMs via the console or GCE APIs. However, there may be a delay in processing these deletions. All deletions will be fully processed when issues in europe-west3-c are resolved.\n**Google Kubernetes Engine:**\nThe Google Kubernetes Engine nodes in the impacted location may be inaccessible and creation of new nodes may fail.\n**Google Cloud Dataflow:**\nSome existing batch jobs may experience delays when scaling workers.
In addition streaming jobs may not be progressing or scaling up workers.\n**Google Cloud Dataproc:**\nWhile the existing clusters are not impacted, creation of new clusters may fail.\n**Google Cloud Pub/Sub:**\nThere is no ongoing impact for the users at the moment.\nWorkaround:\n1. If you are impacted, please migrate the workload or operations from the europe-west3-c zone to other available zones or regions.\n2. For the customers with a degraded regional Persistent Disk, we recommend you take regular snapshots of your disk.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]},{"created":"2024-10-24T04:59:44+00:00","modified":"2024-10-24T05:59:22+00:00","when":"2024-10-24T04:59:44+00:00","text":"Summary: Multiple GCP services impacted in europe-west3-c zone\nDescription: We are experiencing an issue with Google Cloud Pub/Sub, Google Compute Engine, Persistent Disk, Google Cloud Dataflow.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-10-23 23:00 US/Pacific with current details.\nDiagnosis: Multiple services are impacted in europe-west3-c:\nGoogle Compute Engine: Impacted users may observe VM creation failing and some instances may not be available for operations in this zone.\nGoogle Kubernetes Engine: The Google Kubernetes Engine nodes in impacted location might be inaccessible. Also, creation of new node may fail.\nPersistent Disk: The persistent disk instances might be unreachable for operations.\nGoogle Cloud Dataflow: Some existing batch jobs may experience delays when scaling workers. Also, the streaming jobs may not be progressing or scaling up workers.\nGoogle Cloud Dataproc: While the existing clusters are not impacted, creating new clusters may fail.\nGoogle Cloud Pub/Sub: There is no ongoing impact for the users at the moment.\nWorkaround: If you are impacted, please migrate the workload or operations from the europe-west3-c zone to other available zones or regions.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]},{"created":"2024-10-24T04:27:13+00:00","modified":"2024-10-24T04:59:49+00:00","when":"2024-10-24T04:27:13+00:00","text":"Summary: Multiple GCP services impacted in europe-west3-c zone\nDescription: We are experiencing an issue with Google Cloud Pub/Sub, Google Compute Engine, Persistent Disk, Google Cloud Dataflow.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-10-23 23:00 US/Pacific with current details.\nDiagnosis: Multiple services are impacted in europe-west3-c:\nGoogle Compute Engine: Impacted users may observe VM creation failing and some instances may not be available for operations in this zone.\nPersistent Disk: The persistent disk instances might be unreachable for operations.\nGoogle Cloud Dataflow: While the existing clusters are not impacted, creating new clusters may fail.\nGoogle Cloud Dataproc: While the existing clusters are not impacted, creating new clusters may fail.\nGoogle Cloud Pub/Sub: There is no ongoing impact for the users at the moment.\nWorkaround: If you are impacted, please migrate the workload or operations from the europe-west3-c zone to other available zones or regions.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]},{"created":"2024-10-24T03:32:23+00:00","modified":"2024-10-24T04:27:13+00:00","when":"2024-10-24T03:32:23+00:00","text":"Summary: Multiple 
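The recurring workaround for degraded regional Persistent Disks, taking regular snapshots, can be scripted. The sketch below shells out to the gcloud CLI from Python; the disk name, project, region, and snapshot name are placeholders, and this is an assumed usage pattern rather than text from the incident updates:

```python
# Illustrative only: snapshot a regional Persistent Disk as a safeguard.
import subprocess

subprocess.run(
    [
        "gcloud", "compute", "disks", "snapshot", "my-regional-disk",
        "--project=PROJECT_ID",                    # placeholder project
        "--region=europe-west3",                   # regional disks use --region, zonal use --zone
        "--snapshot-names=my-regional-disk-backup",
    ],
    check=True,  # raise CalledProcessError if the snapshot request fails
)
```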
GCP services impacted in europe-west3-c zone\nDescription: We are experiencing an issue with Google Cloud Pub/Sub, Google Compute Engine, Persistent Disk, Google Cloud Dataflow.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-10-23 21:30 US/Pacific with current details.\nDiagnosis: Multiple services are impacted in europe-west3-c:\nGoogle Compute Engine: Impacted users may observe VM creation failing and some instances may not be available for operations in this zone.\nPersistent Disk: The persistent disk instances might be unreachable for operations.\nGoogle Cloud Dataflow: Some existing batch jobs may experience delays when scaling workers. Also, streaming jobs may not be progressing or scaling up workers.\nGoogle Cloud Dataproc: While the existing clusters are not impacted, creating new clusters may fail.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]},{"created":"2024-10-24T02:56:27+00:00","modified":"2024-10-24T03:32:27+00:00","when":"2024-10-24T02:56:27+00:00","text":"Summary: Multiple GCP services impacted in europe-west3-c zone\nDescription: We are experiencing an issue with Google Cloud Pub/Sub, Google Compute Engine, Persistent Disk beginning at Wednesday, 2024-10-23 18:24 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-10-23 20:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]}],"most_recent_update":{"created":"2024-10-31T15:50:31+00:00","modified":"2024-10-31T15:58:49+00:00","when":"2024-10-31T15:50:31+00:00","text":"## **Incident Report**\n## **Summary**\nOn Wednesday, 23 October 2024, a power failure occurred in a single data center within the europe-west3 region. This failure degraded the building’s cooling infrastructure, leading to a partial shutdown of the europe-west3-c zone to avoid thermal damage and causing Virtual Machines (VMs) to go offline. The event duration was 7 hours and 39 minutes and impacted various Google Cloud services in the affected zone.\nTo our Google Cloud customers whose businesses were impacted during this outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer, and we are taking immediate steps to improve the platform’s performance and resilience.\n## **Root Cause**\nOn 23 October 2024, at 18:22 US/Pacific time, an electrical arc flash occurred in one of the europe-west3-c zone's power distribution units, resulting in a partial power outage. This incident also affected the cooling infrastructure, leading to a rise in ambient temperature. To prevent damage, some IT equipment at the facility was shut down, causing Virtual Machines (VMs) in the datacenter to go offline and impacting multiple cloud services in the zone.\n## **Remediation and Prevention**\nGoogle engineers were alerted to VM failures in the europe-west3-c zone at 18:39 US/Pacific on 23 October 2024, and immediately launched an investigation. Upon understanding the issue's nature and scope, engineers took precautionary measures to ensure equipment safety by shutting it down and diverting traffic away from the affected infrastructure at 20:43 US/Pacific. Power was manually restored at 21:44 US/Pacific by transferring the load away from the failed components.
Cloud traffic was gradually reintroduced to the datacenter at 00:30 US/Pacific on 24 October 2024. Full restoration of all cloud services in the affected zone was completed by 2:09 US/Pacific.\nWe apologize for the length and severity of this incident. We are taking immediate steps to prevent a recurrence and improve reliability in the future. To ensure continued high availability in the future, Google is pursuing the following actions:\n* Complete the root cause investigation on the arc flash and complete repairs of the affected power distribution unit.\n* Ensure that the underlying root cause(s) of the arc flash are not present in any other data centers, and remediate any risks discovered in the analysis of the event.\n* Further harden GCP’s Persistent Disk services to prevent any regional impact during single-zone issues. This work is anticipated to be fully rolled out in the coming weeks.\n## **Detailed Description of Impact**\n**Google Compute Engine (GCE) and Persistent Disk (PD):**\nCustomers experienced increased latency and errors when creating new GCE instances and attaching Persistent Disk volumes to existing instances in all europe-west3 zones from 23 October 2024 19:12 to 23:34 US/Pacific. The elevated error rates resulted from problems in access control services for multiple zones that were caused by a misconfiguration and triggered by the outage in europe-west3-c. To prevent a recurrence of this issue, engineers have hardened this component against zonal outages in europe-west3.\nAdditionally, a small percentage of Persistent Disk volumes in europe-west3-c were unavailable from 23 October 2024 18:24 to 24 October 2024 00:58 US/Pacific.\n**Google Cloud Pub/Sub:**\nA small percentage of customers may have experienced errors in API calls from 23 October 2024 18:25 to 18:41 US/Pacific and elevated latency for push subscriptions from 23 October 2024 22:57 to 24 October 2024 00:11 US/Pacific in europe-west3-c.\n**Google Cloud Dataflow:**\nA small percentage of customers may have experienced higher latencies for both batch and streaming jobs from 23 October 2024 19:05 to 23:55 US/Pacific in europe-west3.\n**Dataproc:**\nA small percentage of customers may have experienced errors in all API calls from 23 October 2024 19:12 to 22:00 US/Pacific in europe-west3 and a few customers might have experienced errors in all API calls except the create cluster API call through 24 October 2024 23:47 US/Pacific.\n**Cloud Build:**\nA small percentage of customers may have experienced errors in API calls from 23 October 2024 20:52 to 24 October 2024 01:11 US/Pacific in europe-west3.\n**Google Kubernetes Engine (GKE):**\nDuring the period of the outage, a small percentage of customers may have experienced errors in API calls to europe-west3. A small percentage of customers may have additionally experienced unavailability of their Kubernetes Control Planes in europe-west3 or europe-west3-c and/or Kubernetes nodes in europe-west3-c.\n**Vertex AI Batch Prediction:**\nA small percentage of customers may have experienced errors in batch prediction jobs from 23 October 2024 19:45 to 23:30 US/Pacific in europe-west3.\n**Cloud SQL:**\nCustomers creating or updating instances in europe-west3 may have experienced errors from 23 October 2024 18:35 US/Pacific to 24 October 2024 23:28 US/Pacific.
Users with self-service or scheduled maintenance on instances in europe-west3 during this period may have experienced a failed update causing downtime.\n----------","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Build","id":"fw8GzBdZdqy4THau7e1y"},{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"},{"title":"Google Cloud Dataflow","id":"T9bFoXPqG8w8g1YbWTKY"},{"title":"Google Cloud Dataproc","id":"yjXrEg3Yvy26BauMwr69"},{"title":"Google Cloud Pub/Sub","id":"dFjdLh2v6zuES6t9ADCB"},{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"},{"title":"Persistent Disk","id":"SzESm2Ux129pjDGKWD68"},{"title":"Vertex AI Batch Prediction","id":"yVW8aiPWipjd3j67XzDL"}],"uri":"incidents/e3yQSE1ysCGjCVEn2q1h","currently_affected_locations":[],"previously_affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]},{"id":"2GcJSJ2mZp3xw91PbJ87","number":"16814417597939046294","begin":"2024-10-22T21:08:00+00:00","created":"2024-11-04T21:17:49+00:00","end":"2024-11-05T04:30:00+00:00","modified":"2024-11-05T19:34:34+00:00","external_desc":"Large Vertex Model Garden Deployment may experience failures.","updates":[{"created":"2024-11-05T19:31:54+00:00","modified":"2024-11-05T19:34:34+00:00","when":"2024-11-05T19:31:54+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 22 October 2024, 14:08\n**Incident End:** 4 November 2024, 20:30\n**Duration:** 13 days, 6 hours, 22 minutes\n**Affected Services and Features:**\nVertex AI Online Prediction (Vertex Model Garden Deployments)\n**Regions/Zones:** All regions except asia-southeast1, europe-west4, us-central1, us-east1, us-east4\n**Description:**\nDeployment of large models (those that require more than 100GB of disk size) in Vertex AI Online Prediction (Vertex Model Garden Deployments) failed in most of the regions for a duration of up to 13 days, 6 hours, 22 minutes starting on Tuesday, 22 October 2024 at 14:08 US/Pacific.\nFrom preliminary analysis, the root cause of the issue is an internal storage provisioning configuration error that was implemented as part of a recent change.\nGoogle engineers mitigated the impact by rolling back the configuration change that caused the issue.\n**Customer Impact:**\n- Customers would have received errors stating “Model server never became ready”, while performing deployments during the period of impact.\n**Additional details:**\n- As a workaround, customers were able to deploy in one of the non-impacted regions noted above.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-05T04:52:55+00:00","modified":"2024-11-05T19:31:54+00:00","when":"2024-11-05T04:52:55+00:00","text":"The issue with Vertex AI Online Prediction (Large Vertex Model Garden deployment failure) has been resolved for all affected users as of Monday, 2024-11-04 20:28 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-11-05T02:52:41+00:00","modified":"2024-11-05T04:52:58+00:00","when":"2024-11-05T02:52:41+00:00","text":"Summary: Large Vertex Model Garden Deployment may experience failures.\nDescription: Our engineering team is continuing to work on mitigating the issue.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2024-11-04 21:30 US/Pacific.\nDiagnosis: Customers may experience failures with large Vertex Model Garden deployments when using L4 Graphics Processing Unit (GPU).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan 
(europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-11-04T21:46:08+00:00","modified":"2024-11-05T02:52:43+00:00","when":"2024-11-04T21:46:08+00:00","text":"Summary: Large Vertex Model Garden Deployment may experience failures.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2024-11-04 19:00 US/Pacific.\nDiagnosis: Customers may experience failures with large Vertex Model Garden deployments when using L4 Graphics Processing Unit (GPU).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas 
(us-west4)","id":"us-west4"}]},{"created":"2024-11-04T21:17:46+00:00","modified":"2024-11-04T21:46:08+00:00","when":"2024-11-04T21:17:46+00:00","text":"Summary: Large Vertex Model Garden Deployment Failures\nDescription: We are experiencing an issue with Vertex Model Garden Deployments.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-11-04 14:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers may experience failures with large Vertex Model Garden deployments (greater than 100GB) when deployed on a GKE Autopilot cluster.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-11-05T19:31:54+00:00","modified":"2024-11-05T19:34:34+00:00","when":"2024-11-05T19:31:54+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 22 October 2024, 14:08\n**Incident End:** 4 November 2024, 20:30\n**Duration:** 13 days, 6 hours, 22 minutes\n**Affected Services and Features:**\nVertex AI Online Prediction (Vertex Model Garden Deployments)\n**Regions/Zones:** All regions except asia-southeast1, europe-west4, us-central1, us-east1, us-east4\n**Description:**\nDeployment of large models (those that require more than 100GB of disk size) in Vertex AI Online Prediction (Vertex Model Garden Deployments) failed in most of the regions for a duration of up to 13 days, 6 hours, 22 minutes starting on Tuesday, 22 October 2024 at 14:08 US/Pacific.\nFrom preliminary analysis, the root cause of the issue is an internal storage provisioning configuration error that was implemented as part of a recent change.\nGoogle engineers mitigated the impact by rolling back the configuration change that caused the issue.\n**Customer Impact:**\n- Customers would have received errors stating “Model server never became ready”, while performing deployments during the period of impact.\n**Additional details:**\n- As a workaround, customers were able to deploy in one of the non-impacted regions noted above.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"},{"title":"Vertex AI Online Prediction","id":"sdXM79fz1FS6ekNpu37K"}],"uri":"incidents/2GcJSJ2mZp3xw91PbJ87","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas 
(us-west4)","id":"us-west4"}]},{"id":"cyHovCjtDs1RPbZAKsme","number":"13698823919325549484","begin":"2024-10-17T02:36:00+00:00","created":"2024-10-17T04:39:41+00:00","end":"2024-10-17T09:45:00+00:00","modified":"2024-10-17T21:27:29+00:00","external_desc":"Case creation may fail intermittently with an error","updates":[{"created":"2024-10-17T21:21:37+00:00","modified":"2024-10-17T21:27:29+00:00","when":"2024-10-17T21:21:37+00:00","text":"## \\# Mini Incident Report\nWe apologize for the inconvenience this service disruption may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support or to Google Workspace Support using help article https://support.google.com/a/answer/1047213.\n(All Times US/Pacific)\n**Incident Start:** 16 October, 2024 19:36\n**Incident End:** 17 October, 2024 02:45\n**Duration:** 7 hours, 9 minutes\n**Affected Services and Features:**\nGoogle Cloud and Workspace Support - Case creation, Case updates, Chat support\n**Regions/Zones:** Global\n**Description:**\nGoogle Cloud and Workspace Support encountered intermittent disruptions to case creation and management for a period of 7 hours and 9 minutes.\nFrom preliminary analysis, we believe the cause of the issue was a recent change to Google Support’s ticket persistence layer. The change granted a subsystem unbounded resource consumption, impacting user requests. Google engineering will perform a thorough root cause analysis and ensure that appropriate measures are taken to prevent a recurrence of the issue.\n**Customer Impact:**\n* Customers impacted by this incident would have noticed delays, timeouts, and other errors during case creation and while performing case updates.\n* Chat support also experienced intermittent disconnects.\n-----","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-17T12:44:23+00:00","modified":"2024-10-17T21:21:37+00:00","when":"2024-10-17T12:44:23+00:00","text":"The issue with Google Cloud Support has been resolved for all affected users as of Thursday, 2024-10-17 04:19 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-17T11:16:54+00:00","modified":"2024-10-17T12:44:33+00:00","when":"2024-10-17T11:16:54+00:00","text":"Summary: Case creation may fail intermittently with an error\nDescription: Our engineering team has determined that further investigation is required to mitigate the issue.\nWe will provide an update by Thursday, 2024-10-17 08:00 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see issue with case creation and other case updates.\nWorkaround: Retrying operations may work. 
For Case creation in particular, customers will be redirected to a Contact Us Form (CUF) after multiple failed case creation attempts.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-10-17T09:27:01+00:00","modified":"2024-10-17T11:16:54+00:00","when":"2024-10-17T09:27:01+00:00","text":"Summary: Case creation may fail intermittently with an error\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-10-17 05:00 US/Pacific.\nDiagnosis: Customers impacted by this issue may see issues with case creation and other case updates.\nWorkaround: Retrying operations may work. For Case creation in particular, customers will be redirected to a Contact Us Form (CUF) after multiple failed case creation attempts.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-10-17T07:19:54+00:00","modified":"2024-10-17T09:27:11+00:00","when":"2024-10-17T07:19:54+00:00","text":"Summary: Case creation may fail intermittently with an error\nDescription: We are experiencing an issue with Google Cloud Support.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-10-17 02:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see issues with case creation and other case updates\nWorkaround: Retrying operations may work. For Case creation in particular, customers will be redirected to a Contact Us Form (CUF) after multiple failed case creation attempts.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-10-17T05:38:50+00:00","modified":"2024-10-17T07:19:54+00:00","when":"2024-10-17T05:38:50+00:00","text":"Summary: Case creation may fail intermittently with an error\nDescription: We are experiencing an issue with Google Cloud Support.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-10-17 00:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see issues with case creation and other case updates\nWorkaround: Retrying operations may work. For Case creation in particular, customers will be redirected to a Contact Us Form (CUF) after multiple failed case creation attempts.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-10-17T04:39:37+00:00","modified":"2024-10-17T05:38:50+00:00","when":"2024-10-17T04:39:37+00:00","text":"Summary: Case creation may fail intermittently with an error\nDescription: We are experiencing an issue with Google Cloud Support.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-10-16 23:00 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see issues with case creation and other case updates\nWorkaround: Retrying operations may work. For Case creation in particular, customers will be redirected to a Contact Us Form (CUF) after multiple failed case creation attempts.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-10-17T21:21:37+00:00","modified":"2024-10-17T21:27:29+00:00","when":"2024-10-17T21:21:37+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption may have caused.
We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support or to Google Workspace Support using help article https://support.google.com/a/answer/1047213.\n(All Times US/Pacific)\n**Incident Start:** 16 October, 2024 19:36\n**Incident End:** 17 October, 2024 02:45\n**Duration:** 7 hours, 9 minutes\n**Affected Services and Features:**\nGoogle Cloud and Workspace Support - Case creation, Case updates, Chat support\n**Regions/Zones:** Global\n**Description:**\nGoogle Cloud and Workspace Support encountered intermittent disruptions to case creation and management for a period of 7 hours and 9 minutes.\nFrom preliminary analysis, we believe the cause of the issue was a recent change to Google Support’s ticket persistence layer. The change granted a subsystem unbounded resource consumption, impacting user requests. Google engineering will perform a thorough root cause analysis and ensure that appropriate measures are taken to prevent a recurrence of the issue.\n**Customer Impact:**\n* Customers impacted by this incident would have noticed delays, timeouts, and other errors during case creation and while performing case updates.\n* Chat support also experienced intermittent disconnects.\n-----","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"bGThzF7oEGP5jcuDdMuk","service_name":"Google Cloud Support","affected_products":[{"title":"Google Cloud Support","id":"bGThzF7oEGP5jcuDdMuk"}],"uri":"incidents/cyHovCjtDs1RPbZAKsme","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"RAGcW4N9jHRkrAjnX2v7","number":"5456042628083118901","begin":"2024-10-16T06:05:00+00:00","created":"2024-10-16T08:48:19+00:00","end":"2024-10-16T09:49:00+00:00","modified":"2024-10-18T18:28:05+00:00","external_desc":"Multiple GCP products impacted in us-west2 region / us-west2-a zone","updates":[{"created":"2024-10-18T18:28:05+00:00","modified":"2024-10-18T18:28:05+00:00","when":"2024-10-18T18:28:05+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 16 October 2024, multiple Google Cloud products became unavailable in the us-west2-a zone / us-west2 region for a duration of 3 hours.\nTo our affected customers whose business was impacted during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nA mismatch in configuration between two components of Google’s internal cluster management system resulted in a failure of the cluster service discovery component. This failure was triggered by a software rollout within the cluster management system that was believed to be non-impactful to operating jobs. The service discovery component is a fundamental dependency of the cluster management system, and its failure resulted in failures of other infrastructure services hosted in the cluster. 
Those failures resulted in downstream impacts for various Google Cloud services which are dependent on the cluster management system.\n## Remediation and Prevention\nOnce the mechanism of the fault in the internal lookup and task addressing system was identified, remediation was performed by correcting a file path, allowing the system to restart successfully. The cluster management and other downstream systems recovered once this system was back in operation.\nThe rollout of the portion of the cluster management system that triggered the outage has been paused, and the specific trigger will be remediated before rollouts of that system resume. Additionally, an update to the lookup and task addressing system is being applied to prevent recurrence even if the problematic cluster management software is rolled out again.\n## Detailed Description of Impact\nFrom 15 October, 2024 23:05 to 16 October, 2024 02:49 US/Pacific, multiple Google Cloud products became unavailable in the us-west2-a zone / us-west2 region for a duration of 3 hours, 44 minutes.\n### Artifact Registry\nAsynchronous API operations to create new repositories saw an increased failure rate.\n### Cloud Build\nIncreased errors in CreateBuild calls (up to 7% at peak), builds delayed for 2 hours, builds with shorter TTLs expired.\n### Google Cloud Deploy\nRender, predeploy, deploy, verify, and postdeploy operations in us-west2 were unable to complete.\n### Google Cloud Dataflow\nCustomers experienced latency (slowness) while running both batch and streaming jobs.\n### Google Cloud Bigtable\nSome resources in us-west2-a experienced an increase in CANCELED and UNAVAILABLE errors.\n### Google Cloud SQL\nSome instance creation and backup operations failed.\n### Cloud Interconnect\nCloud Interconnect attachments to us-west2 could not be created, updated, or deleted.\n### Virtual Private Cloud\nProgramming of new VMs from other regions was not reaching the impacted zone. This also resulted in cross region packet loss.\n### Cloud Identity and Access Management\nReplication delays for Sessions, Service Accounts, and other resources to the impacted zone.\n### Persistent Disk\nSnapshots intermittently failed in this zone.\n### Compute Engine\nSome VMs in us-west2-a were unavailable; customers were unable to connect to them, modify them, or delete them. New VM creations in the zone were working and those VMs remained healthy.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-16T14:48:56+00:00","modified":"2024-10-18T18:28:05+00:00","when":"2024-10-16T14:48:56+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues.
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 15 October, 2024 23:05\n**Incident End:** 16 October, 2024 02:49\n**Duration:** 3 hours, 44 minutes\n**Affected Services and Features:**\n* Artifact Registry\n* Cloud Build\n* Google Cloud Deploy\n* Google Cloud Dataflow\n* Google Cloud Bigtable\n* Google Cloud SQL\n* Cloud Interconnect\n* Virtual Private Cloud (VPC)\n* Cloud Identity and Access Management\n* Google Compute Engine\n* Persistent Disk\n**Regions/Zones:** Region us-west2 / Zone us-west2-a\n**Description:**\nMultiple Google Cloud products were unavailable in us-west2-a zone / us-west2 region for a duration of 3 hours, 44 minutes. From preliminary analysis, the root cause of the issue was a failure in an internal lookup and task addressing system. Its failure led to the unavailability of our internal cluster management system in the affected zone, impacting Google Cloud services dependent on it.\nGoogle engineers implemented a fix to return the lookup and task addressing system to full operation and this mitigated the issue.\nGoogle will complete a full IR in the following days that will provide a full root cause.\n**Customer Impact:**\n* ***Artifact Registry :*** Asynchronous API operations to create new repositories saw an increased failure rate.\n* ***Cloud Build :*** Increased errors in CreateBuild calls (up to 7% at peak), builds delayed for 2 hours, builds with shorter TTLs expired.\n* ***Google Cloud Deploy :*** Render, predeploy, deploy, verify, and postdeploy operations in us-west2 were unable to complete.\n* ***Google Cloud Dataflow :*** Customers experienced latency (slowness) while running both batch and streaming jobs.\n* ***Google Cloud Bigtable :*** Clusters in us-west2-a experienced an increase in CANCELED and UNAVAILABLE errors and admin operations were failing.\n* ***Google Cloud SQL :*** Some instance creation and backup operations failed.\n* ***Cloud Interconnect :*** Cloud Interconnect attachments to us-west2 could not be created, updated, or deleted.\n* ***Virtual Private Cloud :*** Programming of new VMs from other regions was not reaching the impacted zone. This also resulted in cross region packet loss.\n* ***Cloud Identity and Access Management :*** Replication delays for Sessions, Service Accounts, and other resources to the impacted zone.\n* ***Persistent Disk :*** Snapshots intermittently failed in this zone.\n* ***Compute Engine :*** Some VMs in us-west2-a were unavailable; customers were unable to connect to them, modify them, or delete them.
New VM creations in the zone were working and those VMs remained healthy.\n------------","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-16T09:59:06+00:00","modified":"2024-10-16T14:48:56+00:00","when":"2024-10-16T09:59:06+00:00","text":"The issue with Google Cloud SQL, Google Compute Engine, Virtual Private Cloud (VPC), Google Cloud Dataflow, Hybrid Connectivity, Google Cloud Networking, Identity and Access Management, Artifact Registry, Persistent Disk, Google Cloud Deploy, Google Cloud Bigtable, AlloyDB for PostgreSQL has been resolved for all affected users as of Wednesday, 2024-10-16 02:49 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-16T09:48:20+00:00","modified":"2024-10-16T10:19:35+00:00","when":"2024-10-16T09:48:20+00:00","text":"Summary: Multiple GCP products impacted in us-west2 region / us-west2-a zone\nDescription: We are experiencing an issue with multiple GCP products.\nInitial mitigation steps have been completed successfully, but some work is still underway by our engineering team.\nWe will provide more information by Wednesday, 2024-10-16 03:30 US/Pacific.\n**Diagnosis:**\n- **Google Cloud SQL**: Some operations such as instance create and backup may fail.\n- **Cloud Interconnect**: **[mitigated]** Cloud Interconnect attachments to us-west2 cannot be created, updated, or deleted.\n- **Google Compute Engine**: **[mitigated]** Customers will see some of their VMs unavailable in us-west2-a, where they will not be able to SSH, modify or delete them. New VM creations are working for the zone and those VMs will be healthy.\n- **Google Cloud Bigtable**: Increase in CANCELED and UNAVAILABLE errors for some resources in us-west2-a\n- **Google Cloud Dataflow**: **[mitigated]** Impacted customers may have experienced latency (slowness) while running both batch and streaming jobs. This has been mitigated and no further impact is expected.\n- **Artifact Registry (AR)**: Asynchronous API operations to create new AR repositories saw an increased failure rate\n- **Persistent Disk**: Snapshots might fail in this zone.\n- **Cloud Deploy**: Cloud Deploy's render, predeploy, deploy, verify and postdeploy operations in us-west2 are unable to complete.\n- **Cloud Pub/Sub**: **[mitigated]** The impact for Pub/sub is already mitigated as traffic is diverted away from the impacted location.\n- **Identity and Access Management**: **[mitigated]** The impact is already mitigated as traffic is diverted away from the impacted location.\n**Workaround:**\n- **Google Cloud SQL**: Users can perform these operations in a region other than us-west2 for new instance creation. There is no workaround for backups on existing instances at this time.\n- **Message Streams**: Users can create new VMs.
Current VMs are unavailable\n- **Google Cloud Bigtable**: Route traffic from resources in us-west2-a to other replicated resources in a different zone","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-10-16T09:32:53+00:00","modified":"2024-10-16T10:20:54+00:00","when":"2024-10-16T09:32:53+00:00","text":"Summary: Multiple GCP products impacted in us-west2 region / us-west2-a zone\n**Diagnosis:**\n* **Google Cloud SQL:** Some operations such as instance create and backup may fail.\n* **Cloud Interconnect:** Cloud Interconnect attachments to us-west2 cannot be created, updated, or deleted.\n* **Google Compute Engine:** [mitigated] Customers will see some of their VMs unavailable in us-west2-a, where they will not be able to SSH, modify or delete them. New VM creations are working for the zone and those VMs will be healthy.\n* **Google Cloud Bigtable**: Increase in CANCELED and UNAVAILABLE errors for some resources in us-west2-a\n* **Google Cloud Dataflow:** Impacted customers may have experienced latency (slowness) while running both batch and streaming jobs. This has been mitigated and no further impact is expected.\n* **Artifact Registry (AR)**: Asynchronous API operations to create new AR repositories saw an increased failure rate\n* **Persistent Disk**: Snapshots might fail in this zone.\n* **Cloud Deploy:** Cloud Deploy's render, predeploy, deploy, verify and postdeploy operations in us-west2 are unable to complete.\n* **Cloud Pub/Sub:** The impact for Pub/sub is already mitigated as traffic is diverted away from the impacted location.\n* **Identity and Access Management:** The impact is already mitigated as traffic is diverted away from the impacted location\n* **Google Cloud Networking:**\n* **Virtual Private Cloud (VPC):**\n**Workaround:**\n* **Google Cloud SQL**: Users can perform these operations in a region other than us-west2 for new instance creation. There is no workaround for backups on existing instances at this time.\n* **Message Streams**: Users can create new VMs. Current VMs are unavailable\n* **Google Cloud Bigtable**: Route traffic from resources in us-west2-a to other replicated resources in a different zone","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-10-16T09:25:49+00:00","modified":"2024-10-16T10:19:18+00:00","when":"2024-10-16T09:25:49+00:00","text":"Summary: Multiple GCP products impacted in us-west2 region / us-west2-a zone\nDescription: We are experiencing an issue with multiple GCP products including Google Cloud SQL, Google Compute Engine, Virtual Private Cloud (VPC), Google Cloud Networking, Google Cloud Bigtable, Hybrid Connectivity, Identity and Access Management and Google Cloud Dataflow. Our engineering teams remain fully engaged and are focused on finding a mitigation. We will provide an update by Wednesday, 2024-10-16 03:00 US/Pacific with current details.\n**Diagnosis:**\n- **Google Cloud SQL:** Some operations such as instance create and backup may fail.\n- **Cloud Interconnect:** Cloud Interconnect attachments to us-west2 cannot be created, updated, or deleted.\n- **Google Compute Engine:** Customers will see some of their VMs unavailable in us-west2-a, where they will not be able to SSH, modify or delete them. New VM creations are working for the zone and those VMs will be healthy.
\n- **Google Cloud Bigtable**: Increase in CANCELED and UNAVAILABLE errors for some resources in us-west2-a\n- **Google Cloud Dataflow:** Impacted customers may have experienced latency (slowness) while running both batch and streaming jobs. This has been mitigated and no further impact is expected.\n- **Artifact Registry (AR)**: Asynchronous API operations to create new AR repositories saw an increased failure rate\n- **Persistent Disk**: Snapshots might fail in this zone.\n- **Cloud Deploy**: Cloud Deploy's render, predeploy, deploy, verify and postdeploy operations in us-west2 are unable to complete.\n- **Pub/Sub**: The impact for Pub/sub is already mitigated as traffic is diverted away from the impacted location.\n**Workaround**:\n- **CloudSQL**: Users can perform these operations in a region other than us-west2 for new instance creation. There is no workaround for backups on existing instances at this time.\n- **Message Streams**: Users can create new VMs. Current VMs are unavailable\n- **Google Cloud Bigtable**: Route traffic from resources in us-west2-a to other replicated resources in a different zone","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-10-16T09:05:11+00:00","modified":"2024-10-16T10:19:11+00:00","when":"2024-10-16T09:05:11+00:00","text":"Summary: Multiple GCP products impacted in us-west2 region / us-west2-a zone\nDescription: We are experiencing an issue with several GCP products including Google Cloud SQL, Google Compute Engine, Virtual Private Cloud (VPC), Google Cloud Networking, Cloud Bigtable, Hybrid Connectivity, Identity and Access Management and Google Cloud Dataflow. Our engineering teams remain fully engaged and are focused on finding a mitigation. We will provide an update by Wednesday, 2024-10-16 02:30 US/Pacific with current details.\n**Diagnosis:**\n- **Google Cloud SQL**: Some instance operations such as instance create and backup may fail.\n- **Cloud Interconnect**: Cloud Interconnect attachments to us-west2 cannot be created, updated, or deleted.\n- **Google Compute Engine**: Customers will see some of their VMs unavailable in us-west2-a, where they will not be able to SSH, modify or delete them. New VM creations are working for the zone and those VMs will be healthy.\n- **Google Cloud Bigtable**: Increase in CANCELED and UNAVAILABLE errors for some resources in us-west2-a\n- **Google Cloud Dataflow**: Impacted customers may experience latency (slowness) while running batch jobs. Streaming jobs are no longer affected.\n- **Artifact Registry:** Asynchronous API operations to create new AR repositories saw an increased failure rate, up to 5% average\n- **Persistent Disk:** Snapshots might fail in this zone.\n- **Cloud Deploy:** Cloud Deploy's render, predeploy, deploy, verify, and postdeploy operations in us-west2 are unable to complete.\n- **Pub/Sub**: The impact for Pub/sub is already mitigated as traffic is diverted away from the impacted location\n**Workaround:**\n- **CloudSQL**: Users can perform these operations in a region other than us-west2\n- **Message Streams**: Users can create new VMs.
Current VMs are unavailable\n- **Google Cloud Bigtable**: Route traffic from resources in us-west2-a to other replicated resources in a different zone","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-10-16T08:48:15+00:00","modified":"2024-10-16T10:19:02+00:00","when":"2024-10-16T08:48:15+00:00","text":"Summary: Multiple GCP products impacted in us-west2 region / us-west2-a zone\nDescription: We are experiencing an issue with several GCP products including Google Cloud SQL, Google Compute Engine, Virtual Private Cloud (VPC), Google Cloud Networking, Hybrid Connectivity, Identity and Access Management and Google Cloud Dataflow.\nOur engineering teams remain fully engaged and are focused on finding a mitigation. We will provide an update by Wednesday, 2024-10-16 02:30 US/Pacific with current details.\n**Diagnosis:**\n- **Google Cloud SQL**: Some instance operations such as instance create and backup may fail.\n- **Cloud Interconnect**: Cloud Interconnect attachments to us-west2 cannot be created, updated, or deleted.\n- **Google Compute Engine**: Customers will see some of their VMs unavailable in us-west2-a, where they will not be able to SSH, modify or delete them. New VM creations are working for the zone and those VMs will be healthy.\n- **Google Cloud Bigtable**: Increase in CANCELED and UNAVAILABLE errors for some resources in us-west2-a\n- **Google Cloud Dataflow**: Impacted customers may experience latency (slowness) while running batch jobs. Streaming jobs are no longer affected.\n- **Artifact Registry:** Asynchronous API operations to create new AR repositories saw an increased failure rate, up to 5% average\n- **Persistent Disk:** Snapshots might fail in this zone.\n- **Cloud Deploy:** Cloud Deploy's render, predeploy, deploy, verify, and postdeploy operations in us-west2 are unable to complete.\n- **Pub/Sub**: The impact for Pub/sub is already mitigated as traffic is diverted away from the impacted location\n**Workaround:**\n- **CloudSQL**: Users can perform these operations in a region other than us-west2\n- **Message Streams**: Users can create new VMs. Current VMs are unavailable\n- **Google Cloud Bigtable**: Route traffic from resources in us-west2-a to other replicated resources in a different zone","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Los Angeles (us-west2)","id":"us-west2"}]}],"most_recent_update":{"created":"2024-10-18T18:28:05+00:00","modified":"2024-10-18T18:28:05+00:00","when":"2024-10-18T18:28:05+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 16 October 2024, multiple Google Cloud products became unavailable in the us-west2-a zone / us-west2 region for a duration of 3 hours.\nTo our affected customers whose business was impacted during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nA mismatch in configuration between two components of Google’s internal cluster management system resulted in a failure of the cluster service discovery component. This failure was triggered by a software rollout within the cluster management system that was believed to be non-impactful to operating jobs. The service discovery component is a fundamental dependency of the cluster management system, and its failure resulted in failures of other infrastructure services hosted in the cluster.
Those failures resulted in downstream impacts for various Google Cloud services which are dependent on the cluster management system.\n## Remediation and Prevention\nOnce the mechanism of the fault in the internal lookup and task addressing system was identified, remediation was performed by correcting a file path, allowing the system to restart successfully. The cluster management and other downstream systems recovered once this system was back in operation.\nThe rollout of the portion of the cluster management system that triggered the outage has been paused, and the specific trigger will be remediated before rollouts of that system resume. Additionally, an update to the lookup and task addressing system is being applied to prevent recurrence even if the problematic cluster management software is rolled out again.\n## Detailed Description of Impact\nFrom 15 October, 2024 23:05 to 16 October, 2024 02:49 US/Pacific, multiple Google Cloud products became unavailable in the us-west2-a zone / us-west2 region for a duration of 3 hours, 44 minutes.\n### Artifact Registry\nAsynchronous API operations to create new repositories saw an increased failure rate.\n### Cloud Build\nIncreased errors in CreateBuild calls (up to 7% at peak), builds delayed for 2 hours, builds with shorter TTLs expired.\n### Google Cloud Deploy\nRender, predeploy, deploy, verify, and postdeploy operations in us-west2 were unable to complete.\n### Google Cloud Dataflow\nCustomers experienced latency (slowness) while running both batch and streaming jobs.\n### Google Cloud Bigtable\nSome resources in us-west2-a experienced an increase in CANCELED and UNAVAILABLE errors.\n### Google Cloud SQL\nSome instance creation and backup operations failed.\n### Cloud Interconnect\nCloud Interconnect attachments to us-west2 could not be created, updated, or deleted.\n### Virtual Private Cloud\nProgramming of new VMs from other regions was not reaching the impacted zone. This also resulted in cross region packet loss.\n### Cloud Identity and Access Management\nReplication delays for Sessions, Service Accounts, and other resources to the impacted zone.\n### Persistent Disk\nSnapshots intermittently failed in this zone.\n### Compute Engine\nSome VMs in us-west2-a were unavailable; customers were unable to connect to them, modify them, or delete them.
New VM creations in the zone were working and those VMs remained healthy.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"AlloyDB for PostgreSQL","id":"fPovtKbaWN9UTepMm3kJ"},{"title":"Artifact Registry","id":"QbBuuiRdsLpMr9WmGwm5"},{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"},{"title":"Google Cloud Bigtable","id":"LfZSuE3xdQU46YMFV5fy"},{"title":"Google Cloud Dataflow","id":"T9bFoXPqG8w8g1YbWTKY"},{"title":"Google Cloud Deploy","id":"6z5SnvJrJMJQSdJmUQjH"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Google Cloud SQL","id":"hV87iK5DcEXKgWU2kDri"},{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Hybrid Connectivity","id":"5x6CGnZvSHQZ26KtxpK1"},{"title":"Identity and Access Management","id":"adnGEDEt9zWzs8uF1oKA"},{"title":"Persistent Disk","id":"SzESm2Ux129pjDGKWD68"},{"title":"Virtual Private Cloud (VPC)","id":"BSGtCUnz6ZmyajsjgTKv"}],"uri":"incidents/RAGcW4N9jHRkrAjnX2v7","currently_affected_locations":[],"previously_affected_locations":[{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"id":"ZJyESmcEajZg2wMDJUg6","number":"13054406081845507242","begin":"2024-10-09T21:57:00+00:00","created":"2024-10-10T00:52:57+00:00","end":"2024-10-10T00:18:00+00:00","modified":"2024-10-10T01:27:55+00:00","external_desc":"AppEngine Flexible users may be experiencing an increase in HTTP 429 responses.","updates":[{"created":"2024-10-10T01:27:55+00:00","modified":"2024-10-10T01:27:56+00:00","when":"2024-10-10T01:27:55+00:00","text":"Summary: AppEngine Flexible users may be experiencing an increase in HTTP 429 responses.\nDescription: The issue with Google AppEngine Flexible has been resolved for all affected users as of Wednesday, 2024-10-09 17:18 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-10T00:52:57+00:00","modified":"2024-10-10T01:27:55+00:00","when":"2024-10-10T00:52:57+00:00","text":"Summary: AppEngine Flexible users may be experiencing an increase in HTTP 429 responses.\nDescription: Mitigation has been applied by our engineering team and we are seeing recovery for impacted users. 
We will continue to monitor our environment for stability.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nWe will provide more information by Wednesday, 2024-10-09 18:30 US/Pacific.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-10-10T01:27:55+00:00","modified":"2024-10-10T01:27:56+00:00","when":"2024-10-10T01:27:55+00:00","text":"Summary: AppEngine Flexible users may be experiencing an increase in HTTP 429 responses.\nDescription: The issue with Google AppEngine Flexible has been resolved for all affected users as of Wednesday, 2024-10-09 17:18 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"kchyUtnkMHJWaAva8aYc","service_name":"Google App Engine","affected_products":[{"title":"Google App Engine","id":"kchyUtnkMHJWaAva8aYc"}],"uri":"incidents/ZJyESmcEajZg2wMDJUg6","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina 
(us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"pLDngXyKVTqRS9isDQVB","number":"5970751919954512922","begin":"2024-10-08T19:15:00+00:00","created":"2024-10-08T21:15:43+00:00","end":"2024-10-09T01:35:00+00:00","modified":"2024-10-09T02:04:54+00:00","external_desc":"App Engine customers in us-central1 one may be experiencing an increase in HTTP 429 responses.","updates":[{"created":"2024-10-09T01:46:39+00:00","modified":"2024-10-09T02:04:54+00:00","when":"2024-10-09T01:46:39+00:00","text":"Summary: App Engine customers in us-central1 one may be experiencing an increase in HTTP 429 responses.\nDescription: The issue with Google App Engine has been resolved for all affected users as of Tuesday, 2024-10-08 18:35 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-08T23:59:41+00:00","modified":"2024-10-09T01:46:39+00:00","when":"2024-10-08T23:59:41+00:00","text":"Summary: App Engine customers in us-central1 one may be experiencing an increase in HTTP 429 responses.\nDescription: We are experiencing an issue with Google App Engine beginning on Tuesday, 2024-10-08 12:15 US/Pacific in us-central1 region.\nOur engineering team has identified a mitigation strategy. Owing to the mitigation steps implemented thus far, partial recovery has been achieved.\nWe will continue to work on the recovery process and provide an update by Tuesday, 2024-10-08 18:30 US/Pacific with current details.\nDiagnosis: Impacted customers may intermittently encounter a ‘HTTP 429’ response code.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-10-08T22:42:23+00:00","modified":"2024-10-08T23:59:41+00:00","when":"2024-10-08T22:42:23+00:00","text":"Summary: App Engine customers in us-central1 one may be experiencing an increase in HTTP 429 responses.\nDescription: We are experiencing an issue with Google App Engine beginning on Tuesday, 2024-10-08 15:15 US/Pacific.\nOur engineering team is actively working to mitigate the issue.\nWe will provide an update by Tuesday, 2024-10-08 17:00 US/Pacific with current details.\nDiagnosis: Impacted customers for GAE may see an increase in requests returning a HTTP 429 response code.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-10-08T21:15:43+00:00","modified":"2024-10-08T22:42:23+00:00","when":"2024-10-08T21:15:43+00:00","text":"Summary: App Engine customers in us-central1 one may be experiencing an increase in HTTP 429 responses.\nDescription: We are experiencing an issue with Google App Engine beginning on Tuesday, 2024-10-08 15:15 US/Pacific.\nOur engineering team is actively working to mitigate the issue.\nWe will provide an update by Tuesday, 2024-10-08 15:15 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers for GAE may see an increase in requests returning a HTTP 429 response code.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa 
(us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-10-09T01:46:39+00:00","modified":"2024-10-09T02:04:54+00:00","when":"2024-10-09T01:46:39+00:00","text":"Summary: App Engine customers in us-central1 one may be experiencing an increase in HTTP 429 responses.\nDescription: The issue with Google App Engine has been resolved for all affected users as of Tuesday, 2024-10-08 18:35 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"kchyUtnkMHJWaAva8aYc","service_name":"Google App Engine","affected_products":[{"title":"Google App Engine","id":"kchyUtnkMHJWaAva8aYc"}],"uri":"incidents/pLDngXyKVTqRS9isDQVB","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"MXtAVH2fTnMpaCDx53JN","number":"15467244175219931594","begin":"2024-10-02T21:18:00+00:00","created":"2024-10-02T23:47:37+00:00","end":"2024-10-02T23:23:00+00:00","modified":"2024-10-08T01:29:17+00:00","external_desc":"Customers may experience intermittent issues with support case creation","updates":[{"created":"2024-10-07T23:28:34+00:00","modified":"2024-10-08T01:29:17+00:00","when":"2024-10-07T23:28:34+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 2 October 2024, Google Cloud and Google Workspace Support experienced intermittent issues with case creation for 2 hours, 5 minutes, between 14:18 US/Pacific to 16:23 US/Pacific. As a result, some users encountered unexpected errors and slight delays in case creation due to this system disruption. Additionally, the backup case creation process (through the UI) experienced elevated latency. To our Google Cloud and Google Workspace customers who experienced delay in case creation during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nGoogle Support’s ticket persistence layer has a capacity management system in its control plane to ensure availability. Google services that integrate with the ticket persistence layer are expected to perform a controlled ramp of traffic and respond to backpressure signals. If an integration continues sending additional load after backpressure signals, the capacity management system applies a temporary throttle to the specific integration to ensure the health of the system.\nBeginning at 14:18 on Wednesday, 02 October 2024, an existing internal Google reporting service that integrates with the ticket persistence layer responded to an increase in data volume by rapidly ramping traffic. Due to a latent issue in the reporting service, it sent traffic in a way that bypassed normal backpressure signals. Over time traffic from the reporting service reached an elevated level to cause high query latency for some operations in our support UI. Once load on the persistence layer exceeded pre-set limits, the capacity management system restored service by temporarily throttling the reporting service’s traffic. 
Due to a latent issue in the reporting service, when the temporary throttle expired, the reporting service again rapidly ramped traffic until the capacity management system again applied a temporary throttle.\n## Remediation and Prevention\nAs each impact period was short, internal monitoring metrics for the support UI did not reach alarm thresholds until 15:01. Once these thresholds were met, Google engineers were alerted to the incident at 15:01 and immediately started working on the issue.\nAfter thorough analysis, our engineering team identified the source of the traffic to the ticket persistence layer and applied a long-lived throttle, restoring service.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n* We are revising our monitoring to detect customer impact earlier.\n* We are revising the method used to integrate with the ticket persistence layer to perform a controlled ramp of traffic as well as improve detection and response to backpressure signals. This will ensure all integrations manage resources better, reducing dependency on capacity management failsafes.\n* We are updating our ticket persistence layer to apply traffic isolation and load shedding best practices so that traffic for customer support cases cannot be affected by traffic from internal reporting systems.\n## Detailed Description of Impact\nBetween 14:18 and 16:23 US/Pacific on Wednesday, 02 October 2024, users experienced intermittent delays in case creation (up to a minute). \u003c5% of users who attempted to create a support case saw an error message “Unable to create case. Try again.” Almost all users who re-attempted creation succeeded. A small number of users experienced three consecutive failures and were presented with an alternate support contact form.\nAll customers were able to receive support during this time for existing cases.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-03T19:53:53+00:00","modified":"2024-10-07T23:28:34+00:00","when":"2024-10-03T19:53:53+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support or to Google Workspace Support using help article https://support.google.com/a/answer/1047213.\n(All Times US/Pacific)\n**Incident Start:** 2 Oct 2024 14:18\n**Incident End:** 2 Oct 2024 16:23\n**Duration:** 2 hours, 5 minutes\n**Affected Services and Features:**\nGoogle Cloud Support and Google Workspace Support\n**Regions/Zones:** Global\n**Description:**\nFor 2 hours and 5 minutes, Google Cloud Support and Google Workspace Support customers attempting to create cases experienced intermittent case creation delays, each lasting less than 5 minutes. During the period of impact, the case creation UI directed customers experiencing the longest delays to create their case using a backup system. We have not received any reports of customers being unable to contact Support during the impacted period.\nFrom preliminary analysis, the root cause of the issue was the scaling behavior of a routine data pipeline.
The data pipeline ramped use of a common persistence layer in a manner that bypassed normal load shedding and isolation. When the persistence layer became unhealthy, the control plane applied a temporary throttle to the data pipeline, enabling the Support UI to successfully create user cases, until the temporary throttle expired, at which time the pipeline repeated its behavior.\nGoogle will complete a full IR in the following days that will provide a full root cause.\n**Customer Impact:**\n- Users faced intermittent delays when attempting to create cases, with a few seeing errors.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-03T01:50:40+00:00","modified":"2024-10-03T19:53:53+00:00","when":"2024-10-03T01:50:40+00:00","text":"The issue with Google Cloud Support case creation has been resolved for all affected customers as of Wednesday, 2024-10-02 16:12 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-03T01:07:42+00:00","modified":"2024-10-03T01:50:44+00:00","when":"2024-10-03T01:07:42+00:00","text":"Summary: Customers may experience intermittent issues with support case creation\nDescription: We experienced an issue where some customers were unable to create cases. This issue has been mitigated as of 16:12 US/Pacific. The support case creation process is now working successfully, but we continue to investigate the underlying cause and monitor our environment for stability. Initial investigations have determined that this was a temporary issue due to an unexpected increase in traffic.\nWe will provide an update by Wednesday, 2024-10-02 20:00 US/Pacific with current details.\nDiagnosis: Impacted customers may receive an unexpected error while attempting case creation.\nWorkaround: Retrying the case creation process may be successful due to the intermittent nature of the issue.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-10-02T23:47:33+00:00","modified":"2024-10-03T01:07:42+00:00","when":"2024-10-02T23:47:33+00:00","text":"Summary: Customers may experience intermittent issues with support case creation\nDescription: We are experiencing an intermittent issue with Google Cloud Support case creation beginning on Wednesday, 2024-10-02 14:20 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-10-02 18:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may receive an unexpected error while attempting case creation.\nWorkaround: Retrying the case creation process may be successful due to the intermittent nature of the issue.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-10-07T23:28:34+00:00","modified":"2024-10-08T01:29:17+00:00","when":"2024-10-07T23:28:34+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 2 October 2024, Google Cloud and Google Workspace Support experienced intermittent issues with case creation for 2 hours, 5 minutes, between 14:18 and 16:23 US/Pacific. As a result, some users encountered unexpected errors and slight delays in case creation due to this system disruption. Additionally, the backup case creation process (through the UI) experienced elevated latency.
To our Google Cloud and Google Workspace customers who experienced delays in case creation during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nGoogle Support’s ticket persistence layer has a capacity management system in its control plane to ensure availability. Google services that integrate with the ticket persistence layer are expected to perform a controlled ramp of traffic and respond to backpressure signals. If an integration continues sending additional load after backpressure signals, the capacity management system applies a temporary throttle to the specific integration to ensure the health of the system.\nBeginning at 14:18 on Wednesday, 02 October 2024, an existing internal Google reporting service that integrates with the ticket persistence layer responded to an increase in data volume by rapidly ramping traffic. Due to a latent issue in the reporting service, it sent traffic in a way that bypassed normal backpressure signals. Over time, traffic from the reporting service reached a level that caused high query latency for some operations in our support UI. Once load on the persistence layer exceeded pre-set limits, the capacity management system restored service by temporarily throttling the reporting service’s traffic. Due to a latent issue in the reporting service, when the temporary throttle expired the reporting service again rapidly ramped traffic until the capacity management system again applied a temporary throttle.\n## Remediation and Prevention\nAs each impact period was short, internal monitoring metrics for the support UI did not reach alarm thresholds until 15:01. Once these thresholds were met, Google engineers were alerted to the incident and immediately started working on the issue.\nAfter thorough analysis, our engineering team identified the source of the traffic to the ticket persistence layer and applied a long-lived throttle, restoring service.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n* We are revising our monitoring to detect customer impact earlier.\n* We are revising the method used to integrate with the ticket persistence layer to perform a controlled ramp of traffic as well as improve detection and response to backpressure signals. This will ensure all integrations manage resources better, reducing dependency on capacity management failsafes.\n* We are updating our ticket persistence layer to apply traffic isolation and load shedding best practices so that traffic for customer support cases cannot be affected by traffic from internal reporting systems.\n## Detailed Description of Impact\nBetween 14:18 and 16:23 US/Pacific on Wednesday, 02 October 2024, users experienced intermittent delays in case creation (up to a minute). \u003c5% of users who attempted to create a support case saw an error message “Unable to create case. Try again.” Almost all users who re-attempted creation succeeded. 
A small number of users experienced three consecutive failures and were presented with an alternate support contact form.\nAll customers were able to receive support during this time for existing cases.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"bGThzF7oEGP5jcuDdMuk","service_name":"Google Cloud Support","affected_products":[{"title":"Google Cloud Support","id":"bGThzF7oEGP5jcuDdMuk"}],"uri":"incidents/MXtAVH2fTnMpaCDx53JN","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"dPHugwJopTYPziEJEkUm","number":"11730366353253151801","begin":"2024-10-02T11:39:00+00:00","created":"2024-10-02T13:00:53+00:00","end":"2024-10-02T14:08:24+00:00","modified":"2024-10-02T14:08:28+00:00","external_desc":"Mandiant Attack Surface Management customers may observe 500 errors while navigating via GTI and on the welcome screen while logging into ASM.","updates":[{"created":"2024-10-02T14:08:24+00:00","modified":"2024-10-02T14:08:33+00:00","when":"2024-10-02T14:08:24+00:00","text":"The issue with Mandiant Attack Surface Management has been resolved for all affected users as of Wednesday, 2024-10-02 06:55 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-02T13:18:24+00:00","modified":"2024-10-02T14:08:28+00:00","when":"2024-10-02T13:18:24+00:00","text":"Summary: Mandiant Attack Surface Management customers may observe 500 errors while navigating via GTI and on the welcome screen while logging into ASM.\nDescription: We are experiencing an issue with Mandiant Attack Surface Management beginning on Wednesday, 2024-10-02 04:39 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-10-02 07:30 US/Pacific with current details.\nDiagnosis: Mandiant Attack Surface Management customers may observe 500 errors while navigating via GTI and on the welcome screen while logging into ASM.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-10-02T13:00:41+00:00","modified":"2024-10-02T13:18:24+00:00","when":"2024-10-02T13:00:41+00:00","text":"Summary: Mandiant Attack Surface Management customers may observe 500 errors while navigating via GTI and on the welcome screen while logging into ASM.\nDescription: We are experiencing an issue with Mandiant Attack Surface Management beginning on Wednesday, 2024-10-02 04:39 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-10-02 06:30 US/Pacific with current details.\nDiagnosis: Mandiant Attack Surface Management customers may observe 500 errors while navigating via GTI and on the welcome screen while logging into ASM.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-10-02T14:08:24+00:00","modified":"2024-10-02T14:08:33+00:00","when":"2024-10-02T14:08:24+00:00","text":"The issue with Mandiant Attack Surface Management has been resolved for all affected users as of Wednesday, 2024-10-02 06:55 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"v7DL2fMFnZpCxNwd8KE7","service_name":"Mandiant Attack Surface 
Management","affected_products":[{"title":"Mandiant Attack Surface Management","id":"v7DL2fMFnZpCxNwd8KE7"}],"uri":"incidents/dPHugwJopTYPziEJEkUm","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"mNDbSnPAQnJehVvXMhtU","number":"10174426061614754388","begin":"2024-10-01T16:28:00+00:00","created":"2024-10-01T17:28:14+00:00","end":"2024-10-01T17:03:00+00:00","modified":"2024-10-01T21:40:55+00:00","external_desc":"Vertex AI Search seeing elevated failures for 50% of queries for SearchService","updates":[{"created":"2024-10-01T21:40:24+00:00","modified":"2024-10-01T21:40:55+00:00","when":"2024-10-01T21:40:24+00:00","text":"## Mini Incident Report\nWe apologize for the inconvenience this service disruption may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 01 October, 2024 09:28\n**Incident End:** 01 October, 2024 10:03\n**Duration:** 35 minutes\n**Affected Services and Features:**\nVertex AI Search for Retail\n**Regions/Zones:**\nGlobal\n**Description:**\nVertex AI Search for Retail queries on the Search API experienced elevated error rates and latencies for a duration of 35 minutes. Preliminary analysis indicates the root cause is an issue introduced by a new version release of an internal service which the Search API relies on which caused failures on our Search API.\nThe issue was mitigated by rolling out a patch for our internal service to resolve the disruption.\n**Customer Impact:**\nVertex AI Search for Retail customers experienced elevated error rates and elevated latencies for a subset of queries.\n**Additional Information:**\nAt the time of the outage, we believed that this issue was also impacting Recommendation AI. 
Upon further investigation, we concluded that this issue had no impact on Recommendation AI.\n-------","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-01T18:14:52+00:00","modified":"2024-10-01T21:40:24+00:00","when":"2024-10-01T18:14:52+00:00","text":"We experienced an issue with Vertex AI Search, Recommendation AI beginning on Tuesday, 2024-10-01 09:29 US/Pacific.\nOur monitoring indicates that the errors have stopped occurring as of 2024-10-01 10:05 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-10-01T17:37:55+00:00","modified":"2024-10-01T18:14:56+00:00","when":"2024-10-01T17:37:55+00:00","text":"Summary: Vertex AI Search seeing elevated failures for 50% of queries for SearchService\nDescription: We are experiencing an issue with Vertex AI Search, Recommendation AI beginning on Tuesday, 2024-10-01 09:29 US/Pacific.\nOur engineering team continues to investigate the issue with the highest priority\nWe will provide an update by Tuesday, 2024-10-01 12:00 US/Pacific with current details.\nDiagnosis: Customers may observe elevated errors while performing search for products.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-10-01T17:35:30+00:00","modified":"2024-10-01T17:37:57+00:00","when":"2024-10-01T17:35:30+00:00","text":"Summary: Vertex AI Search seeing elevated failures for 50% of queries for SearchService\nDescription: We are experiencing an issue with Vertex AI Search, Recommendation AI beginning on Tuesday, 2024-10-01 09:29 US/Pacific.\nOur engineering team continues to investigate the issue with the highest priority\nWe will provide an update by Tuesday, 2024-10-01 12:00 US/Pacific with current details.\nDiagnosis: Customers may observe elevated errors while performing search for products.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-10-01T17:28:09+00:00","modified":"2024-10-01T17:35:33+00:00","when":"2024-10-01T17:28:09+00:00","text":"Summary: Vertex AI Search seeing elevated failures for 50% of queries for SearchService\nDescription: We are experiencing an issue with Vertex AI Search, Recommendation AI beginning on Tuesday, 2024-10-01 09:29 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-10-01 11:45 US/Pacific with current details.\nDiagnosis: Customers may observe elevated errors while performing search for products.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-10-01T21:40:24+00:00","modified":"2024-10-01T21:40:55+00:00","when":"2024-10-01T21:40:24+00:00","text":"## Mini Incident Report\nWe apologize for the inconvenience this service disruption may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 01 October, 2024 09:28\n**Incident End:** 01 October, 2024 10:03\n**Duration:** 35 minutes\n**Affected Services and Features:**\nVertex AI Search for Retail\n**Regions/Zones:**\nGlobal\n**Description:**\nVertex AI Search for Retail queries on the Search API experienced elevated error rates and latencies for a duration of 35 minutes. Preliminary analysis indicates the root cause is an issue introduced by a new version release of an internal service that the Search API relies on, which caused failures on our Search API.\nThe issue was mitigated by rolling out a patch for our internal service.\n**Customer Impact:**\nVertex AI Search for Retail customers experienced elevated error rates and elevated latencies for a subset of queries.\n**Additional Information:**\nAt the time of the outage, we believed that this issue was also impacting Recommendation AI. Upon further investigation, we concluded that this issue had no impact on Recommendation AI.\n-------","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"},{"title":"Vertex AI Search","id":"vNncXxtSVvqyhvSkQ6PJ"}],"uri":"incidents/mNDbSnPAQnJehVvXMhtU","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"sNgEmHBYpCo6WhMawN1K","number":"1472607714523290576","begin":"2024-09-30T14:52:38+00:00","created":"2024-09-30T15:53:54+00:00","end":"2024-09-30T19:51:51+00:00","modified":"2024-09-30T19:51:58+00:00","external_desc":"Mandiant Attack Surface Management users may experience issues with completing scans for existing collections, and starting scans for new collections.","updates":[{"created":"2024-09-30T19:51:51+00:00","modified":"2024-09-30T19:51:59+00:00","when":"2024-09-30T19:51:51+00:00","text":"The issue with Mandiant Attack Surface Management has been resolved for all affected users as of Monday, 2024-09-30 12:51 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-30T17:46:48+00:00","modified":"2024-09-30T19:51:58+00:00","when":"2024-09-30T17:46:48+00:00","text":"Summary: Mandiant Attack Surface Management users may experience issues with completing scans for existing collections, and starting scans for new collections.\nDescription: We are experiencing an issue with Mandiant Attack Surface Management beginning on Monday, 2024-09-30 00:17 US/Pacific.\nOur engineering team continues to investigate the issue.\nNew collection data is not being collected, stored, or made available to users.\nExisting scans that have been completed are still accessible.\nAn application update is currently being tested in the staging environment.\nWe will provide an update by Monday, 2024-09-30 13:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: This issue is preventing all collection scans from completing. 
This includes scans that were already in progress, new scans for existing collections, and scans for newly created collections.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-09-30T15:53:49+00:00","modified":"2024-09-30T17:46:48+00:00","when":"2024-09-30T15:53:49+00:00","text":"Summary: Mandiant Attack Surface Management users may experience issues with completing scans for existing collections, and starting scans for new collections.\nDescription: We are experiencing an issue with Mandiant Attack Surface Management beginning on Monday, 2024-09-30 00:17 US/Pacific.\nOur engineering team continues to investigate the issue.\nExisting scans that have been completed are still accessible.\nWe will provide an update by Monday, 2024-09-30 11:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: This issue is preventing all collection scans from completing. This includes scans that were already in progress, new scans for existing collections, and scans for newly created collections.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-09-30T19:51:51+00:00","modified":"2024-09-30T19:51:59+00:00","when":"2024-09-30T19:51:51+00:00","text":"The issue with Mandiant Attack Surface Management has been resolved for all affected users as of Monday, 2024-09-30 12:51 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"v7DL2fMFnZpCxNwd8KE7","service_name":"Mandiant Attack Surface Management","affected_products":[{"title":"Mandiant Attack Surface Management","id":"v7DL2fMFnZpCxNwd8KE7"}],"uri":"incidents/sNgEmHBYpCo6WhMawN1K","currently_affected_locations":[],"previously_affected_locations":[]},{"id":"mCjGutE9suzWu57Yz5SD","number":"16737191765617264327","begin":"2024-09-27T10:39:46+00:00","created":"2024-09-27T12:19:20+00:00","end":"2024-09-28T20:49:50+00:00","modified":"2024-09-28T20:49:59+00:00","external_desc":"Google engineers are investigating an issue with VMware Engine","updates":[{"created":"2024-09-28T20:49:50+00:00","modified":"2024-09-28T20:50:00+00:00","when":"2024-09-28T20:49:50+00:00","text":"The issue with VMWare engine has been resolved for all affected users as of Saturday, 2024-09-28 13:49 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-28T13:41:47+00:00","modified":"2024-09-28T20:49:59+00:00","when":"2024-09-28T13:41:47+00:00","text":"Summary: Google engineers are investigating an issue with VMware Engine\nDescription: Mitigation work is currently underway and is being rolled out across all regions.\nWe will provide more information by Monday, 2024-09-30 11:00 US/Pacific.\nDiagnosis: VMware Engine network creation is failing for new customer projects of VMware engine. 
Existing projects appear to be functioning ok.\nWorkaround: Try creating your VEN again, and let us know if you encounter any further difficulties by opening a support case or replying within the existing one.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-09-27T22:54:00+00:00","modified":"2024-09-28T13:41:47+00:00","when":"2024-09-27T22:54:00+00:00","text":"Summary: Google engineers are investigating an issue with VMware Engine\nDescription: Mitigation work is currently underway and is being rolled out across all regions.\nWe will provide more information by Monday, 2024-09-30 09:00 US/Pacific.\nDiagnosis: VMware Engine network creation is failing for new customer projects of VMware engine. Existing projects appear to be functioning ok.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-09-27T19:27:39+00:00","modified":"2024-09-27T22:54:00+00:00","when":"2024-09-27T19:27:39+00:00","text":"Summary: Google engineers are investigating an issue with VMware Engine\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Friday, 2024-09-27 15:30 US/Pacific.\nWe will provide more information by Friday, 2024-09-27 16:00 US/Pacific.\nDiagnosis: VMware Engine network creation is failing for new customer projects of VMware engine. Existing projects appear to be functioning ok.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-09-27T18:29:33+00:00","modified":"2024-09-27T19:27:39+00:00","when":"2024-09-27T18:29:33+00:00","text":"Summary: Google engineers are investigating an issue with VMware Engine\nDescription: We are experiencing an issue with VMWare engine.\nOur engineering team continues to investigate the issue.\nThe team has found a potential solution and is working to roll it out.\nWe will provide an update by Friday, 2024-09-27 12:30 US/Pacific with current details.\nDiagnosis: VMware Engine network creation is failing for new customer projects of VMware engine. Existing projects appear to be functioning ok.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-09-27T17:27:53+00:00","modified":"2024-09-27T18:29:33+00:00","when":"2024-09-27T17:27:53+00:00","text":"Summary: Google engineers are investigating an issue with VMware Engine\nDescription: We are experiencing an issue with VMWare engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-09-27 11:30 US/Pacific with current details.\nDiagnosis: VMware Engine network creation is failing for new customer projects of VMware engine. Existing projects appear to be functioning ok.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-09-27T16:22:45+00:00","modified":"2024-09-27T17:27:53+00:00","when":"2024-09-27T16:22:45+00:00","text":"Summary: Google engineers are investigating an issue with VMware Engine\nDescription: We are experiencing an issue with VMWare engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-09-27 10:30 US/Pacific with current details.\nDiagnosis: VMware Engine network creation is failing for new customer projects of VMware engine. 
Existing projects appear to be functioning ok.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-09-27T15:24:11+00:00","modified":"2024-09-27T16:22:45+00:00","when":"2024-09-27T15:24:11+00:00","text":"Summary: Google engineers are investigating an issue with VMware Engine\nDescription: We are experiencing an issue with VMWare engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-09-27 09:30 US/Pacific with current details.\nDiagnosis: VMware Engine network creation is failing for new customer projects of VMware engine. Existing projects appear to be functioning ok.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-09-27T14:15:01+00:00","modified":"2024-09-27T15:24:11+00:00","when":"2024-09-27T14:15:01+00:00","text":"Summary: Google engineers are investigating an issue with VMware Engine\nDescription: We are experiencing an issue with VMWare engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-09-27 08:30 US/Pacific with current details.\nDiagnosis: VMware Engine network creation is failing for new customer projects of VMware engine. Existing projects appear to be functioning ok.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-09-27T13:19:00+00:00","modified":"2024-09-27T14:15:01+00:00","when":"2024-09-27T13:19:00+00:00","text":"Summary: Google engineers are investigating an issue with VMware Engine\nDescription: We are experiencing an issue with VMWare engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-09-27 07:30 US/Pacific with current details.\nDiagnosis: VMware Engine network creation is failing for new customer projects of VMware engine. Existing projects appear to be functioning ok.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-09-27T12:19:00+00:00","modified":"2024-09-27T13:19:00+00:00","when":"2024-09-27T12:19:00+00:00","text":"Summary: Google engineers are investigating an issue with VMware Engine\nDescription: We are experiencing an issue with VMWare engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-09-27 06:30 US/Pacific with current details.\nDiagnosis: VMware Engine network creation is failing for new customer projects of VMware engine. 
Existing projects appear to be functioning ok.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-09-28T20:49:50+00:00","modified":"2024-09-28T20:50:00+00:00","when":"2024-09-28T20:49:50+00:00","text":"The issue with VMWare engine has been resolved for all affected users as of Saturday, 2024-09-28 13:49 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"VMWare engine","id":"9H6gWUHvb2ZubeoxzQ1Y"}],"uri":"incidents/mCjGutE9suzWu57Yz5SD","currently_affected_locations":[],"previously_affected_locations":[]},{"id":"psbdfLBYU9GR3bNNq7HT","number":"3264900608457753843","begin":"2024-09-25T08:27:00+00:00","created":"2024-09-25T13:45:18+00:00","end":"2024-09-25T19:28:00+00:00","modified":"2024-10-01T07:51:10+00:00","external_desc":"Mandiant Security Validation customers are unable to access the application and run jobs","updates":[{"created":"2024-10-01T04:17:47+00:00","modified":"2024-10-01T07:51:10+00:00","when":"2024-10-01T04:17:47+00:00","text":"# Incident Report\n## Summary\nOn 25 September 2024 from 01:27 to 12:28 US/Pacific, Mandiant Security Validation SaaS customers were unable to run or create Actions, Sequences and Evaluations for a duration of 11 hours and 1 minute.\nCustomers were able to log in and view the UI and observe results of previously run jobs, but were unable to create and run content (Actions/Evaluations/Sequences). From 11:23 to 12:28 US/Pacific (1 hour and 5 minutes), customers were unable to log in and were redirected to a maintenance page containing a notice of the outage while engineers fixed the issue.\nTo our Mandiant Security Validation SaaS customers whose business was impacted during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThis issue occurred due to an implementation issue in a backend database that limited a sequence column value. The value was incremented beyond that limit, causing the database and system to encounter an error that prevented users from running Actions, Sequences and Evaluations.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage via a support case on 25 September 2024 at 01:41 US/Pacific and immediately started an investigation. Once the nature and scope of the issue became clear, Google engineers adjusted the column value limit that was causing the issue. This required taking Mandiant Security Validation SaaS briefly offline to perform the necessary changes needed to resolve the issue and prevent it from recurring in the future. During this time users were redirected to a maintenance page informing them of the outage. 
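The failure mode described in the Root Cause above, a sequence value outgrowing a hard column limit, can be reproduced in miniature. The SQLite schema and the 32-bit cap below are illustrative assumptions, not details from the report:

```python
# Minimal reproduction of the failure mode: a sequence column with a hard
# upper bound that writes eventually exceed. SQLite and the 2147483647
# (2**31 - 1) limit are invented for illustration.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """CREATE TABLE jobs (
           seq INTEGER NOT NULL CHECK (seq <= 2147483647),  -- 32-bit cap
           payload TEXT
       )"""
)

def insert_job(seq: int) -> None:
    conn.execute("INSERT INTO jobs (seq, payload) VALUES (?, ?)", (seq, "run"))

insert_job(2147483647)          # the last value under the cap still succeeds
try:
    insert_job(2147483648)      # one past the cap: every new job now errors
except sqlite3.IntegrityError as e:
    print("job creation failed:", e)
```

Once the counter crosses the cap, every subsequent insert fails the same way, which matches the all-or-nothing symptom reported here: reads of existing data kept working while all new runs errored.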
Once the system was brought back online, engineers verified the underlying issue was resolved and that no further issues were encountered.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following action:\n* A thorough review of the system settings to ensure no further limitations exist that would cause this or similar issues to occur.\n## Detailed Description of Impact\n* From 1:27 to 11:22 US/Pacific, customers were able to log in and view the UI, but were unable to create and run content (Actions/Evaluations/Sequences).\n* From 11:23 to 12:28 US/Pacific, customers were redirected to a maintenance page containing a notice of the outage.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-26T06:56:35+00:00","modified":"2024-10-01T04:17:47+00:00","when":"2024-09-26T06:56:35+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 25 September 2024 01:27\n**Incident End:** 25 September 2024 12:28\n**Duration:** 11 hours, 1 minute\n**Affected Services and Features:**\nMandiant Security Validation\n**Regions/Zones:** Global\n**Description:**\nMandiant Security Validation SaaS customers were unable to run or create actions/sequences/evaluations for a duration of 11 hours, 1 minute. Preliminary investigation indicates the issue resulted from a maximum value limitation in the backend database. Google will complete a detailed Incident Report (IR) in the following days to provide a comprehensive root cause analysis.\n**Customer Impact:**\nFrom 1:27 to 11:22 US/Pacific, customers were able to log in and view the UI, but were unable to create and run content (Actions/Evaluations/Sequences).\nFrom 11:23 to 12:28 US/Pacific, customers were redirected to a maintenance page containing a notice of the outage.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-25T19:38:32+00:00","modified":"2024-09-26T06:56:35+00:00","when":"2024-09-25T19:38:32+00:00","text":"The issue with Mandiant Security Validation has been resolved for all affected users as of Wednesday, 2024-09-25 12:30 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-25T18:53:00+00:00","modified":"2024-09-25T19:38:34+00:00","when":"2024-09-25T18:53:00+00:00","text":"Summary: Mandiant Security Validation customers are unable to access the application and run jobs\nDescription: We are experiencing an issue with Mandiant Security Validation beginning on Wednesday, 2024-09-25 01:27 US/Pacific.\nMitigation work is currently underway by our engineering team. 
Our engineers have successfully tested the mitigation in lower environments and are actively rolling out the same in production.\nThe revised ETA for mitigation is by Wednesday, 2024-09-25 13:00 US/Pacific.\nWe will provide more information by Wednesday, 2024-09-25 13:30 US/Pacific.\nDiagnosis: Mandiant Security Validation is currently in maintenance mode and is not available to customers at the moment. Customers who were logged in are being redirected to the maintenance page, and no new customers will be able to log in to the application.\nWorkaround: None at this time","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-25T18:24:51+00:00","modified":"2024-09-25T18:53:07+00:00","when":"2024-09-25T18:24:51+00:00","text":"Summary: Mandiant Security Validation customers are unable to run jobs\nDescription: We are experiencing an issue with Mandiant Security Validation beginning on Wednesday, 2024-09-25 01:27 US/Pacific.\nMitigation work is currently underway by our engineering team. Our engineers have successfully tested the mitigation in lower environments and are actively rolling out the same in production.\nThe revised ETA for mitigation is by Wednesday, 2024-09-25 12:30 US/Pacific.\nWe will provide more information by Wednesday, 2024-09-25 13:00 US/Pacific.\nDiagnosis: Mandiant Security Validation customers are unable to run jobs\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-25T16:37:23+00:00","modified":"2024-09-25T18:24:51+00:00","when":"2024-09-25T16:37:23+00:00","text":"Summary: Mandiant Security Validation customers are unable to run jobs\nDescription: We are experiencing an issue with Mandiant Security Validation beginning on Wednesday, 2024-09-25 01:27 US/Pacific.\nMitigation work is currently underway by our engineering team. 
Our engineers have successfully tested the mitigation in lower environments and are actively rolling out the same in production.\nThe mitigation is expected to complete by Wednesday, 2024-09-25 11:30 US/Pacific.\nWe will provide more information by Wednesday, 2024-09-25 12:00 US/Pacific.\nDiagnosis: Mandiant Security Validation customers are unable to run jobs\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-25T15:18:07+00:00","modified":"2024-09-25T16:37:23+00:00","when":"2024-09-25T15:18:07+00:00","text":"Summary: Mandiant Security Validation customers are unable to run jobs\nDescription: We are experiencing an issue with Mandiant Security Validation beginning on Wednesday, 2024-09-25 01:27 US/Pacific.\nMitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-09-25 11:00 US/Pacific.\nDiagnosis: Mandiant Security Validation customers are unable to run jobs\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-25T14:53:41+00:00","modified":"2024-09-25T15:18:07+00:00","when":"2024-09-25T14:53:41+00:00","text":"Summary: Mandiant Security Validation customers are unable to run jobs\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-09-25 11:00 US/Pacific.\nDiagnosis: Mandiant Security Validation customers are unable to run jobs\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-25T13:51:23+00:00","modified":"2024-09-25T14:53:41+00:00","when":"2024-09-25T13:51:23+00:00","text":"Summary: Mandiant Security Validation customers are unable to run jobs\nDescription: We are experiencing an issue with Mandiant Security Validation beginning on Wednesday, 2024-09-25 06:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-09-25 08:00 US/Pacific with current details.\nDiagnosis: Mandiant Security Validation customers are unable to run jobs\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-25T13:44:59+00:00","modified":"2024-09-25T15:15:46+00:00","when":"2024-09-25T13:44:59+00:00","text":"Summary: Mandiant Security Validation customers are unable to run jobs\nDescription: We are experiencing an issue with Mandiant Security Validation beginning on Wednesday, 2024-09-25 01:27 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-09-25 07:15 US/Pacific with current details.\nDiagnosis: Mandiant Security Validation customers are unable to run jobs\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-10-01T04:17:47+00:00","modified":"2024-10-01T07:51:10+00:00","when":"2024-10-01T04:17:47+00:00","text":"# Incident Report\n## Summary\nOn 25 September 2024 from 01:27 to 12:28 US/Pacific, Mandiant Security Validation SaaS customers were unable to run or create Actions, Sequences and Evaluations for a duration of 11 hours and 1 minute.\nCustomers were able to log in and view the UI and observe 
results of previously run jobs, but were unable to create and run content (Actions/Evaluations/Sequences). From 11:23 to 12:28 US/Pacific (1 hour and 5 minutes), customers were unable to log in and were redirected to a maintenance page containing a notice of the outage while engineers fixed the issue.\nTo our Mandiant Security Validation SaaS customers whose business was impacted during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThis issue occurred due to an implementation issue in a backend database that limited a sequence column value. The value was incremented beyond that limit, causing the database and system to encounter an error that prevented users from running Actions, Sequences and Evaluations.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage via a support case on 25 September 2024 at 01:41 US/Pacific and immediately started an investigation. Once the nature and scope of the issue became clear, Google engineers adjusted the column value limit that was causing the issue. This required taking Mandiant Security Validation SaaS briefly offline to perform the necessary changes needed to resolve the issue and prevent it from recurring in the future. During this time users were redirected to a maintenance page informing them of the outage. Once the system was brought back online, engineers verified the underlying issue was resolved and that no further issues were encountered.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following action:\n* A thorough review of the system settings to ensure no further limitations exist that would cause this or similar issues to occur.\n## Detailed Description of Impact\n* From 1:27 to 11:22 US/Pacific, customers were able to log in and view the UI, but were unable to create and run content (Actions/Evaluations/Sequences).\n* From 11:23 to 12:28 US/Pacific, customers were redirected to a maintenance page containing a notice of the outage.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"we27wL2FBSvodeP7GiRS","service_name":"Mandiant Security Validation","affected_products":[{"title":"Mandiant Security Validation","id":"we27wL2FBSvodeP7GiRS"}],"uri":"incidents/psbdfLBYU9GR3bNNq7HT","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"xFUwtQAVXD1yJ65HqK2Q","number":"1343748028920975467","begin":"2024-09-25T07:00:00+00:00","created":"2024-09-27T18:05:04+00:00","end":"2024-09-27T21:00:37+00:00","modified":"2024-09-27T21:00:48+00:00","external_desc":"Cloud Trace customers experienced errors in APIs to write trace spans","updates":[{"created":"2024-09-27T21:00:37+00:00","modified":"2024-09-27T21:00:49+00:00","when":"2024-09-27T21:00:37+00:00","text":"The issue with Cloud Trace has been resolved for all affected users as of Friday, 2024-09-27 13:41 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-27T18:05:19+00:00","modified":"2024-09-27T21:00:48+00:00","when":"2024-09-27T18:05:19+00:00","text":"Summary: Cloud Trace customers experiencing errors in APIs to write trace spans\nDescription: We are experiencing an issue with Cloud Trace beginning on Wednesday, 2024-09-25.\nMitigation work is currently underway by our 
engineering team.\nThe mitigation is expected to complete by Friday, 2024-09-27 19:00 US/Pacific.\nWe will provide more information by Friday, 2024-09-27 19:30 US/Pacific.\nDiagnosis: Customers who have enabled VPC Service Controls may experience:\n- elevated error rates in API calls that write trace spans\n- fewer spans shown in the Cloud Trace UI\nWorkaround: None","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-27T18:04:56+00:00","modified":"2024-09-27T18:08:00+00:00","when":"2024-09-27T18:04:56+00:00","text":"Summary: Cloud Trace customers experiencing errors in APIs to write trace spans\nDescription: We are experiencing an issue with Cloud Trace beginning on Wednesday, 2024-09-25.\nMitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Friday, 2024-09-27 19:00 US/Pacific.\nWe will provide more information by Friday, 2024-09-27 19:30 US/Pacific.\nDiagnosis: Customers who have enabled VPC Service Controls may experience:\n- elevated error rates in API calls that write trace spans\n- fewer spans shown in the Cloud Trace UI\nWorkaround: None","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-09-27T21:00:37+00:00","modified":"2024-09-27T21:00:49+00:00","when":"2024-09-27T21:00:37+00:00","text":"The issue with Cloud Trace has been resolved for all affected users as of Friday, 2024-09-27 13:41 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Trace","id":"TJB9GjVKN1MB4DTQ2Uhz"},{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"}],"uri":"incidents/xFUwtQAVXD1yJ65HqK2Q","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"nwLcXH4r3BUvTBxGThFK","number":"13738652169588067016","begin":"2024-09-19T03:00:00+00:00","created":"2024-09-23T17:31:15+00:00","end":"2024-09-24T00:32:00+00:00","modified":"2024-09-24T16:17:53+00:00","external_desc":"Chronicle Security transfer delays for some AWS SQS S3 feeds","updates":[{"created":"2024-09-24T00:32:17+00:00","modified":"2024-09-24T16:17:53+00:00","when":"2024-09-24T00:32:17+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Monday, 2024-09-23 17:29 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-23T22:51:47+00:00","modified":"2024-09-24T00:32:23+00:00","when":"2024-09-23T22:51:47+00:00","text":"Summary: Chronicle Security transfer delays for some AWS SQS S3 feeds\nDescription: The issue causing delays in data ingestion from AWS SQS feeds has been mitigated. Chronicle is presently working through the accumulated processing backlog. 
Customers may still encounter delays in viewing events, detections, and other information until the system has fully recovered.\nWe are closely monitoring the backlog clearance and we will provide more information by Monday, 2024-09-23 18:30 US/Pacific.\nDiagnosis: Customers may experience delays in indexing, searching and rules detection for AWS SQS S3 feeds.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-09-23T21:33:16+00:00","modified":"2024-09-23T22:51:47+00:00","when":"2024-09-23T21:33:16+00:00","text":"Summary: Chronicle Security transfer delays for some AWS SQS S3 feeds\nDescription: The issue causing delays in data ingestion from AWS SQS feeds has been mitigated. Chronicle is presently working through the accumulated processing backlog. Customers may still encounter delays in viewing events, detections, and other information until the system has fully recovered.\nWe are closely monitoring the backlog clearance and we will provide more information by Monday, 2024-09-23 16:00 US/Pacific.\nDiagnosis: Customers may experience delays in indexing, searching and rules detection for AWS SQS S3 feeds.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-09-23T19:18:53+00:00","modified":"2024-09-23T21:33:16+00:00","when":"2024-09-23T19:18:53+00:00","text":"Summary: Chronicle Security transfer delays for some AWS SQS S3 feeds\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2024-09-23 14:30 US/Pacific.\nDiagnosis: Customers may experience delays in ingestion, indexing, searching and rules detection for AWS SQS S3 feeds.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore 
(asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-09-23T17:46:12+00:00","modified":"2024-09-23T19:18:53+00:00","when":"2024-09-23T17:46:12+00:00","text":"Summary: Chronicle Security transfer delays for some AWS SQS S3 feeds\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2024-09-23 12:30 US/Pacific.\nDiagnosis: Customers may experience delays in ingestion, indexing, searching and rules detection for AWS SQS S3 feeds.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-09-23T17:43:45+00:00","modified":"2024-09-23T17:46:12+00:00","when":"2024-09-23T17:43:45+00:00","text":"Summary: Chronicle Security transfer delays for some AWS SQS S3 feeds\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2024-09-23 11:19 US/Pacific.\nDiagnosis: Customers may experience delays in ingestion, indexing, searching and rules detection for AWS SQS S3 feeds.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-09-23T17:31:12+00:00","modified":"2024-09-23T17:43:49+00:00","when":"2024-09-23T17:31:12+00:00","text":"Summary: Chronicle 
Security transfer delays for some AWS SQS S3 feeds\nDescription: We are experiencing an issue with Chronicle Security AWS SQS S3 feeds.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-09-23 11:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers may experience delays in ingestion, indexing, searching and rules detection for AWS SQS S3 feeds.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-09-24T00:32:17+00:00","modified":"2024-09-24T16:17:53+00:00","when":"2024-09-24T00:32:17+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Monday, 2024-09-23 17:29 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/nwLcXH4r3BUvTBxGThFK","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Multi-region: us","id":"us"}]},{"id":"fLYHLzSGXGkLkAjc8MJG","number":"4913116093511525799","begin":"2024-09-18T19:40:23+00:00","created":"2024-09-18T20:11:52+00:00","end":"2024-09-20T21:51:42+00:00","modified":"2024-09-20T21:51:46+00:00","external_desc":"Mandiant Managed Defense reported issues with ingestion of some alerts from CrowdStrike Falcon integrations.","updates":[{"created":"2024-09-20T21:51:42+00:00","modified":"2024-09-20T21:51:47+00:00","when":"2024-09-20T21:51:42+00:00","text":"Our engineering team has completed their investigation and confirmed that there was no service degradation, and no supported alerts from CrowdStrike were missed during this period.\nAny discrepancy observed by our users between the Managed Defense portal and the third party security console would be due to recent changes in how detections are displayed within the CrowdStrike Falcon console.\nThe Managed Defense service continues to operate as intended with the CrowdStrike Falcon integration.\nWe appreciate your patience and understanding.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-19T16:24:44+00:00","modified":"2024-09-20T21:51:46+00:00","when":"2024-09-19T16:24:44+00:00","text":"Summary: Mandiant Managed Defense is experiencing issues with ingestion of some alerts from CrowdStrike integrations.\nDescription: Our engineering team has identified the underlying cause of the issue and is working on the steps required for mitigation.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by 
Wednesday, 2024-09-25 17:00 US/Pacific.\nDiagnosis: A subset of alerts from CrowdStrike integrations are not available for Managed Defense SOC analysis. Customers may not see all supported detection events from CrowdStrike in the Managed Defense Portal.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-18T22:55:23+00:00","modified":"2024-09-19T16:24:44+00:00","when":"2024-09-18T22:55:23+00:00","text":"Summary: Mandiant Managed Defense is experiencing issues with ingestion of some alerts from CrowdStrike integrations.\nDescription: Our engineering team has identified the underlying cause of the issue and is working on the steps required for mitigation.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-09-19 10:00 US/Pacific.\nDiagnosis: A subset of alerts from CrowdStrike integrations are not available for Managed Defense SOC analysis. Customers may not see all supported detection events from CrowdStrike in the Managed Defense Portal.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-18T21:40:36+00:00","modified":"2024-09-18T22:55:23+00:00","when":"2024-09-18T21:40:36+00:00","text":"Summary: Mandiant Managed Defense is experiencing issues with ingestion of some alerts from CrowdStrike integrations.\nDescription: Our engineering team has identified the underlying cause of the issue and is working on the steps required for mitigation.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-09-18 16:00 US/Pacific.\nDiagnosis: A subset of alerts from CrowdStrike integrations are not available for Managed Defense SOC analysis. Customers may not see all supported detection events from CrowdStrike in the Managed Defense Portal.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-18T20:38:26+00:00","modified":"2024-09-18T21:40:36+00:00","when":"2024-09-18T20:38:26+00:00","text":"Summary: Mandiant Managed Defense is experiencing issues with ingestion of some alerts from CrowdStrike integrations.\nDescription: We are experiencing an issue with Mandiant Managed Defense.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-09-18 14:45 US/Pacific with current details.\nDiagnosis: A subset of alerts from CrowdStrike integrations are not available for Managed Defense SOC analysis. Customers may not see all supported detection events from CrowdStrike in the Managed Defense Portal.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-18T20:11:45+00:00","modified":"2024-09-18T20:38:26+00:00","when":"2024-09-18T20:11:45+00:00","text":"Summary: Mandiant Managed Defense is experiencing issues with ingestion of some alerts from CrowdStrike integrations.\nDescription: We are experiencing an issue with Mandiant Managed Defense.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-09-18 13:45 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: A subset of alerts from CrowdStrike integrations are not available for Managed Defense SOC analysis. 
Customers may not see all supported detection events from CrowdStrike in the Managed Defense Portal.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-09-20T21:51:42+00:00","modified":"2024-09-20T21:51:47+00:00","when":"2024-09-20T21:51:42+00:00","text":"Our engineering team has completed their investigation and confirmed that there was no service degradation, and no supported alerts from CrowdStrike were missed during this period.\nAny discrepancy observed by our users between the Managed Defense portal and the third party security console would be due to recent changes in how detections are displayed within the CrowdStrike Falcon console.\nThe Managed Defense service continues to operate as intended with the CrowdStrike Falcon integration.\nWe appreciate your patience and understanding.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"9aKw9s8p43AYeBmo4Gvx","service_name":"Mandiant Managed Defense","affected_products":[{"title":"Mandiant Managed Defense","id":"9aKw9s8p43AYeBmo4Gvx"}],"uri":"incidents/fLYHLzSGXGkLkAjc8MJG","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"1yphfNLPHEnwJcWqwxbu","number":"811151781677777788","begin":"2024-09-18T19:34:00+00:00","created":"2024-09-18T21:57:07+00:00","end":"2024-09-18T22:30:00+00:00","modified":"2024-09-26T15:10:38+00:00","external_desc":"Increased latency and error rates observed on Google App Engine, Cloud Firestore, and Google Cloud Functions gen 1.","updates":[{"created":"2024-09-23T14:11:42+00:00","modified":"2024-09-26T15:10:38+00:00","when":"2024-09-23T14:11:42+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 18 September, 2024, Google App Engine, Cloud Firestore, and Google Cloud Run functions (1st gen) experienced increased latency and error rates for a duration of 2 hours and 56 minutes in multiple regions. In some regions, customers experienced a complete service outage for a period between 5 minutes and 67 minutes. The issue began on 18 September 2024 at 12:34 US/Pacific and was completely resolved on 18 September 2024 at 15:30 US/Pacific.\nTo our customers who were impacted during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThe root cause was newly implemented automation code that created a bad traffic routing policy. This policy incorrectly directed our traffic routing control plane to mark all clusters as being unavailable to serve traffic for App Engine, Google Cloud Run functions (1st gen)* and dependent services. Google engineers intervened before the policy was rolled out to all clusters, resulting in a partial outage of the service.\n## Remediation and Prevention\nGoogle engineers were alerted to the issue via internal production monitoring on 18 September 2024 at 13:01 US/Pacific shortly after customers began experiencing the impact. Engineering teams identified the automation that caused the impact and terminated it at 13:46. 
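A pre-apply guard that refuses an automation-generated routing policy draining every cluster would have blocked the bad policy described in the Root Cause above. A hypothetical sketch of such a safeguard follows; the function names and the 25% drain threshold are invented for illustration and are not from the report:

```python
# Hypothetical validation step for automation-generated routing policies:
# reject any policy that marks more than a set fraction of clusters as
# unavailable, forcing manual review instead of a silent global drain.
from typing import Dict


def validate_policy(policy: Dict[str, bool], max_drain_fraction: float = 0.25) -> None:
    """policy maps cluster name -> whether it may serve traffic."""
    if not policy:
        raise ValueError("empty policy")
    drained = sum(1 for available in policy.values() if not available)
    if drained / len(policy) > max_drain_fraction:
        raise ValueError(
            f"policy drains {drained}/{len(policy)} clusters; "
            "refusing to apply without manual approval"
        )


# The bad policy in this incident marked *all* clusters unavailable:
bad_policy = {f"cluster-{i}": False for i in range(20)}
try:
    validate_policy(bad_policy)
except ValueError as e:
    print("rollout blocked:", e)
```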
However, customer impact was only mitigated at 15:30, after traffic was manually directed back to the affected clusters.\nGoogle is committed to preventing a repeat of the issue in the future and is completing the following actions:\n- We have removed the automation that caused the outage as a short-term measure.\n- We are working to implement a more efficient and well-tested traffic routing maintenance process.\n- We will implement safeguards in the automation pipeline to prevent a recurrence of this issue.\n## Detailed Description of Impact\nOn Wednesday 18 September, 2024 from 12:34 US/Pacific to 15:30 US/Pacific, Google App Engine, Google Cloud Run Functions (1st gen)* and Cloud Firestore experienced elevated error rates and increased latency. Customers reported 5xx errors with the message “Request was aborted after waiting too long to attempt to service your request.” and high latency. Customers also experienced a high rate of cold starts during this time.\nIn 13 regions, customers experienced a complete service outage for a period between 5 minutes and 67 minutes.\n1. asia-east2\n2. asia-northeast2\n3. asia-northeast3\n4. asia-south1\n5. asia-southeast1\n6. asia-southeast2\n7. europe-west2\n8. europe-west3\n9. europe-west6\n10. northamerica-northeast1\n11. southamerica-east1\n12. us-central2\n13. us-west1\nIn 11 other regions, customers might have observed elevated error rates:\n1. asia-east1\n2. asia-northeast1\n3. australia-southeast1\n4. europe-central2\n5. europe-west1\n6. us-central1\n7. us-east1\n8. us-east4\n9. us-west2\n10. us-west3\n11. us-west4\n*Cloud Run and Cloud Run functions (gen2) were not affected.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-19T04:36:46+00:00","modified":"2024-09-23T14:11:42+00:00","when":"2024-09-19T04:36:46+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using **_https://cloud.google.com/support_**\n(All Times US/Pacific)\n**Incident Start** 18 September, 2024, 13:01\n**Incident End** 18 September, 2024, 15:30\n**Duration** 2 hours, 29 minutes\n**Affected Services and Features**\n* Firestore\n* App Engine\n* Google Cloud Functions Gen 1\n**Regions/Zones**\nGlobal\n**Description**\nGoogle App Engine, Google Cloud Functions Gen1, and Firestore experienced elevated error rates and increased latency for a period of 2 hours, 29 minutes. Based on our preliminary analysis, the root cause of the issue was identified as newly implemented automation code which created a bad traffic routing policy. This policy incorrectly directed our traffic routing control plane to mark all clusters as being unavailable to serve traffic for App Engine and dependent services. Google engineers intervened before the policy was rolled out to all clusters, resulting in a partial outage of the service.\nGoogle engineers have identified the automation that was responsible for this change and have terminated it until appropriate safeguards are put in place. The impact was mitigated by manually directing the traffic back to the affected clusters. 
There is no risk of a recurrence of this outage at the moment.\nGoogle will complete a full IR in the following days that will provide a full root cause.\n**Customer Impact**\n* Customers experienced elevated latency and error rates for Google App Engine, Google Cloud Functions Gen1 and Firestore services.\n* Customers in some regions experienced a complete service outage for Google App Engine, Google Cloud Functions Gen1 and Firestore services.\n----","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-18T22:50:26+00:00","modified":"2024-09-19T04:36:46+00:00","when":"2024-09-18T22:50:26+00:00","text":"The issue with Google App Engine, Google Cloud Functions, Cloud Firestore has been resolved for all affected users as of Wednesday, 2024-09-18 15:30 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-18T22:19:22+00:00","modified":"2024-09-18T22:50:30+00:00","when":"2024-09-18T22:19:22+00:00","text":"Summary: Increased latency and error rates observed on Google App Engine, Cloud Firestore, and Google Cloud Functions gen 1.\nDescription: Mitigation has been successfully applied by our engineering team. We are currently monitoring our environment to ensure stability.\nWe will provide more information by Wednesday, 2024-09-18 16:00 US/Pacific.\nDiagnosis: Affected users may encounter elevated latency or an elevated error rate for the impacted products.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Salt Lake City (us-west3)","id":"us-west3"}]},{"created":"2024-09-18T21:57:04+00:00","modified":"2024-09-18T22:19:30+00:00","when":"2024-09-18T21:57:04+00:00","text":"Summary: Increased latency and error rates observed on Google App Engine and Google Cloud Functions gen 1.\nDescription: Mitigation work is currently underway by our engineering team. 
Based on the investigation thus far, our engineers have identified that Cloud Run is not currently impacted.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-09-18 16:00 US/Pacific.\nDiagnosis: Affected users may encounter elevated latency or an elevated error rate for the impacted products.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Salt Lake City (us-west3)","id":"us-west3"}]}],"most_recent_update":{"created":"2024-09-23T14:11:42+00:00","modified":"2024-09-26T15:10:38+00:00","when":"2024-09-23T14:11:42+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 18 September, 2024, Google App Engine, Cloud Firestore, and Google Cloud Run functions (1st gen) experienced increased latency and error rates for a duration of 2 hours and 56 minutes in multiple regions. In some regions, customers experienced a complete service outage for a period between 5 minutes and 67 minutes. Issue began on 18 September 2024 at 12:34 US/Pacific and was completely resolved on 18 September 2024 at 15:30 US/Pacific.\nTo our customers who were impacted during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThe root cause was a newly implemented automation code which created a bad traffic routing policy. This policy incorrectly directed our traffic routing control plane to mark all clusters as being unavailable to serve traffic for App Engine, Google Cloud Run functions (1st gen)* and dependent services. Google engineers intervened before the policy was rolled out to all clusters, resulting in a partial outage of the service.\n## Remediation and Prevention\nGoogle engineers were alerted to the issue via internal production monitoring on 18 September 2024 at 13:01 US/Pacific shortly after customers began experiencing the impact. Engineering teams have identified the automation which caused the impact and terminated it at 13:46. 
However, customer impact was only mitigated at 15:30, after traffic was manually directed back to the affected clusters.\nGoogle is committed to preventing a repeat of the issue in the future and is completing the following actions:\n- We have removed the automation that caused the outage as a short-term measure.\n- We are working to implement a more efficient and well-tested traffic routing maintenance process.\n- We will implement safeguards in the automation pipeline to prevent a recurrence of this issue.\n## Detailed Description of Impact\nOn Wednesday 18 September, 2024 from 12:34 US/Pacific to 15:30 US/Pacific, Google App Engine, Google Cloud Run Functions (1st gen)* and Cloud Firestore experienced elevated error rates and increased latency. Customers reported 5xx errors with the message “Request was aborted after waiting too long to attempt to service your request.” and high latency. Customers also experienced a high rate of cold starts during this time.\nIn 13 regions, customers experienced a complete service outage for a period between 5 minutes and 67 minutes.\n1. asia-east2\n2. asia-northeast2\n3. asia-northeast3\n4. asia-south1\n5. asia-southeast1\n6. asia-southeast2\n7. europe-west2\n8. europe-west3\n9. europe-west6\n10. northamerica-northeast1\n11. southamerica-east1\n12. us-central2\n13. us-west1\nIn 11 other regions, customers might have observed elevated error rates:\n1. asia-east1\n2. asia-northeast1\n3. australia-southeast1\n4. europe-central2\n5. europe-west1\n6. us-central1\n7. us-east1\n8. us-east4\n9. us-west2\n10. us-west3\n11. us-west4\n*Cloud Run and Cloud Run functions (gen2) were not affected.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Firestore","id":"CETSkT92V21G6A1x28me"},{"title":"Google App Engine","id":"kchyUtnkMHJWaAva8aYc"},{"title":"Google Cloud Functions","id":"oW4vJ7VNqyxTWNzSHopX"}],"uri":"incidents/1yphfNLPHEnwJcWqwxbu","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Salt Lake City (us-west3)","id":"us-west3"}]},{"id":"QEXJUo5kYdf9YVXPiCCF","number":"3351835369322681033","begin":"2024-09-11T15:00:00+00:00","created":"2024-09-11T18:42:27+00:00","end":"2024-09-11T18:20:00+00:00","modified":"2024-09-11T21:07:11+00:00","external_desc":"Google Cloud Interconnect experienced elevated packet loss from Hyderabad, India edge location to regions asia-south1, asia-south2, and asia-southeast1","updates":[{"created":"2024-09-11T21:07:11+00:00","modified":"2024-09-11T21:07:11+00:00","when":"2024-09-11T21:07:11+00:00","text":"# Mini Incident Report\nWe apologize 
for the inconvenience this service disruption may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 11 September 2024, 08:00\n**Incident End:** 11 September 2024, 11:20\n**Duration:** 3 hours, 20 minutes\n**Affected Services and Features:**\nHybrid Connectivity - Cloud Interconnect\n**Regions/Zones:**\nasia-south1, asia-south2, and asia-southeast1\n**Description:**\nGoogle Cloud Interconnect experienced elevated packet loss from [Hyderabad, India edge location](https://cloud.google.com/vpc/docs/edge-locations) to cloud regions asia-south1, asia-south2, and asia-southeast1 for a duration of 3 hours, 20 minutes. From preliminary analysis, the root cause of the issue is a potential physical link issue between the [Hyderabad, India edge location](https://cloud.google.com/vpc/docs/edge-locations) and Google's production backbone network.\nThe issue was mitigated by redirecting the network traffic away from the problematic link. Our engineers will continue to work with corresponding vendors to fix the underlying physical link issue.\n**Customer Impact:**\nGoogle Cloud Interconnect customers connecting to cloud regions asia-south1, asia-south2, and asia-southeast1 from [Hyderabad, India edge location](https://cloud.google.com/vpc/docs/edge-locations) experienced elevated packet loss.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-11T18:50:02+00:00","modified":"2024-09-11T21:07:11+00:00","when":"2024-09-11T18:50:02+00:00","text":"The issue with Google Cloud Interconnect has been resolved for all affected users as of Wednesday, 2024-09-11 11:20 US/Pacific. We thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-11T18:42:27+00:00","modified":"2024-09-11T18:50:02+00:00","when":"2024-09-11T18:42:27+00:00","text":"***Description:***\nGoogle Cloud Interconnect experienced elevated packet loss from Hyderabad, India edge location to regions asia-south1, asia-south2, and asia-southeast1, beginning at Wednesday, 2024-09-11 08:00 US/Pacific and ending at 11:20 US/Pacific.\nOur engineers have taken appropriate measures, mitigating the issue at 11:20 US/Pacific, and are continuing to monitor closely.\nWe will provide an update by Wednesday, 2024-09-11 12:04 US/Pacific with current details. We apologize to all who are affected by the disruption.\n***Symptoms:***\nGoogle Cloud Interconnect experienced elevated packet loss from Hyderabad, India edge location to regions asia-south1, asia-south2, and asia-southeast1.\n***Workaround:***\nNone at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"}]}],"most_recent_update":{"created":"2024-09-11T21:07:11+00:00","modified":"2024-09-11T21:07:11+00:00","when":"2024-09-11T21:07:11+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption may have caused. We would like to provide some information about this incident below. 
Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 11 September 2024, 08:00\n**Incident End:** 11 September 2024, 11:20\n**Duration:** 3 hours, 20 minutes\n**Affected Services and Features:**\nHybrid Connectivity - Cloud Interconnect\n**Regions/Zones:**\nasia-south1, asia-south2, and asia-southeast1\n**Description:**\nGoogle Cloud Interconnect experienced elevated packet loss from [Hyderabad, India edge location](https://cloud.google.com/vpc/docs/edge-locations) to cloud regions asia-south1, asia-south2, and asia-southeast1 for a duration of 3 hours, 20 minutes. From preliminary analysis, the root cause of the issue is a potential physical link issue between the [Hyderabad, India edge location](https://cloud.google.com/vpc/docs/edge-locations) and Google's production backbone network.\nThe issue was mitigated by redirecting the network traffic away from the problematic link. Our engineers will continue to work with corresponding vendors to fix the underlying physical link issue.\n**Customer Impact:**\nGoogle Cloud Interconnect customers connecting to cloud regions asia-south1, asia-south2, and asia-southeast1 from [Hyderabad, India edge location](https://cloud.google.com/vpc/docs/edge-locations) experienced elevated packet loss.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"5x6CGnZvSHQZ26KtxpK1","service_name":"Hybrid Connectivity","affected_products":[{"title":"Hybrid Connectivity","id":"5x6CGnZvSHQZ26KtxpK1"}],"uri":"incidents/QEXJUo5kYdf9YVXPiCCF","currently_affected_locations":[],"previously_affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"}]},{"id":"geg8sdPSGGUW65jpyP3k","number":"2740802251472346110","begin":"2024-09-10T07:44:42+00:00","created":"2024-09-10T07:44:49+00:00","end":"2024-09-10T07:51:36+00:00","modified":"2024-09-10T07:51:39+00:00","external_desc":"Some VPC users experienced multiple issues in northamerica-northeast2-c zone","updates":[{"created":"2024-09-10T07:51:36+00:00","modified":"2024-09-10T07:51:41+00:00","when":"2024-09-10T07:51:36+00:00","text":"We experienced an issue with Virtual Private Cloud (VPC) beginning on Monday, 2024-09-09 05:49 US/Pacific.\nThe issue has been resolved for all affected users as of Monday, 2024-09-09 06:04 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-10T07:44:44+00:00","modified":"2024-09-10T07:51:39+00:00","when":"2024-09-10T07:44:44+00:00","text":"Summary: Some VPC users experienced multiple issues in northamerica-northeast2-c zone\nDescription: We are experiencing an issue with Virtual Private Cloud (VPC) beginning on Monday, 2024-09-09 05:49 US/Pacific.\nThis was the result of a brief power interruption to backbone networking equipment serving the zone.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-09-10 01:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: The impacted users would have observed:\n* New VMs failing to reach Google-hosted 
services\n* Newly created VMs were not able to reach VMs in a different region\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]}],"most_recent_update":{"created":"2024-09-10T07:51:36+00:00","modified":"2024-09-10T07:51:41+00:00","when":"2024-09-10T07:51:36+00:00","text":"We experienced an issue with Virtual Private Cloud (VPC) beginning on Monday, 2024-09-09 05:49 US/Pacific.\nThe issue has been resolved for all affected users as of Monday, 2024-09-09 06:04 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Virtual Private Cloud (VPC)","id":"BSGtCUnz6ZmyajsjgTKv"}],"uri":"incidents/geg8sdPSGGUW65jpyP3k","currently_affected_locations":[],"previously_affected_locations":[{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"id":"UbJv334dXk5XQNSgiY48","number":"6007350087386333105","begin":"2024-09-09T16:44:29+00:00","created":"2024-09-10T02:00:55+00:00","end":"2024-09-10T04:04:34+00:00","modified":"2024-09-10T04:04:36+00:00","external_desc":"Looker Studio users are experiencing elevated query latency when loading and accessing reports.","updates":[{"created":"2024-09-10T04:04:34+00:00","modified":"2024-09-10T04:04:37+00:00","when":"2024-09-10T04:04:34+00:00","text":"The issue with Looker Studio has been resolved for all affected users as of Monday, 2024-09-09 21:01 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-10T02:00:52+00:00","modified":"2024-09-10T04:04:36+00:00","when":"2024-09-10T02:00:52+00:00","text":"Summary: Looker Studio users are experiencing elevated query latency when loading and accessing reports.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2024-09-10 09:00 US/Pacific.\nDiagnosis: Looker Studio users are experiencing elevated query latency or errors after a wait of around 5 minutes\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-09-10T04:04:34+00:00","modified":"2024-09-10T04:04:37+00:00","when":"2024-09-10T04:04:34+00:00","text":"The issue with Looker Studio has been resolved for all affected users as of Monday, 2024-09-09 21:01 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"kEYNqRYFXXHxP9QeFJ1d","service_name":"Looker Studio","affected_products":[{"title":"Looker Studio","id":"kEYNqRYFXXHxP9QeFJ1d"}],"uri":"incidents/UbJv334dXk5XQNSgiY48","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"8n96oHYYmQe1eSu2Jjqq","number":"7159903109873898506","begin":"2024-09-09T16:44:00+00:00","created":"2024-09-09T17:14:43+00:00","end":"2024-09-10T02:00:00+00:00","modified":"2024-09-10T02:16:14+00:00","external_desc":"Looker Studio users are experiencing elevated query latency when loading and accessing 
reports.","updates":[{"created":"2024-09-10T02:15:16+00:00","modified":"2024-09-10T02:16:14+00:00","when":"2024-09-10T02:15:16+00:00","text":"This incident is being merged with another incident. All future updates will be provided there: https://status.cloud.google.com/incidents/UbJv334dXk5XQNSgiY48","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-09T20:47:19+00:00","modified":"2024-09-10T02:15:16+00:00","when":"2024-09-09T20:47:19+00:00","text":"**Summary:**\nLooker Studio users are experiencing elevated query performance when loading and accessing reports.\n**Description:**\nAfter further monitoring, our engineers have determined that the issue is still ongoing and are continuing to investigate the issue.\nWe will provide an update by Monday, 2024-09-09 19:00 US/Pacific with current details.\n**Diagnosis:**\nLooker Studio users are experiencing elevated query performance or errors after wait of around 5 minutes\n**Workaround:**\nNone at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-09T19:34:57+00:00","modified":"2024-09-09T20:47:19+00:00","when":"2024-09-09T19:34:57+00:00","text":"Summary:\nLooker Studio users are experiencing elevated query performance when loading and accessing reports.\nDescription:\nAfter further monitoring, our engineers have determined that the issue is still ongoing and are continuing to investigate the issue.\nWe will provide an update by Monday, 2024-09-09 14:00 US/Pacific with current details.\nDiagnosis:\nLooker Studio users are experiencing elevated query performance or errors after wait of around 5 minutes\nWorkaround:\nNone at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-09T18:38:08+00:00","modified":"2024-09-09T19:34:57+00:00","when":"2024-09-09T18:38:08+00:00","text":"Summary:\nLooker Studio users are experiencing elevated query performance when loading and accessing reports.\nDescription:\nOur engineers have taken some mitigation actions and we see that the latency issue is mitigated as of now.\nOur engineers are continuing to monitor closely while working on full resolution.\nWe do not have an ETA for full resolution at this point.\nWe will provide an update by Monday, 2024-09-09 14:00 US/Pacific with current details.\nDiagnosis:\nLooker Studio users are experiencing elevated query performance or errors after wait of around 5 minutes\nWorkaround:\nNone at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-09T18:05:24+00:00","modified":"2024-09-09T18:38:08+00:00","when":"2024-09-09T18:05:24+00:00","text":"Summary:\nLooker Studio users are experiencing elevated query performance when loading and accessing reports.\nDescription:\nWe are experiencing an issue with Looker Studio. Our engineering team continues to investigate the issue. We will provide an update by Monday, 2024-09-09 12:30 US/Pacific with current details. 
We apologize to all who are affected by the disruption.\nDiagnosis:\nLooker Studio users are experiencing elevated query latency or errors after a wait of around 5 minutes\nWorkaround:\nNone at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-09-09T17:14:43+00:00","modified":"2024-09-09T18:05:24+00:00","when":"2024-09-09T17:14:43+00:00","text":"Summary:\nLooker Studio users are experiencing elevated query latency when loading and accessing reports.\nDescription:\nWe are experiencing an issue with Looker Studio. Our engineering team continues to investigate the issue. We will provide an update by Monday, 2024-09-09 11:30 US/Pacific with current details. We apologize to all who are affected by the disruption.\nDiagnosis:\nLooker Studio users are experiencing elevated query latency or errors after a wait of around 5 minutes\nWorkaround:\nNone at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-09-10T02:15:16+00:00","modified":"2024-09-10T02:16:14+00:00","when":"2024-09-10T02:15:16+00:00","text":"This incident is being merged with another incident. All future updates will be provided there: https://status.cloud.google.com/incidents/UbJv334dXk5XQNSgiY48","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"kEYNqRYFXXHxP9QeFJ1d","service_name":"Looker Studio","affected_products":[{"title":"Looker Studio","id":"kEYNqRYFXXHxP9QeFJ1d"}],"uri":"incidents/8n96oHYYmQe1eSu2Jjqq","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"XwnegjADrYy2GHJphG2V","number":"17505036385643096421","begin":"2024-09-07T11:20:00+00:00","created":"2024-09-07T13:11:15+00:00","end":"2024-09-07T13:10:00+00:00","modified":"2024-09-13T13:44:30+00:00","external_desc":"Google Compute Engine (GCE) VM instance creation/deletion and all related operations for other products were failing in the asia-northeast1 region.","updates":[{"created":"2024-09-13T13:44:30+00:00","modified":"2024-09-13T13:44:30+00:00","when":"2024-09-13T13:44:30+00:00","text":"# Incident Report\n## Summary\nOn 7 September 2024 starting at 04:20 US/Pacific, several Google Cloud products experienced a service degradation of varying impact or were unavailable in the asia-northeast1 region for a period of 1 hour 50 minutes. The list of impacted products and services is detailed below.\nTo our Google Cloud customers whose businesses were impacted during this outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer you.\n## Root Cause\nMost Google Cloud products and services use a regional metadata store to support their internal operations. The metadata store supports critical functions such as servicing customer requests, load balancing, admin operations, and retrieving/storing metadata including server location information.\nGoogle Compute Engine (GCE) internal DNS depends on the regional metadata store for storing instance metadata. A routine update of the metadata store to a new software version had a change that resulted in poor handling of a rare resource contention corner case, which caused the writes from GCE internal DNS to a zonal replica of the metadata store to fail.\nDuring such zonal issues, we have automated failover mechanisms to use the healthy replicas from other zones. 
But during this disruption, a secondary issue prevented automated failover from working, rendering the entire metadata storage unavailable despite two other healthy zones being available.\nThis resulted in disruptions to all GCE instance operations in the asia-northeast1 region. Actions such as creating, deleting, starting, and stopping instances or consuming reservations were affected. This, in turn, affected operations of other services dependent on GCE instances, including GKE, Cloud Build, Cloud Dataflow, Cloud Deploy and Cloud SQL.\n## Remediation and Prevention\nGoogle engineers were alerted to the issue by internal monitoring on 7 September 2024 at 04:26 US/Pacific and immediately started an investigation. The issue was fully mitigated at 06:10 US/Pacific after failover of the metadata storage operations to the healthy zones was manually initiated by our engineering team.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n* Improve the automated failover logic used in the metadata storage infrastructure.\n* Improve the testing of how the metadata storage service handles resource contention corner cases.\n* Improve internal processes and documentation to enable faster response and mitigation times for this type of issue.\n## Detailed Description of Impact\n- **Google Compute Engine**\nGoogle Compute Engine requests and operations like creating, deleting, starting or modifying VMs were not executed.\n- **Cloud Build**\nCloud Build deployments via the Cloud Build API or via Cloud Build Integrations were not being executed.\n- **Cloud Dataflow**\nGoogle Cloud Dataflow jobs failed at startup, and existing job autoscale functionality was disrupted.\n- **Cloud Deploy**\nGoogle Cloud Deploy actions (render, deploy, verify, pre- and post-deploy hooks) were unresponsive and unable to complete their tasks.\n- **Google Cloud SQL**\nGoogle Cloud SQL, dependent on Google Compute Engine, experienced operational issues during this incident. No database creation or deletion operations were possible during the incident.\n- **Google Kubernetes Engine**\nGoogle Kubernetes Engine experienced issues with operations that depend on GCE. Specifically, cluster and node pool creation, node autoscaling, cluster and node upgrades, and automatic repairs were unsuccessful in the affected region and zones.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-09T13:35:09+00:00","modified":"2024-09-13T13:44:30+00:00","when":"2024-09-09T13:35:09+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. 
We would like to provide some information about this incident below.\nPlease note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues.\nIf you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 07 September, 2024 04:20\n**Incident End:** 07 September, 2024 06:10\n**Duration:** 1 hour, 50 minutes\n**Affected Services and Features:**\n- Cloud Build\n- Google Cloud Dataflow\n- Google Cloud Deploy\n- Google Cloud SQL\n- Google Compute Engine\n- Google Kubernetes Engine\n**Regions/Zones:** asia-northeast1\n**Description:**\nSeveral Google Cloud products experienced a service degradation of varying impact, or were unavailable, for a duration of 1 hour, 50 minutes in the asia-northeast1 region.\nGoogle engineers have identified the cause to be a change rollout to an internal component. This change was subsequently rolled back, which mitigated all known impacts.\nGoogle will complete a full Incident Report (IR) in the following days that will provide a full root cause.\n**Customer Impact:**\nThrough the incident duration, the impacted Google Cloud services experienced different kinds of service degradations as detailed below.\n- Google Compute Engine requests and operations like creating, deleting, starting or modifying VMs were not executed.\n- Cloud Build deployments via the Cloud Build API or via Cloud Build Integrations were not being executed.\n- Google Cloud Dataflow jobs failed at startup, and existing job autoscale functionality was disrupted.\n- Google Cloud Deploy actions (render, deploy, verify, pre- and post-deploy hooks) were unresponsive and unable to complete their tasks.\n- Google Cloud SQL, dependent on Google Compute Engine, experienced operational issues during this incident. No databases could be created or deleted during the incident.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-07T14:15:36+00:00","modified":"2024-09-09T13:35:09+00:00","when":"2024-09-07T14:15:36+00:00","text":"The issue with Cloud Build, Google Cloud Dataflow, Google Cloud Deploy, Google Cloud SQL, Google Compute Engine, Google Kubernetes Engine has been resolved for all affected customers as of Saturday, 2024-09-07 06:10 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-07T14:13:15+00:00","modified":"2024-09-07T14:24:44+00:00","when":"2024-09-07T14:13:15+00:00","text":"Summary: Google Compute Engine (GCE) VM instance creation/deletion and all related operations for other products were failing in the asia-northeast1 region.\nDescription: The engineering team has rolled out the fix, which mitigated the impact on Saturday, 2024-09-07 06:10 US/Pacific.\nWe will provide more information by Saturday, 2024-09-07 07:30 US/Pacific.\nDiagnosis: GCE Customers impacted by this issue were experiencing \"Internal error. Please try again or contact Google Support. 
(Code: '-1343002181035865699')\" while attempting instance creation.\nCloud Build customers were observing their builds not being executed.\nGoogle Cloud Dataflow customers were unable to create jobs or scale existing jobs.\nWorkaround: Customers may retry their operation if they experience failures.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"}]},{"created":"2024-09-07T13:11:11+00:00","modified":"2024-09-07T14:25:22+00:00","when":"2024-09-07T13:11:11+00:00","text":"Summary: Google Compute Engine (GCE) VM instance creation/deletion operations, Google Cloud Dataflow and Cloud Build are failing in the asia-northeast1 region.\nDescription: Mitigation work is currently underway by our engineering teams.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Saturday, 2024-09-07 07:30 US/Pacific.\nDiagnosis: GCE Customers impacted by this issue may experience \"Internal error. Please try again or contact Google Support. (Code: '-1343002181035865699')\" while attempting instance creation.\nCloud Build customers may observe their builds not being executed.\nGoogle Cloud Dataflow customers are unable to create jobs or scale existing jobs.\nWorkaround: Customers may attempt their GCE, Cloud Build, and Cloud Dataflow operations in another region.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"}]}],"most_recent_update":{"created":"2024-09-13T13:44:30+00:00","modified":"2024-09-13T13:44:30+00:00","when":"2024-09-13T13:44:30+00:00","text":"# Incident Report\n## Summary\nOn 7 September 2024 starting at 04:20 US/Pacific, several Google Cloud products experienced a service degradation of varying impact or were unavailable in the asia-northeast1 region for a period of 1 hour 50 minutes. The list of impacted products and services is detailed below.\nTo our Google Cloud customers whose businesses were impacted during this outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer you.\n## Root Cause\nMost Google Cloud products and services use a regional metadata store to support their internal operations. The metadata store supports critical functions such as servicing customer requests, load balancing, admin operations, and retrieving/storing metadata including server location information.\nGoogle Compute Engine (GCE) internal DNS depends on the regional metadata store for storing instance metadata. A routine update of the metadata store to a new software version had a change that resulted in poor handling of a rare resource contention corner case, which caused the writes from GCE internal DNS to a zonal replica of the metadata store to fail.\nDuring such zonal issues, we have automated failover mechanisms to use the healthy replicas from other zones. But during this disruption, a secondary issue prevented automated failover from working, rendering the entire metadata storage unavailable despite two other healthy zones being available.\nThis resulted in disruptions to all GCE instance operations in the asia-northeast1 region. Actions such as creating, deleting, starting, and stopping instances or consuming reservations were affected. 
This, in turn, affected operations of other services dependent on GCE instances, including GKE, Cloud Build, Cloud Dataflow, Cloud Deploy and Cloud SQL.\n## Remediation and Prevention\nGoogle engineers were alerted to the issue by internal monitoring on 7 September 2024 at 04:26 US/Pacific and immediately started an investigation. The issue was fully mitigated at 06:10 US/Pacific after failover of the metadata storage operations to the healthy zones was manually initiated by our engineering team.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n* Improve the automated failover logic used in the metadata storage infrastructure.\n* Improve the testing of how the metadata storage service handles resource contention corner cases.\n* Improve internal processes and documentation to enable faster response and mitigation times for this type of issue.\n## Detailed Description of Impact\n- **Google Compute Engine**\nGoogle Compute Engine requests and operations like creating, deleting, starting or modifying VMs were not executed.\n- **Cloud Build**\nCloud Build deployments via the Cloud Build API or via Cloud Build Integrations were not being executed.\n- **Cloud Dataflow**\nGoogle Cloud Dataflow jobs failed at startup, and existing job autoscale functionality was disrupted.\n- **Cloud Deploy**\nGoogle Cloud Deploy actions (render, deploy, verify, pre- and post-deploy hooks) were unresponsive and unable to complete their tasks.\n- **Google Cloud SQL**\nGoogle Cloud SQL, dependent on Google Compute Engine, experienced operational issues during this incident. No database creation or deletion operations were possible during the incident.\n- **Google Kubernetes Engine**\nGoogle Kubernetes Engine experienced issues with operations that depend on GCE. 
Specifically, cluster and node pool creation, node autoscaling, cluster and node upgrades, and automatic repairs were unsuccessful in the affected region and zones.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Build","id":"fw8GzBdZdqy4THau7e1y"},{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"},{"title":"Google Cloud Dataflow","id":"T9bFoXPqG8w8g1YbWTKY"},{"title":"Google Cloud Deploy","id":"6z5SnvJrJMJQSdJmUQjH"},{"title":"Google Cloud SQL","id":"hV87iK5DcEXKgWU2kDri"},{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"}],"uri":"incidents/XwnegjADrYy2GHJphG2V","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"}]},{"id":"r8xkgSV1f88yYMQMW7rQ","number":"17679450295372493510","begin":"2024-09-04T18:55:00+00:00","created":"2024-09-04T21:45:05+00:00","end":"2024-09-04T19:20:00+00:00","modified":"2024-09-07T00:19:55+00:00","external_desc":"GCVE and Hybrid Connectivity experienced intermittent connectivity issues in the us-central1 region.","updates":[{"created":"2024-09-04T22:38:39+00:00","modified":"2024-09-07T00:19:55+00:00","when":"2024-09-04T22:38:39+00:00","text":"The issue with VMWare engine (GCVE) and Hybrid Connectivity is confirmed to be resolved for all affected customers as of Wednesday, 2024-09-04 12:20 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-04T21:44:59+00:00","modified":"2024-09-07T00:18:38+00:00","when":"2024-09-04T21:44:59+00:00","text":"Summary: GCVE and Hybrid Connectivity experienced intermittent connectivity issues in the us-central1 region.\nDescription: We experienced an intermittent connectivity issue with the VMWare Engine (GCVE), Cloud Interconnect and Cloud VPN in us-central1 between Wednesday, 2024-09-04 11:55 US/Pacific and Wednesday, 2024-09-04 12:20 US/Pacific.\nThe issue is currently mitigated and the engineering team continues to monitor for the next hour to ensure service recovery for all impacted customers.\nWe will provide an update by Wednesday, 2024-09-04 15:45 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers impacted by this issue may have experienced up to 25 minutes of intermittent connectivity between endpoints that rely on hybrid connectivity, such as Cloud Interconnect and Cloud VPN, and between GCVE and on-prem/internet locations.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-09-04T22:38:39+00:00","modified":"2024-09-07T00:19:55+00:00","when":"2024-09-04T22:38:39+00:00","text":"The issue with VMWare engine (GCVE) and Hybrid Connectivity is confirmed to be resolved for all affected customers as of Wednesday, 2024-09-04 12:20 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Hybrid Connectivity","id":"5x6CGnZvSHQZ26KtxpK1"},{"title":"VMWare 
engine","id":"9H6gWUHvb2ZubeoxzQ1Y"}],"uri":"incidents/r8xkgSV1f88yYMQMW7rQ","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"twcreT5UAMxRTKm7Y4Q9","number":"14369965491598016289","begin":"2024-09-04T17:54:59+00:00","created":"2024-09-04T19:09:56+00:00","end":"2024-09-04T20:24:55+00:00","modified":"2024-09-05T14:16:44+00:00","external_desc":"Global: Multiple Cloud Products Experiencing Deployment Failures","updates":[{"created":"2024-09-05T14:16:44+00:00","modified":"2024-09-05T14:16:44+00:00","when":"2024-09-05T14:16:44+00:00","text":"We apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support (All Times US/Pacific)\n**Incident Start:** 4 September, 2024 08:16\n**Incident End:** 4 September, 2024 12:25\n**Duration:** 4 hours, 9 minutes\n**Affected Services and Features:**\nCloud Run, Google App Engine, Google Cloud Composer, Google Cloud Console, Google Cloud Functions, Google Cloud Marketplace, Cloud Deployment Manager\n**Regions/Zones:** Global\n**Description:**\nGlobally, multiple cloud products experienced deployment failures due to the inaccessibility of Cloud Deployment Manager (CDM) services. This resulted in outages affecting all deployments and the CDM user interface within the cloud console for a duration of 4 hours and 9 minutes.\nFrom preliminary analysis, the root cause of the issue was a regressive change in our rollout strategy. This change inadvertently caused all incoming requests to be discarded, resulting in widespread RPC errors for both internal systems and external customers.\n**Customer Impact:**\nCloud Composer:\nAll Composer 1 operations (creations/updates/deletions) were failing globally.\nRunning Composer 1 environments were not affected.\nAll other versions (Composer 2, Composer 3) were not affected.\nCloud Marketplace:\nCustomers experienced errors when deploying Deployment Manager based solutions on Google Cloud Marketplace.\nCloud Deployment Manager services were unreachable, caused outages for all deployments and the Deployment Manager UI in the Cloud Console.\nApp Engine Flexible:\nApp Engine Flexible experienced failures in delete flows in us-central1.\n70-100% of all delete requests failed for ~90 min. 
spread across when the DM was unavailable.\nServing plane was not affected.\nServerless VPC Access:\nApp Engine Standard, Cloud Functions, and Cloud Run were not able to create, update or delete VPC Access Connectors globally.\nExisting connectors were not affected.\nCloud Deployment Manager:\nAll Cloud Deployment Manager operations (create/update/deletes) were failing globally\nCustomers were unable to view, update or delete the deployments via the Google Cloud Console\nAll beta functionalities of Cloud Deployment Manager (Type Providers, Actions) were failing globally.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-04T20:24:55+00:00","modified":"2024-09-05T14:16:44+00:00","when":"2024-09-04T20:24:55+00:00","text":"The issue with Cloud Run, Google App Engine, Google Cloud Composer, Google Cloud Functions, Google Cloud Marketplace, Firebase Extensions has been resolved for all affected users as of Wednesday, 2024-09-04 12:25 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-04T19:25:06+00:00","modified":"2024-09-04T20:24:57+00:00","when":"2024-09-04T19:25:06+00:00","text":"Summary: Global: Multiple Cloud Products Experiencing Deployment Failures\nDescription: We are experiencing an issue with Google Cloud Composer, Google Cloud Marketplace, App Engine Flexible, Cloud Run, App Engine Flexible, and Cloud Functions beginning on Wednesday, 2024-09-04 09:21 US/Pacific.\nMitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide an update by Wednesday, 2024-09-04 13:00 US/Pacific with current details.\nDiagnosis: Cloud Composer:\n- All Composer 1 operations (creations/updates/deletions) are failing globally.\n- Running Composer 1 environments are not affected.\n- All other versions (Composer 2, Composer 3) are not affected.\nCloud Marketplace\n- Customers experience errors when deploying Deployment Manager based solutions on Google Cloud Marketplace.\n- Cloud Deployment Manager services are unreachable, causing outages for all deployments and the Deployment Manager UI in the Cloud Console.\nApp Engine Flexible\n- App Engine Flexible new version deployments are experiencing failures in us-central1.\nServerless VPC Access\n- App Engine Standard, Cloud Functions, and Cloud Run will not be able to create VPC Access connectors.\n- Existing connectors are not affected.\nWorkaround:\n- Affected Google Cloud Composer customers should avoid running any operations on Cloud Composer 1 environments.\n- There are no workarounds for Deployment Manager failures.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland 
(europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-09-04T19:09:53+00:00","modified":"2024-09-04T19:25:09+00:00","when":"2024-09-04T19:09:53+00:00","text":"Summary: Global: Multiple Cloud Products Experiencing Deployment Failures\nDescription: We are experiencing an issue with Google Cloud Composer, Google Cloud Marketplace, and App Engine Flexible beginning on Wednesday, 2024-09-04 09:21 US/Pacific.\nOur engineering team continues to work on identifying a mitigation strategy.\nWe will provide an update by Wednesday, 2024-09-04 12:40 US/Pacific with current details.\nDiagnosis: Cloud Composer:\n- All Composer 1 operations (creations/updates/deletions) are failing globally.\n- Running Composer 1 environments are not affected.\n- All other versions (Composer 2, Composer 3) are not affected.\nCloud Marketplace\n- Customers experience errors when deploying Deployment Manager based solutions on Google Cloud Marketplace.\n- Cloud Deployment Manager services are unreachable, causing outages for all deployments and the Deployment Manager UI in the Cloud Console.\nApp Engine Flexible\n- App Engine Flexible new version deployments are experiencing failures in us-central1.\nWorkaround: - Affected Google Cloud Composer customers should avoid running any operations on Cloud Composer 1 environments.\n- There are no workarounds for Deployment Manager failures.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne 
(australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-09-05T14:16:44+00:00","modified":"2024-09-05T14:16:44+00:00","when":"2024-09-05T14:16:44+00:00","text":"We apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support (All Times US/Pacific)\n**Incident Start:** 4 September, 2024 08:16\n**Incident End:** 4 September, 2024 12:25\n**Duration:** 4 hours, 9 minutes\n**Affected Services and Features:**\nCloud Run, Google App Engine, Google Cloud Composer, Google Cloud Console, Google Cloud Functions, Google Cloud Marketplace, Cloud Deployment Manager\n**Regions/Zones:** Global\n**Description:**\nGlobally, multiple cloud products experienced deployment failures due to the inaccessibility of Cloud Deployment Manager (CDM) services. This resulted in outages affecting all deployments and the CDM user interface within the cloud console for a duration of 4 hours and 9 minutes.\nFrom preliminary analysis, the root cause of the issue was a regressive change in our rollout strategy. 
This change inadvertently caused all incoming requests to be discarded, resulting in widespread RPC errors for both internal systems and external customers.\n**Customer Impact:**\nCloud Composer:\nAll Composer 1 operations (creations/updates/deletions) were failing globally.\nRunning Composer 1 environments were not affected.\nAll other versions (Composer 2, Composer 3) were not affected.\nCloud Marketplace:\nCustomers experienced errors when deploying Deployment Manager based solutions on Google Cloud Marketplace.\nCloud Deployment Manager services were unreachable, causing outages for all deployments and the Deployment Manager UI in the Cloud Console.\nApp Engine Flexible:\nApp Engine Flexible experienced failures in delete flows in us-central1.\n70-100% of all delete requests failed for ~90 minutes, spread across the window when Deployment Manager was unavailable.\nThe serving plane was not affected.\nServerless VPC Access:\nApp Engine Standard, Cloud Functions, and Cloud Run were not able to create, update or delete VPC Access Connectors globally.\nExisting connectors were not affected.\nCloud Deployment Manager:\nAll Cloud Deployment Manager operations (create/update/delete) were failing globally.\nCustomers were unable to view, update or delete the deployments via the Google Cloud Console.\nAll beta functionalities of Cloud Deployment Manager (Type Providers, Actions) were failing globally.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Run","id":"9D7d2iNBQWN24zc1VamE"},{"title":"Google App Engine","id":"kchyUtnkMHJWaAva8aYc"},{"title":"Google Cloud Composer","id":"YxkG5FfcC42cQmvBCk4j"},{"title":"Google Cloud Console","id":"Wdsr1n5vyDvCt78qEifm"},{"title":"Google Cloud Functions","id":"oW4vJ7VNqyxTWNzSHopX"},{"title":"Google Cloud Marketplace","id":"M34rUHuRgyHXMfbUCSq9"}],"uri":"incidents/twcreT5UAMxRTKm7Y4Q9","currently_affected_locations":[],"previously_affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam 
(me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"P2BoQyyvz9BBoBy468Up","number":"12864760057915032515","begin":"2024-09-04T17:40:51+00:00","created":"2024-09-04T17:52:50+00:00","end":"2024-09-04T18:17:10+00:00","modified":"2024-09-04T18:17:13+00:00","external_desc":"This issue is believed to be affecting a very small number of projects and our Engineering Team is working on it. If you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved. No further updates will be provided here. We thank you for your patience while we are working on resolving the issue.","updates":[{"created":"2024-09-04T18:17:10+00:00","modified":"2024-09-04T18:17:13+00:00","when":"2024-09-04T18:17:10+00:00","text":"This issue is believed to be affecting a very small number of projects and our Engineering Team is working on it. If you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved. No further updates will be provided here. We thank you for your patience while we are working on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-04T17:52:46+00:00","modified":"2024-09-04T18:17:10+00:00","when":"2024-09-04T17:52:46+00:00","text":"Summary: Deployments to Google Cloud Marketplace failing for Deployment Manager based solutions\nDescription: We are experiencing an issue with Google Cloud Marketplace.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-09-04 12:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers may experience failures when deploying Deployment Manager based solutions on Google Cloud Marketplace\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-09-04T18:17:10+00:00","modified":"2024-09-04T18:17:13+00:00","when":"2024-09-04T18:17:10+00:00","text":"This issue is believed to be affecting a very small number of projects and our Engineering Team is working on it. If you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved. No further updates will be provided here. 
We thank you for your patience while we are working on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Cloud Console","id":"Wdsr1n5vyDvCt78qEifm"},{"title":"Google Cloud Marketplace","id":"M34rUHuRgyHXMfbUCSq9"}],"uri":"incidents/P2BoQyyvz9BBoBy468Up","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"7rczP2PdT9JYwUTM7M3N","number":"18280384668079865207","begin":"2024-09-04T13:29:53+00:00","created":"2024-09-04T13:50:22+00:00","end":"2024-09-04T18:39:31+00:00","modified":"2024-09-04T18:39:33+00:00","external_desc":"Virtual Private Cloud (VPC) customers located in us-central1 may observe an issue with VPC Service Controls for newly created VMs or networks.","updates":[{"created":"2024-09-04T18:39:31+00:00","modified":"2024-09-04T18:39:33+00:00","when":"2024-09-04T18:39:31+00:00","text":"The issue with Virtual Private Cloud (VPC), VPC Service Controls has been resolved for all affected projects as of Wednesday, 2024-09-04 11:06 US/Pacific.\nThe engineering team will continue to perform an RCA to avoid recurrence.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-09-04T16:55:47+00:00","modified":"2024-09-04T18:39:33+00:00","when":"2024-09-04T16:55:47+00:00","text":"Summary: Virtual Private Cloud (VPC) customers located in us-central1 may observe an issue with VPC Service Controls for newly created VMs or networks.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-09-04 12:00 US/Pacific.\nDiagnosis: VPC network data for newly created VMs or networks might not be available when requests to Google APIs originate from these VMs/networks. This can cause VPC Service Control restrictions to deny requests for clients that should otherwise be allowed. This may also prevent newly created VMs or networks that use non-RFC1918 private addresses from using Private Google Access.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-09-04T15:07:42+00:00","modified":"2024-09-04T16:55:47+00:00","when":"2024-09-04T15:07:42+00:00","text":"Summary: Virtual Private Cloud (VPC) customers located in us-central1 may observe an issue with VPC Service Controls for newly created VMs or networks.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-09-04 10:00 US/Pacific.\nDiagnosis: VPC network data for newly created VMs or networks might not be available when requests to Google APIs originate from these VMs/networks. This can cause VPC Service Control restrictions to deny requests for clients that should otherwise be allowed.
This may also prevent newly created VMs or networks that use non-RFC1918 private addresses from using Private Google Access.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-09-04T13:50:05+00:00","modified":"2024-09-04T15:07:54+00:00","when":"2024-09-04T13:50:05+00:00","text":"Summary: Virtual Private Cloud (VPC) customers located in us-central1 may observe an issue with VPC Service Controls\nDescription: We are experiencing an issue with Google Cloud Networking, VPC Service Controls beginning on Wednesday, 2024-09-04 04:45 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-09-04 08:00 US/Pacific with current details.\nDiagnosis: VPC network data for newly created VMs or networks might not be available when requests to Google APIs originate from these VMs/networks. This can cause VPC Service Control restrictions to deny requests for clients that should otherwise be allowed.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-09-04T18:39:31+00:00","modified":"2024-09-04T18:39:33+00:00","when":"2024-09-04T18:39:31+00:00","text":"The issue with Virtual Private Cloud (VPC), VPC Service Controls has been resolved for all affected projects as of Wednesday, 2024-09-04 11:06 US/Pacific.\nThe engineering team will continue to perform an RCA to avoid recurrence.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Identity and Access Management","id":"adnGEDEt9zWzs8uF1oKA"},{"title":"VPC Service Controls","id":"KM8etHz5u9QRoC7fRVT4"},{"title":"Virtual Private Cloud (VPC)","id":"BSGtCUnz6ZmyajsjgTKv"}],"uri":"incidents/7rczP2PdT9JYwUTM7M3N","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"6ef7vnUADHsLTd3hZ3qr","number":"10003902696763230748","begin":"2024-08-27T14:08:28+00:00","created":"2024-08-27T14:09:57+00:00","end":"2024-08-27T14:14:26+00:00","modified":"2024-08-27T14:14:35+00:00","external_desc":"Slowness in the UDM search queries and delayed rule detections in europe multiregion.","updates":[{"created":"2024-08-27T14:14:26+00:00","modified":"2024-08-27T14:14:41+00:00","when":"2024-08-27T14:14:26+00:00","text":"We experienced an issue with Chronicle Security beginning on Tuesday, 2024-08-27 01:37 US/Pacific.\nThe issue has been resolved for all affected users as of Tuesday, 2024-08-27 06:16 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-08-27T14:09:38+00:00","modified":"2024-08-27T14:14:35+00:00","when":"2024-08-27T14:09:38+00:00","text":"Summary: Slowness in the UDM search queries and delayed rule detections in europe multiregion.\nDescription: We experienced an issue with Chronicle Security beginning on Tuesday, 2024-08-27 01:37 US/Pacific.\nThe issue has been resolved for all affected users as of Tuesday, 2024-08-27 06:16 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.\nDiagnosis: Chronicle Security users might observe slowness in 
the UDM search queries and delayed rule detections in europe multiregion.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: europe","id":"europe"}]}],"most_recent_update":{"created":"2024-08-27T14:14:26+00:00","modified":"2024-08-27T14:14:41+00:00","when":"2024-08-27T14:14:26+00:00","text":"We experienced an issue with Chronicle Security beginning on Tuesday, 2024-08-27 01:37 US/Pacific.\nThe issue has been resolved for all affected users as of Tuesday, 2024-08-27 06:16 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/6ef7vnUADHsLTd3hZ3qr","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: europe","id":"europe"}]},{"id":"S1wLm4qJBZNcxmsUzT8F","number":"11307139576667217891","begin":"2024-08-16T18:44:00+00:00","created":"2024-08-16T18:58:09+00:00","end":"2024-08-16T23:23:58+00:00","modified":"2024-08-16T23:24:01+00:00","external_desc":"Vertex AI custom training jobs failing if using more than 2GB ephemeral storage","updates":[{"created":"2024-08-16T23:23:58+00:00","modified":"2024-08-16T23:24:02+00:00","when":"2024-08-16T23:23:58+00:00","text":"The issue with Vertex AI Training has been resolved for all affected users as of Friday, 2024-08-16 16:07 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-08-16T19:03:46+00:00","modified":"2024-08-16T23:24:01+00:00","when":"2024-08-16T19:03:46+00:00","text":"Summary: Vertex AI custom training jobs failing if using more than 2GB ephemeral storage\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Friday, 2024-08-16 17:30 US/Pacific.\nDiagnosis: Custom Vertex AI training jobs running on GKE and using more than 2GB of ephemeral storage may fail with the error \"Pod ephemeral local storage usage exceeds the total limit of containers 2Gi.\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles 
(us-west2)","id":"us-west2"}]},{"created":"2024-08-16T18:58:06+00:00","modified":"2024-08-16T19:03:51+00:00","when":"2024-08-16T18:58:06+00:00","text":"Summary: Vertex AI custom training jobs failing if using more than 2GB ephemeral storage\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Friday, 2024-08-16 17:00 US/Pacific.\nDiagnosis: Custom Vertex AI training jobs running on GKE and using more than 2GB of ephemeral storage may fail with the error \"\"Pod ephemeral local storage usage exceeds the total limit of containers 2Gi.\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-08-16T23:23:58+00:00","modified":"2024-08-16T23:24:02+00:00","when":"2024-08-16T23:23:58+00:00","text":"The issue with Vertex AI Training has been resolved for all affected users as of Friday, 2024-08-16 16:07 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"},{"title":"Vertex AI Training","id":"baQeYW2fsPA2vvLCqN93"}],"uri":"incidents/S1wLm4qJBZNcxmsUzT8F","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"id":"ir7Ua1Pjadt25VpX3iM2","number":"2064182516965503144","begin":"2024-08-16T08:27:23+00:00","created":"2024-08-16T09:06:55+00:00","end":"2024-08-16T10:45:56+00:00","modified":"2024-08-16T10:46:06+00:00","external_desc":"Multiple Chronicle SIEM operations might be delayed/not working","updates":[{"created":"2024-08-16T10:45:56+00:00","modified":"2024-08-16T10:46:12+00:00","when":"2024-08-16T10:45:56+00:00","text":"The issue with Chronicle SIEM has been resolved for all affected users as of Friday, 2024-08-16 03:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-08-16T10:04:10+00:00","modified":"2024-08-16T10:46:06+00:00","when":"2024-08-16T10:04:10+00:00","text":"Summary: Multiple Chronicle SIEM operations might be delayed/not working\nDescription: Mitigation work is still underway by our engineering team.\nWe will provide 
more information by Friday, 2024-08-16 04:30 US/Pacific.\nDiagnosis: Backstory Customer Management Application Programming Interfaces (APIs) might not be working.\nIngestion of GCP logs might be delayed.\nThe Cloud Console page for Chronicle may experience slowness.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-08-16T09:06:36+00:00","modified":"2024-08-16T10:04:10+00:00","when":"2024-08-16T09:06:36+00:00","text":"Summary: Multiple Chronicle SIEM operations might be delayed/not working\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Friday, 2024-08-16 03:15 US/Pacific.\nDiagnosis: Backstory Customer Management Application Programming Interfaces (APIs) might not be working.\nIngestion of GCP logs might be delayed.\nThe Cloud Console page for Chronicle may experience slowness.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]}],"most_recent_update":{"created":"2024-08-16T10:45:56+00:00","modified":"2024-08-16T10:46:12+00:00","when":"2024-08-16T10:45:56+00:00","text":"The issue with Chronicle SIEM has been resolved for all affected users as of Friday, 2024-08-16 03:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/ir7Ua1Pjadt25VpX3iM2","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich 
(europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"id":"copWKWyn12YXQHtd2q2b","number":"360306524663902723","begin":"2024-08-15T23:38:15+00:00","created":"2024-08-16T00:01:56+00:00","end":"2024-08-16T00:51:44+00:00","modified":"2024-08-16T00:51:49+00:00","external_desc":"Cluster creation and update operations may fail in Google Distributed Cloud Edge","updates":[{"created":"2024-08-16T00:51:44+00:00","modified":"2024-08-16T00:51:51+00:00","when":"2024-08-16T00:51:44+00:00","text":"The issue with Google Distributed Cloud Edge has been resolved for all affected users as of Thursday, 2024-08-15 17:46 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-08-16T00:01:52+00:00","modified":"2024-08-16T00:51:49+00:00","when":"2024-08-16T00:01:52+00:00","text":"Summary: Cluster creation and update operations may fail in Google Distributed Cloud Edge\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning at Thursday, 2024-08-15 10:45 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-08-15 18:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers will be unable to perform any cluster mutation operations like (cluster creation, upgrade, nodepool creation, cluster deletion, etc)\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City 
(us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-08-16T00:51:44+00:00","modified":"2024-08-16T00:51:51+00:00","when":"2024-08-16T00:51:44+00:00","text":"The issue with Google Distributed Cloud Edge has been resolved for all affected users as of Thursday, 2024-08-15 17:46 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"JKyM3LJTqgETjRCvSK6w","service_name":"Google Distributed Cloud Edge","affected_products":[{"title":"Google Distributed Cloud Edge","id":"JKyM3LJTqgETjRCvSK6w"}],"uri":"incidents/copWKWyn12YXQHtd2q2b","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"ETJGhvY9Xaktw7tgi8dF","number":"2332806682273860440","begin":"2024-08-12T13:20:00+00:00","created":"2024-08-12T14:19:49+00:00","end":"2024-08-12T15:32:00+00:00","modified":"2024-08-15T22:00:08+00:00","external_desc":"[Cloud CDN, Cloud Load Balancing, Hybrid Connectivity] elevated latency in the UK (europe-west2)","updates":[{"created":"2024-08-15T21:04:10+00:00","modified":"2024-08-15T22:00:08+00:00","when":"2024-08-15T21:04:10+00:00","text":"## Incident Report\n## Summary\nOn 12 August 2024 at 06:20 US/Pacific, multiple Google Cloud and Google Workspace products experienced connectivity issues in europe-west2 for a duration of 40 minutes. 
During that time, ingress traffic to europe-west2 and egress traffic from europe-west2 experienced elevated latencies, connection timeouts, and connection failures.\n## Root Cause\nOn 12 August 2024 06:20 US/Pacific, primary and backup power feeds were both lost in a [Google Point of Presence (POP)](https://cloud.google.com/vpc/docs/edge-locations) due to a substation switchgear failure. The affected POP hosts about ⅓ of the serving first-layer [Google Front Ends (GFEs)](https://cloud.google.com/docs/security/infrastructure/design#google-frontend-service) located in europe-west2 and some distributed networking equipment for that region. The power loss impacted the following Google products and services that depend on GFEs in that region:\n* Google Cloud APIs, Google Workspace, and other Google services like YouTube,\n* Customer-created global external application and proxy network load balancers, including Cloud CDN.\nThe power loss also impacted the following Google Cloud products which depended on impacted networking equipment:\n* Customer-created regional external application, proxy network, and passthrough network load balancers in the europe-west2 region,\n* External protocol forwarding and VM external IP address connectivity for VMs in the europe-west2 region.\n* Google Cloud Interconnect connections in some LHR colocation facilities.\nImpact was limited to situations where either or both of the following was true:\n* Inbound requests or connections were routed into the europe-west2 region of Google’s network, from the Internet, and those requests or connections depended on networking equipment that was offline, or unreachable pending reconvergence.\n* Outbound responses were routed to the Internet, from the europe-west2 region of Google’s network, and those responses depended on networking equipment that was without power.\nThe power outage caused Internet routes advertised by Google to be withdrawn in networks connected to Google’s network. The withdrawn routes were automatically replaced by other Google-advertised routes that didn’t depend on impacted networking equipment. Withdrawing and replacing routes relies on the BGP protocol and its timers, so replacement route convergence is not instantaneous, and overloading in the automatically selected replacement route GFEs extended the duration of the incident.\n## Detailed Description of Impact\n* Google Workspace: Gmail, Google Calendar, Google Chat, Google Docs, Google Drive, Google Meet and Google Tasks users connecting to Workspace services from the UK region and surrounding areas experienced connectivity issues as described in the next point.\n* GFE-based products and services: Customers on the Internet experienced a spike of broken connections followed by elevated latencies or HTTP error responses when communicating with GFE-powered Google APIs and services or customer-created global external application and proxy network load balancers. At roughly 06:23 US/Pacific, Google automatically redirected connections to the nearest possible first-layer GFEs with some latency penalty. Unfortunately, some of the nearest possible first-layer GFEs were overloaded until 06:48 when Google engineers made adjustments to more efficiently distribute incoming requests among nearby first-layer GFEs. Depending on the Google API or service or the customer-created global external load balancer, elevated latencies could have persisted until about 08:30 US/Pacific. 
Elevated latencies also could have applied to customer-created global external load balancers that had Cloud CDN enabled.\n* Regional Google Cloud products and services: Until replacement routes were in effect, customers on the Internet experienced connection failures to the following GCP resources in the europe-west2 region: * Regional external application, proxy network, and passthrough network load balancers. * External protocol forwarding and VM external IP addresses.\n* Google Cloud Interconnect: Google Cloud Interconnect connections in some LHR colocation facilities (lhr-zone1-47, lhr-zone1-832, lhr-zone1-2262, lhr-zone1-4885, lhr-zone1-99051 and lhr-zone2-47) remained offline from 06:20 US/Pacific to at least 06:57 US/Pacific, when power was restored.\nAt 06:43 US/Pacific, power was restored to the impacted networking equipment. Google networking equipment was fully operational by 06:57 US/Pacific, and connectivity to GFE-based products and services, regional Google Cloud products and services, and Google Cloud Interconnect resumed shortly thereafter.\n## Remediation and Prevention\nMultiple Google engineering teams were alerted and automated recovery tooling was triggered as expected; however, manual adjustments were required to address subsequent first-layer GFE overload. Google is reviewing automation improvements in tasks that required manual intervention to reduce the duration of future power event impact. Similarly, Google is working to increase Cloud Interconnect control plane resilience and reduce mitigation time through automated reaction to isolation events.\nAdditionally, Google's partner who maintains power for the affected facility in LHR (London) is conducting a full root cause analysis with the switchboard manufacturer and substation owner(s) involved in supplying power, including follow-up as to why stored or generated on-site emergency power did not carry loads.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-08-12T19:19:19+00:00","modified":"2024-08-15T21:04:10+00:00","when":"2024-08-12T19:19:19+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support or to Google Workspace Support using help article https://support.google.com/a/answer/1047213.\n(All Times US/Pacific)\n**GCP Impact Time:**\n**Incident Start:** 12 August 2024 06:20\n**Incident End:** 12 August 2024 08:32\n**Duration:** 2 hours, 12 minutes\n**Workspace Impact Time:**\n**Incident Start:** 12 August 2024 06:20\n**Incident End:** 12 August 2024 07:00\n**Duration:** 40 minutes\n**Affected Services and Features:**\nGCP: Cloud CDN, Cloud Load Balancing, Hybrid Connectivity, Virtual Private Cloud (VPC)\nWorkspace: Gmail, Google Calendar, Google Chat, Google Docs, Google Drive, Google Meet and Google Tasks\n**Regions/Zones:** europe-west2\n**Description:**\n**GCP:**\nCloud CDN, Cloud Load Balancing, Hybrid Connectivity and Virtual Private Cloud (VPC) customers experienced intermittent timeouts (500s) from 06:20 to 07:00 US/Pacific followed by elevated latency in europe-west2 until 08:32 US/Pacific for a total duration of 2 hours and 12 minutes.\nSome customers using [Cloud Interconnect zones](https://cloud.google.com/network-connectivity/docs/interconnect/concepts/choosing-colocation-facilities#locations-table) (lhr-zone1-2262, lhr-zone1-832, lhr-zone1-47, and lhr-zone2-47) and customers using some Partner Interconnects in London may have experienced connectivity loss to GCP services.\n**Workspace:**\nGmail, Google Calendar, Google Chat, Google Docs, Google Drive, Google Meet and Google Tasks users connecting to Workspace services from the UK region may have experienced connectivity issues for a duration of 40 minutes.\nFrom preliminary analysis, the root cause of the issue was a loss of power to networking equipment in the London data center.\nGoogle will complete a full Incident Report in the following days that will provide a full root cause.\n**Customer Impact:**\n**GCP:**\n* Customers experienced intermittent timeouts (500s) followed by elevated latency.\n* Some Cloud Interconnect and Partner Interconnect users experienced connectivity loss to GCP services.\n**Workspace:**\n* Users connecting to Workspace services served from the UK region may have experienced connectivity issues.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-08-12T15:34:47+00:00","modified":"2024-08-12T19:19:19+00:00","when":"2024-08-12T15:34:47+00:00","text":"The issue with Cloud CDN, Cloud Load Balancing, Hybrid Connectivity, Virtual Private Cloud (VPC) has been resolved for all affected users as of Monday, 2024-08-12 08:19 US/Pacific.\nDuring the issue, users connecting to GCP services from the UK region may have experienced elevated latency and intermittent 500 error rates.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-08-12T14:19:34+00:00","modified":"2024-08-12T15:34:56+00:00","when":"2024-08-12T14:19:34+00:00","text":"Summary: [Cloud CDN, Cloud Load Balancing, Hybrid Connectivity] elevated latency in the UK (europe-west2)\nDescription: We are experiencing an issue with Cloud CDN, Cloud Load Balancing, Hybrid Connectivity.\nThe issue is mitigated and our engineering team continues to investigate the issue and is monitoring for the residual impact.\nWe will provide an update by Monday, 2024-08-12 08:30 US/Pacific with current details.\nDiagnosis: Customers who 
are connecting to europe-west2 (specifically in the UK) will see elevated latency.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"London (europe-west2)","id":"europe-west2"}]}],"most_recent_update":{"created":"2024-08-15T21:04:10+00:00","modified":"2024-08-15T22:00:08+00:00","when":"2024-08-15T21:04:10+00:00","text":"## Incident Report\n## Summary\nOn 12 August 2024 at 06:20 US/Pacific, multiple Google Cloud and Google Workspace products experienced connectivity issues in europe-west2 for a duration of 40 minutes. During that time, ingress traffic to europe-west2 and egress traffic from europe-west2 experienced elevated latencies, connection timeouts, and connection failures.\n## Root Cause\nOn 12 August 2024 06:20 US/Pacific, primary and backup power feeds were both lost in a [Google Point of Presence (POP)](https://cloud.google.com/vpc/docs/edge-locations) due to a substation switchgear failure. The affected POP hosts about ⅓ of the serving first-layer [Google Front Ends (GFEs)](https://cloud.google.com/docs/security/infrastructure/design#google-frontend-service) located in europe-west2 and some distributed networking equipment for that region. The power loss impacted the following Google products and services that depend on GFEs in that region:\n* Google Cloud APIs, Google Workspace, and other Google services like YouTube,\n* Customer-created global external application and proxy network load balancers, including Cloud CDN.\nThe power loss also impacted the following Google Cloud products which depended on impacted networking equipment:\n* Customer-created regional external application, proxy network, and passthrough network load balancers in the europe-west2 region,\n* External protocol forwarding and VM external IP address connectivity for VMs in the europe-west2 region.\n* Google Cloud Interconnect connections in some LHR colocation facilities.\nImpact was limited to situations where either or both of the following was true:\n* Inbound requests or connections were routed into the europe-west2 region of Google’s network, from the Internet, and those requests or connections depended on networking equipment that was offline, or unreachable pending reconvergence.\n* Outbound responses were routed to the Internet, from the europe-west2 region of Google’s network, and those responses depended on networking equipment that was without power.\nThe power outage caused Internet routes advertised by Google to be withdrawn in networks connected to Google’s network. The withdrawn routes were automatically replaced by other Google-advertised routes that didn’t depend on impacted networking equipment. Withdrawing and replacing routes relies on the BGP protocol and its timers, so replacement route convergence is not instantaneous, and overloading in the automatically selected replacement route GFEs extended the duration of the incident.\n## Detailed Description of Impact\n* Google Workspace: Gmail, Google Calendar, Google Chat, Google Docs, Google Drive, Google Meet and Google Tasks users connecting to Workspace services from the UK region and surrounding areas experienced connectivity issues as described in the next point.\n* GFE-based products and services: Customers on the Internet experienced a spike of broken connections followed by elevated latencies or HTTP error responses when communicating with GFE-powered Google APIs and services or customer-created global external application and proxy network load balancers. 
At roughly 06:23 US/Pacific, Google automatically redirected connections to the nearest possible first-layer GFEs with some latency penalty. Unfortunately, some of the nearest possible first-layer GFEs were overloaded until 06:48 when Google engineers made adjustments to more efficiently distribute incoming requests among nearby first-layer GFEs. Depending on the Google API or service or the customer-created global external load balancer, elevated latencies could have persisted until about 08:30 US/Pacific. Elevated latencies also could have applied to customer-created global external load balancers that had Cloud CDN enabled.\n* Regional Google Cloud products and services: Until replacement routes were in effect, customers on the Internet experienced connection failures to the following GCP resources in the europe-west2 region: * Regional external application, proxy network, and passthrough network load balancers. * External protocol forwarding and VM external IP addresses.\n* Google Cloud Interconnect: Google Cloud Interconnect connections in some LHR colocation facilities (lhr-zone1-47, lhr-zone1-832, lhr-zone1-2262, lhr-zone1-4885, lhr-zone1-99051 and lhr-zone2-47) remained offline from 06:20 US/Pacific to at least 06:57 US/Pacific, when power was restored.\nAt 06:43 US/Pacific, power was restored to the impacted networking equipment. Google networking equipment was fully operational by 06:57 US/Pacific, and connectivity to GFE-based products and services, regional Google Cloud products and services, and Google Cloud Interconnect resumed shortly thereafter.\n## Remediation and Prevention\nMultiple Google engineering teams were alerted and automated recovery tooling was triggered as expected; however, manual adjustments were required to address subsequent first-layer GFE overload. Google is reviewing automation improvements in tasks that required manual intervention to reduce the duration of future power event impact. 
Similarly, Google is working to increase Cloud Interconnect control plane resilience and reduce mitigation time through automated reaction to isolation events.\nAdditionally, Google's partner who maintains power for the affected facility in LHR (London) is conducting a full root cause analysis with the switchboard manufacturer and substation owner(s) involved in supplying power, including follow-up as to why stored or generated on-site emergency power did not carry loads.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud CDN","id":"ckSRJf2vQwQy188ULGy3"},{"title":"Cloud Load Balancing","id":"ix7u9beT8ivBdjApTif3"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Hybrid Connectivity","id":"5x6CGnZvSHQZ26KtxpK1"},{"title":"Virtual Private Cloud (VPC)","id":"BSGtCUnz6ZmyajsjgTKv"}],"uri":"incidents/ETJGhvY9Xaktw7tgi8dF","currently_affected_locations":[],"previously_affected_locations":[{"title":"London (europe-west2)","id":"europe-west2"}]},{"id":"Z3aJkFKzZWRHSVYDxMaH","number":"17015932302682825625","begin":"2024-08-08T07:35:42+00:00","created":"2024-08-08T08:18:29+00:00","end":"2024-08-09T00:18:47+00:00","modified":"2024-08-09T00:18:50+00:00","external_desc":"Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5","updates":[{"created":"2024-08-09T00:18:47+00:00","modified":"2024-08-09T00:18:51+00:00","when":"2024-08-09T00:18:47+00:00","text":"The issue with Vertex AI Online Prediction and Tensor Processing Unit (TPU) v5e workloads has been resolved for all affected users as of Thursday, 2024-08-08 17:16 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-08-08T21:57:28+00:00","modified":"2024-08-09T00:18:50+00:00","when":"2024-08-08T21:57:28+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: We are experiencing an issue with Vertex AI Online Prediction beginning on Wednesday, 2024-08-07 23:55 US/Pacific.\nA limited number of customers are experiencing interruptions while operating their Tensor Processing Unit (TPU) v5e workloads.\nOur engineering team continues to work towards mitigation.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-08-08 18:00 US/Pacific.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: There is no workaround at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T21:16:42+00:00","modified":"2024-08-08T21:57:28+00:00","when":"2024-08-08T21:16:42+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: Mitigation work continues with our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-08-08 16:00 US/Pacific.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the 
calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T19:58:11+00:00","modified":"2024-08-08T21:16:42+00:00","when":"2024-08-08T19:58:11+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: Mitigation work continues with our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-08-08 14:00 US/Pacific.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T18:52:53+00:00","modified":"2024-08-08T19:58:11+00:00","when":"2024-08-08T18:52:53+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: Mitigation work continues with our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-08-08 13:00 US/Pacific.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T18:11:53+00:00","modified":"2024-08-08T18:52:53+00:00","when":"2024-08-08T18:11:53+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-08-08 12:00 US/Pacific.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T16:01:03+00:00","modified":"2024-08-08T18:11:53+00:00","when":"2024-08-08T16:01:03+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to be completed by Thursday, 2024-08-08 07:30 US/Pacific.\nWe will provide more information by Thursday, 2024-08-08 11:00 US/Pacific.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T14:54:44+00:00","modified":"2024-08-08T16:01:03+00:00","when":"2024-08-08T14:54:44+00:00","text":"Summary: Vertex AI Online 
Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to be completed by Thursday, 2024-08-08 07:30 US/Pacific.\nWe will provide more information by Thursday, 2024-08-08 09:30 US/Pacific.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T13:23:22+00:00","modified":"2024-08-08T14:54:44+00:00","when":"2024-08-08T13:23:22+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to be completed by Thursday, 2024-08-08 07:30 US/Pacific.\nWe will provide more information by Thursday, 2024-08-08 08:30 US/Pacific.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T12:52:11+00:00","modified":"2024-08-08T13:23:22+00:00","when":"2024-08-08T12:52:11+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: We are experiencing an issue with Vertex AI Online Prediction beginning on Wednesday, 2024-08-07 23:55 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-08-08 08:00 US/Pacific with current details.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T10:40:13+00:00","modified":"2024-08-08T12:52:11+00:00","when":"2024-08-08T10:40:13+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: We are experiencing an issue with Vertex AI Online Prediction beginning on Wednesday, 2024-08-07 23:55 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-08-08 06:00 US/Pacific with current details.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T09:23:31+00:00","modified":"2024-08-08T10:40:13+00:00","when":"2024-08-08T09:23:31+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: We are experiencing an issue with Vertex AI Online Prediction 
beginning on Wednesday, 2024-08-07 23:55 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-08-08 04:00 US/Pacific with current details.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T08:31:29+00:00","modified":"2024-08-08T09:23:31+00:00","when":"2024-08-08T08:31:29+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet 3.5 and Opus models in us-east5\nDescription: We are experiencing an issue with Vertex AI Online Prediction beginning on Wednesday, 2024-08-07 23:55 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-08-08 02:30 US/Pacific with current details.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet 3.5 and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-08-08T08:18:26+00:00","modified":"2024-08-08T08:31:29+00:00","when":"2024-08-08T08:18:26+00:00","text":"Summary: Vertex AI Online Prediction experiencing elevated 500 errors for Claude Sonnet and Opus models in us-east5\nDescription: We are experiencing an issue with Vertex AI Online Prediction beginning on Wednesday, 2024-08-07 23:55 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-08-08 02:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Some Vertex AI Online Prediction customers using Claude Sonnet and Opus may experience elevated 500 errors from the calls to the model.\nPlease note that other Generative AI models within Vertex AI Online Prediction are functioning normally.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]}],"most_recent_update":{"created":"2024-08-09T00:18:47+00:00","modified":"2024-08-09T00:18:51+00:00","when":"2024-08-09T00:18:47+00:00","text":"The issue with Vertex AI Online Prediction and Tensor Processing Unit (TPU) v5e workloads has been resolved for all affected users as of Thursday, 2024-08-08 17:16 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"},{"title":"Vertex AI Online Prediction","id":"sdXM79fz1FS6ekNpu37K"}],"uri":"incidents/Z3aJkFKzZWRHSVYDxMaH","currently_affected_locations":[],"previously_affected_locations":[]},{"id":"yGuzk2nmNTiQsE9emSJQ","number":"2145922944891124678","begin":"2024-08-07T21:53:24+00:00","created":"2024-08-08T00:06:45+00:00","end":"2024-08-08T01:44:26+00:00","modified":"2024-08-08T01:44:30+00:00","external_desc":"Cloud TPU Service Activation is impacted.","updates":[{"created":"2024-08-08T01:44:26+00:00","modified":"2024-08-08T01:44:32+00:00","when":"2024-08-08T01:44:26+00:00","text":"The issue with 
Cloud TPU Services, Vertex AI Online Prediction and Vertex AI Training has been resolved for all affected customers as of Wednesday, 2024-08-07 17:05 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-08-08T00:06:42+00:00","modified":"2024-08-08T01:44:30+00:00","when":"2024-08-08T00:06:42+00:00","text":"Summary: Cloud TPU Service Activation is impacted.\nDescription: We are investigating issues where customers using TPUs through Vertex AI Service or Cloud TPU Service/API directly may be experiencing API activation failures. This can happen for new customers or existing customers who disable and reactivate the TPU Service beginning on Wednesday, 2024-08-07 12:00 US/Pacific.\nOur engineering team has rolled out a mitigation and is progressing as expected.\nWe will provide an update by Wednesday, 2024-08-07 19:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers would not be able to enable Cloud TPU APIs in their projects and would encounter an error message that states, e.g. \"Failed to upload model. Contact Vertex AI.\"\nWe continue to investigate impact to other Google Cloud Services and products.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-08-08T01:44:26+00:00","modified":"2024-08-08T01:44:32+00:00","when":"2024-08-08T01:44:26+00:00","text":"The issue with Cloud TPU Services, Vertex AI Online Prediction and Vertex AI Training has been resolved for all affected 
customers as of Wednesday, 2024-08-07 17:05 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"},{"title":"Vertex AI Online Prediction","id":"sdXM79fz1FS6ekNpu37K"},{"title":"Vertex AI Training","id":"baQeYW2fsPA2vvLCqN93"}],"uri":"incidents/yGuzk2nmNTiQsE9emSJQ","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"n71AYyDXgDVjZubZo8wu","number":"11388187564734614757","begin":"2024-08-03T11:31:00+00:00","created":"2024-08-03T11:36:33+00:00","end":"2024-08-03T13:28:31+00:00","modified":"2024-08-03T14:12:47+00:00","external_desc":"Multiple product outage in europe-west9-a","updates":[{"created":"2024-08-03T13:28:31+00:00","modified":"2024-08-03T14:12:47+00:00","when":"2024-08-03T13:28:31+00:00","text":"The issue with Artifact Registry, Cloud Filestore, Cloud Run, Cloud Workstations, GKE fleet management, Google Cloud Dataflow, Google Cloud Dataproc, Google Compute Engine, Google Kubernetes Engine, Hybrid Connectivity, Persistent Disk, Pub/Sub Lite, Cloud Bigtable has been resolved for all affected users as of Saturday, 2024-08-03 06:26 US/Pacific.\nIf you are still experiencing issues please submit a support case with Google Support.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the 
issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-08-03T12:46:30+00:00","modified":"2024-08-03T13:28:46+00:00","when":"2024-08-03T12:46:30+00:00","text":"Summary: Multiple product outage in europe-west9-a\nDescription: We are experiencing an issue in europe-west9-a as of Saturday, 2024-08-03 03:50 US/Pacific. Impacting Artifact Registry, Cloud Filestore, Cloud Run, Cloud Workstations, GKE fleet management, Google Cloud Dataflow, Google Cloud Dataproc, Google Compute Engine, Google Kubernetes Engine, Hybrid Connectivity, Persistent Disk, Pub/Sub Lite, Cloud SQL zonal instances. Other products available in europe-west9-a are likely impacted.\nOur engineering team continues to investigate the issue and work towards mitigation.\nWe will provide an update by Saturday, 2024-08-03 08:17 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west9-a\nWorkaround: None at this time","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Paris (europe-west9)","id":"europe-west9"}]},{"created":"2024-08-03T11:58:19+00:00","modified":"2024-08-03T12:46:30+00:00","when":"2024-08-03T11:58:19+00:00","text":"Summary: Multiple product outage in europe-west9-a\nDescription: We are experiencing an issue in europe-west9-a impacting Google Compute Engine, Persistent Disk, Google Cloud Dataflow, Google Kubernetes Engine.\nOther products available in europe-west9-a are likely impacted.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2024-08-03 06:52 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west9-a\nWorkaround: None at this time","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Paris (europe-west9)","id":"europe-west9"}]},{"created":"2024-08-03T11:36:14+00:00","modified":"2024-08-03T11:58:36+00:00","when":"2024-08-03T11:36:14+00:00","text":"Summary: Multiple product outage in europe-west9-a\nDescription: We are experiencing an issue in europe-west9-a impacting multiple Cloud products.\nOur engineering team is currently investigating the issue to determine the scope of the impact.\nWe will provide an update by Saturday, 2024-08-03 05:10 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west9-a\nWorkaround: None at this time","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Paris (europe-west9)","id":"europe-west9"}]}],"most_recent_update":{"created":"2024-08-03T13:28:31+00:00","modified":"2024-08-03T14:12:47+00:00","when":"2024-08-03T13:28:31+00:00","text":"The issue with Artifact Registry, Cloud Filestore, Cloud Run, Cloud Workstations, GKE fleet management, Google Cloud Dataflow, Google Cloud Dataproc, Google Compute Engine, Google Kubernetes Engine, Hybrid Connectivity, Persistent Disk, Pub/Sub Lite, Cloud Bigtable has been resolved for all affected users as of Saturday, 2024-08-03 06:26 US/Pacific.\nIf you are still experiencing issues please submit a support case with Google Support.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Artifact 
Registry","id":"QbBuuiRdsLpMr9WmGwm5"},{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"},{"title":"Cloud Filestore","id":"jog4nyYkquiLeSK5s26q"},{"title":"Cloud Run","id":"9D7d2iNBQWN24zc1VamE"},{"title":"Cloud Workstations","id":"5UUXCiH1vfFHXmbDixrB"},{"title":"GKE fleet management","id":"4osgZCUJuuh3whY4B8tt"},{"title":"Google Cloud Bigtable","id":"LfZSuE3xdQU46YMFV5fy"},{"title":"Google Cloud Dataflow","id":"T9bFoXPqG8w8g1YbWTKY"},{"title":"Google Cloud Dataproc","id":"yjXrEg3Yvy26BauMwr69"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Google Cloud Pub/Sub","id":"dFjdLh2v6zuES6t9ADCB"},{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"},{"title":"Hybrid Connectivity","id":"5x6CGnZvSHQZ26KtxpK1"},{"title":"Persistent Disk","id":"SzESm2Ux129pjDGKWD68"},{"title":"Pub/Sub Lite","id":"5DWkcStmv4dFHRHLaRXb"}],"uri":"incidents/n71AYyDXgDVjZubZo8wu","currently_affected_locations":[],"previously_affected_locations":[{"title":"Paris (europe-west9)","id":"europe-west9"}]},{"id":"pWkD2xmKoUBiFrvHS8h8","number":"7959660124891669086","begin":"2024-08-02T16:30:00+00:00","created":"2024-08-02T16:49:11+00:00","end":"2024-08-03T00:36:21+00:00","modified":"2024-08-03T00:36:24+00:00","external_desc":"Customers using AutoML translations may experience high latency and 'resource exhausted' failures.","updates":[{"created":"2024-08-03T00:36:21+00:00","modified":"2024-08-03T00:36:25+00:00","when":"2024-08-03T00:36:21+00:00","text":"Our Engineering team has successfully concluded validations and confirmed that issue has been mitigated.\nIf you have questions or require further assistance, please open a case with the Support Team and we will work with you to ensure that your concerns are addressed.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-08-02T21:58:16+00:00","modified":"2024-08-03T00:36:24+00:00","when":"2024-08-02T21:58:16+00:00","text":"Summary: Customers using AutoML translations may experience high latency and resource exhausted failures.\nDescription: We believe the issue with AutoML Translation is mitigated as per our monitoring.\nOur engineering team is in the process of validating the mitigation further, to ensure the issue is fully resolved for all our customers.\nWe will provide an update by Friday, 2024-08-02 19:00 US/Pacific with current details.\nDiagnosis: When users use translation with custom models, most requests will either fail with timeout/resource exhausted, or have latency up to 2 mins.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-08-02T18:53:22+00:00","modified":"2024-08-02T21:58:16+00:00","when":"2024-08-02T18:53:22+00:00","text":"Summary: Customers using AutoML translations may experience high latency and resource exhausted failures.\nDescription: We believe the issue with AutoML Translation is mitigated as per our monitoring.\nOur engineering team is still validating further to ensure the issue is fully resolved for all our customers.\nWe will provide an update by Friday, 2024-08-02 15:00 US/Pacific with current details.\nDiagnosis: When users use translation with custom models, most requests will either fail with timeout/resource exhausted, or have latency up to 2 mins.\nWorkaround: None at this 
time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-08-02T17:18:37+00:00","modified":"2024-08-02T18:53:22+00:00","when":"2024-08-02T17:18:37+00:00","text":"Summary: Customers using AutoML translations may experience high latency and resource exhausted failures.\nDescription: We believe the issue with AutoML Translation is mitigated as per our monitoring.\nOur engineering team is currently validating further to ensure the issue is fully resolved for all our customers.\nWe will provide an update by Friday, 2024-08-02 12:00 US/Pacific with current details.\nDiagnosis: When users use translation with custom models, most requests will either fail with timeout/resource exhausted, or have latency up to 2 mins.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-08-02T16:56:56+00:00","modified":"2024-08-02T17:18:41+00:00","when":"2024-08-02T16:56:56+00:00","text":"Summary: Customers using AutoML translations may experience high latency and resource exhausted failures.\nDescription: We are experiencing an issue with AutoML translations\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-08-02 11:08 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: AutoML translations may experience high latency and resource exhausted failures.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-08-02T16:49:06+00:00","modified":"2024-08-02T16:57:00+00:00","when":"2024-08-02T16:49:06+00:00","text":"Summary: Customers using AutoML translations may experience high latency and resource exhausted failures.\nDescription: We are experiencing an issue with Cloud Translation.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-08-02 11:08 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-08-03T00:36:21+00:00","modified":"2024-08-03T00:36:25+00:00","when":"2024-08-03T00:36:21+00:00","text":"Our Engineering team has successfully concluded validations and confirmed that issue has been mitigated.\nIf you have questions or require further assistance, please open a case with the Support Team and we will work with you to ensure that your concerns are addressed.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"AutoML Translation","id":"qrU1jFoSMYvoEZG8c76T"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"}],"uri":"incidents/pWkD2xmKoUBiFrvHS8h8","currently_affected_locations":[],"previously_affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa 
(us-central1)","id":"us-central1"}]},{"id":"XUq643xVy4pEEmPziyqh","number":"4971483720084595795","begin":"2024-07-30T09:36:28+00:00","created":"2024-07-30T09:36:32+00:00","end":"2024-07-30T19:44:57+00:00","modified":"2024-07-30T19:44:59+00:00","external_desc":"Private Cloud Certificate’s renewal (DigiCert)","updates":[{"created":"2024-07-30T19:44:57+00:00","modified":"2024-07-30T19:45:00+00:00","when":"2024-07-30T19:44:57+00:00","text":"The issue with VMWare engine has been resolved for all affected projects as of Tuesday, 2024-07-30 12:30 US/Pacific.\nGoogle engineers have completed renewal of all certificates. We thank you for your patience while we worked on resolving the issue.\nCertificate renewals were done on NSX, V-Center and HCX virtual machines.\n3rd party products might still see failures due to an SSL thumbprint mismatch. Customers should check third party applications to ensure backups, replication and other third party tools are working properly with vCenter. If there are any issues noted, please reach out to Google Cloud Support.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-30T18:54:13+00:00","modified":"2024-07-30T19:44:59+00:00","when":"2024-07-30T18:54:13+00:00","text":"Summary: Private Cloud Certificate’s renewal (DigiCert)\nDescription: DigiCert is a digital certificate authority that issues certificates used in your Google Cloud VMware Engine Private Cloud. DigiCert informed us of a recent [incident](https://www.digicert.com/support/certificate-revocation-incident) in which they found a problem with their Domain Control Validation (DCV). As a result, by July 30, 2024, 19:30 UTC (12:30 US/Pacific), all certificates must be revoked without exception.\nGoogle engineers are actively working to renew the certificates on your private cloud before 19:30 UTC (12:30 US/Pacific).\nCertificate renewals are being done on NSX, V-Center and HCX virtual machines and will not have any impact on accessing any workload VMs. During the certificate renewal (which typically takes 30 mins), currently active backup \u0026 replication tasks and management tasks such as node addition/removal may fail. These tasks and new migration can't be started during this period.\nAfter the certificates are renewed, 3rd party products might still see failures due to thumbprint mismatch. Customers should check for third party applications. 
If there are any issues noted, please reach out to Google Cloud Support.\nWe will provide an update by Tuesday, 2024-07-30 12:30 US/Pacific with current details.\nDiagnosis: No current impact to the customers.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-07-30T17:39:05+00:00","modified":"2024-07-30T18:54:13+00:00","when":"2024-07-30T17:39:05+00:00","text":"Summary: Private Cloud Certificate’s renewal (DigiCert)\nDescription: DigiCert is a digital certificate authority that issues certificates used in your Google Cloud VMware Engine Private Cloud. DigiCert informed us of a recent [incident](https://www.digicert.com/support/certificate-revocation-incident) in which they found a problem with their Domain Control Validation (DCV). As a result, by July 30, 2024, 19:30 UTC (12:30 US/Pacific), all certificates must be revoked without exception.\nGoogle engineers are actively working to renew the certificates on your private cloud before 19:30 UTC (12:30 US/Pacific).\nCertificate renewals are being done on NSX, V-Center and HCX virtual machines and will not have any impact on accessing any workload VMs. During the certificate renewal (which typically takes 30 mins), currently active backup \u0026 replication tasks and management tasks such as node addition/removal may fail. 
These tasks and new migrations can't be started during this period.\nWe will provide an update by Tuesday, 2024-07-30 12:30 US/Pacific with current details.\nDiagnosis: No current impact to the customers.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-07-30T09:36:29+00:00","modified":"2024-07-30T17:39:05+00:00","when":"2024-07-30T09:36:29+00:00","text":"Summary: Private Cloud Certificate’s renewal (DigiCert)\nDescription: DigiCert is a digital certificate authority that issues certificates used in your Google Cloud VMware Engine Private Cloud. DigiCert informed us of a recent [incident](https://www.digicert.com/support/certificate-revocation-incident) in which they found a problem with their Domain Control Validation (DCV). As a result, by July 30, 2024, 19:30 UTC (12:30 US/Pacific), all certificates must be revoked without exception.\nGoogle engineers will renew the certificates on your private cloud before 19:30 UTC (12:30 US/Pacific). Certificate renewal will be done on NSX, V-Center and HCX virtual machines and will not have any impact on accessing any workload VMs. During the certificate renewal (which typically takes 30 mins), currently active backup \u0026 replication tasks and management tasks such as node addition/removal may fail. 
These tasks and new migrations can't be started during this period.\nWe will provide an update by Tuesday, 2024-07-30 10:30 US/Pacific with current details.\nDiagnosis: No current impact to the customers.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]}],"most_recent_update":{"created":"2024-07-30T19:44:57+00:00","modified":"2024-07-30T19:45:00+00:00","when":"2024-07-30T19:44:57+00:00","text":"The issue with VMWare engine has been resolved for all affected projects as of Tuesday, 2024-07-30 12:30 US/Pacific.\nGoogle engineers have completed renewal of all certificates. We thank you for your patience while we worked on resolving the issue.\nCertificate renewals were done on NSX, V-Center and HCX virtual machines.\n3rd party products might still see failures due to an SSL thumbprint mismatch. Customers should check third party applications to ensure backups, replication and other third party tools are working properly with vCenter. 
If there are any issues noted, please reach out to Google Cloud Support.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"VMWare engine","id":"9H6gWUHvb2ZubeoxzQ1Y"}],"uri":"incidents/XUq643xVy4pEEmPziyqh","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"id":"ueutwY19MFNTod5YVR9D","number":"11485975573026607437","begin":"2024-07-26T16:57:00+00:00","created":"2024-07-26T19:24:11+00:00","end":"2024-07-27T01:11:00+00:00","modified":"2024-07-31T22:58:20+00:00","external_desc":"Persistent Disk (PD) is experiencing issues with disk delete and update operations in turn affecting other disk related operations globally.","updates":[{"created":"2024-07-31T22:58:20+00:00","modified":"2024-07-31T22:58:20+00:00","when":"2024-07-31T22:58:20+00:00","text":"# Incident Report\n## Summary\nOn July 26, 2024, Persistent Disk (PD) experienced issues with disk delete and instance delete operations for a duration of 8 hours and 14 minutes, potentially affecting customers globally. During this period, these operations were stuck in a pending state. To our PD customers whose businesses were impacted during this disruption, we sincerely apologize.\nGoogle is committed to quickly and continually improving our technology and operations to prevent service disruptions. We appreciate your patience and apologize again for the impact to your organization. We thank you for your business.\n## Root Cause\nDue to a coordination error, an internal capacity reclamation process was triggered prematurely without having the necessary internal software in place. As a result, user-initiated disk deletes and related operations were blocked and remained in a pending state for an extended period. It affected approximately 85% of scopes.\nOnce the issue was resolved, the blocked operations were automatically unblocked and completed successfully.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage via monitoring alert on July 26 at 9:57 US/Pacific and promptly began an investigation. 
Upon understanding the issue, engineers swiftly initiated a clean-up of blocked operations, to prevent the issue from affecting more disks.\nTo fully resolve the issue, engineers accelerated the deployment of the software update across remaining scopes. This allowed the stalled operations to complete gracefully, unblocking customer-initiated operations.\nGoogle is committed to preventing a recurrence of this issue and is taking the following actions:\n* **Enforce a rigorous review process:** All future internal cleanup operations will undergo a thorough review and validation process to identify and mitigate potential conflicts with other ongoing processes.\n* **Make error-handling more robust:** Improve the system's ability to detect and gracefully handle stuck operations, preventing them from blocking customer-initiated actions.\n* **Address tooling gaps for phased rollout:** All future internal capacity reclamation processes will be conducted in phases to minimize the impact if any unexpected issues arise.\n## Detailed Description of Impact\nOn Friday, July 26, 2024, from 9:57 to 18:11 US/Pacific, PD customers globally experienced issues with disk delete and update operations. This also affected other disk-related actions.\nSpecifically, impacted customers encountered extended delays or failures with the following operations:\n* Disk deletions\n* Virtual Machine (VM) deletions (due to pending disk deletions)\n* Managed Instance Group scale-down operations","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-29T15:07:03+00:00","modified":"2024-07-31T22:58:20+00:00","when":"2024-07-29T15:07:03+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 26 July, 2024 09:57\n**Incident End:** 26 July, 2024 18:11\n**Duration:** 8 hours, 14 mins\n**Affected Services and Features:** Persistent Disk\n**Regions/Zones:** Global\n**Description:**\nPersistent Disk experienced issues with disk delete and update operations for a period of 8 hours and 14 minutes, potentially affecting our customers in all regions. Based on our preliminary analysis, the root cause of the issue was miscoordination of internal software updates.\nGoogle will complete a full IR in the following days that will provide a full root cause.\n**Customer Impact:**\nImpacted customers may have experienced issues where the following operations remained in a pending state for an extended period of time:\n- Disk deletions\n- Disk update operations, such as resize\n- VM deletions\n- Managed Instance Groups scale-down operations\n**Additional details:**\nMiscoordination of internal software updates caused blocking of disk deletion and update operations as well as other related operations. This caused these operations to remain in pending state for an extended period of time. 
Once the issue was resolved the operations were unblocked and resumed to completion.\n---","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-27T01:17:01+00:00","modified":"2024-07-29T15:07:03+00:00","when":"2024-07-27T01:17:01+00:00","text":"The issue with Persistent Disk has been resolved for all affected users as of Friday, 2024-07-26 18:11 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-27T00:05:22+00:00","modified":"2024-07-27T01:17:03+00:00","when":"2024-07-27T00:05:22+00:00","text":"Summary: Persistent Disk (PD) is experiencing issues with disk delete and update operations in turn affecting other disk related operations globally.\nDescription: As a majority of the backlogs have now cleared, our engineers are working on executing measures targeted towards clearing any remaining tasks that require intervention.\nWe will provide an update by Friday, 2024-07-26 18:30 US/Pacific with current details.\nDiagnosis: Impacted customers may experience issues where the following operations remain in pending state for an extended period of time:\n- Disk deletions\n- Disk update operations such as resizes\n- VM deletions are also affected due to issues with disk deletions.\n- Managed Instance Groups scale down operations\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon 
(us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-26T22:17:12+00:00","modified":"2024-07-27T00:05:22+00:00","when":"2024-07-26T22:17:12+00:00","text":"Summary: Persistent Disk (PD) is experiencing issues with disk delete and update operations in turn affecting other disk related operations globally.\nDescription: Our engineers identified the root cause of the issue and executed measures to mitigate the same.\nA majority of the backlogs have now cleared at a steady rate, while our engineers continue to closely monitor the remainder of the operations.\nWe will provide an update by Friday, 2024-07-26 17:00 US/Pacific with current details.\nDiagnosis: Impacted customers may experience issues where the following operations remain in pending state for an extended period of time:\n- Disk deletions\n- Disk update operations such as resizes\n- VM deletions are also affected due to issues with disk deletions.\n- Managed Instance Groups scale down operations\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas 
(us-west4)","id":"us-west4"}]},{"created":"2024-07-26T21:17:20+00:00","modified":"2024-07-26T22:17:12+00:00","when":"2024-07-26T21:17:20+00:00","text":"Summary: Persistent Disk (PD) is experiencing issues with disk delete and update operations in turn affecting other disk related operations globally.\nDescription: Our engineers identified the root cause of the issue and have executed measures to mitigate the same. We are currently monitoring the rate of progress for the backlogs being cleared, while working on addressing any anomalies that may be encountered during this process.\nWe do not have an ETA for mitigation at this time.\nWe will provide an update by Friday, 2024-07-26 15:30 US/Pacific with current details.\nDiagnosis: Impacted customers may experience issues where the following operations remain in pending state for an extended period of time:\n- Disk deletions\n- Disk update operations such as resizes\n- VM deletions are also affected due to issues with disk deletions.\n- Managed Instance Groups scale down operations\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-26T20:15:31+00:00","modified":"2024-07-26T21:17:20+00:00","when":"2024-07-26T20:15:31+00:00","text":"Summary: 
Persistent Disk (PD) is experiencing issues with disk delete and update operations in turn affecting other disk related operations globally.\nDescription: Our engineers identified the root cause of the issue and are rolling out a mitigation. Once the roll out completes, the backlog will be processed.\nWe do not have an ETA for mitigation at this time.\nWe will provide an update by Friday, 2024-07-26 14:30 US/Pacific with current details.\nDiagnosis: Impacted customers may experience issues where the following operations remain in pending state for an extended period of time:\n- Disk deletions\n- Disk update operations such as resizes\n- VM deletions are also affected due to issues with disk deletions.\n- Managed Instance Groups scale down operations\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-26T19:24:09+00:00","modified":"2024-07-26T20:15:31+00:00","when":"2024-07-26T19:24:09+00:00","text":"Summary: Persistent Disk (PD) is experiencing issues with disk delete and update operations in turn affecting other disk related operations globally.\nDescription: We are experiencing an issue with Persistent Disk beginning at Friday, 2024-07-26 10:00 US/Pacific.\nOur engineering team is working on a 
potential mitigation for the issue.\nWe do not have an ETA for mitigation at this time.\nWe will provide an update by Friday, 2024-07-26 13:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience issues where the following operations remain in pending state for an extended period of time:\n- Disk deletions\n- Disk update operations such as resizes\n- VM deletions are also affected due to issues with disk deletions.\n- Managed Instance Groups scale down operations\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-07-31T22:58:20+00:00","modified":"2024-07-31T22:58:20+00:00","when":"2024-07-31T22:58:20+00:00","text":"# Incident Report\n## Summary\nOn July 26, 2024, Persistent Disk (PD) experienced issues with disk delete and instance delete operations for a duration of 8 hours and 14 minutes, potentially affecting customers globally. During this period, these operations were stuck in a pending state. 
To our PD customers whose businesses were impacted during this disruption, we sincerely apologize.\nGoogle is committed to quickly and continually improving our technology and operations to prevent service disruptions. We appreciate your patience and apologize again for the impact to your organization. We thank you for your business.\n## Root Cause\nDue to a coordination error, an internal capacity reclamation process was triggered prematurely without having the necessary internal software in place. As a result, user-initiated disk deletes and related operations were blocked and remained in a pending state for an extended period. It affected approximately 85% of scopes.\nOnce the issue was resolved, the blocked operations were automatically unblocked and completed successfully.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage via monitoring alert on July 26 at 9:57 US/Pacific and promptly began an investigation. Upon understanding the issue, engineers swiftly initiated a clean-up of blocked operations, to prevent the issue from affecting more disks.\nTo fully resolve the issue, engineers accelerated the deployment of the software update across remaining scopes. This allowed the stalled operations to complete gracefully, unblocking customer-initiated operations.\nGoogle is committed to preventing a recurrence of this issue and is taking the following actions:\n* **Enforce a rigorous review process:** All future internal cleanup operations will undergo a thorough review and validation process to identify and mitigate potential conflicts with other ongoing processes.\n* **Make error-handling more robust:** Improve the system's ability to detect and gracefully handle stuck operations, preventing them from blocking customer-initiated actions.\n* **Address tooling gaps for phased rollout:** All future internal capacity reclamation processes will be conducted in phases to minimize the impact if any unexpected issues arise.\n## Detailed Description of Impact\nOn Friday, July 26, 2024, from 9:57 to 18:11 US/Pacific, PD customers globally experienced issues with disk delete and update operations. 
This also affected other disk-related actions.\nSpecifically, impacted customers encountered extended delays or failures with the following operations:\n* Disk deletions\n* Virtual Machine (VM) deletions (due to pending disk deletions)\n* Managed Instance Group scale-down operations","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Persistent Disk","id":"SzESm2Ux129pjDGKWD68"}],"uri":"incidents/ueutwY19MFNTod5YVR9D","currently_affected_locations":[],"previously_affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"9mHC9Da7hWZVCBvcTBnB","number":"3743796568419819021","begin":"2024-07-25T16:15:28+00:00","created":"2024-07-25T19:34:40+00:00","end":"2024-07-25T21:38:20+00:00","modified":"2024-07-25T21:38:24+00:00","external_desc":"Google Cloud VMware Engine landing page experienced degradation.","updates":[{"created":"2024-07-25T21:38:20+00:00","modified":"2024-07-25T21:38:24+00:00","when":"2024-07-25T21:38:20+00:00","text":"The issue with VMWare engine has been resolved for all affected users as of Thursday, 2024-07-25 14:30 US/Pacific.\nWe thank you for your patience while we 
worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-25T20:41:38+00:00","modified":"2024-07-25T21:38:24+00:00","when":"2024-07-25T20:41:38+00:00","text":"Summary: Google Cloud VMware Engine landing page is experiencing degradation.\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Thursday, 2024-07-25 14:45 US/Pacific.\nWe will provide an update by Thursday, 2024-07-25 15:15 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nPlease use the alternate link to access the VMWare Engine homepage and then select your project\"\nGoing to the URL directly displays an error \"something went wrong\" or “failed to load” since no project is selected: https://console.cloud.google.com/vmwareengine/dashboard\nDiagnosis: Customers may be unable to access the landing page reference link to VMware Engine in Cloud Console.\nWorkaround: Impacted customers may use this alternate link in the interim: https://console.cloud.google.com/vmwareengine/dashboard","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-07-25T19:34:38+00:00","modified":"2024-07-25T20:41:38+00:00","when":"2024-07-25T19:34:38+00:00","text":"Summary: Customers may be unable to access the landing page reference link to VMware Engine in Cloud Console.\nDescription: We are experiencing an issue with the landing page of VMWare Engine Service.\nOur engineering team continues to investigate the issue.\nPlease use the alternate link to access the VMWare Engine homepage: https://console.cloud.google.com/vmwareengine/dashboard\nWe will provide an update by Thursday, 2024-07-25 13:45 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers may be unable to access the landing page reference link to VMware Engine in Cloud Console.\nWorkaround: Impacted customers may use this alternate link in the interim: https://console.cloud.google.com/vmwareengine/dashboard","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin 
(europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]}],"most_recent_update":{"created":"2024-07-25T21:38:20+00:00","modified":"2024-07-25T21:38:24+00:00","when":"2024-07-25T21:38:20+00:00","text":"The issue with VMWare engine has been resolved for all affected users as of Thursday, 2024-07-25 14:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"VMWare engine","id":"9H6gWUHvb2ZubeoxzQ1Y"}],"uri":"incidents/9mHC9Da7hWZVCBvcTBnB","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"id":"DK3LfKowzJPpZq4Q9YqP","number":"215195772340650488","begin":"2024-07-19T06:48:43+00:00","created":"2024-07-19T06:48:47+00:00","end":"2024-07-19T23:32:59+00:00","modified":"2024-07-19T23:33:02+00:00","external_desc":"Windows VMs using CrowdStrike are crashing.","updates":[{"created":"2024-07-19T23:32:59+00:00","modified":"2024-07-19T23:33:03+00:00","when":"2024-07-19T23:32:59+00:00","text":"Beginning July 19th at 04:09 UTC, Google Cloud detected some customer Windows VMs experiencing Blue Screen of Death (BSOD) and crash loops. These Windows VMs running CrowdStrike Falcon began to fail after a CrowdStrike software update.\nCrowdstrike quickly deployed a fix, however some customer impact remained. 
While Google Cloud services were not directly impacted, Google Cloud continues to work with CrowdStrike to help our customers recover from any remaining impact.\nCrowdStrike has published a statement about this incident, recommending steps for workarounds and remediation: https://www.crowdstrike.com/blog/statement-on-falcon-content-update-for-windows-hosts/\nIf your Windows VM continues to experience issues after a reboot, manual patching may be required. Please contact Google Cloud Customer Support.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-19T21:46:51+00:00","modified":"2024-07-19T23:33:02+00:00","when":"2024-07-19T21:46:51+00:00","text":"Summary: Windows VMs using CrowdStrike are crashing.\nDescription: We are experiencing an issue with Windows VMs running CrowdStrike on Google Compute Engine. CrowdStrike has confirmed that a faulty update to the CrowdStrike Falcon agent was deployed beginning at 04:09 UTC July 19.\nAfter automatically receiving a defective patch from CrowdStrike, Windows VMs may crash and might not be able to reboot. Windows VMs that are currently up and running should no longer be impacted.\nAccording to CrowdStrike, 80% of Windows VMs experiencing this issue will self-heal during a reboot.\nGoogle teams are continuing to work with CrowdStrike on helping customers recover their VMs and proactively reaching out to affected customers.\nWe will provide an update by Friday, 2024-07-19 17:00 US/Pacific with current details.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nDiagnosis: Windows VMs are crashing and going into an unexpected reboot.\nImpacted users may observe Serial port 1 showing the call trace:\nSYSTEM_THREAD_EXCEPTION_NOT_HANDLED\nCsagent.sys (part of the CrowdStrike application package)\n0xFFFFFFFFC0000005 0xFFFFF80E88CF033D 0xFFFF858A870FAC58 0xFFFF858A870FA4A0 Dumping stack trace: 0xFFFFF809E35317BF (pvpanic.sys+0x17BF) 0xFFFFF809E35316CB (pvpanic.sys+0x16CB) 0xFFFFF80335941B27 (ntoskrnl.exe+0x292B27) 0xFFFFF80335940AD9 (ntoskrnl.exe+0x291AD9) 0xFFFFF80335868CE7 (ntoskrnl.exe+0x1B9CE7) 0xFFFFF8033588447C (ntoskrnl.exe+0x1D547C) 0xFFFFF803358416BF (ntoskrnl.exe+0x1926BF) 0xFFFFF8033587335F (ntoskrnl.exe+0x1C435F) 0xFFFFF803356D77D0 (ntoskrnl.exe+0x0287D0) 0xFFFFF8033579D214 (ntoskrnl.exe+0x0EE214) 0xFFFFF8033587CF42 (ntoskrnl.exe+0x1CDF42) 0xFFFFF8033587893D (ntoskrnl.exe+0x1C993D) 0xFFFFF809E314033D (csagent.sys+0x0E033D) 0xFFFFF809E3115EEE (csagent.sys+0x0B5EEE) 0xFFFFF809E3117185 (csagent.sys+0x0B7185) 0xFFFFF809E334A037 (csagent.sys+0x2EA037) 0xFFFFF809E3346BB4 (csagent.sys+0x2E6BB4) 0xFFFFF809E30C68C1 (csagent.sys+0x0668C1) 0xFFFFF809E30C597E (csagent.sys+0x06597E) 0xFFFFF809E30C56EB (csagent.sys+0x0656EB) 0xFFFFF809E316883A (csagent.sys+0x10883A) 0xFFFFF809E30BDD3B (csagent.sys+0x05DD3B) 0xFFFFF809E30BDB57 (csagent.sys+0x05DB57) 0xFFFFF809E315D4D1 (csagent.sys+0x0FD4D1) 0xFFFFF803357B4A85 (ntoskrnl.exe+0x105A85) 0xFFFFF803358719FC (ntoskrnl.exe+0x1C29FC)\nWorkaround:\nCrowdStrike has pushed an update which should replace the faulty channel file \"C-00000291*.sys\". 
The Windows VMs that are currently running should no longer be impacted.\nIdentifying the faulty \"C-00000291*.sys\":\nA channel file \"C-00000291*.sys\" with a timestamp of 0527 UTC or later is the reverted (good) version.\nIf your VMs are affected, please follow the workaround steps below to fix the issue.\n- Follow [offline repair](https://cloud.google.com/compute/docs/troubleshooting/troubleshooting-windows#offline-repair) (Steps 1 to 3)\n- Use a rescue VM (test VM) and attach the boot disk of the affected VM as a secondary disk\n**NOTE:** Ensure that the boot disk image of the rescue VM differs from the boot disk that is being repaired; failure to do so may result in duplicate disk or partition GUIDs and unpredictable results, as confirmed by Microsoft.\n- Navigate to the D:\\Windows\\System32\\drivers\\CrowdStrike directory\n- Locate the file matching \"C-00000291*.sys\", and delete it.\n- Power down the rescue VM and detach the secondary disk\n- Re-attach the disk to the original VM and boot","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-19T20:14:48+00:00","modified":"2024-07-19T21:46:51+00:00","when":"2024-07-19T20:14:48+00:00","text":"Summary: Windows VMs using CrowdStrike are crashing.\nDescription: We are experiencing an issue with Windows 
VMs running CrowdStrike on Google Compute Engine. CrowdStrike has confirmed that a faulty update to the CrowdStrike Falcon agent was deployed beginning at 04:09 UTC July 19.\nAfter automatically receiving a defective patch from CrowdStrike, Windows VMs may crash and might not be able to reboot. Windows VMs that are currently up and running should no longer be impacted.\nAccording to CrowdStrike, 80% of Windows VMs experiencing this issue will self-heal during a reboot.\nGoogle teams are continuing to work with CrowdStrike on helping customers recover their VMs and proactively reaching out to affected customers.\nWe will provide an update by Friday, 2024-07-19 15:00 US/Pacific with current details.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nDiagnosis: Windows VMs are crashing and going into an unexpected reboot.\nImpacted users may observe Serial port 1 showing the call trace:\nSYSTEM_THREAD_EXCEPTION_NOT_HANDLED\nCsagent.sys (part of the CrowdStrike application package)\n0xFFFFFFFFC0000005 0xFFFFF80E88CF033D 0xFFFF858A870FAC58 0xFFFF858A870FA4A0 Dumping stack trace: 0xFFFFF809E35317BF (pvpanic.sys+0x17BF) 0xFFFFF809E35316CB (pvpanic.sys+0x16CB) 0xFFFFF80335941B27 (ntoskrnl.exe+0x292B27) 0xFFFFF80335940AD9 (ntoskrnl.exe+0x291AD9) 0xFFFFF80335868CE7 (ntoskrnl.exe+0x1B9CE7) 0xFFFFF8033588447C (ntoskrnl.exe+0x1D547C) 0xFFFFF803358416BF (ntoskrnl.exe+0x1926BF) 0xFFFFF8033587335F (ntoskrnl.exe+0x1C435F) 0xFFFFF803356D77D0 (ntoskrnl.exe+0x0287D0) 0xFFFFF8033579D214 (ntoskrnl.exe+0x0EE214) 0xFFFFF8033587CF42 (ntoskrnl.exe+0x1CDF42) 0xFFFFF8033587893D (ntoskrnl.exe+0x1C993D) 0xFFFFF809E314033D (csagent.sys+0x0E033D) 0xFFFFF809E3115EEE (csagent.sys+0x0B5EEE) 0xFFFFF809E3117185 (csagent.sys+0x0B7185) 0xFFFFF809E334A037 (csagent.sys+0x2EA037) 0xFFFFF809E3346BB4 (csagent.sys+0x2E6BB4) 0xFFFFF809E30C68C1 (csagent.sys+0x0668C1) 0xFFFFF809E30C597E (csagent.sys+0x06597E) 0xFFFFF809E30C56EB (csagent.sys+0x0656EB) 0xFFFFF809E316883A (csagent.sys+0x10883A) 0xFFFFF809E30BDD3B (csagent.sys+0x05DD3B) 0xFFFFF809E30BDB57 (csagent.sys+0x05DB57) 0xFFFFF809E315D4D1 (csagent.sys+0x0FD4D1) 0xFFFFF803357B4A85 (ntoskrnl.exe+0x105A85) 0xFFFFF803358719FC (ntoskrnl.exe+0x1C29FC)\nWorkaround:\nWorkaround steps for individual hosts:\n- Reboot the host to give it an opportunity to download the reverted channel file.\n- If the host crashes again, boot Windows into Safe Mode or the Windows Recovery Environment. NOTE: Putting the host on a wired network (as opposed to WiFi) and using Safe Mode with Networking can help remediation.\n- Navigate to the %WINDIR%\\System32\\drivers\\CrowdStrike directory\n- Locate the file matching \"C-00000291*.sys\", and delete it.\n- Boot the host normally.\n- Note: BitLocker-encrypted hosts may require a recovery key.\nCrowdStrike has pushed an update which should replace the faulty channel file \"C-00000291*.sys\". 
The Windows VMs that are currently running should no longer be impacted.\nIdentifying the faulty \"C-00000291*.sys\":\nA channel file \"C-00000291*.sys\" with a timestamp of 0527 UTC or later is the reverted (good) version.\nIf your VMs are affected, please follow the workaround steps below to fix the issue.\n- Follow [offline repair](https://cloud.google.com/compute/docs/troubleshooting/troubleshooting-windows#offline-repair) (Steps 1 to 3)\n- Use a rescue VM (test VM) and attach the boot disk of the affected VM as a secondary disk\n**NOTE:** Ensure that the boot disk image of the rescue VM differs from the boot disk that is being repaired; failure to do so may result in duplicate disk or partition GUIDs and unpredictable results, as confirmed by Microsoft.\n- Navigate to the D:\\Windows\\System32\\drivers\\CrowdStrike directory\n- Locate the file matching \"C-00000291*.sys\", and delete it.\n- Power down the rescue VM and detach the secondary disk\n- Re-attach the disk to the original VM and boot","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-19T17:51:08+00:00","modified":"2024-07-19T20:14:48+00:00","when":"2024-07-19T17:51:08+00:00","text":"Summary: Windows VMs using CrowdStrike are crashing.\nDescription: We are experiencing an issue with Windows 
VMs running CrowdStrike on Google Compute Engine. CrowdStrike has confirmed that a faulty update to the CrowdStrike Falcon agent was deployed beginning at 04:09 UTC July 19.\nAfter having automatically received a defective patch from CrowdStrike, Windows VMs may crash and might not be able to reboot. Windows VMs that are currently up and running should no longer be impacted.\nAccording to CrowdStrike, 80% of Windows VMs experiencing this issue will self-heal during a reboot.\nGoogle teams are continuing to work with CrowdStrike on recovery efforts and proactively reaching out to affected customers to mitigate\nWe will provide an update by Friday, 2024-07-19 13:00 US/Pacific with current details.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nDiagnosis: Windows VM are crashing and going into an unexpected reboot.\nImpacted users may observe Serial port 1 showing the call trace,\nSYSTEM_THREAD_EXCEPTION_NOT_HANDLED\nCsagent.sys (part of the Crowdstrike Application package)\n0xFFFFFFFFC0000005 0xFFFFF80E88CF033D 0xFFFF858A870FAC58 0xFFFF858A870FA4A0 Dumping stack trace: 0xFFFFF809E35317BF (pvpanic.sys+0x17BF) 0xFFFFF809E35316CB (pvpanic.sys+0x16CB) 0xFFFFF80335941B27 (ntoskrnl.exe+0x292B27) 0xFFFFF80335940AD9 (ntoskrnl.exe+0x291AD9) 0xFFFFF80335868CE7 (ntoskrnl.exe+0x1B9CE7) 0xFFFFF8033588447C (ntoskrnl.exe+0x1D547C) 0xFFFFF803358416BF (ntoskrnl.exe+0x1926BF) 0xFFFFF8033587335F (ntoskrnl.exe+0x1C435F) 0xFFFFF803356D77D0 (ntoskrnl.exe+0x0287D0) 0xFFFFF8033579D214 (ntoskrnl.exe+0x0EE214) 0xFFFFF8033587CF42 (ntoskrnl.exe+0x1CDF42) 0xFFFFF8033587893D (ntoskrnl.exe+0x1C993D) 0xFFFFF809E314033D (csagent.sys+0x0E033D) 0xFFFFF809E3115EEE (csagent.sys+0x0B5EEE) 0xFFFFF809E3117185 (csagent.sys+0x0B7185) 0xFFFFF809E334A037 (csagent.sys+0x2EA037) 0xFFFFF809E3346BB4 (csagent.sys+0x2E6BB4) 0xFFFFF809E30C68C1 (csagent.sys+0x0668C1) 0xFFFFF809E30C597E (csagent.sys+0x06597E) 0xFFFFF809E30C56EB (csagent.sys+0x0656EB) 0xFFFFF809E316883A (csagent.sys+0x10883A) 0xFFFFF809E30BDD3B (csagent.sys+0x05DD3B) 0xFFFFF809E30BDB57 (csagent.sys+0x05DB57) 0xFFFFF809E315D4D1 (csagent.sys+0x0FD4D1) 0xFFFFF803357B4A85 (ntoskrnl.exe+0x105A85) 0xFFFFF803358719FC (ntoskrnl.exe+0x1C29FC)\nWorkaround: Crowdstrike has pushed an update which should replace the agent \"C-00000291*.sys”. 
The windows VMs that are currently running should no longer be impacted.\nIdentifying Faulty \"C-00000291*.sys\"\nChannel file \"C-00000291*.sys\" with timestamp of 0527 UTC or later is the reverted (good) version.\nIf your VMs are affected, please follow the Workaround Steps to fix the issue.\n- Follow [offline repair](https://cloud.google.com/compute/docs/troubleshooting/troubleshooting-windows#offline-repair) (Step 1 to 3)\n- Use rescue VM (Test VM), attach boot disk of the affected VM as a secondary disk\n**NOTE: Ensure that the boot disk image of the recovery VM differs from the boot disk that is being repaired; failure to do so may result in duplicate disk or partition GUID and unpredictable results as confirmed by Microsoft.\n- Navigate to the D:\\Windows\\System32\\drivers\\CrowdStrike directory\n- Locate the file matching “C-00000291*.sys”, and delete it.\n- Power down the Rescue VM detach the secondary disk\n- Re-attach the VM to the original VM and boot","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-19T15:59:39+00:00","modified":"2024-07-19T17:51:08+00:00","when":"2024-07-19T15:59:39+00:00","text":"Summary: Windows VMs using CrowdStrike are crashing.\nDescription: We are experiencing an issue with Windows 
VMs running CrowdStrike on Google Compute Engine. CrowdStrike has confirmed that a faulty update to the CrowdStrike Falcon agent was deployed beginning at 04:09 UTC July 19.\nAfter having automatically received a defective patch from CrowdStrike, Windows VMs crash and might not be able to reboot.\nWindows VMs that are currently up and running should no longer be impacted.\nAccording to CrowdStrike, 80% of Windows VMs experiencing this issue will self-heal during a reboot.\nGoogle teams are continuing to work with CrowdStrike on recovery efforts and proactively reaching out to affected customers to mitigate.\nWe will provide an update by Friday, 2024-07-19 11:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Windows VM are crashing and going into an unexpected reboot.\nImpacted users may observe Serial port 1 showing the call trace,\nSYSTEM_THREAD_EXCEPTION_NOT_HANDLED\nCsagent.sys (part of the Crowdstrike Application package)\n0xFFFFFFFFC0000005 0xFFFFF80E88CF033D 0xFFFF858A870FAC58 0xFFFF858A870FA4A0 Dumping stack trace: 0xFFFFF809E35317BF (pvpanic.sys+0x17BF) 0xFFFFF809E35316CB (pvpanic.sys+0x16CB) 0xFFFFF80335941B27 (ntoskrnl.exe+0x292B27) 0xFFFFF80335940AD9 (ntoskrnl.exe+0x291AD9) 0xFFFFF80335868CE7 (ntoskrnl.exe+0x1B9CE7) 0xFFFFF8033588447C (ntoskrnl.exe+0x1D547C) 0xFFFFF803358416BF (ntoskrnl.exe+0x1926BF) 0xFFFFF8033587335F (ntoskrnl.exe+0x1C435F) 0xFFFFF803356D77D0 (ntoskrnl.exe+0x0287D0) 0xFFFFF8033579D214 (ntoskrnl.exe+0x0EE214) 0xFFFFF8033587CF42 (ntoskrnl.exe+0x1CDF42) 0xFFFFF8033587893D (ntoskrnl.exe+0x1C993D) 0xFFFFF809E314033D (csagent.sys+0x0E033D) 0xFFFFF809E3115EEE (csagent.sys+0x0B5EEE) 0xFFFFF809E3117185 (csagent.sys+0x0B7185) 0xFFFFF809E334A037 (csagent.sys+0x2EA037) 0xFFFFF809E3346BB4 (csagent.sys+0x2E6BB4) 0xFFFFF809E30C68C1 (csagent.sys+0x0668C1) 0xFFFFF809E30C597E (csagent.sys+0x06597E) 0xFFFFF809E30C56EB (csagent.sys+0x0656EB) 0xFFFFF809E316883A (csagent.sys+0x10883A) 0xFFFFF809E30BDD3B (csagent.sys+0x05DD3B) 0xFFFFF809E30BDB57 (csagent.sys+0x05DB57) 0xFFFFF809E315D4D1 (csagent.sys+0x0FD4D1) 0xFFFFF803357B4A85 (ntoskrnl.exe+0x105A85) 0xFFFFF803358719FC (ntoskrnl.exe+0x1C29FC)\nWorkaround: Crowdstrike has pushed an update which should replace the agent \"C-00000291*.sys”. 
The windows VMs that are currently running should no longer be impacted.\nIdentifying Faulty \"C-00000291*.sys\"\nChannel file \"C-00000291*.sys\" with timestamp of 0527 UTC or later is the reverted (good) version.\nIf your VMs are affected, please follow the Workaround Steps to fix the issue.\n- Follow [offline repair](https://cloud.google.com/compute/docs/troubleshooting/troubleshooting-windows#offline-repair) (Step 1 to 3)\n- Use rescue VM (Test VM), attach boot disk of the affected VM as a secondary disk\n**NOTE: Ensure that the boot disk image of the recovery VM differs from the boot disk that is being repaired; failure to do so may result in duplicate disk or partition GUID and unpredictable results as confirmed by Microsoft.\n- Navigate to the D:\\Windows\\System32\\drivers\\CrowdStrike directory\n- Locate the file matching “C-00000291*.sys”, and delete it.\n- Power down the Rescue VM detach the secondary disk\n- Re-attach the VM to the original VM and boot","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-19T14:05:31+00:00","modified":"2024-07-19T15:59:39+00:00","when":"2024-07-19T14:05:31+00:00","text":"Summary: Windows VMs using CrowdStrike are crashing.\nDescription: We are experiencing an issue with Windows 
VMs running CrowdStrike on Google Compute Engine. CrowdStrike has confirmed that a faulty update to the CrowdStrike Falcon agent was deployed beginning at 04:09 UTC July 19.\nAfter having automatically received a defective patch from CrowdStrike, Windows VMs crash and might not be able to reboot.\nWindows VMs that are currently up and running should no longer be impacted.\nAccording to CrowdStrike, 80% of Windows VMs experiencing this issue will self-heal during a reboot.\nWe will provide an update by Friday, 2024-07-19 09:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Windows VM are crashing and going into an unexpected reboot.\nImpacted users may observe Serial port 1 showing the call trace,\nSYSTEM_THREAD_EXCEPTION_NOT_HANDLED\nCsagent.sys (part of the Crowdstrike Application package)\n0xFFFFFFFFC0000005 0xFFFFF80E88CF033D 0xFFFF858A870FAC58 0xFFFF858A870FA4A0 Dumping stack trace: 0xFFFFF809E35317BF (pvpanic.sys+0x17BF) 0xFFFFF809E35316CB (pvpanic.sys+0x16CB) 0xFFFFF80335941B27 (ntoskrnl.exe+0x292B27) 0xFFFFF80335940AD9 (ntoskrnl.exe+0x291AD9) 0xFFFFF80335868CE7 (ntoskrnl.exe+0x1B9CE7) 0xFFFFF8033588447C (ntoskrnl.exe+0x1D547C) 0xFFFFF803358416BF (ntoskrnl.exe+0x1926BF) 0xFFFFF8033587335F (ntoskrnl.exe+0x1C435F) 0xFFFFF803356D77D0 (ntoskrnl.exe+0x0287D0) 0xFFFFF8033579D214 (ntoskrnl.exe+0x0EE214) 0xFFFFF8033587CF42 (ntoskrnl.exe+0x1CDF42) 0xFFFFF8033587893D (ntoskrnl.exe+0x1C993D) 0xFFFFF809E314033D (csagent.sys+0x0E033D) 0xFFFFF809E3115EEE (csagent.sys+0x0B5EEE) 0xFFFFF809E3117185 (csagent.sys+0x0B7185) 0xFFFFF809E334A037 (csagent.sys+0x2EA037) 0xFFFFF809E3346BB4 (csagent.sys+0x2E6BB4) 0xFFFFF809E30C68C1 (csagent.sys+0x0668C1) 0xFFFFF809E30C597E (csagent.sys+0x06597E) 0xFFFFF809E30C56EB (csagent.sys+0x0656EB) 0xFFFFF809E316883A (csagent.sys+0x10883A) 0xFFFFF809E30BDD3B (csagent.sys+0x05DD3B) 0xFFFFF809E30BDB57 (csagent.sys+0x05DB57) 0xFFFFF809E315D4D1 (csagent.sys+0x0FD4D1) 0xFFFFF803357B4A85 (ntoskrnl.exe+0x105A85) 0xFFFFF803358719FC (ntoskrnl.exe+0x1C29FC)\nWorkaround: Crowdstrike has pushed an update which should replace the agent \"C-00000291*.sys”. 
The windows VMs that are currently running should no longer be impacted.\nIdentifying Faulty \"C-00000291*.sys\"\nChannel file \"C-00000291*.sys\" with timestamp of 0527 UTC or later is the reverted (good) version.\nIf your VMs are affected, please follow the Workaround Steps to fix the issue.\n- Follow [offline repair](https://cloud.google.com/compute/docs/troubleshooting/troubleshooting-windows#offline-repair) (Step 1 to 3)\n- Use rescue VM (Test VM), attach boot disk of the affected VM as a secondary disk\n**NOTE: Ensure that the boot disk image of the recovery VM differs from the boot disk that is being repaired; failure to do so may result in duplicate disk or partition GUID and unpredictable results as confirmed by Microsoft.\n- Navigate to the D:\\Windows\\System32\\drivers\\CrowdStrike directory\n- Locate the file matching “C-00000291*.sys”, and delete it.\n- Power down the Rescue VM detach the secondary disk\n- Re-attach the VM to the original VM and boot","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-19T13:54:10+00:00","modified":"2024-07-19T14:05:31+00:00","when":"2024-07-19T13:54:10+00:00","text":"Summary: Windows VMs using CrowdStrike are crashing.\nDescription: We are experiencing an issue with Windows 
VMs running CrowdStrike on Google Compute Engine. CrowdStrike has confirmed that a faulty update to the CrowdStrike Falcon agent was deployed beginning at 04:09 UTC July 19.\nAfter automatically receiving a defective patch from CrowdStrike, Windows VMs crash and might not be able to reboot.\nWindows VMs that are currently up and running should no longer be impacted.\nAccording to CrowdStrike, 80% of Windows VMs experiencing this issue will self-heal during a reboot.\nWe will provide an update by Friday, 2024-07-19 09:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Windows VMs are crashing and going into an unexpected reboot.\nImpacted users may observe Serial port 1 showing the call trace:\nSYSTEM_THREAD_EXCEPTION_NOT_HANDLED\nCsagent.sys (part of the CrowdStrike application package)\n0xFFFFFFFFC0000005 0xFFFFF80E88CF033D 0xFFFF858A870FAC58 0xFFFF858A870FA4A0 Dumping stack trace: 0xFFFFF809E35317BF (pvpanic.sys+0x17BF) 0xFFFFF809E35316CB (pvpanic.sys+0x16CB) 0xFFFFF80335941B27 (ntoskrnl.exe+0x292B27) 0xFFFFF80335940AD9 (ntoskrnl.exe+0x291AD9) 0xFFFFF80335868CE7 (ntoskrnl.exe+0x1B9CE7) 0xFFFFF8033588447C (ntoskrnl.exe+0x1D547C) 0xFFFFF803358416BF (ntoskrnl.exe+0x1926BF) 0xFFFFF8033587335F (ntoskrnl.exe+0x1C435F) 0xFFFFF803356D77D0 (ntoskrnl.exe+0x0287D0) 0xFFFFF8033579D214 (ntoskrnl.exe+0x0EE214) 0xFFFFF8033587CF42 (ntoskrnl.exe+0x1CDF42) 0xFFFFF8033587893D (ntoskrnl.exe+0x1C993D) 0xFFFFF809E314033D (csagent.sys+0x0E033D) 0xFFFFF809E3115EEE (csagent.sys+0x0B5EEE) 0xFFFFF809E3117185 (csagent.sys+0x0B7185) 0xFFFFF809E334A037 (csagent.sys+0x2EA037) 0xFFFFF809E3346BB4 (csagent.sys+0x2E6BB4) 0xFFFFF809E30C68C1 (csagent.sys+0x0668C1) 0xFFFFF809E30C597E (csagent.sys+0x06597E) 0xFFFFF809E30C56EB (csagent.sys+0x0656EB) 0xFFFFF809E316883A (csagent.sys+0x10883A) 0xFFFFF809E30BDD3B (csagent.sys+0x05DD3B) 0xFFFFF809E30BDB57 (csagent.sys+0x05DB57) 0xFFFFF809E315D4D1 (csagent.sys+0x0FD4D1) 0xFFFFF803357B4A85 (ntoskrnl.exe+0x105A85) 0xFFFFF803358719FC (ntoskrnl.exe+0x1C29FC)\nWorkaround: CrowdStrike has pushed an update which should replace the faulty channel file \"C-00000291*.sys\". 
The windows VMs that are currently running should no longer be impacted.\nIdentifying Faulty \"C-00000291*.sys\"\nChannel file \"C-00000291*.sys\" with timestamp of 0527 UTC or later is the reverted (good) version.\nIf your VMs are affected, please follow the Workaround Steps to fix the issue.\n- Follow [offline repair](https://cloud.google.com/compute/docs/troubleshooting/troubleshooting-windows#offline-repair) (Step 1 to 3)\n- Use rescue VM (Test VM), attach boot disk of the affected VM as a secondary disk\n**NOTE: Ensure that the boot disk image of the recovery VM differs from the boot disk that is being repaired; failure to do so may result in duplicate disk or partition GUID and unpredictable results as confirmed by Microsoft.\n- Navigate to the D:\\Windows\\System32\\drivers\\CrowdStrike directory\n- Locate the file matching “C-00000291*.sys”, and delete it.\n- Power down the Rescue VM detach the secondary disk\n- Re-attach the VM to the original VM and boot","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-19T11:56:45+00:00","modified":"2024-07-19T13:54:10+00:00","when":"2024-07-19T11:56:45+00:00","text":"Summary: Windows VMs using Crowdstrike’s csagent.sys are crashing and going into unexpected 
reboot\nDescription: We are experiencing an issue with Windows VMs running CrowdStrike on Google Compute Engine.\nAfter automatically receiving a defective patch from CrowdStrike, Windows VMs crash and will not be able to reboot.\nWindows VMs that are currently up and running should no longer be impacted.\nWe will provide an update by Friday, 2024-07-19 09:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Windows VMs are crashing and going into an unexpected reboot.\nImpacted users may observe Serial port 1 showing the call trace:\nSYSTEM_THREAD_EXCEPTION_NOT_HANDLED\nCsagent.sys (part of the CrowdStrike application package)\n0xFFFFFFFFC0000005 0xFFFFF80E88CF033D 0xFFFF858A870FAC58 0xFFFF858A870FA4A0 Dumping stack trace: 0xFFFFF809E35317BF (pvpanic.sys+0x17BF) 0xFFFFF809E35316CB (pvpanic.sys+0x16CB) 0xFFFFF80335941B27 (ntoskrnl.exe+0x292B27) 0xFFFFF80335940AD9 (ntoskrnl.exe+0x291AD9) 0xFFFFF80335868CE7 (ntoskrnl.exe+0x1B9CE7) 0xFFFFF8033588447C (ntoskrnl.exe+0x1D547C) 0xFFFFF803358416BF (ntoskrnl.exe+0x1926BF) 0xFFFFF8033587335F (ntoskrnl.exe+0x1C435F) 0xFFFFF803356D77D0 (ntoskrnl.exe+0x0287D0) 0xFFFFF8033579D214 (ntoskrnl.exe+0x0EE214) 0xFFFFF8033587CF42 (ntoskrnl.exe+0x1CDF42) 0xFFFFF8033587893D (ntoskrnl.exe+0x1C993D) 0xFFFFF809E314033D (csagent.sys+0x0E033D) 0xFFFFF809E3115EEE (csagent.sys+0x0B5EEE) 0xFFFFF809E3117185 (csagent.sys+0x0B7185) 0xFFFFF809E334A037 (csagent.sys+0x2EA037) 0xFFFFF809E3346BB4 (csagent.sys+0x2E6BB4) 0xFFFFF809E30C68C1 (csagent.sys+0x0668C1) 0xFFFFF809E30C597E (csagent.sys+0x06597E) 0xFFFFF809E30C56EB (csagent.sys+0x0656EB) 0xFFFFF809E316883A (csagent.sys+0x10883A) 0xFFFFF809E30BDD3B (csagent.sys+0x05DD3B) 0xFFFFF809E30BDB57 (csagent.sys+0x05DB57) 0xFFFFF809E315D4D1 (csagent.sys+0x0FD4D1) 0xFFFFF803357B4A85 (ntoskrnl.exe+0x105A85) 0xFFFFF803358719FC (ntoskrnl.exe+0x1C29FC)\nWorkaround: CrowdStrike has pushed an update which should replace the faulty channel file \"C-00000291*.sys\". 
The windows VMs that are currently running should no longer be impacted.\nIdentifying Faulty \"C-00000291*.sys\"\nChannel file \"C-00000291*.sys\" with timestamp of 0527 UTC or later is the reverted (good) version.\nIf your VMs are affected, please follow the Workaround Steps to fix the issue.\n- Follow [offline repair](https://cloud.google.com/compute/docs/troubleshooting/troubleshooting-windows#offline-repair) (Step 1 to 3)\n- Use rescue VM (Test VM), attach boot disk of the affected VM as a secondary disk\n**NOTE: Ensure that the boot disk image of the recovery VM differs from the boot disk that is being repaired; failure to do so may result in duplicate disk or partition GUID and unpredictable results as confirmed by Microsoft.\n- Navigate to the D:\\Windows\\System32\\drivers\\CrowdStrike directory\n- Locate the file matching “C-00000291*.sys”, and delete it.\n- Power down the Rescue VM detach the secondary disk\n- Re-attach the VM to the original VM and boot","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-19T09:17:13+00:00","modified":"2024-07-19T11:56:55+00:00","when":"2024-07-19T09:17:13+00:00","text":"Summary: Windows VMs using Crowdstrike’s csagent.sys are crashing and going into unexpected 
reboot\nDescription: We are experiencing an issue with Google Compute Engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-07-19 05:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Windows VM are crashing and going into an unexpected reboot.\nImpacted users may observe Serial port 1 showing the call trace,\nSYSTEM_THREAD_EXCEPTION_NOT_HANDLED\nCsagent.sys (part of the Crowdstrike Application package)\n0xFFFFFFFFC0000005 0xFFFFF80E88CF033D 0xFFFF858A870FAC58 0xFFFF858A870FA4A0 Dumping stack trace: 0xFFFFF809E35317BF (pvpanic.sys+0x17BF) 0xFFFFF809E35316CB (pvpanic.sys+0x16CB) 0xFFFFF80335941B27 (ntoskrnl.exe+0x292B27) 0xFFFFF80335940AD9 (ntoskrnl.exe+0x291AD9) 0xFFFFF80335868CE7 (ntoskrnl.exe+0x1B9CE7) 0xFFFFF8033588447C (ntoskrnl.exe+0x1D547C) 0xFFFFF803358416BF (ntoskrnl.exe+0x1926BF) 0xFFFFF8033587335F (ntoskrnl.exe+0x1C435F) 0xFFFFF803356D77D0 (ntoskrnl.exe+0x0287D0) 0xFFFFF8033579D214 (ntoskrnl.exe+0x0EE214) 0xFFFFF8033587CF42 (ntoskrnl.exe+0x1CDF42) 0xFFFFF8033587893D (ntoskrnl.exe+0x1C993D) 0xFFFFF809E314033D (csagent.sys+0x0E033D) 0xFFFFF809E3115EEE (csagent.sys+0x0B5EEE) 0xFFFFF809E3117185 (csagent.sys+0x0B7185) 0xFFFFF809E334A037 (csagent.sys+0x2EA037) 0xFFFFF809E3346BB4 (csagent.sys+0x2E6BB4) 0xFFFFF809E30C68C1 (csagent.sys+0x0668C1) 0xFFFFF809E30C597E (csagent.sys+0x06597E) 0xFFFFF809E30C56EB (csagent.sys+0x0656EB) 0xFFFFF809E316883A (csagent.sys+0x10883A) 0xFFFFF809E30BDD3B (csagent.sys+0x05DD3B) 0xFFFFF809E30BDB57 (csagent.sys+0x05DB57) 0xFFFFF809E315D4D1 (csagent.sys+0x0FD4D1) 0xFFFFF803357B4A85 (ntoskrnl.exe+0x105A85) 0xFFFFF803358719FC (ntoskrnl.exe+0x1C29FC)\nWorkaround: Crowdstrike has pushed an update which should replace the agent \"C-00000291*.sys”. 
The windows VMs that are currently running should no longer be impacted.\nIdentifying Faulty \"C-00000291*.sys\"\nChannel file \"C-00000291*.sys\" with timestamp of 0527 UTC or later is the reverted (good) version.\nIf your VMs are affected, please follow the Workaround Steps to fix the issue.\n- Follow [offline repair](https://cloud.google.com/compute/docs/troubleshooting/troubleshooting-windows#offline-repair) (Step 1 to 3)\n- Use rescue VM (Test VM), attach boot disk of the affected VM as a secondary disk\n- Navigate to the D:\\Windows\\System32\\drivers\\CrowdStrike directory\n- Locate the file matching “C-00000291*.sys”, and delete it.\n- Power down the Rescue VM detach the secondary disk\n- Re-attach the VM to the original VM and boot","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-19T08:07:12+00:00","modified":"2024-07-19T09:17:13+00:00","when":"2024-07-19T08:07:12+00:00","text":"Summary: Windows VMs using Crowdstrike’s csagent.sys are crashing and going into unexpected reboot\nDescription: We are experiencing an issue with Google Compute Engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-07-19 02:30 US/Pacific with current details.\nWe apologize to all who 
are affected by the disruption.\nDiagnosis: Windows VM are crashing and going into an unexpected reboot.\nImpacted users may observe Serial port 1 showing the call trace,\nSYSTEM_THREAD_EXCEPTION_NOT_HANDLED\nCsagent.sys (part of the Crowdstrike Application package)\n0xFFFFFFFFC0000005 0xFFFFF80E88CF033D 0xFFFF858A870FAC58 0xFFFF858A870FA4A0 Dumping stack trace: 0xFFFFF809E35317BF (pvpanic.sys+0x17BF) 0xFFFFF809E35316CB (pvpanic.sys+0x16CB) 0xFFFFF80335941B27 (ntoskrnl.exe+0x292B27) 0xFFFFF80335940AD9 (ntoskrnl.exe+0x291AD9) 0xFFFFF80335868CE7 (ntoskrnl.exe+0x1B9CE7) 0xFFFFF8033588447C (ntoskrnl.exe+0x1D547C) 0xFFFFF803358416BF (ntoskrnl.exe+0x1926BF) 0xFFFFF8033587335F (ntoskrnl.exe+0x1C435F) 0xFFFFF803356D77D0 (ntoskrnl.exe+0x0287D0) 0xFFFFF8033579D214 (ntoskrnl.exe+0x0EE214) 0xFFFFF8033587CF42 (ntoskrnl.exe+0x1CDF42) 0xFFFFF8033587893D (ntoskrnl.exe+0x1C993D) 0xFFFFF809E314033D (csagent.sys+0x0E033D) 0xFFFFF809E3115EEE (csagent.sys+0x0B5EEE) 0xFFFFF809E3117185 (csagent.sys+0x0B7185) 0xFFFFF809E334A037 (csagent.sys+0x2EA037) 0xFFFFF809E3346BB4 (csagent.sys+0x2E6BB4) 0xFFFFF809E30C68C1 (csagent.sys+0x0668C1) 0xFFFFF809E30C597E (csagent.sys+0x06597E) 0xFFFFF809E30C56EB (csagent.sys+0x0656EB) 0xFFFFF809E316883A (csagent.sys+0x10883A) 0xFFFFF809E30BDD3B (csagent.sys+0x05DD3B) 0xFFFFF809E30BDB57 (csagent.sys+0x05DB57) 0xFFFFF809E315D4D1 (csagent.sys+0x0FD4D1) 0xFFFFF803357B4A85 (ntoskrnl.exe+0x105A85) 0xFFFFF803358719FC (ntoskrnl.exe+0x1C29FC)\nWorkaround: Crowdstrike has pushed an update which should replace the agent \"C-00000291*.sys”. The windows VMs that are currently running should no longer be impacted.\nIf your VMs are affected, please follow the Workaround Steps to fix the issue.\n- Follow [offline repair](https://cloud.google.com/compute/docs/troubleshooting/troubleshooting-windows#offline-repair) (Step 1 to 3)\n- Use rescue VM (Test VM), attach boot disk of the affected VM as a secondary disk\n- Navigate to the D:\\Windows\\System32\\drivers\\CrowdStrike directory\n- Locate the file matching “C-00000291*.sys”, and delete it.\n- Power down the Rescue VM detach the secondary disk\n- Re-attach the VM to the original VM and boot","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan 
(europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-19T07:24:47+00:00","modified":"2024-07-19T08:07:12+00:00","when":"2024-07-19T07:24:47+00:00","text":"Summary: Windows VM are crashing and going into unexpected reboot\nDescription: We are experiencing an issue with Google Compute Engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-07-19 02:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Windows VM are crashing and going into unexpected reboot.\nImpacted users may observe Serial port 1 showing the call trace,\nSYSTEM_THREAD_EXCEPTION_NOT_HANDLED\nCsagent.sys (part of the Crowdstrike Application package)\n0xFFFFFFFFC0000005 0xFFFFF80E88CF033D 0xFFFF858A870FAC58 0xFFFF858A870FA4A0 Dumping stack trace: 0xFFFFF809E35317BF (pvpanic.sys+0x17BF) 0xFFFFF809E35316CB (pvpanic.sys+0x16CB) 0xFFFFF80335941B27 (ntoskrnl.exe+0x292B27) 0xFFFFF80335940AD9 (ntoskrnl.exe+0x291AD9) 0xFFFFF80335868CE7 (ntoskrnl.exe+0x1B9CE7) 0xFFFFF8033588447C (ntoskrnl.exe+0x1D547C) 0xFFFFF803358416BF (ntoskrnl.exe+0x1926BF) 0xFFFFF8033587335F (ntoskrnl.exe+0x1C435F) 0xFFFFF803356D77D0 (ntoskrnl.exe+0x0287D0) 0xFFFFF8033579D214 (ntoskrnl.exe+0x0EE214) 0xFFFFF8033587CF42 (ntoskrnl.exe+0x1CDF42) 0xFFFFF8033587893D (ntoskrnl.exe+0x1C993D) 0xFFFFF809E314033D (csagent.sys+0x0E033D) 0xFFFFF809E3115EEE (csagent.sys+0x0B5EEE) 0xFFFFF809E3117185 (csagent.sys+0x0B7185) 0xFFFFF809E334A037 (csagent.sys+0x2EA037) 0xFFFFF809E3346BB4 (csagent.sys+0x2E6BB4) 0xFFFFF809E30C68C1 (csagent.sys+0x0668C1) 0xFFFFF809E30C597E (csagent.sys+0x06597E) 0xFFFFF809E30C56EB (csagent.sys+0x0656EB) 0xFFFFF809E316883A (csagent.sys+0x10883A) 0xFFFFF809E30BDD3B (csagent.sys+0x05DD3B) 0xFFFFF809E30BDB57 (csagent.sys+0x05DB57) 0xFFFFF809E315D4D1 (csagent.sys+0x0FD4D1) 0xFFFFF803357B4A85 (ntoskrnl.exe+0x105A85) 0xFFFFF803358719FC (ntoskrnl.exe+0x1C29FC)\nWorkaround: We recommend the affected users to work with the application package provider and refer to, https://supportportal.crowdstrike.com/s/article/Tech-Alert-Windows-crashes-related-to-Falcon-Sensor-2024-07-19 for additional information.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai 
(asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-19T07:15:19+00:00","modified":"2024-07-19T07:24:47+00:00","when":"2024-07-19T07:15:19+00:00","text":"Summary: Windows VM are crashing and going into unexpected reboot\nDescription: We are experiencing an issue with Google Compute Engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-07-19 01:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Windows VM are crashing and going into unexpected reboot.\nImpacted users may observe Serial port 1 showing the call trace,\nSYSTEM_THREAD_EXCEPTION_NOT_HANDLED\nCsagent.sys (part of the Crowdstrike Application package)\n0xFFFFFFFFC0000005 0xFFFFF80E88CF033D 0xFFFF858A870FAC58 0xFFFF858A870FA4A0 Dumping stack trace: 0xFFFFF809E35317BF (pvpanic.sys+0x17BF) 0xFFFFF809E35316CB (pvpanic.sys+0x16CB) 0xFFFFF80335941B27 (ntoskrnl.exe+0x292B27) 0xFFFFF80335940AD9 (ntoskrnl.exe+0x291AD9) 0xFFFFF80335868CE7 (ntoskrnl.exe+0x1B9CE7) 0xFFFFF8033588447C (ntoskrnl.exe+0x1D547C) 0xFFFFF803358416BF (ntoskrnl.exe+0x1926BF) 0xFFFFF8033587335F (ntoskrnl.exe+0x1C435F) 0xFFFFF803356D77D0 (ntoskrnl.exe+0x0287D0) 0xFFFFF8033579D214 (ntoskrnl.exe+0x0EE214) 0xFFFFF8033587CF42 (ntoskrnl.exe+0x1CDF42) 0xFFFFF8033587893D (ntoskrnl.exe+0x1C993D) 0xFFFFF809E314033D (csagent.sys+0x0E033D) 0xFFFFF809E3115EEE (csagent.sys+0x0B5EEE) 0xFFFFF809E3117185 (csagent.sys+0x0B7185) 0xFFFFF809E334A037 (csagent.sys+0x2EA037) 0xFFFFF809E3346BB4 (csagent.sys+0x2E6BB4) 0xFFFFF809E30C68C1 (csagent.sys+0x0668C1) 0xFFFFF809E30C597E (csagent.sys+0x06597E) 0xFFFFF809E30C56EB 
(csagent.sys+0x0656EB) 0xFFFFF809E316883A (csagent.sys+0x10883A) 0xFFFFF809E30BDD3B (csagent.sys+0x05DD3B) 0xFFFFF809E30BDB57 (csagent.sys+0x05DB57) 0xFFFFF809E315D4D1 (csagent.sys+0x0FD4D1) 0xFFFFF803357B4A85 (ntoskrnl.exe+0x105A85) 0xFFFFF803358719FC (ntoskrnl.exe+0x1C29FC)\nWorkaround: We recommend the affected users to work with the application package provider.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-07-19T06:48:45+00:00","modified":"2024-07-19T07:15:19+00:00","when":"2024-07-19T06:48:45+00:00","text":"Summary: Windows VM are crashing and going into unexpected reboot\nDescription: We are experiencing an issue with Google Compute Engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-07-19 00:15 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Windows VM are crashing and going into unexpected reboot\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo 
(asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-07-19T23:32:59+00:00","modified":"2024-07-19T23:33:03+00:00","when":"2024-07-19T23:32:59+00:00","text":"Beginning July 19th at 04:09 UTC, Google Cloud detected some customer Windows VMs experiencing Blue Screen of Death (BSOD) and crash loops. These Windows VMs running CrowdStrike Falcon began to fail after a CrowdStrike software update.\nCrowdstrike quickly deployed a fix, however some customer impact remained. While Google Cloud services were not directly impacted, Google Cloud continues to work with CrowdStrike to help our customers recover from any remaining impact.\nCrowdstrike has published a statement about this incident recommending steps for workarounds and remediation: https://www.crowdstrike.com/blog/statement-on-falcon-content-update-for-windows-hosts/\nIf your Windows VM continues to experience issues after a reboot, manual patching. 
Please contact Google Cloud Customer Support.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"L3ggmi3Jy4xJmgodFA9K","service_name":"Google Compute Engine","affected_products":[{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"}],"uri":"incidents/DK3LfKowzJPpZq4Q9YqP","currently_affected_locations":[],"previously_affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"YJGAoyHYqtitwVrTodpq","number":"16587822144392793327","begin":"2024-07-16T11:00:02+00:00","created":"2024-07-16T11:31:15+00:00","end":"2024-07-16T12:36:32+00:00","modified":"2024-07-16T12:36:41+00:00","external_desc":"[Chronicle Security] Detection page may not be loading","updates":[{"created":"2024-07-16T12:36:32+00:00","modified":"2024-07-16T12:36:45+00:00","when":"2024-07-16T12:36:32+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Tuesday, 2024-07-16 05:36 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-16T11:41:34+00:00","modified":"2024-07-16T12:36:41+00:00","when":"2024-07-16T11:41:34+00:00","text":"Summary: [Chronicle Security] Detection page may not be 
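The crash signature in the record above surfaces on serial port 1, so a fleet can be triaged by scanning serial console output. A minimal sketch, assuming an authenticated gcloud CLI; the instance and zone names are placeholders:

```python
import re
import subprocess

# Signature from the diagnosis above: the bugcheck name or the faulting driver.
SIGNATURE = re.compile(r"SYSTEM_THREAD_EXCEPTION_NOT_HANDLED|csagent\.sys")

def shows_crash_signature(instance: str, zone: str) -> bool:
    # get-serial-port-output reads port 1 by default.
    out = subprocess.run(
        ["gcloud", "compute", "instances", "get-serial-port-output",
         instance, "--zone", zone],
        capture_output=True, text=True, check=True,
    ).stdout
    return bool(SIGNATURE.search(out))

if shows_crash_signature("windows-vm-1", "us-central1-a"):  # placeholders
    print("VM shows the csagent.sys crash signature")
```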
loading\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Tuesday, 2024-07-16 06:00 US/Pacific.\nWe will provide more information by Tuesday, 2024-07-16 06:00 US/Pacific.\nDiagnosis: Detection page may not be loading\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-07-16T11:30:57+00:00","modified":"2024-07-16T11:41:34+00:00","when":"2024-07-16T11:30:57+00:00","text":"Summary: [Chronicle Security] Detection page may not be loading\nDescription: We are experiencing an issue with Chronicle Security.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-07-16 06:00 US/Pacific with current details.\nDiagnosis: Detection page may not be loading .\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]}],"most_recent_update":{"created":"2024-07-16T12:36:32+00:00","modified":"2024-07-16T12:36:45+00:00","when":"2024-07-16T12:36:32+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Tuesday, 2024-07-16 05:36 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/YJGAoyHYqtitwVrTodpq","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam 
(me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"id":"ERzzrJqeGR2GCW51XKFv","number":"17371485787041351543","begin":"2024-07-15T16:29:50+00:00","created":"2024-07-15T17:44:07+00:00","end":"2024-07-15T18:50:32+00:00","modified":"2024-07-23T13:23:19+00:00","external_desc":"us-central1: Elevated Errors and Degraded Query Performance with Monitoring Metrics","updates":[{"created":"2024-07-22T16:17:16+00:00","modified":"2024-07-23T13:23:19+00:00","when":"2024-07-22T16:17:16+00:00","text":"# Incident Report\n## Summary\nOn Monday, 15 July 2024, Google Cloud Monitoring experienced elevated query errors and degraded performance in the us-central1 region for 2 hours and 6 minutes. This impacted monitoring metrics for cloud products in the region, including Cloud Spanner, Google Kubernetes Engine, Cloud Bigtable, AlloyDB and Cloud SQL.\nTo our Cloud Monitoring customers whose monitoring capabilities were impacted during this disruption, we sincerely apologize. We understand the critical role monitoring plays in maintaining your cloud environments, and this is not the level of service we strive to provide.\nWe are committed to preventing similar disruptions in the future and continuing to improve the platform's reliability and performance.\n## Root Cause\nCloud Monitoring has experienced a sudden and unexpected, inorganic increase in usage, observing a 30% increase in growth over the past 30 days. Our automation responded to the unexpected growth, which pushed services past their current scaling limits leading to out of memory crashes reducing/degrading Cloud Monitoring query capacity in the us-central1 region.\nAs a mitigation, engineers increased the memory allocation limit on affected services to increase their scaling limits and will be working with the source of the unexpected growth to try to reduce their usage back into more expected limits.\n## Remediation and Prevention\nGoogle engineers were alerted to the issue by internal monitoring on 15 July 2024 at 08:53 US/Pacific and immediately started an investigation.\nAt 10:11 US/Pacific engineers began rolling out mitigation to increase the memory allocation limit on affected services. The mitigation was completed at 10:52 US/Pacific resolving the issue.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n* Dynamically adjust our scaling limits to better respond to load demands using spare capacity\n* Partition the zonal query processor into multiple partitions to avoid becoming a bottleneck in times of heavy load.\n## Detailed Description of Impact\nOn Monday 15 July 2024, from 08:46 to 10:52 US/Pacific, multiple Google Cloud services experienced increased query latency and/or reduced availability in the us-central1.\n**Cloud Monitoring**\nCloud Monitoring customers experienced increased query latency and/or reduced availability for Cloud Monitoring metrics stored in the us-central1 cloud region. Queries for metrics stored in other regions, including the \"global\" region were unaffected.\n**Metrics**\nCloud Monitoring API queries e.g. via QueryTimeSeries, ListTimeSeries, or PromQL endpoints, for metrics in this region may have returned a partial or empty response. 
Queries fanning out to multiple regions would have returned applicable data from all other regions.\nCertain service metrics which are backed by precomputed queries in this region were unavailable during the outage window. Due to the real-time nature of precomputed queries, these gaps cannot be backfilled and will remain unavailable indefinitely.\n**Dashboards**\nCloud Console dashboards displaying metrics from this region may have data gaps and, in turn, presented a degraded experience to end users during the outage window. Dashboards displaying precomputed query-backed metrics will continue to display data gaps during this period.\n**Incidents and Alert Notifications**\nCloud Alerting policies where the location is retained and maps to the us-central1 region may have returned incorrect results which prevented alerts from firing and associated notifications being sent in a timely manner or, if short-lived, at all.\n80% of alerts in us-central1 (8% of all alerts) were dropped during the outage window, however most Cloud Alerting policies are global, not region-specific.\nCustomers may have experienced the following related to Cloud Alerting incidents and alert notifications:\n* Incident creation: Some incidents were never created, and related notifications were not sent out.\n* Incident creation: Some incidents were created up to 2 hours late; and the notifications were delayed by a similar duration.\n* Incident close: Some incidents opened prior to the outage were prematurely closed due to the absence of the alerting signal during the outage window.\n* Incident reopen: The incidents that prematurely closed as captured in #3 could reopen once the alerts started firing again - leading to double alerting for customers.\nWhen the query processing service was restored, all ad-hoc and precomputed queries, dashboards, alerts, and notifications also returned to normal operation with the exception of the data gaps noted for precomputed queries during the outage.\n**Cloud Bigtable**\nCloud Bigtable customers experienced a period of missing Google Cloud Monitoring metrics for bigtable.googleapis.com for the duration of this outage. When Google Cloud Monitoring returned to normal operations, Cloud Bigtable Google Cloud Monitoring returned as well. Cloud Bigtable's internal auto scaling capability was not impacted, but customers who use Google Cloud Monitoring metrics to scale their Cloud Bigtable usage would have lost metric signal and may have incorrectly scaled their instances as a result of this outage.\n**Alloy DB**\nAlloyDB customers experienced intermittently a period of missing Google Cloud Monitoring metrics from 9:00 to 10:30 PDT. When Google Cloud Monitoring returned to normal operations, AlloyDB Google Cloud Monitoring returned as well.There were no missing metrics after 10:30 PDT.\n**Google Kubernetes Engine**\nGoogle Kubernetes Engine customers experienced intermittently a period of missing Google Cloud Monitoring metrics from 9:00 to 10:30 PDT. When Google Cloud Monitoring returned to normal operations, GKE Google Cloud Monitoring returned as well.There were no missing metrics after 10:30 PDT.\nWorkload autoscaling based on external / custom metrics may not have been actuated during this period. Workload autoscaling based on cpu / memory were not affected.\n**Cloud Spanner**\nCloud Spanner customers experienced a period of missing Google Cloud Monitoring metrics for spanner.googleapis.com for the duration of this outage. 
When Google Cloud Monitoring returned to normal operations, Cloud Spanner Google Cloud Monitoring returned as well. Cloud Spanner’s native autoscaler was not impacted, but customers who use Google Cloud Monitoring metrics to scale their Cloud Spanner usage (e.g. via open source autoscalers) would have lost metric signal and may have incorrectly scaled their instances as a result of this outage. Data Boost customers who have set up alerts for usage may have gotten alerted as well but Data Boost billing is not impacted.\n**Cloud SQL**\nCloud SQL customers experienced missing Google Cloud Monitoring metrics cloudsql.googleapis.com for the duration of this period. Some customers who set alerts based on these metrics may get incorrectly notified, but Cloud SQL operations and the database datapath were not affected by this incident. The databases all continued to operate normally.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-15T23:21:33+00:00","modified":"2024-07-22T16:17:16+00:00","when":"2024-07-15T23:21:33+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 15 July, 2024 08:46\n**Incident End:** 15 July, 2024 11:00\n**Duration:** 2 hours, 14 minutes\n**Affected Services and Features:**\n- Cloud Monitoring\n- Cloud Spanner\n- Google Cloud Bigtable\n- Google Kubernetes Engine\n- AlloyDB\n**Regions/Zones:** us-central1\n**Description:**\nCloud Monitoring experienced elevated query errors and degraded query performance, impacting monitoring metrics for multiple cloud products in us-central1, due to out-of-memory crashes in part of the query processing service. Google engineers increased the memory allocation limits for this service to mitigate the problem.\nGoogle will complete a full IR in the following days that will provide a full root cause.\n**Customer Impact:**\n- Customers experienced errors and/or increased latency when querying monitoring data. 
This includes Cloud Monitoring API and Google-Managed Prometheus API requests, autoscaling, and viewing dashboards.\n- Alert evaluations were impacted, resulting in potentially missed or false-positive alerts.\n- Any operations relying on querying monitoring metrics were affected.\n- Some GCP system metrics may have missing or incorrect data from the outage period.\n- No customer-written metric data was lost.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-15T18:50:32+00:00","modified":"2024-07-15T23:21:33+00:00","when":"2024-07-15T18:50:32+00:00","text":"The issue with Cloud Monitoring metrics has been resolved for all affected users as of Monday, 2024-07-15 10:52 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-15T18:07:54+00:00","modified":"2024-07-15T18:50:39+00:00","when":"2024-07-15T18:07:54+00:00","text":"Summary: us-central1: Elevated Errors and Degraded Query Performance with Monitoring Metrics\nDescription: We are experiencing issues querying Monitoring metrics with Cloud Monitoring, affecting system metrics from multiple Cloud products and user-defined metrics.\nWe’ve implemented a mitigation which is showing improvements and engineers will continue to monitor.\nWe will provide more information by Monday, 2024-07-15 12:00 US/Pacific.\nDiagnosis: - Affected customers may observe errors and/or latency when trying to query monitoring data, autoscale, or view dashboards for metrics in the us-central1 and global regions.\n- Alert evaluations are also impacted, which means customers may not see expected alerts or may see some false positive alerts.\n- The issue impacts any operations that rely on monitoring metrics.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-07-15T17:54:52+00:00","modified":"2024-07-15T18:07:54+00:00","when":"2024-07-15T17:54:52+00:00","text":"Summary: us-central1: Multiple Cloud Products Experiencing Elevated Errors and Degraded Query Performance with Monitoring Metrics\nDescription: We are experiencing issues with Monitoring metrics across Cloud Monitoring, Bigtable, Cloud Spanner, Cloud SQL, and Google Kubernetes Engine.\nMitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Monday, 2024-07-15 12:00 US/Pacific.\nWe will provide more information by Monday, 2024-07-15 12:30 US/Pacific.\nDiagnosis: Affected customers may observe errors and/or latency when trying to query monitoring data, autoscaling, dashboards, and alert evaluations in the us-central1 region.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-07-15T17:43:59+00:00","modified":"2024-07-15T17:54:52+00:00","when":"2024-07-15T17:43:59+00:00","text":"Summary: us-central1: Multiple Cloud Products Experiencing Elevated Errors and Degraded Query Performance with Monitoring Metrics\nDescription: We are experiencing issues with Monitoring metrics across Cloud Monitoring, Bigtable, Cloud Spanner, Cloud SQL, and Google Kubernetes Engine.\nMitigation work is currently underway by our engineering team.\nWe will provide more information by Monday, 2024-07-15 11:20 
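Since the report notes that QueryTimeSeries/ListTimeSeries calls could return partial or empty responses during the window, callers can treat an empty result as suspect rather than as "no data". A minimal sketch with the google-cloud-monitoring client, assuming a hypothetical project ID and a standard Compute Engine metric:

```python
import time
from google.cloud import monitoring_v3

client = monitoring_v3.MetricServiceClient()
name = "projects/my-project"  # hypothetical project

now = int(time.time())
interval = monitoring_v3.TimeInterval(
    {"end_time": {"seconds": now}, "start_time": {"seconds": now - 3600}}
)
series = list(
    client.list_time_series(
        request={
            "name": name,
            "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
            "interval": interval,
            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
        }
    )
)
# During an incident like the one above, an empty list may reflect a partial
# response from one region rather than a genuine absence of data.
if not series:
    print("no data returned; treat as possibly degraded, not as zero")
```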
US/Pacific.\nDiagnosis: Affected customers may observe errors and/or latency when trying to query monitoring data, autoscaling, dashboards, and alert evaluations in the us-central1 region.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-07-22T16:17:16+00:00","modified":"2024-07-23T13:23:19+00:00","when":"2024-07-22T16:17:16+00:00","text":"# Incident Report\n## Summary\nOn Monday, 15 July 2024, Google Cloud Monitoring experienced elevated query errors and degraded performance in the us-central1 region for 2 hours and 6 minutes. This impacted monitoring metrics for cloud products in the region, including Cloud Spanner, Google Kubernetes Engine, Cloud Bigtable, AlloyDB and Cloud SQL.\nTo our Cloud Monitoring customers whose monitoring capabilities were impacted during this disruption, we sincerely apologize. We understand the critical role monitoring plays in maintaining your cloud environments, and this is not the level of service we strive to provide.\nWe are committed to preventing similar disruptions in the future and continuing to improve the platform's reliability and performance.\n## Root Cause\nCloud Monitoring experienced a sudden, unexpected, inorganic increase in usage, amounting to a 30% increase in growth over the past 30 days. Our automation responded to the unexpected growth, which pushed services past their current scaling limits, leading to out-of-memory crashes that reduced and degraded Cloud Monitoring query capacity in the us-central1 region.\nAs a mitigation, engineers increased the memory allocation limit on affected services to raise their scaling limits, and will be working with the source of the unexpected growth to bring its usage back within expected limits.\n## Remediation and Prevention\nGoogle engineers were alerted to the issue by internal monitoring on 15 July 2024 at 08:53 US/Pacific and immediately started an investigation.\nAt 10:11 US/Pacific engineers began rolling out a mitigation to increase the memory allocation limit on affected services. The mitigation was completed at 10:52 US/Pacific, resolving the issue.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n* Dynamically adjust our scaling limits to better respond to load demands using spare capacity.\n* Partition the zonal query processor into multiple partitions to avoid becoming a bottleneck in times of heavy load.\n## Detailed Description of Impact\nOn Monday, 15 July 2024, from 08:46 to 10:52 US/Pacific, multiple Google Cloud services experienced increased query latency and/or reduced availability in the us-central1 region.\n**Cloud Monitoring**\nCloud Monitoring customers experienced increased query latency and/or reduced availability for Cloud Monitoring metrics stored in the us-central1 cloud region. Queries for metrics stored in other regions, including the \"global\" region, were unaffected.\n**Metrics**\nCloud Monitoring API queries (e.g. via the QueryTimeSeries, ListTimeSeries, or PromQL endpoints) for metrics in this region may have returned a partial or empty response. Queries fanning out to multiple regions would have returned applicable data from all other regions.\nCertain service metrics that are backed by precomputed queries in this region were unavailable during the outage window. Due to the real-time nature of precomputed queries, these gaps cannot be backfilled and will remain unavailable indefinitely.\n**Dashboards**\nCloud Console dashboards displaying metrics from this region may have had data gaps and, in turn, presented a degraded experience to end users during the outage window. Dashboards displaying metrics backed by precomputed queries will continue to display data gaps for this period.\n**Incidents and Alert Notifications**\nCloud Alerting policies where the location is retained and maps to the us-central1 region may have returned incorrect results, which prevented alerts from firing and associated notifications from being sent in a timely manner or, if short-lived, at all.\n80% of alerts in us-central1 (8% of all alerts) were dropped during the outage window; however, most Cloud Alerting policies are global, not region-specific.\nCustomers may have experienced the following related to Cloud Alerting incidents and alert notifications:\n* Incident creation: Some incidents were never created, and related notifications were not sent out.\n* Incident creation: Some incidents were created up to 2 hours late, and the notifications were delayed by a similar duration.\n* Incident close: Some incidents opened prior to the outage were prematurely closed due to the absence of the alerting signal during the outage window.\n* Incident reopen: The incidents that prematurely closed, as described in the previous bullet, could reopen once the alerts started firing again, leading to double alerting for customers.\nWhen the query processing service was restored, all ad-hoc and precomputed queries, dashboards, alerts, and notifications also returned to normal operation, with the exception of the data gaps noted for precomputed queries during the outage.\n**Cloud Bigtable**\nCloud Bigtable customers experienced a period of missing Google Cloud Monitoring metrics for bigtable.googleapis.com for the duration of this outage. When Google Cloud Monitoring returned to normal operations, Cloud Bigtable Google Cloud Monitoring returned as well. Cloud Bigtable's internal autoscaling capability was not impacted, but customers who use Google Cloud Monitoring metrics to scale their Cloud Bigtable usage would have lost metric signal and may have incorrectly scaled their instances as a result of this outage.\n**AlloyDB**\nAlloyDB customers intermittently experienced missing Google Cloud Monitoring metrics from 9:00 to 10:30 PDT. When Google Cloud Monitoring returned to normal operations, AlloyDB Google Cloud Monitoring returned as well. There were no missing metrics after 10:30 PDT.\n**Google Kubernetes Engine**\nGoogle Kubernetes Engine customers intermittently experienced missing Google Cloud Monitoring metrics from 9:00 to 10:30 PDT. When Google Cloud Monitoring returned to normal operations, GKE Google Cloud Monitoring returned as well. There were no missing metrics after 10:30 PDT.\nWorkload autoscaling based on external or custom metrics may not have been actuated during this period. Workload autoscaling based on CPU or memory was not affected.\n**Cloud Spanner**\nCloud Spanner customers experienced a period of missing Google Cloud Monitoring metrics for spanner.googleapis.com for the duration of this outage. When Google Cloud Monitoring returned to normal operations, Cloud Spanner Google Cloud Monitoring returned as well. Cloud Spanner’s native autoscaler was not impacted, but customers who use Google Cloud Monitoring metrics to scale their Cloud Spanner usage (e.g. via open source autoscalers) would have lost metric signal and may have incorrectly scaled their instances as a result of this outage. Data Boost customers who have set up alerts for usage may have been alerted as well, but Data Boost billing is not impacted.\n**Cloud SQL**\nCloud SQL customers experienced missing Google Cloud Monitoring metrics for cloudsql.googleapis.com for the duration of this period. Some customers who set alerts based on these metrics may have been incorrectly notified, but Cloud SQL operations and the database datapath were not affected by this incident. The databases all continued to operate normally.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Monitoring","id":"3zaaDb7antc73BM1UAVT"},{"title":"Cloud Spanner","id":"EcNGGUgBtBLrtm4mWvqC"},{"title":"Google Cloud Bigtable","id":"LfZSuE3xdQU46YMFV5fy"},{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"},{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"}],"uri":"incidents/ERzzrJqeGR2GCW51XKFv","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"rAMBfHu5PBBeBdaV2h7X","number":"2529383699880648721","begin":"2024-07-11T07:00:00+00:00","created":"2024-07-17T02:01:52+00:00","end":"2024-07-17T18:05:00+00:00","modified":"2024-07-18T10:15:51+00:00","external_desc":"BigQuery is experiencing issues with reported storage costs and charges globally.","updates":[{"created":"2024-07-18T10:15:46+00:00","modified":"2024-07-18T10:15:51+00:00","when":"2024-07-18T10:15:46+00:00","text":"**Description:**\nThe issue with Google BigQuery has been fully mitigated as of Wednesday, 2024-07-17 11:05 US/Pacific. Users in all regions should be seeing updated billing information for BigQuery storage SKUs from July 17th onwards. Billing corrections have been completed for all regions up to July 12th, and we expect the rest to be done within the next 48 hours. 
We thank you for your patience while we worked on resolving the issue.\n**Customer Symptoms:**\nBigQuery users may notice a drop in reported storage costs and charges for their projects.\n**Workaround:**\nNone at this time.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-17T02:01:52+00:00","modified":"2024-07-18T10:15:46+00:00","when":"2024-07-17T02:01:52+00:00","text":"**Description:**\nWe are experiencing an issue with Google BigQuery billing beginning on Thursday, 2024-07-11.\nThe issue was caused by configuration changes that temporarily interrupted the reporting flow of BigQuery storage usage to the GCP billing system.\nOur engineering team has since reverted the change and expects reporting to return to normal within the next 24 hours.\nAdditionally, they will work to correct the billing reports for the affected days.\nWe will provide an update by Tuesday, 2024-07-16 20:45 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\n**Customer Symptoms:**\nBigQuery users may notice a drop in reported storage costs and charges for their projects.\n**Workaround:**\nNone at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Multi-region: eu","id":"eu"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Multi-region: us","id":"us"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas 
(us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-07-18T10:15:46+00:00","modified":"2024-07-18T10:15:51+00:00","when":"2024-07-18T10:15:46+00:00","text":"**Description:**\nThe issue with Google BigQuery has been fully mitigated as of Wednesday, 2024-07-17 11:05 US/Pacific. Users in all regions should be seeing updated billing information for BigQuery storage SKUs for July 17th onwards. Billing corrections are done for all regions up to July 12th, and we expect the rest to be done within the next 48 hours.. We thank you for your patience while we worked on resolving the issue.\n**Customer Symptoms:**\nBigQuery users may notice a drop in reported storage costs and charges for their projects.\n**Workaround:**\nNone at this time.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"9CcrhHUcFevXPSVaSxkf","service_name":"Google BigQuery","affected_products":[{"title":"Google BigQuery","id":"9CcrhHUcFevXPSVaSxkf"}],"uri":"incidents/rAMBfHu5PBBeBdaV2h7X","currently_affected_locations":[],"previously_affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Multi-region: eu","id":"eu"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Multi-region: us","id":"us"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas 
(us-west4)","id":"us-west4"}]},{"id":"6SCpW417YmwFN2Cq8ogN","number":"8858368639433111961","begin":"2024-07-10T22:20:04+00:00","created":"2024-07-10T22:20:09+00:00","end":"2024-07-12T14:39:46+00:00","modified":"2024-07-12T14:39:49+00:00","external_desc":"Elevated latency within Cloud Storage for Firebase in europe-west","updates":[{"created":"2024-07-12T14:39:46+00:00","modified":"2024-07-12T14:39:50+00:00","when":"2024-07-12T14:39:46+00:00","text":"The issue with Cloud Storage for Firebase has been resolved for all affected users as of Friday, 2024-07-12 07:07 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-11T23:48:14+00:00","modified":"2024-07-12T14:39:49+00:00","when":"2024-07-11T23:48:14+00:00","text":"Summary: Elevated latency within Cloud Storage for Firebase in europe-west\nDescription: We believe the latency issue with Cloud Storage for Firebase is partially resolved.\nOur engineers are diligently working towards full resolution. We do not have an ETA for full resolution at this point.\nWe will provide an update by Friday, 2024-07-12 10:00 US/Pacific with current details.\nDiagnosis: Impacted customers may experience elevated latency while working with Cloud Storage as part of their Firebase operations.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"}]},{"created":"2024-07-11T19:52:08+00:00","modified":"2024-07-11T23:48:14+00:00","when":"2024-07-11T19:52:08+00:00","text":"Summary: Elevated latency within Cloud Storage for Firebase in europe-west\nDescription: We believe the latency issue with Cloud Storage for Firebase is partially resolved.\nOur engineers are diligently working towards full resolution. 
We do not have an ETA for full resolution at this point.\nWe will provide an update by Thursday, 2024-07-11 17:00 US/Pacific with current details.\nDiagnosis: Impacted customers may experience elevated latency while working with Cloud Storage as part of their Firebase operations.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"}]},{"created":"2024-07-11T16:39:35+00:00","modified":"2024-07-11T19:52:08+00:00","when":"2024-07-11T16:39:35+00:00","text":"Summary: Elevated latency within Cloud Storage for Firebase in europe-west\nDescription: We believe the latency issue with Cloud Storage for Firebase is partially resolved.\nWe do not have an ETA for full resolution at this point.\nWe will provide an update by Thursday, 2024-07-11 13:00 US/Pacific with current details.\nDiagnosis: Impacted customers may experience elevated latency while working with Cloud Storage as part of their Firebase operations.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"}]},{"created":"2024-07-10T22:36:30+00:00","modified":"2024-07-11T16:39:35+00:00","when":"2024-07-10T22:36:30+00:00","text":"Summary: Elevated latency within Cloud Storage for Firebase in europe-west\nDescription: Our engineering team is diligently working to identify the root cause of the issue and determine the most effective mitigation strategy.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-07-11 10:00 US/Pacific.\nDiagnosis: Impacted customers may experience elevated latency while working with Cloud Storage as part of their Firebase operations.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"}]},{"created":"2024-07-10T22:20:05+00:00","modified":"2024-07-10T22:36:30+00:00","when":"2024-07-10T22:20:05+00:00","text":"Summary: Elevated latency within Cloud Storage for Firebase in europe-west\nDescription: We are experiencing an issue with Cloud Storage for Firebase beginning on Tuesday, 2024-07-09 12:20 US/Pacific in Western Europe.\nOur engineering team is engaged and is investigating the issue at hand.\nWe will provide an update by Wednesday, 2024-07-10 16:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience elevated latency while working with Cloud Storage as part of their Firebase operations.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"}]}],"most_recent_update":{"created":"2024-07-12T14:39:46+00:00","modified":"2024-07-12T14:39:50+00:00","when":"2024-07-12T14:39:46+00:00","text":"The issue with Cloud Storage for Firebase has been resolved for all affected users as of Friday, 2024-07-12 07:07 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Storage for 
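Elevated latency like the Cloud Storage for Firebase issue above can be confirmed from the client side with a simple timing probe against the underlying bucket. A minimal sketch using google-cloud-storage; the bucket name is a placeholder:

```python
import time
from google.cloud import storage

client = storage.Client()
bucket = client.bucket("my-app.appspot.com")  # placeholder Firebase bucket

def timed_round_trip(blob_name: str = "latency-probe.txt") -> float:
    # Upload then download a tiny object and report wall-clock seconds.
    blob = bucket.blob(blob_name)
    start = time.perf_counter()
    blob.upload_from_string(b"probe")
    blob.download_as_bytes()
    return time.perf_counter() - start

print(f"round trip: {timed_round_trip():.3f}s")
```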
Firebase","id":"aY6Fbgy6TV4YWoutjhfe"},{"title":"Google Cloud Storage","id":"UwaYoXQ5bHYHG6EdiPB8"}],"uri":"incidents/6SCpW417YmwFN2Cq8ogN","currently_affected_locations":[],"previously_affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"}]},{"id":"pyetBPMqQtz1kjNr6zpo","number":"9872458149334895577","begin":"2024-07-09T20:17:43+00:00","created":"2024-07-09T20:50:05+00:00","end":"2024-07-10T14:18:42+00:00","modified":"2024-07-10T14:18:52+00:00","external_desc":"Cloud Security Command Center dashboard findings state data refresh issue.","updates":[{"created":"2024-07-10T14:18:42+00:00","modified":"2024-07-10T14:18:57+00:00","when":"2024-07-10T14:18:42+00:00","text":"The issue with Cloud Security Command Center has been resolved for all affected users as of Wednesday, 2024-07-10 07:16 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-10T05:27:14+00:00","modified":"2024-07-10T14:18:52+00:00","when":"2024-07-10T05:27:14+00:00","text":"Summary: Cloud Security Command Center dashboard findings state data refresh issue.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-07-10 08:45 US/Pacific.\nDiagnosis: Customers impacted by this issue may observe inaccurate vulnerability status of deleted assets in the Cloud Security Command center dashboard.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-07-09T22:38:04+00:00","modified":"2024-07-10T05:27:14+00:00","when":"2024-07-09T22:38:04+00:00","text":"Summary: Cloud Security Command Center dashboard findings state data refresh issue.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2024-07-09 23:00 US/Pacific.\nDiagnosis: Customers impacted by this issue may observe inaccurate vulnerability status of deleted assets in the Cloud Security Command center dashboard.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-07-09T20:50:01+00:00","modified":"2024-07-09T22:38:04+00:00","when":"2024-07-09T20:50:01+00:00","text":"Summary: Cloud Security Command Center dashboard findings state data refresh issue.\nDescription: We are experiencing an issue with the Cloud Security Command Center dashboard data refresh beginning on Wednesday, 2024-07-03 00:25 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-07-09 16:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers impacted by this issue may observe inaccurate vulnerability status of deleted assets in the Cloud Security Command center dashboard.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-07-10T14:18:42+00:00","modified":"2024-07-10T14:18:57+00:00","when":"2024-07-10T14:18:42+00:00","text":"The issue with Cloud Security Command Center has been resolved for all affected users as of Wednesday, 2024-07-10 07:16 US/Pacific.\nWe thank you for your patience while we worked on resolving the 
issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"csyyfUYy88hkeqbv23Mc","service_name":"Cloud Security Command Center","affected_products":[{"title":"Cloud Security Command Center","id":"csyyfUYy88hkeqbv23Mc"}],"uri":"incidents/pyetBPMqQtz1kjNr6zpo","currently_affected_locations":[],"previously_affected_locations":[]},{"id":"MzARofVtutSd2HB5vmkT","number":"10746463202313021384","begin":"2024-07-09T06:20:00+00:00","created":"2024-07-12T11:58:16+00:00","end":"2024-07-12T13:31:00+00:00","modified":"2024-07-12T21:56:45+00:00","external_desc":"reCAPTCHA Enterprise elevated error rate issue","updates":[{"created":"2024-07-12T21:53:13+00:00","modified":"2024-07-12T21:56:45+00:00","when":"2024-07-12T21:53:13+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support .\n(All Times US/Pacific)\n**Incident Start:** 8 July, 2024 23:20\n**Incident End:** 12 July, 2024 06:31\n**Duration:** 3 days, 7 hours, 11 minutes\n**Affected Services and Features:** reCAPTCHA Enterprise\n**Regions/Zones:** Global\n**Description:**\nA change in the client code of reCAPTCHA Enterprise caused requests being sent to customer servers instead of reCAPTCHA servers. As a result, customers experienced a significant number of unexpected 4xx errors. A small number of customers reported handling those errors in a way that made reCAPTCHA unusable for them.\nA rollout was initiated on Monday, 8 July 2024 23:20 US/Pacific however, impact was limited leading up to Wednesday, 10 July 2024 16:45 US/Pacific. Affected customers experienced extraneous requests on ~50% of reCAPTCHA scripts for 10 hours until the change was rolled back, fully mitigating the issue for all users.\n**Customer Impact:**\nAffected customers may have experienced requests sent to non-existent URIs in their servers causing 404 errors.\nA small number of customers reported handling the errors in a way that made reCAPTCHA unusable for them.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-12T14:01:07+00:00","modified":"2024-07-12T21:53:13+00:00","when":"2024-07-12T14:01:07+00:00","text":"The issue with reCAPTCHA Enterprise has been resolved for all affected users as of Friday, 2024-07-12 06:31 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-12T13:06:41+00:00","modified":"2024-07-12T14:01:12+00:00","when":"2024-07-12T13:06:41+00:00","text":"Summary: reCAPTCHA Enterprise elevated error rate issue\nDescription: We believe the issue with reCAPTCHA Enterprise is related to a recent update. 
Our engineering team is actively working to mitigate the problem by rolling back this change.\nThe mitigation is expected to be completed by Friday, 2024-07-12 09:00 US/Pacific.\nWe will provide more information by Friday, 2024-07-12 09:30 US/Pacific.\nDiagnosis: Impacted customers may experience requests sent to non-existent URIs in their servers causing 404s and reCAPTCHA not working as a result.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-07-12T12:26:20+00:00","modified":"2024-07-12T13:06:50+00:00","when":"2024-07-12T12:26:20+00:00","text":"Summary: reCAPTCHA Enterprise elevated error rate issue\nDescription: We believe the issue with reCAPTCHA Enterprise is related to a recent update. Our engineering team is actively working to mitigate the problem by rolling back this change.\nThe mitigation is expected to be completed by Friday, 2024-07-12 09:00 US/Pacific.\nWe will provide more information by Friday, 2024-07-12 09:30 US/Pacific.\nDiagnosis: Impacted customers may experience requests sent to non-existent URIs in their servers causing 404s and reCAPTCHA not working as a result.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-07-12T12:23:59+00:00","modified":"2024-07-12T12:26:20+00:00","when":"2024-07-12T12:23:59+00:00","text":"Summary: reCAPTCHA Enterprise elevated error rate issue\nDescription: We believe the issue with reCAPTCHA Enterprise is related to a recent update. Our engineering team is actively working to mitigate the problem by rolling back this change.\nThe mitigation is expected to be completed by Friday, 2024-07-12 09:00 US/Pacific.\nWe will provide more information by Friday, 2024-07-12 09:30 US/Pacific.\nDiagnosis: Impacted customers may experience requests sent to non-existent URIs in their servers causing 404s and reCAPTCHA not working as a result.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-07-12T11:58:03+00:00","modified":"2024-07-12T12:24:06+00:00","when":"2024-07-12T11:58:03+00:00","text":"Summary: reCAPTCHA Enterprise elevated error rate issue\nDescription: We are experiencing an issue with reCAPTCHA Enterprise.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-07-12 06:01 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience 404 errors while using reCAPTCHA Enterprise\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-07-12T21:53:13+00:00","modified":"2024-07-12T21:56:45+00:00","when":"2024-07-12T21:53:13+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support .\n(All Times US/Pacific)\n**Incident Start:** 8 July, 2024 23:20\n**Incident End:** 12 July, 2024 06:31\n**Duration:** 3 days, 7 hours, 11 minutes\n**Affected Services and Features:** reCAPTCHA Enterprise\n**Regions/Zones:** Global\n**Description:**\nA change in the client code of reCAPTCHA Enterprise caused requests to be sent to customer servers instead of reCAPTCHA servers. As a result, customers experienced a significant number of unexpected 4xx errors. A small number of customers reported handling those errors in a way that made reCAPTCHA unusable for them.\nA rollout was initiated on Monday, 8 July 2024 23:20 US/Pacific; however, impact was limited leading up to Wednesday, 10 July 2024 16:45 US/Pacific. Affected customers experienced extraneous requests on ~50% of reCAPTCHA scripts for 10 hours until the change was rolled back, fully mitigating the issue for all users.\n**Customer Impact:**\nAffected customers may have experienced requests sent to non-existent URIs in their servers causing 404 errors.\nA small number of customers reported handling the errors in a way that made reCAPTCHA unusable for them.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"BubghYKyn8WLY5wnSjZL","service_name":"reCAPTCHA Enterprise","affected_products":[{"title":"reCAPTCHA Enterprise","id":"BubghYKyn8WLY5wnSjZL"}],"uri":"incidents/MzARofVtutSd2HB5vmkT","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"zTvGnF41WTz3HdzHW9oD","number":"2402644181387727443","begin":"2024-07-03T05:51:30+00:00","created":"2024-07-03T07:13:48+00:00","end":"2024-07-03T07:51:00+00:00","modified":"2024-07-03T07:51:02+00:00","external_desc":"Customers may have experienced issues with connectivity between their GCVE VMWare engine deployment in us-central1 and the rest of their on-premises network and the internet.","updates":[{"created":"2024-07-03T07:51:00+00:00","modified":"2024-07-03T07:51:03+00:00","when":"2024-07-03T07:51:00+00:00","text":"The issue with VMWare engine has been resolved for all affected customers as of Tuesday, 2024-07-02 23:09 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-03T07:13:46+00:00","modified":"2024-07-03T07:51:02+00:00","when":"2024-07-03T07:13:46+00:00","text":"Summary: Customers may experience issues with connectivity between their GCVE VMWare engine deployment in us-central1 and the rest of their on-premises network and the internet.\nDescription: The engineering team has fixed the known issue, which has mitigated the situation for the customers who reported the cases. The team is currently verifying progress to ensure the mitigation has worked for all the affected customers.\nWe will provide more information by Wednesday, 2024-07-03 01:30 US/Pacific.\nDiagnosis: Customers impacted by this issue may see the routes to their GCVE workloads not advertised by their private cloud. Connectivity to the VMware management components was not impacted during this incident. 
The connectivity among the VMs running within the private cloud hosted in GCVE was not impacted during this incident.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-07-03T07:51:00+00:00","modified":"2024-07-03T07:51:03+00:00","when":"2024-07-03T07:51:00+00:00","text":"The issue with VMWare engine has been resolved for all affected customers as of Tuesday, 2024-07-02 23:09 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"9H6gWUHvb2ZubeoxzQ1Y","service_name":"VMWare engine","affected_products":[{"title":"VMWare engine","id":"9H6gWUHvb2ZubeoxzQ1Y"}],"uri":"incidents/zTvGnF41WTz3HdzHW9oD","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"PZYkGeBDHSgR9g5y8FT3","number":"3066241165263719043","begin":"2024-07-02T19:37:55+00:00","created":"2024-07-02T19:57:48+00:00","end":"2024-07-03T05:42:36+00:00","modified":"2024-07-03T05:42:38+00:00","external_desc":"Chronicle Security: Increased Latency and Normalization Delays","updates":[{"created":"2024-07-03T05:42:36+00:00","modified":"2024-07-03T05:42:40+00:00","when":"2024-07-03T05:42:36+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Tuesday, 2024-07-02 22:35 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-03T03:14:31+00:00","modified":"2024-07-03T05:42:38+00:00","when":"2024-07-03T03:14:31+00:00","text":"Summary: Chronicle Security: Increased Latency and Normalization Delays\nDescription: We are experiencing delays with ingestion of data for Chronicle SecOps in the US region.\nOur engineering team has identified a mitigation which is making progress against the issue. The system must work through an ingestion backlog before the service returns to normal for impacted users. We are actively working to expedite this process, which we currently expect to take 6 hours.\nWe appreciate your patience while we continue to mitigate this issue.\nWe will provide another update by Wednesday, 2024-07-03 00:00 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see delays in events being available for search and detection.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-07-03T01:02:50+00:00","modified":"2024-07-03T03:14:31+00:00","when":"2024-07-03T01:02:50+00:00","text":"Summary: Chronicle Security: Increased Latency and Normalization Delays\nDescription: We are experiencing delays with ingestion of data for Chronicle SecOps in the US region.\nOur engineering team continues to work diligently towards resolving the issue and restoring normal functionality as soon as possible.\nWe appreciate your patience while we work to resolve the issue. 
We will provide another update by Tuesday, 2024-07-02 20:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see delays in events being available for search and detection.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-07-02T22:48:02+00:00","modified":"2024-07-03T01:02:50+00:00","when":"2024-07-02T22:48:02+00:00","text":"Summary: Chronicle Security: Increased Latency and Normalization Delays\nDescription: We are experiencing delays with ingestion of data for Chronicle SecOps in the US region.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-07-02 17:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see delays in events being available for search and detection.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-07-02T21:21:51+00:00","modified":"2024-07-02T22:48:02+00:00","when":"2024-07-02T21:21:51+00:00","text":"Summary: Chronicle Security: Increased Latency and Normalization Delays\nDescription: We are experiencing delays with ingestion of data for Chronicle SecOps in the US region.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-07-02 16:00 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see delays in events being available for search and detection.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-07-02T19:57:45+00:00","modified":"2024-07-02T21:21:51+00:00","when":"2024-07-02T19:57:45+00:00","text":"Summary: Chronicle Security: Increased Latency and Normalization Delays\nDescription: We are experiencing delays with ingestion of data for Chronicle SecOps in the US region.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-07-02 14:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see delays in events being available for search and detection.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-07-03T05:42:36+00:00","modified":"2024-07-03T05:42:40+00:00","when":"2024-07-03T05:42:36+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Tuesday, 2024-07-02 22:35 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/PZYkGeBDHSgR9g5y8FT3","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"PHewUhYs2f3a4rLUaBpt","number":"10192544781336870788","begin":"2024-07-01T12:00:00+00:00","created":"2024-07-01T17:32:23+00:00","end":"2024-07-01T19:30:00+00:00","modified":"2024-07-01T18:32:44+00:00","external_desc":"Looker Studio: MySQL Connections Failing Globally","updates":[{"created":"2024-07-01T18:32:33+00:00","modified":"2024-07-01T18:32:44+00:00","when":"2024-07-01T18:32:33+00:00","text":"The issue with Looker Studio 
has been resolved for all affected projects as of Monday, 2024-07-01 11:31 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-07-01T18:24:46+00:00","modified":"2024-07-01T18:32:33+00:00","when":"2024-07-01T18:24:46+00:00","text":"**DESCRIPTION**\nMitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2024-07-01 12:30 US/Pacific.\n**CUSTOMER SYMPTOMS**\nMySQL connections for Looker Studio may fail with the error \"Looker Studio cannot connect to your data.\"\n**WORKAROUND**\nNone at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-07-01T17:53:25+00:00","modified":"2024-07-01T18:24:46+00:00","when":"2024-07-01T17:53:25+00:00","text":"**DESCRIPTION**\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-07-01 12:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\n**CUSTOMER SYMPTOMS**\nMySQL connections for Looker Studio may fail with the error \"Looker Studio cannot connect to your data.\"\n**WORKAROUND**\nNone at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-07-01T17:32:23+00:00","modified":"2024-07-01T17:53:25+00:00","when":"2024-07-01T17:32:23+00:00","text":"**DESCRIPTION**\nWe are experiencing an issue with Looker Studio.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-07-01 11:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\n**CUSTOMER SYMPTOMS**\nMySQL connections for Looker Studio are failing with the error \"Looker Studio cannot connect to your data.\"\n**WORKAROUND**\nNone at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-07-01T18:32:33+00:00","modified":"2024-07-01T18:32:44+00:00","when":"2024-07-01T18:32:33+00:00","text":"The issue with Looker Studio has been resolved for all affected projects as of Monday, 2024-07-01 11:31 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"kEYNqRYFXXHxP9QeFJ1d","service_name":"Looker Studio","affected_products":[{"title":"Looker Studio","id":"kEYNqRYFXXHxP9QeFJ1d"}],"uri":"incidents/PHewUhYs2f3a4rLUaBpt","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"AzZsSSbjftiaL9wgriAh","number":"4705825414718844407","begin":"2024-06-27T00:37:10+00:00","created":"2024-06-27T01:07:09+00:00","end":"2024-06-27T02:12:26+00:00","modified":"2024-06-27T02:12:29+00:00","external_desc":"Issue related to the ‘Detection List’ page not displaying Detections","updates":[{"created":"2024-06-27T02:12:26+00:00","modified":"2024-06-27T02:12:30+00:00","when":"2024-06-27T02:12:26+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Wednesday, 2024-06-26 19:06 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-27T01:07:06+00:00","modified":"2024-06-27T02:12:29+00:00","when":"2024-06-27T01:07:06+00:00","text":"Summary: We are 
investigating an issue related to the ‘Detection List’ page not displaying Detections\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Wednesday, 2024-06-26 20:00 US/Pacific.\nWe will provide more information by Wednesday, 2024-06-26 20:30 US/Pacific.\nDiagnosis: Customers impacted by this issue may see an empty list of Detections on the left side of the ‘Detections’ page.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-06-27T02:12:26+00:00","modified":"2024-06-27T02:12:30+00:00","when":"2024-06-27T02:12:26+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Wednesday, 2024-06-26 19:06 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/AzZsSSbjftiaL9wgriAh","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"jdwDkmcgpJgKhD96Rs7J","number":"18086830303075598911","begin":"2024-06-26T23:57:31+00:00","created":"2024-06-27T01:26:38+00:00","end":"2024-06-27T02:19:18+00:00","modified":"2024-06-27T02:19:21+00:00","external_desc":"Apigee Edge Public Cloud Errors in us-west2","updates":[{"created":"2024-06-27T02:19:18+00:00","modified":"2024-06-27T02:19:22+00:00","when":"2024-06-27T02:19:18+00:00","text":"The issue with Apigee Edge Public Cloud has been resolved for all affected users as of Wednesday, 2024-06-26 19:16 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-27T02:01:13+00:00","modified":"2024-06-27T02:19:21+00:00","when":"2024-06-27T02:01:13+00:00","text":"Summary: Apigee Edge Public Cloud Errors in us-west2\nDescription: We are experiencing an issue with Apigee Edge Public Cloud beginning on Wednesday, 2024-06-26 14:44 US/Pacific.\nOur engineering team has identified a mitigation plan and is currently in the process of rolling it out across all impacted systems.\nAt this time, there is no ETA for mitigation.\nWe will provide an update by Wednesday, 2024-06-26 21:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers may encounter issues while working with the Apigee datastore, with an impact on API runtime, management API and UI.\nWorkaround: There is no workaround at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-06-27T01:26:36+00:00","modified":"2024-06-27T02:01:16+00:00","when":"2024-06-27T01:26:36+00:00","text":"Summary: Apigee Edge Public Cloud Errors in us-west2\nDescription: We are experiencing an issue with Apigee Edge Public Cloud beginning on Wednesday, 2024-06-26 14:44 US/Pacific.\nOur engineering team continues to investigate the issue and work towards a mitigation strategy.\nAt this time, there is no ETA for mitigation.\nWe will provide an update by Wednesday, 2024-06-26 21:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers may encounter issues while working with the 
Apigee datastore, with an impact on API runtime, management API and UI.\nWorkaround: There is no workaround at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-06-27T02:19:18+00:00","modified":"2024-06-27T02:19:22+00:00","when":"2024-06-27T02:19:18+00:00","text":"The issue with Apigee Edge Public Cloud has been resolved for all affected users as of Wednesday, 2024-06-26 19:16 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"SumcdgBT6GQBzp1vmdXu","service_name":"Apigee Edge Public Cloud","affected_products":[{"title":"Apigee Edge Public Cloud","id":"SumcdgBT6GQBzp1vmdXu"}],"uri":"incidents/jdwDkmcgpJgKhD96Rs7J","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"5NgKqCMjjZzMrYuJk73x","number":"10687336535943485441","begin":"2024-06-26T20:01:58+00:00","created":"2024-06-26T20:02:01+00:00","end":"2024-06-26T21:40:00+00:00","modified":"2024-06-26T21:40:02+00:00","external_desc":"Apigee Edge Public Cloud customers may see missing audit logs starting from 22nd June 2024 globally.","updates":[{"created":"2024-06-26T21:40:00+00:00","modified":"2024-06-26T21:40:03+00:00","when":"2024-06-26T21:40:00+00:00","text":"The issue with Apigee Edge Public Cloud was resolved for all affected users as of Wednesday, 2024-06-26 12:44 US/Pacific.\nThere was a backlog which has also been fully processed as of Wednesday, 2024-06-26 14:35 US/Pacific.\nThere is no residual backlog to be processed, nor any other known impact at this time.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-26T20:01:59+00:00","modified":"2024-06-26T21:40:02+00:00","when":"2024-06-26T20:01:59+00:00","text":"Summary: Apigee Edge Public Cloud customers may see missing audit logs starting from 22nd June 2024 globally.\nDescription: We believe the issue with Apigee Edge Public Cloud is mitigated.\nThere is currently a backlog being cleared, which is expected to complete by Wednesday, 2024-06-26 17:00 US/Pacific.\nWe will provide an update by Wednesday, 2024-06-26 17:30 US/Pacific with current details.\nDiagnosis: Customers may see missing Apigee Edge Public Cloud audit logs starting from 22nd June 2024 globally.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-06-26T21:40:00+00:00","modified":"2024-06-26T21:40:03+00:00","when":"2024-06-26T21:40:00+00:00","text":"The issue with Apigee Edge Public Cloud was resolved for all affected users as of Wednesday, 2024-06-26 12:44 US/Pacific.\nThere was a backlog which has also been fully processed as of Wednesday, 2024-06-26 14:35 US/Pacific.\nThere is no residual backlog to be processed, nor any other known impact at this time.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"SumcdgBT6GQBzp1vmdXu","service_name":"Apigee Edge Public Cloud","affected_products":[{"title":"Apigee Edge Public 
Cloud","id":"SumcdgBT6GQBzp1vmdXu"}],"uri":"incidents/5NgKqCMjjZzMrYuJk73x","currently_affected_locations":[],"previously_affected_locations":[]},{"id":"eRgqHQLkbb87LA13Z9wB","number":"1065855965479054690","begin":"2024-06-26T14:27:17+00:00","created":"2024-06-26T14:57:35+00:00","end":"2024-06-27T05:09:22+00:00","modified":"2024-06-27T05:09:28+00:00","external_desc":"This incident is being merged with an existing incident. All future updates will be provided there: https://status.cloud.google.com/incidents/PtQCvMMdqsRLZLbdC11p","updates":[{"created":"2024-06-27T05:09:22+00:00","modified":"2024-06-27T05:09:28+00:00","when":"2024-06-27T05:09:22+00:00","text":"This incident is being merged with an existing incident. All future updates will be provided there: https://status.cloud.google.com/incidents/PtQCvMMdqsRLZLbdC11p","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-27T02:25:05+00:00","modified":"2024-06-27T05:09:22+00:00","when":"2024-06-27T02:25:05+00:00","text":"Summary: Chronicle Security is experiencing data ingestion delay for some 3rd party API feeds in the US region.\nDescription: We believe the issue with Chronicle Security, Mandiant Managed Defense is partially resolved.\nFull resolution (backlog clearance) is expected to complete by Thursday, 2024-06-27 09:00 US/Pacific.\nWe will provide an update by Thursday, 2024-06-27 00:00 US/Pacific with details on the progress.\nDiagnosis: Data ingestion is delayed by \u003e 17 hours for some 3rd party API feeds.\nWorkaround: None at this time","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T22:24:46+00:00","modified":"2024-06-27T02:25:05+00:00","when":"2024-06-26T22:24:46+00:00","text":"Summary: Chronicle Security is experiencing data ingestion delay for some 3rd party API feeds in the US region.\nDescription: Mitigation work is currently underway by our engineering team.\nMandiant Managed Defense is also impacted due this incident.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-06-26 19:30 US/Pacific.\nDiagnosis: Data ingestion is delayed by \u003e 17 hours for some 3rd party API feeds.\nWorkaround: None at this time","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T18:15:45+00:00","modified":"2024-06-26T22:24:46+00:00","when":"2024-06-26T18:15:45+00:00","text":"Summary: Chronicle Security is experiencing data ingestion delay for some 3rd party API feeds in the US region.\nDescription: Mitigation work is currently underway by our engineering team.\nMandiant Managed Defense is also impacted due this incident.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-06-26 15:30 US/Pacific.\nDiagnosis: Data ingestion is delayed by \u003e 17 hours for some 3rd party API feeds.\nWorkaround: None at this time","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T16:22:16+00:00","modified":"2024-06-26T18:15:45+00:00","when":"2024-06-26T16:22:16+00:00","text":"Summary: Chronicle Security is experiencing data ingestion delay for some 3rd party API feeds in the US region.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information 
by Wednesday, 2024-06-26 11:30 US/Pacific.\nDiagnosis: Data ingestion is delayed by \u003e 17 hours for some 3rd party API feeds.\nWorkaround: None at this time","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T16:18:24+00:00","modified":"2024-06-26T16:22:19+00:00","when":"2024-06-26T16:18:24+00:00","text":"Summary: Chronicle Security is experiencing data ingestion delay for some 3rd party API feeds in the US region.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-06-26 11:30 US/Pacific.\nDiagnosis: Data ingestion is delayed by \u003e 17 hours for some 3rd party API feeds.\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T14:57:28+00:00","modified":"2024-06-26T16:18:24+00:00","when":"2024-06-26T14:57:28+00:00","text":"Summary: Chronicle Security is experiencing data ingestion delay for some 3rd party API feeds in the US region.\nDescription: We are experiencing an issue with Chronicle Security beginning on Wednesday, 2024-06-26 06:40 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-06-26 09:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Data ingestion is delayed by \u003e 17 hours for some 3rd party API feeds.\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-06-27T05:09:22+00:00","modified":"2024-06-27T05:09:28+00:00","when":"2024-06-27T05:09:22+00:00","text":"This incident is being merged with an existing incident. 
All future updates will be provided there: https://status.cloud.google.com/incidents/PtQCvMMdqsRLZLbdC11p","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"},{"title":"Mandiant Managed Defense","id":"9aKw9s8p43AYeBmo4Gvx"}],"uri":"incidents/eRgqHQLkbb87LA13Z9wB","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"id":"hTx18xZFMjkbrtJkKmdd","number":"16311338082900230228","begin":"2024-06-26T14:02:33+00:00","created":"2024-06-26T14:16:33+00:00","end":"2024-06-26T19:31:22+00:00","modified":"2024-06-26T19:31:23+00:00","external_desc":"Apigee Edge Public Cloud customers may see missing audit logs starting from 22nd June 2024 globally.","updates":[{"created":"2024-06-26T19:31:22+00:00","modified":"2024-06-26T19:31:24+00:00","when":"2024-06-26T19:31:22+00:00","text":"The issue with Apigee Edge Public Cloud has been resolved for all affected users as of Wednesday, 2024-06-26 12:04 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-26T19:02:16+00:00","modified":"2024-06-26T19:31:23+00:00","when":"2024-06-26T19:02:16+00:00","text":"Summary: Apigee Edge Public Cloud customers may see missing audit logs starting from 22nd June 2024 globally.\nDescription: We believe the issue with Apigee Edge Public Cloud is mitigated.\nThere is currently a backlog being cleared, which is expected to complete by Wednesday, 2024-06-26 17:00 US/Pacific.\nWe will provide an update by Wednesday, 2024-06-26 17:30 US/Pacific with current details.\nDiagnosis: Customers may see missing Apigee Edge Public Cloud audit logs starting from 22nd June 2024 globally.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-06-26T16:12:03+00:00","modified":"2024-06-26T19:02:16+00:00","when":"2024-06-26T16:12:03+00:00","text":"Summary: Apigee Edge Public Cloud customers may see missing audit logs starting from 22nd June 2024 globally.\nDescription: We are experiencing an issue with Apigee Edge Public Cloud beginning at Saturday, 2024-06-22 00:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-06-26 12:30 US/Pacific with current details.\nDiagnosis: Customers may see missing Apigee Edge Public Cloud audit logs starting from 22nd June 2024 globally.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-06-26T14:16:22+00:00","modified":"2024-06-26T16:12:03+00:00","when":"2024-06-26T14:16:22+00:00","text":"Summary: Apigee Edge Public Cloud customers may see missing audit logs starting from 22nd June 2024 globally.\nDescription: We are experiencing an issue with Apigee Edge Public Cloud beginning at Saturday, 2024-06-22 00:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-06-26 09:30 US/Pacific with current details.\nDiagnosis: Customers may see missing Apigee Edge Public Cloud audit logs starting from 22nd June 2024 globally.\nWorkaround: None at this 
time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-06-26T19:31:22+00:00","modified":"2024-06-26T19:31:24+00:00","when":"2024-06-26T19:31:22+00:00","text":"The issue with Apigee Edge Public Cloud has been resolved for all affected users as of Wednesday, 2024-06-26 12:04 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"SumcdgBT6GQBzp1vmdXu","service_name":"Apigee Edge Public Cloud","affected_products":[{"title":"Apigee Edge Public Cloud","id":"SumcdgBT6GQBzp1vmdXu"}],"uri":"incidents/hTx18xZFMjkbrtJkKmdd","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"HE1SjabbQsBp9eGukpgd","number":"8254298644199951516","begin":"2024-06-26T12:56:17+00:00","created":"2024-06-26T13:16:49+00:00","end":"2024-06-26T19:11:55+00:00","modified":"2024-06-26T19:11:57+00:00","external_desc":"Chronicle Security is experiencing Risk Analytics issue in multiregion/us","updates":[{"created":"2024-06-26T19:11:55+00:00","modified":"2024-06-26T19:11:57+00:00","when":"2024-06-26T19:11:55+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Wednesday, 2024-06-26 12:11 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-26T17:19:14+00:00","modified":"2024-06-26T19:11:57+00:00","when":"2024-06-26T17:19:14+00:00","text":"Summary: Chronicle Security is experiencing Risk Analytics issue in multiregion/us\nDescription: Mitigation work is currently underway by our engineering team.\nEngineering has Identified a broken UI as the root cause and is working on implementing a fix.\nWe do not have an ETA for mitigation at this point, however we will continue to provide status updates.\nWe will provide more information by Wednesday, 2024-06-26 12:30 US/Pacific.\nDiagnosis: Impacted users may be unable to perform the risk analytics in chronicle instance.\nThey might observe that no data shows up on the risk analytics page\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T17:15:47+00:00","modified":"2024-06-26T17:19:14+00:00","when":"2024-06-26T17:15:47+00:00","text":"Summary: Chronicle Security is experiencing Risk Analytics issue in multiregion/us\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point, and will continue to provide status updates.\nWe will provide more information by Wednesday, 2024-06-26 12:30 US/Pacific.\nDiagnosis: Impacted users may be unable to perform the risk analytics in chronicle instance.\nThey might observe that no data shows up on the risk analytics page\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T15:06:43+00:00","modified":"2024-06-26T17:15:47+00:00","when":"2024-06-26T15:06:43+00:00","text":"Summary: Chronicle Security is experiencing Risk Analytics issue in multiregion/us\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point, and will continue to provide status updates.\nWe will provide more information by Wednesday, 2024-06-26 10:30 
US/Pacific.\nDiagnosis: Impacted users may be unable to perform risk analytics in their Chronicle instance.\nThey might observe that no data shows up on the risk analytics page.\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T13:16:31+00:00","modified":"2024-06-26T15:06:43+00:00","when":"2024-06-26T13:16:31+00:00","text":"Summary: Chronicle Security is experiencing a Risk Analytics issue in multiregion/us\nDescription: We are experiencing an issue with Chronicle Security's Risk Analytics feature beginning on Wednesday, 2024-06-26 03:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-06-26 09:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted users may be unable to perform risk analytics in their Chronicle instance.\nThey might observe that no data shows up on the risk analytics page.\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-06-26T19:11:55+00:00","modified":"2024-06-26T19:11:57+00:00","when":"2024-06-26T19:11:55+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Wednesday, 2024-06-26 12:11 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/HE1SjabbQsBp9eGukpgd","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"PtQCvMMdqsRLZLbdC11p","number":"10566857602630374527","begin":"2024-06-25T18:58:00+00:00","created":"2024-06-25T19:48:50+00:00","end":"2024-06-28T19:22:00+00:00","modified":"2024-07-01T05:11:35+00:00","external_desc":"Chronicle Security is experiencing a service degradation with multiple features in the US/multiregion.","updates":[{"created":"2024-07-01T05:11:35+00:00","modified":"2024-07-01T05:11:35+00:00","when":"2024-07-01T05:11:35+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:**\n25 June, 2024 11:58\n**Incident End:**\n28 June, 2024 12:22\n**Duration:**\n3 Days, 23 minutes\n**Affected Services and Features:**\nGoogle SecOps\n**Regions/Zones:**\nUS - Multi-Region\n**Description:**\nGoogle SecOps experienced service degradation with multiple features in the US/multiregion for a duration of 3 days and 23 minutes.\n**Customer Impact:**\nDuring the incident some customers would have experienced the following issues within Google SecOps:\n**Parser UI:** Affected users would have been unable to access parsers via the user interface.\n**Feeds UI:** Functionality was degraded, preventing feed names from being displayed in the user interface.\n**Raw Log Search Timestamp Selector:** The timestamp selector prevented affected users from selecting earlier dates, thus limiting search functionality.\n**Raw Log Search UI:** The raw log search UI displayed all log types as 0kB, hindering accurate log analysis.\n**Raw Log Search Historic Availability:** Raw log search for historic data beyond 48hrs after ingestion was unavailable.\n**IOC matches page:** Feed Source Names were not shown correctly.\n**Data ingestion:** Ingestion was delayed for some 3rd party API feeds.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-28T19:16:58+00:00","modified":"2024-07-01T05:11:35+00:00","when":"2024-06-28T19:16:58+00:00","text":"The issue with Chronicle Security has been mitigated for all affected users as of Friday, 2024-06-28 11:00AM US/Pacific.\n* Parser UI: The parser UI functionality is working as expected.\n* Feeds UI: The feeds UI functionality is working as expected.\n* Raw Log Search Timestamp Selector Malfunction: Mitigated and fixed.\n* Raw Log Search UI Display Error: Mitigated and fixed.\n* Raw Log Search Historic Availability: Mitigated and fixed.\n* IOC matches page: Mitigated and fixed.\n* Data ingestion: Mitigated and fixed and data backfilled.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-28T11:29:48+00:00","modified":"2024-06-28T19:17:01+00:00","when":"2024-06-28T11:29:48+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with multiple features in the US/multiregion.\nDescription: The Raw Log Search historic availability issue is now mitigated.\nThe issue of data ingestion delays is resolved for all 3rd party API sources including Mandiant Managed Defense. Backfill for most log types is complete.\nThe backfill for Proofpoint Tap Alerts for the timeframe of Tuesday, 2024-06-25 12:30 to Wednesday, 2024-06-26 18:30 US/Pacific has been successfully completed.\nOur engineering team will continue working to backfill data for log types Proofpoint On Demand for the timeframe of Tuesday, 2024-06-25 12:30 to Wednesday, 2024-06-26 18:30 US/Pacific.\nThe ETA for completion of most of the backfills is Friday, 2024-06-28 13:00 US/Pacific.\nWe will provide more information by Friday, 2024-06-28 14:00 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues within Chronicle Security. 
However, the remaining features are unaffected by this issue.\n**Parser UI:** The parser UI functionality is working as expected.\n**Feeds UI:** The feeds UI functionality is working as expected.\n**Raw Log Search Timestamp Selector Malfunction:** Mitigated and fixed.\n**Raw Log Search UI Display Error:** Mitigated and fixed.\n**Raw Log Search Historic Availability:** Mitigated and fixed.\n**IOC matches page:** Mitigated and fixed.\n**Data ingestion:** Delay of \u003e 17 hours for some 3rd party API feeds is now mitigated. Backfill pending for log types: Proofpoint On Demand\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-28T06:39:09+00:00","modified":"2024-06-28T11:29:48+00:00","when":"2024-06-28T06:39:09+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with multiple features in the US/multiregion.\nDescription: The Raw Log Search historic availability issue is now mitigated.\nThe issue of data ingestion delays is resolved for all 3rd party API sources including Mandiant Managed Defense. Backfill for most log types is complete.\nOur engineering team will continue working to backfill data for log types Proofpoint On Demand and Proofpoint Tap Alerts for the timeframe of Tuesday, 2024-06-25 12:30 to Wednesday, 2024-06-26 18:30 US/Pacific.\nThe ETA for completion of most of the backfills is Friday, 2024-06-28 10:00 US/Pacific.\nWe will provide more information by Friday, 2024-06-28 05:30 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues within Chronicle Security. However, the remaining features are unaffected by this issue.\n**Parser UI:** The parser UI functionality is working as expected.\n**Feeds UI:** The feeds UI functionality is working as expected.\n**Raw Log Search Timestamp Selector Malfunction:** Mitigated and fixed.\n**Raw Log Search UI Display Error:** Mitigated and fixed.\n**Raw Log Search Historic Availability:** Mitigated and fixed.\n**IOC matches page:** Mitigated and fixed.\n**Data ingestion:** Delay of \u003e 17 hours for some 3rd party API feeds is now mitigated. Backfill pending for two log types: Proofpoint On Demand and Proofpoint Tap Alerts.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-28T05:59:13+00:00","modified":"2024-06-28T06:39:09+00:00","when":"2024-06-28T05:59:13+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with multiple features in the US/multiregion.\nDescription: The Raw Log Search historic availability issue is now mitigated.\nThe issue of data ingestion delays is resolved for all 3rd party API sources including Mandiant Managed Defense. Backfill for most log types is complete.\nOur engineering team will continue working to backfill data for log types Proofpoint On Demand and Proofpoint Tap Alerts for the timeframe of Tuesday, 2024-06-25 12:30 to Wednesday, 2024-06-26 18:30 US/Pacific.\nThe ETA for completion of most of the backfills is Friday, 2024-06-28 10:00 US/Pacific.\nWe will provide more information by Friday, 2024-06-28 05:00 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues within Chronicle Security. 
However, the remaining features are unaffected by this issue.\n**Parser UI:** The parser UI functionality is working as expected.\n**Feeds UI:** The feeds UI functionality is working as expected.\n**Raw Log Search Timestamp Selector Malfunction:** Mitigated and fixed.\n**Raw Log Search UI Display Error:** Mitigated and fixed.\n**Raw Log Search Historic Availability:** Mitigated and fixed.\n**IOC matches page:** Mitigated and fixed.\n**Data ingestion:** Delay of \u003e 17 hours for some 3rd party API feeds is now mitigated. Backfill pending for two log types: Proofpoint On Demand and Proofpoint Tap Alerts.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-27T21:31:08+00:00","modified":"2024-06-28T05:59:16+00:00","when":"2024-06-27T21:31:08+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with multiple features in the US/multiregion.\nDescription: The Raw Log Search historic availability issue is now mitigated.\nThe issue of data ingestion delays is resolved for all 3rd party API sources including Mandiant Managed Defense. Backfill for most log types is complete.\nOur engineering team will continue working to backfill data for log types Proofpoint On Demand and Proofpoint Tap Alerts for the timeframe of Tuesday, 2024-06-25 12:30 to Wednesday, 2024-06-26 18:30 US/Pacific.\nThe ETA for completion of most of the backfills is Friday, 2024-06-28 10:00 US/Pacific.\nWe will provide more information by Friday, 2024-06-28 00:00 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues within Chronicle Security. However, the remaining features are unaffected by this issue.\n**Parser UI:** The parser UI functionality is working as expected.\n**Feeds UI:** The feeds UI functionality is working as expected.\n**Raw Log Search Timestamp Selector Malfunction:** Mitigated and fixed.\n**Raw Log Search UI Display Error:** Mitigated and fixed.\n**Raw Log Search Historic Availability:** Mitigated and fixed.\n**IOC matches page:** Mitigated and fixed.\n**Data ingestion:** Delay of \u003e 17 hours for some 3rd party API feeds is now mitigated. Backfill pending for two log types: Proofpoint On Demand and Proofpoint Tap Alerts.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-27T16:56:12+00:00","modified":"2024-06-27T21:31:11+00:00","when":"2024-06-27T16:56:12+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with multiple features in the US/multiregion.\nDescription: The Raw Log Search historic availability issue is now mitigated.\nThe issue of data ingestion delays is resolved for all 3rd party API sources including Mandiant Managed Defense. Backfill for most log types is complete.\nOur engineering team will continue working to backfill data for log types Proofpoint On Demand and Proofpoint Tap Alerts for the timeframe of Tuesday, 2024-06-25 12:30 to Wednesday, 2024-06-26 18:30 US/Pacific.\nThe ETA for completion of the backfills is Friday, 2024-06-28 10:00 US/Pacific.\nWe will provide more information by Thursday, 2024-06-27 14:30 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues within Chronicle Security. 
However, the remaining features are unaffected by this issue.\n* **Parser UI:** The parser UI functionality is working as expected.\n* **Feeds UI:** The feeds UI functionality is working as expected.\n* **Raw Log Search Timestamp Selector Malfunction:** Mitigated and fixed.\n* **Raw Log Search UI Display Error:** Mitigated and fixed.\n* **Raw Log Search Historic Availability:** Mitigated and fixed.\n* **IOC matches page:** Mitigated and fixed.\n* **Data ingestion:** Delay of \u003e 17 hours for some 3rd party API feeds is now mitigated. Backfill pending for two log types: Proofpoint On Demand and Proofpoint Tap Alerts.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-27T06:08:21+00:00","modified":"2024-06-27T16:56:12+00:00","when":"2024-06-27T06:08:21+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with multiple features in the US/multiregion.\nDescription: Mitigation work is still underway by our engineering team.\nCurrently the primary impact is to historical raw log searches for data beyond 48 hours, which are not updated with late arriving events.\nOur engineering team is actively working to address the backlog and implement a complete mitigation for users in the affected regions.\nThe issue of data ingestion delays is resolved for all 3rd party API sources including Mandiant Managed Defense. Backfill for most log types is complete.\nOur engineering team will continue working to backfill data for log types Proofpoint On Demand and Proofpoint Tap Alerts for the duration Tuesday, 2024-06-25 14:55 to Wednesday, 2024-06-26 18:02 US/Pacific, with an ETA of Friday, 2024-07-05 10:00 US/Pacific.\nWe will provide more information by Thursday, 2024-06-27 10:00 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues within Chronicle Security. However, the remaining features are unaffected by this issue.\n* Parser UI: The parser UI is now functioning and displaying active parsers.\n* Feeds UI: The feeds UI functionality is working as expected.\n* Raw Log Search Timestamp Selector Malfunction: Mitigated and fixed in the production environment.\n* Raw Log Search UI Display Error: Mitigated and fixed in the production environment.\n* Raw Log Search Historic Availability: Historic data beyond 48hrs after ingestion is available, however such data is not updated with late arriving events. Mitigation is in progress.\n* IOC matches page: The issue with Feed Source Names not shown correctly is now mitigated.\n* Data ingestion: Delay of \u003e 17 hours for some 3rd party API feeds is now mitigated. 
Backfill pending for two log types.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T23:41:29+00:00","modified":"2024-06-27T06:08:21+00:00","when":"2024-06-26T23:41:29+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with a few features in the US/multiregion.\nDescription: Mitigation work is still underway by our engineering team.\nCurrently the primary impact is to historical raw log searches for data beyond 48 hours, which are not updated with late arriving events.\nOur engineering team is actively working to address the backlog and implement a complete mitigation for users in the affected regions.\nWe will provide more information by Thursday, 2024-06-27 10:00 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues within Chronicle Security. However, the remaining features are unaffected by this issue.\n**Parser UI**: The parser UI is now functioning and displaying active parsers.\n**Feeds UI**: The feeds UI functionality is working as expected.\n**Raw Log Search Timestamp Selector Malfunction**: Mitigated and fixed in the production environment.\n**Raw Log Search UI Display Error**: Mitigated and fixed in the production environment.\n**Raw Log Search Historic Availability**: Historic data beyond 48hrs after ingestion is available, however such data is not updated with late arriving events. Mitigation is in progress.\n**IOC matches page**: The issue with Feed Source Names not shown correctly is now mitigated.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T18:42:23+00:00","modified":"2024-06-26T23:41:29+00:00","when":"2024-06-26T18:42:23+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with a few features in the US/multiregion.\nDescription: Mitigation work is still underway by our engineering team.\nCurrently the primary impact is to historical raw log searches for data beyond 48 hours, which are not updated with new data.\nOur engineering team is actively working to address the backlog and implement a complete mitigation for users in the affected regions.\nWe will provide more information by Wednesday, 2024-06-26 17:00 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues with the Raw Log search feature within Chronicle Security Console. However, the remaining features are unaffected by this issue.\n* **Parser UI**: The parser UI is now functioning and displaying active parsers.\n* **Feeds UI**: The feeds UI functionality is working as expected.\n* **Raw Log Search Timestamp Selector Malfunction:** Mitigated and fixed in the production environment.\n* **Raw Log Search UI Display Error:** Mitigated and fixed in the production environment.\n* **Raw Log Search Historic Availability:** Historic data beyond 48hrs after ingestion is available, however such data is not updated with any new data. 
Mitigation is in progress.\n* **IOC matches page:** The issue with Feed Source Names not shown correctly is now mitigated.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T13:51:54+00:00","modified":"2024-06-26T18:42:23+00:00","when":"2024-06-26T13:51:54+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with a few features in the US/multiregion.\nDescription: Mitigation work is still underway by our engineering team.\nSystem functionality has been fully restored. Our engineering team is actively working to address the backlog and implement a complete mitigation for users in the affected regions.\nWe will provide more information by Wednesday, 2024-06-26 12:00 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues with the Raw Log search feature within Chronicle Security Console. However, the remaining features are unaffected by this issue.\n* **Parser UI**: The parser UI is now functioning and displaying active parsers.\n* **Feeds UI**: The feeds UI functionality is working as expected.\n* **Raw Log Search Timestamp Selector Malfunction:** Mitigated and fixed in the production environment.\n* **Raw Log Search UI Display Error:** Mitigated and fixed in the production environment.\n* **Raw Log Search Historic Availability:** The issue with Raw log search for historic data beyond 48hrs after ingestion is now mitigated and available for raw log search.\n* **IOC matches page:** The issue with Feed Source Names not shown correctly is now mitigated.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T11:00:08+00:00","modified":"2024-06-26T13:51:54+00:00","when":"2024-06-26T11:00:08+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with a few features in the US/multiregion.\nDescription: We are experiencing an issue with Chronicle Security beginning on Tuesday, 2024-06-25 12:00 US/Pacific.\nOur engineering team has determined the root cause of the issue and has prevented any impact in all regions outside the US multi-region.\nMitigation and restoration are underway to restore full system functionality. We will provide more information by Wednesday, 2024-06-26 07:00 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues with the Raw Log search feature within Chronicle Security Console. However, the remaining features are unaffected by this issue.\n* **Parser UI**: The parser UI is now functioning and displaying active parsers.\n* **Feeds UI**: The feeds UI functionality is working as expected.\n* **Raw Log Search Timestamp Selector Malfunction:** Mitigated and fixed in the production environment.\n* **Raw Log Search UI Display Error:** Mitigated and fixed in the production environment.\n* **Raw Log Search Historic Availability:** The issue with Raw log search for historic data beyond 48hrs after ingestion is now mitigated and available for raw log search.\n* **IOC matches page:** Feed Source Names are not shown correctly. 
Mitigation is in progress.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T08:02:25+00:00","modified":"2024-06-26T11:00:08+00:00","when":"2024-06-26T08:02:25+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with a few features in the US/multiregions.\nDescription: We are experiencing an issue with Chronicle Security beginning on Tuesday, 2024-06-25 12:00 US/Pacific.\nOur engineering team has determined the root cause of the issue and has prevented any impact in all regions outside the US multi-region.\nMitigation and restoration are underway to restore full system functionality. We will provide more information by Wednesday, 2024-06-26 05:30 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues with the Raw Log search feature within Chronicle Security Console. However, the remaining features are unaffected by this issue.\n* **Parser UI**: The parser UI is now functioning and displaying active parsers.\n* **Feeds UI**: The feeds UI functionality is degraded in the US region (the feed name is not currently shown). The mitigation is in progress.\n* **Raw Log Search Timestamp Selector Malfunction:** Mitigated and fixed in the production environment\n* **Raw Log Search UI Display Error:** Mitigated and fixed in the production environment\n* **Raw Log Search Historic Availability:** Raw log search for historic data beyond 48hrs after ingestion is currently unavailable. There is no impact to the data integrity of raw logs in Chronicle.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T04:46:19+00:00","modified":"2024-06-26T08:02:25+00:00","when":"2024-06-26T04:46:19+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with a few features in the US/multiregions.\nDescription: We are experiencing an issue with Chronicle Security beginning on Tuesday, 2024-06-25 12:00 US/Pacific.\nOur engineering team has determined the root cause of the issue and has prevented any impact in all regions outside the US multi-region.\nMitigation and restoration are underway to restore full system functionality. We will provide more information by Wednesday, 2024-06-26 02:15 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues with the Raw Log search feature within Chronicle Security Console. However, the remaining features are unaffected by this issue.\n* **Parser UI**: The parser UI is now functioning and displaying active parsers.\n* **Feeds UI**: The feeds UI functionality is degraded in the US region (the feed name is not currently shown).\n* **Raw Log Search Timestamp Selector Malfunction:** The timestamp selector does not allow users to select earlier dates, limiting search functionality. A mitigation to this issue is rolling out.\n* **Raw Log Search UI Display Error:** The raw log search UI displays available data for all log types as 0kB.\n* **Raw Log Search Historic Availability:** Raw log search for historic data beyond 48hrs after ingestion is currently unavailable. 
There is no impact to the data integrity of raw logs in Chronicle.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T02:47:09+00:00","modified":"2024-06-26T04:46:19+00:00","when":"2024-06-26T02:47:09+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with a few features in the US/multiregions.\nDescription: We are experiencing an issue with Chronicle Security beginning on Tuesday, 2024-06-25 12:00 US/Pacific.\nOur engineering team has determined the root cause of the issue and has prevented any impact in all regions outside the US multi-region.\nMitigation and restoration are underway to restore full system functionality. We will provide more information by Tuesday, 2024-06-25 22:30 US/Pacific.\nDiagnosis: Customers across US/multiregion would experience the following issues with the Raw Log search feature within Chronicle Security Console. However, the remaining features are unaffected by this issue.\n* **Parser UI**: The parser UI is now functioning and displaying active parsers.\n* **Feeds UI**: The feeds UI functionality is degraded in the US region (the feed name is not currently shown).\n* **Raw Log Search Timestamp Selector Malfunction:** The timestamp selector does not allow users to select earlier dates, limiting search functionality. A mitigation to this issue is rolling out.\n* **Raw Log Search UI Display Error:** The raw log search UI displays available data for all log types as 0kB.\n* **Raw Log Search Historic Availability:** Raw log search for historic data beyond 48hrs after ingestion is currently unavailable. There is no impact to the data integrity of raw logs in Chronicle.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-26T01:05:55+00:00","modified":"2024-06-26T02:47:09+00:00","when":"2024-06-26T01:05:55+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with a few features in the US/multiregions.\nDescription: We are experiencing an issue with Chronicle Security beginning on Tuesday, 2024-06-25 12:00 US/Pacific.\nMitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Tuesday, 2024-06-25 21:00 US/Pacific.\nWe will provide more information by Tuesday, 2024-06-25 21:30 US/Pacific.\nDiagnosis: Customers across US/multiregions would experience the following issues with the Raw Log search feature within Chronicle Security Console. 
However, the remaining features are unaffected by this issue.\n**1. Raw Log Search Timestamp Selector Malfunction:** The timestamp selector does not allow users to select earlier dates, limiting search functionality.\n**2. Parser UI Failure:** The parser UI is not functioning, preventing users from accessing and utilizing parsers.\n**3. Raw Log Search UI Display Error:** The raw log search UI displays all log types as 0kB, hindering accurate log analysis.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-25T22:53:45+00:00","modified":"2024-06-26T01:05:57+00:00","when":"2024-06-25T22:53:45+00:00","text":"Summary: Chronicle Security is experiencing a service degradation with a few features in US multi-region.\nDescription: We are experiencing an issue with Chronicle Security beginning at Tuesday, 2024-06-25 12:00 US/Pacific.\nMitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Tuesday, 2024-06-25 21:00 US/Pacific.\nWe will provide more information by Tuesday, 2024-06-25 21:30 US/Pacific.\nDiagnosis: A subset of customers in the US region would experience the following issues.\n**1. Raw Log Search Timestamp Selector Malfunction:** The timestamp selector does not allow users to select earlier dates, limiting search functionality.\n**2. Parser UI Failure:** The parser UI is not functioning, preventing users from accessing and utilizing parsers.\n**3. Raw Log Search UI Display Error:** The raw log search UI displays all log types as 0kB, hindering accurate log analysis.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-25T21:45:50+00:00","modified":"2024-06-25T22:53:45+00:00","when":"2024-06-25T21:45:50+00:00","text":"Summary: Chronicle Security customers may experience parsers not appearing in the parsers UI and raw log search showing empty log types\nDescription: We are experiencing an issue with Chronicle Security beginning at Tuesday, 2024-06-25 12:00 US/Pacific.\nOur engineering team is actively investigating the issue to identify the cause and determine mitigation steps.\nWe will provide an update by Tuesday, 2024-06-25 16:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: The parser page is empty in the UI and raw log search shows empty log types.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-25T20:45:12+00:00","modified":"2024-06-25T21:45:50+00:00","when":"2024-06-25T20:45:12+00:00","text":"Summary: Chronicle Security customers may experience parsers not appearing in the parsers UI and raw log search showing empty log types\nDescription: We are experiencing an issue with Chronicle Security beginning at Tuesday, 2024-06-25 12:00 US/Pacific.\nOur engineering team is actively investigating the issue to identify the cause and determine mitigation steps.\nWe will provide an update by Tuesday, 2024-06-25 15:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: The parser page is empty in the UI and raw log search shows empty log types.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: 
us","id":"us"}]},{"created":"2024-06-25T19:48:43+00:00","modified":"2024-06-25T20:45:12+00:00","when":"2024-06-25T19:48:43+00:00","text":"Summary: Chronicle Security customers may experience Parsers not showing in parsers UI and raw log search has empty log types\nDescription: We are experiencing an issue with Chronicle Security beginning at Tuesday, 2024-06-25 12:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-06-25 14:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Parser page is empty in the UI and raw log search has empty log types\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-07-01T05:11:35+00:00","modified":"2024-07-01T05:11:35+00:00","when":"2024-07-01T05:11:35+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:**\n25 June, 2024 11:58\n**Incident End:**\n28 June, 2024 12:22\n**Duration:**\n3 Days, 23 minutes\n**Affected Services and Features:**\nGoogle SecOps\n**Regions/Zones:**\nUS - Multi-Region\n**Description:**\nGoogle SecOps experienced service degradation with multiple features in the US/multiregion for a duration of 3 days and 23 minutes.\n**Customer Impact:**\nDuring the incident some customers would have experienced the following issues within Google SecOps:\n**Parser UI:** Affected users would have been unable to access parsers via the user interface.\n**Feeds UI:** Functionality was degraded, preventing feed names from being displayed in the user interface.\n**Raw Log Search Timestamp Selector:** The timestamp selector prevented affected users from selecting earlier dates, thus limiting search functionality.\n**Raw Log Search UI:** The raw log search UI displayed all log types as 0kB, hindering accurate log analysis.\n**Raw Log Search Historic Availability:** Raw log search for historic data beyond 48hrs after ingestion was unavailable.\n**IOC matches page:** Feed Source Names were not shown correctly.\n**Data ingestion:** Ingestion was delayed for some 3rd party API feeds.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"},{"title":"Mandiant Managed Defense","id":"9aKw9s8p43AYeBmo4Gvx"}],"uri":"incidents/PtQCvMMdqsRLZLbdC11p","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"id":"DabTxrTX98aUjxfNMLKc","number":"4207184887188406518","begin":"2024-06-25T18:46:58+00:00","created":"2024-06-25T19:08:22+00:00","end":"2024-06-25T21:30:50+00:00","modified":"2024-06-25T21:30:52+00:00","external_desc":"Google Vertex AI Search customers may experience some latency and resource exhausted 
error","updates":[{"created":"2024-06-25T21:30:50+00:00","modified":"2024-06-25T21:30:52+00:00","when":"2024-06-25T21:30:50+00:00","text":"The issue with Vertex AI Search has been resolved for all affected users as of Tuesday, 2024-06-25 14:28 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-25T20:00:30+00:00","modified":"2024-06-25T21:30:52+00:00","when":"2024-06-25T20:00:30+00:00","text":"Summary: Google Vertex AI Search customers may experience some latency and resource exhausted error\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2024-06-25 15:00 US/Pacific.\nDiagnosis: Customer may experience some latency and error. It affected answer API and search with summary feature in vertex AI search.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-25T19:11:01+00:00","modified":"2024-06-25T20:00:30+00:00","when":"2024-06-25T19:11:01+00:00","text":"Summary: Google Vertex AI Search customers may experience some latency and resource exhausted error\nDescription: We are experiencing an issue with Vertex AI Search beginning at Tuesday, 2024-06-25 09:10 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-06-25 13:45 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customer may experience some latency and error. It affected answer API and search with summary feature in vertex AI search.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-25T19:08:19+00:00","modified":"2024-06-25T19:11:05+00:00","when":"2024-06-25T19:08:19+00:00","text":"Summary: Google Vertex AI Search customers may experience some latency and resource exhausted error\nDescription: We are experiencing an issue with Vertex AI Search beginning at Tuesday, 2024-06-25 09:10 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-06-25 13:40 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customer may experience some latency and error. 
This affected the answer API and the search with summary feature in Vertex AI Search.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-06-25T21:30:50+00:00","modified":"2024-06-25T21:30:52+00:00","when":"2024-06-25T21:30:50+00:00","text":"The issue with Vertex AI Search has been resolved for all affected users as of Tuesday, 2024-06-25 14:28 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"vNncXxtSVvqyhvSkQ6PJ","service_name":"Vertex AI Search","affected_products":[{"title":"Vertex AI Search","id":"vNncXxtSVvqyhvSkQ6PJ"}],"uri":"incidents/DabTxrTX98aUjxfNMLKc","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"id":"WSKmKxSuwQ93C1WcQ3h3","number":"3938614477816153053","begin":"2024-06-25T18:32:09+00:00","created":"2024-06-25T18:58:46+00:00","end":"2024-06-25T19:06:23+00:00","modified":"2024-06-25T19:06:30+00:00","external_desc":"Storage Transfer Service experienced issues with creating transfer receipts.","updates":[{"created":"2024-06-25T19:06:23+00:00","modified":"2024-06-25T19:06:31+00:00","when":"2024-06-25T19:06:23+00:00","text":"The issue with Storage Transfer Service has been resolved for all affected users as of Tuesday, 2024-06-25 11:34 US/Pacific.\nDuring the issue, transfer receipts for transfers from on-premises to cloud were not generated, impacting customer workflows that use transfer receipts. The transfer functionality was not affected by the issue.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-25T18:58:40+00:00","modified":"2024-06-25T19:06:30+00:00","when":"2024-06-25T18:58:40+00:00","text":"Summary: Storage Transfer Service experienced issues with creating transfer receipts.\nDescription: We experienced an issue with Storage Transfer Service beginning on Thursday, 2024-06-20.\nOur engineering team identified the root cause and rolled out the fix.\nWe will provide an update by Tuesday, 2024-06-25 12:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt 
(europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-06-25T19:06:23+00:00","modified":"2024-06-25T19:06:31+00:00","when":"2024-06-25T19:06:23+00:00","text":"The issue with Storage Transfer Service has been resolved for all affected users as of Tuesday, 2024-06-25 11:34 US/Pacific.\nDuring the issue, transfer receipts for transfer from on-premise to cloud were not generated impacting any customer workflows using transfer receipts. The transfer functionality was not affected by the issue.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Cloud Storage","id":"UwaYoXQ5bHYHG6EdiPB8"},{"title":"Storage Transfer Service","id":"reC3xJSY6Gzc8n9eYmmj"}],"uri":"incidents/WSKmKxSuwQ93C1WcQ3h3","currently_affected_locations":[],"previously_affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa 
(us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"yDVzvMtWooZJGPDc3wZ8","number":"4938828884735018635","begin":"2024-06-20T17:30:00+00:00","created":"2024-06-20T18:13:16+00:00","end":"2024-06-20T19:42:00+00:00","modified":"2024-06-21T07:56:43+00:00","external_desc":"Google Data Catalog customer may experience availability issues.","updates":[{"created":"2024-06-21T07:56:24+00:00","modified":"2024-06-21T07:56:43+00:00","when":"2024-06-21T07:56:24+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 20 June, 2024 10:30\n**Incident End:** 20 June, 2024 12:42\n**Duration:** 2 hours, 12 minutes\n**Affected Services and Features:**\nDataplex Catalog\n**Regions/Zones:**\nMulti-regions: asia, eu, us\nRegions: asia-northeast2, australia-southeast2, us-east9, europe-north1, us-west5, europe-west8, asia-east1, northamerica-northeast1, us-central5, europe-west2, southamerica-east1, africa-south1, me-west1, europe-southwest1, asia-south2, europe-west7, us-west2, europe-west10, us-central3, us-east2, us-east4, us-east7, europe-central2, us-central1, asia-west2, europe-west3, europe-west5, us-east1, europe-west9, europe-west14, us-west8, europe-west, us-central1google, europe-west11, asia-southeast1, southamerica-west1, us-west4, us-central4, asia-west1, us-west6, us-south1, us-west3, europe-west13, us-west1, me-central2, us-west7, europe-west4, us-east5, europe-west12, us-east8, europe-west6, us-east3, asia-northeast1, europe-west1, me-central1, asia-northeast3, me-central3, asia-south1, asia-southeast2, us-east10, us-central2, asia-east2, europe-north2, northamerica-south1, northamerica-northeast2, us-central, europe-south1, australia-southeast1\n**Description:**\nGoogle Dataplex Catalog experienced availability issues where Dataplex Catalog API and Dataplex Catalog UI in Google Cloud Console returned errors and timeouts on all API calls for a duration of 2 hours and 12 minutes. From preliminary analysis, the root cause of the issue appears to be connectivity issues to a dependent backend service. 
The issue was quickly mitigated by bringing the dependent service back online.\n**Customer Impact:**\nDataplex Catalog API and Dataplex Catalog UI in Google Cloud Console returned errors and timeouts on all API calls.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-20T19:43:16+00:00","modified":"2024-06-21T07:56:24+00:00","when":"2024-06-20T19:43:16+00:00","text":"The issue with Data Catalog has been resolved for all affected users as of Thursday, 2024-06-20 12:42 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-20T19:31:13+00:00","modified":"2024-06-21T07:53:24+00:00","when":"2024-06-20T19:31:13+00:00","text":"Summary: Google Data Catalog customers may experience availability issues.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-06-20 14:00 US/Pacific.\nDiagnosis: Dataplex Catalog API and Dataplex Catalog UI in Google Cloud Console return errors and timeouts on all API calls.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: asia","id":"asia"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Multi-region: eu","id":"eu"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Multi-region: us","id":"us"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas 
(us-west4)","id":"us-west4"}]},{"created":"2024-06-20T19:00:15+00:00","modified":"2024-06-21T07:52:55+00:00","when":"2024-06-20T19:00:15+00:00","text":"Summary: Google Data Catalog customer may experience availability issues.\nDescription: We are experiencing an issue with Data Catalog.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-06-20 13:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Dataplex Catalog API and Dataplex Catalog UI in Google Cloud Console return errors and timeouts on all API calls.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: asia","id":"asia"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Multi-region: eu","id":"eu"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Multi-region: us","id":"us"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-06-20T18:32:59+00:00","modified":"2024-06-21T07:52:33+00:00","when":"2024-06-20T18:32:59+00:00","text":"Summary: Google Data Catalog customer may experience availability issues.\nDescription: We are experiencing an issue with Data Catalog.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-06-20 12:40 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Dataplex Catalog API and Dataplex Catalog UI in Google Cloud Console return errors and timeouts on all API calls.\nWorkaround: None at 
this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-06-20T18:13:11+00:00","modified":"2024-06-20T18:32:59+00:00","when":"2024-06-20T18:13:11+00:00","text":"Summary: Google Data Catalog customer may experience availability issues.\nDescription: We are experiencing an issue with Data Catalog.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-06-20 12:21 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid 
(europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-06-21T07:56:24+00:00","modified":"2024-06-21T07:56:43+00:00","when":"2024-06-21T07:56:24+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 20 June, 2024 10:30\n**Incident End:** 20 June, 2024 12:42\n**Duration:** 2 hours, 12 minutes\n**Affected Services and Features:**\nDataplex Catalog\n**Regions/Zones:**\nMulti-regions: asia, eu, us\nRegions: asia-northeast2, australia-southeast2, us-east9, europe-north1, us-west5, europe-west8, asia-east1, northamerica-northeast1, us-central5, europe-west2, southamerica-east1, africa-south1, me-west1, europe-southwest1, asia-south2, europe-west7, us-west2, europe-west10, us-central3, us-east2, us-east4, us-east7, europe-central2, us-central1, asia-west2, europe-west3, europe-west5, us-east1, europe-west9, europe-west14, us-west8, europe-west, us-central1google, europe-west11, asia-southeast1, southamerica-west1, us-west4, us-central4, asia-west1, us-west6, us-south1, us-west3, europe-west13, us-west1, me-central2, us-west7, europe-west4, us-east5, europe-west12, us-east8, europe-west6, us-east3, asia-northeast1, europe-west1, me-central1, asia-northeast3, me-central3, asia-south1, asia-southeast2, us-east10, us-central2, asia-east2, europe-north2, northamerica-south1, northamerica-northeast2, us-central, europe-south1, australia-southeast1\n**Description:**\nGoogle Dataplex Catalog experienced availability issues where Dataplex Catalog API and Dataplex Catalog UI in Google Cloud Console returned errors and timeouts on all API calls for a duration of 2 hours and 12 minutes. From preliminary analysis, the root cause of the issue appears to be connectivity issues to a dependent backend service. 
The issue was quickly mitigated by bringing the dependent service back online.\n**Customer Impact:**\nDataplex Catalog API and Dataplex Catalog UI in Google Cloud Console returned errors and timeouts on all API calls.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"TFedVRYgKGRGMSJrUpup","service_name":"Data Catalog","affected_products":[{"title":"Data Catalog","id":"TFedVRYgKGRGMSJrUpup"}],"uri":"incidents/yDVzvMtWooZJGPDc3wZ8","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: asia","id":"asia"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Multi-region: eu","id":"eu"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Multi-region: us","id":"us"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"zmxVZmu3Kuk5MDnYyUpd","number":"3908792195560824591","begin":"2024-06-13T17:46:52+00:00","created":"2024-06-13T18:04:28+00:00","end":"2024-06-14T06:36:55+00:00","modified":"2024-06-14T19:13:46+00:00","external_desc":"Chronicle Security is experiencing delays with BigQuery exports, Multi event rules, and Normalization in US multi-region","updates":[{"created":"2024-06-14T19:13:46+00:00","modified":"2024-06-14T19:13:47+00:00","when":"2024-06-14T19:13:46+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. 
Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 13 June 2024 02:36\n**Incident End:** 13 June 2024 23:24\n**Duration:** 20 hours, 48 minutes\n**Affected Services and Features:**\nChronicle Security\n**Regions/Zones:** US multi-region\n**Description:**\nChronicle Security experienced delays with BigQuery exports, Multi event rules, and Normalization in US multi-region for a duration of 20 hours, 48 minutes.\nAfter preliminary analysis, the root cause of the issue was identified as a temporary unavailability of a specific zone within the internal database service utilized by Chronicle. Despite Chronicle's fault-tolerant setup guaranteeing uninterrupted service availability, its performance gradually degraded over time as a result of diminished data processing capabilities.\nTo alleviate infrastructure load, Chronicle engineering implemented measures to delay the reprocessing of specific data. However, attempts to resume reprocessing led to further delays. Concurrently, a separate, unrelated bug caused excessive database writes, resulting in delays in data normalization. To address this issue, the data pipeline was disabled to eliminate significant delays in data normalization.\n**Customer Impact:**\n* Delays in BigQuery Export and Multi event rules processing\n* Delays in late arriving enrichments and slower Unified Data Model (UDM) search.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-14T06:36:55+00:00","modified":"2024-06-14T19:13:46+00:00","when":"2024-06-14T06:36:55+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Thursday, 2024-06-13 23:24 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-14T06:16:46+00:00","modified":"2024-06-14T06:36:57+00:00","when":"2024-06-14T06:16:46+00:00","text":"Summary: Chronicle Security is experiencing delays with BigQuery exports, Multi event rules, and Normalization in US multi-region\nDescription: We have successfully mitigated the issue pertaining to delays with Normalization.\nWe have completed mitigations for BigQuery export delay and multi event rules, after which we have been observing a steady reduction in delays. 
We believe that the issue has been mitigated for the majority of customers, and our engineering team continues working diligently to resolve any remaining issues.\nWe will provide more information by Friday, 2024-06-14 09:00 US/Pacific.\nDiagnosis:\n- Chronicle Security is experiencing delays in BigQuery Export, in the processing of Multi event rules, and delays in Normalization (the process of converting a raw log to a UDM record) in the US multi-region.\n- Single event rules are still being processed and Unified Data Model (UDM) Search and Raw Log Search are still available.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-14T03:09:53+00:00","modified":"2024-06-14T06:16:46+00:00","when":"2024-06-14T03:09:53+00:00","text":"Summary: Chronicle Security is experiencing delays with BigQuery exports, Multi event rules, and Normalization in US multi-region\nDescription: We have successfully mitigated the issue pertaining to delays with Normalization.\nWe have completed mitigations for BigQuery export delay and multi event rules, after which we have been observing a steady reduction in delays. We are continuing to monitor and investigate any residual issues, and our engineering team is working diligently to resolve them.\nWe will provide more information by Thursday, 2024-06-13 23:30 US/Pacific.\nDiagnosis:\n- Chronicle Security is experiencing delays in BigQuery Export, in the processing of Multi event rules, and delays in Normalization (the process of converting a raw log to a UDM record) in the US multi-region.\n- Single event rules are still being processed and Unified Data Model (UDM) Search and Raw Log Search are still available.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-14T01:11:09+00:00","modified":"2024-06-14T03:09:53+00:00","when":"2024-06-14T01:11:09+00:00","text":"Summary: Chronicle Security is experiencing delays with BigQuery exports, Multi event rules, and Normalization in US multi-region\nDescription: We have successfully mitigated the issue pertaining to delays with Normalization.\nWe have completed mitigations for BigQuery export delay and multi event rules, after which we have been observing a steady reduction in delays. We expect any residual issues to completely subside in the next 2 hours.\nWe will provide more information by Thursday, 2024-06-13 20:30 US/Pacific.\nDiagnosis:\n- Chronicle Security is experiencing delays in BigQuery Export, in the processing of Multi event rules, and delays in Normalization (the process of converting a raw log to a UDM record) in the US multi-region.\n- Single event rules are still being processed and Unified Data Model (UDM) Search and Raw Log Search are still available.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-13T21:38:08+00:00","modified":"2024-06-14T01:11:09+00:00","when":"2024-06-13T21:38:08+00:00","text":"Summary: Chronicle Security is experiencing delays with BigQuery exports, Multi event rules, and Normalization in US multi-region\nDescription: We have completed mitigations for BigQuery export delay and multi event rules, after which we have been observing a steady reduction in delays. 
We expect the delays to completely subside in the next 5 hours.\nOur engineers have been able to successfully identify the reason for Normalization delays and have applied the required mitigation. We expect any related delays to be completely mitigated in the next 6 hours.\nWe will provide more information by Thursday, 2024-06-13 20:00 US/Pacific.\nDiagnosis:\n- Chronicle Security is experiencing delays in BigQuery Export, in the processing of Multi event rules, and delays in Normalization (the process of converting a raw log to a UDM record) in the US multi-region.\n- Single event rules are still being processed and Unified Data Model (UDM) Search and Raw Log Search are still available.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-13T20:23:25+00:00","modified":"2024-06-13T21:38:08+00:00","when":"2024-06-13T20:23:25+00:00","text":"Summary: Chronicle Security is experiencing delays with BigQuery exports, Multi event rules, and Normalization in US multi-region\nDescription: We have completed mitigations for BigQuery export delay and multi event rules and we are seeing a gradual reduction in delays. We expect the delays to completely subside in the next 6 hours.\nThe Normalization delays are being investigated by our engineering team.\nWe will provide more information by Thursday, 2024-06-13 15:00 US/Pacific.\nDiagnosis:\n- Chronicle Security is experiencing delays in BigQuery Export, in the processing of Multi event rules, and delays in Normalization (the process of converting a raw log to a UDM record) in the US multi-region.\n- Single event rules are still being processed and Unified Data Model (UDM) Search and Raw Log Search are still available.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-13T19:21:36+00:00","modified":"2024-06-13T20:23:25+00:00","when":"2024-06-13T19:21:36+00:00","text":"Summary: Chronicle Security is experiencing delays with BigQuery exports, Multi event rules, and Normalization in US multi-region\nDescription: We are experiencing an issue with Chronicle Security BigQuery exports, Multi event rules, and Normalization beginning at Thursday, 2024-06-13 09:15 US/Pacific.\nOur engineering team identified the root cause of the issue and is working on a mitigation.\nWe do not have an ETA for mitigation at this point.\nWe will provide an update by Thursday, 2024-06-13 13:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- Chronicle Security is experiencing delays in BigQuery Export, in the processing of Multi event rules, and delays in Normalization (the process of converting a raw log to a UDM record) in the US multi-region.\n- Single event rules are still being processed and Unified Data Model (UDM) Search and Raw Log Search are still available.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-13T18:04:25+00:00","modified":"2024-06-13T19:21:36+00:00","when":"2024-06-13T18:04:25+00:00","text":"Summary: Chronicle Security is experiencing delays with BigQuery exports and Multi event rules in US multi-region\nDescription: We are experiencing an issue with Chronicle Security BigQuery exports and Multi event rules beginning at Thursday, 2024-06-13 09:15 US/Pacific.\nOur engineering team identified the root cause of the issue 
and is working on a mitigation.\nWe do not have an ETA for mitigation at this point.\nWe will provide an update by Thursday, 2024-06-13 12:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- Chronicle Security is experiencing delays in BigQuery Export and in the processing of Multi event rules in the US multi-region.\n- Single event rules are still being processed and Unified Data Model (UDM) Search and Raw Log Search are still available.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-06-14T19:13:46+00:00","modified":"2024-06-14T19:13:47+00:00","when":"2024-06-14T19:13:46+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 13 June 2024 02:36\n**Incident End:** 13 June 2024 23:24\n**Duration:** 20 hours, 48 minutes\n**Affected Services and Features:**\nChronicle Security\n**Regions/Zones:** US multi-region\n**Description:**\nChronicle Security experienced delays with BigQuery exports, Multi event rules, and Normalization in US multi-region for a duration of 20 hours, 48 minutes.\nAfter preliminary analysis, the root cause of the issue was identified as a temporary unavailability of a specific zone within the internal database service utilized by Chronicle. Despite Chronicle's fault-tolerant setup guaranteeing uninterrupted service availability, its performance gradually degraded over time as a result of diminished data processing capabilities.\nTo alleviate infrastructure load, Chronicle engineering implemented measures to delay the reprocessing of specific data. However, attempts to resume reprocessing led to further delays. Concurrently, a separate, unrelated bug caused excessive database writes, resulting in delays in data normalization. 
To address this issue, the data pipeline was disabled to eliminate significant delays in data normalization.\n**Customer Impact:**\n* Delays in BigQuery Export and Multi event rules processing\n* Delays in late arriving enrichments and slower Unified Data Model (UDM) search.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/zmxVZmu3Kuk5MDnYyUpd","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"HcNDMn4YeG57exykghT7","number":"14640709118330191358","begin":"2024-06-12T19:06:00+00:00","created":"2024-06-12T21:48:30+00:00","end":"2024-06-12T22:59:00+00:00","modified":"2024-06-21T13:43:13+00:00","external_desc":"Vertex AI Online Prediction, Dialogflow CX, and Agent Assist are experiencing elevated error rates in multiple regions.","updates":[{"created":"2024-06-19T20:09:08+00:00","modified":"2024-06-21T13:43:13+00:00","when":"2024-06-19T20:09:08+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 12 June 2024 at 12:06 US/Pacific, Google Vertex AI, Dialogflow, Agent Assist users experienced elevated errors and product functionality issues in the us-central1, asia-southeast1, europe-west3, europe-west4, us-east1, us-west1, northamerica-northeast1, and us-east4 regions for a duration of 3 hours and 53 minutes. To our customers who were impacted during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nBeginning 8 June 2024, a novel form of user request to the GenerateContent API triggered intermittent segmentation faults in the Vertex Serving Prediction API servers. An affected server would restart after the segfault, and load balancers would send user queries to other healthy API servers until the affected server returned to service. The user-visible outage began when the rate of segfault-triggering traffic increased and the number of API servers offline simultaneously was sufficient to affect overall serving capacity relative to load.\n## Remediation and Prevention\nGoogle engineers were alerted to the issue via internal production monitoring on 10 June 2024 at 07:35 US/Pacific. At this time there was no visible customer impact, and Google engineers identified the root cause of the segmentation fault and developed a fix, which they started progressively rolling out. However, on 12 June 2024 at 12:06 US/Pacific, multiple services built on top of Vertex Prediction started to report user requests that were affected by this issue in production. The root cause of the issue was quickly verified to be the same issue discovered on 10 June 2024, and engineers accelerated the in-progress rollout. 
The rollout completed on 12 June 2024 at 15:59 US/Pacific, fully mitigating the issue.\nGoogle is committed to preventing a repeat of the issue in the future and is completing the following actions:\n- Ensuring early signals of server binary issues are captured in production health analysis and investigated in a timely manner.\n- Ensuring release validation of feature changes and updates in production.\n## Detailed Description of Impact\nBetween 12 June 2024 12:06 and 12 June 2024 15:59, some users in regions us-central1, asia-southeast1, europe-west3, europe-west4, us-east1, us-west1, northamerica-northeast1, and us-east4 may have experienced the following:\n**Vertex AI Online Prediction:**\n- Google Vertex AI experienced high latency and elevated 500 and 502 error rates while executing prediction tasks using Predict, RawPredict, GenerateContent, and StreamGenerateContent methods.\n- Customers may also have experienced failures when running prediction requests with “CANCELLED” errors.\n**Dialogflow CX:**\n- Dialogflow CX Generators, Generative Fallback, and ML entities experienced elevated “INTERNAL” and “DEADLINE_EXCEEDED” errors and in some cases timeouts.\n**Agent Assist:**\n- Agent Assist features including (Proactive) Generative Knowledge Assist experienced elevated error rates in LLM Summarization and topic modeling features, including Summarization baseline V2 and Summarization with custom sections (powered by generator).\n**Contact Center AI:**\n- Contact Center AI Insights features including LLM summarization and LLM topic modeling also experienced elevated error rates.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-14T15:48:56+00:00","modified":"2024-06-19T20:09:08+00:00","when":"2024-06-14T15:48:56+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 12 June 2024 12:06\n**Incident End:** 12 June 2024 15:59\n**Duration:** 3 hours, 53 minutes\n**Affected Services and Features:**\n- Vertex AI Online Prediction\n- Dialogflow CX\n- Agent Assist\n- Contact Center AI\n**Regions/Zones:** us-central1, asia-southeast1, europe-west3, europe-west4, us-east1, us-west1, northamerica-northeast1, us-east4\n**Description:**\nGoogle Vertex AI, Dialogflow, and Agent Assist users experienced elevated errors in multiple regions, impacting the respective product functionality for a duration of 3 hours, 53 minutes. From preliminary analysis, the root cause of the issue was a bug in a recent change to the Vertex AI online prediction platform that led to issues with processing generative requests. A proactive fix was rolled out while new issues were being reported. 
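The reports above describe clients seeing transient 500/502 responses and CANCELLED/DEADLINE_EXCEEDED errors while the server-side fix rolled out. Purely as an illustrative sketch (not part of the incident report), the snippet below shows the usual client-side cushion for such transient failures: retry with exponential backoff and jitter. `send_request` and `TransientError` are hypothetical stand-ins for a real client call and its retryable error type.

```python
import random
import time

# Hypothetical stand-in for the retryable error type a real client raises.
class TransientError(Exception):
    pass

def call_with_backoff(send_request, request, attempts=5, max_delay=32.0):
    """Retry a request on transient errors with exponential backoff."""
    delay = 1.0
    for attempt in range(attempts):
        try:
            return send_request(request)
        except TransientError:
            if attempt == attempts - 1:
                raise
            # Full jitter keeps retried traffic from synchronizing into
            # waves against servers that are restarting after a crash.
            time.sleep(random.uniform(0, delay))
            delay = min(delay * 2, max_delay)
```

Jittered backoff matters in this scenario because synchronized retries against a fleet of restarting servers can deepen exactly the kind of capacity shortfall the root-cause analysis describes.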
The incident was validated as resolved once the rollout was complete.\n**Customer Impact:**\n**Vertex AI Online Prediction:**\nGoogle Vertex AI experienced high latency and elevated 500 and 502 error rates while executing prediction tasks.\nCustomers may also have experienced failures when running prediction requests with “CANCELLED” errors.\n**Dialogflow CX:**\nDialogflow CX Generators, Generative Fallback, and ML entities experienced elevated “INTERNAL” and “DEADLINE_EXCEEDED” errors and in some cases timeouts.\n**Agent Assist:**\nAgent Assist features including (Proactive) Generative Knowledge Assist experienced elevated error rates in LLM Summarization and topic modeling features, including Summarization baseline V2 and Summarization with custom sections (powered by generator).\n**Contact Center AI:**\nContact Center AI Insights features including LLM summarization and LLM topic modeling also experienced elevated error rates.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-13T00:21:54+00:00","modified":"2024-06-14T15:48:56+00:00","when":"2024-06-13T00:21:54+00:00","text":"The issue with Agent Assist, Dialogflow CX, Vertex AI Online Prediction has been resolved for all affected projects as of Wednesday, 2024-06-12 17:21 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-12T23:20:06+00:00","modified":"2024-06-13T00:21:58+00:00","when":"2024-06-12T23:20:06+00:00","text":"Summary: Vertex AI Online Prediction, Dialogflow CX, and Agent Assist are experiencing elevated error rates in multiple regions.\nDescription: We are experiencing an intermittent issue with Vertex AI Online Prediction, Dialogflow CX, and Agent Assist beginning on Wednesday, 2024-06-12 12:06 US/Pacific.\nMitigation work is currently underway by our engineering team. Our monitoring shows notable recovery.\nWe believe the issue is partially mitigated and we expect us-central1 to be mitigated fully in the next hour. We do not have an ETA for full mitigation in other regions.\nWe will provide more information by Wednesday, 2024-06-12 17:30 US/Pacific.\nWe apologize to all who are affected by the disruption.\nDiagnosis: - Customers impacted by this issue may observe 50X errors while executing prediction tasks.\n- Customers may also see canceled requests for any running prediction tasks.\n- Dialogflow/Agent Assist queries with generative features enabled are receiving CANCELLED and DEADLINE_EXCEEDED errors or timeouts.\n- Some of the BigQuery generative features are also experiencing elevated error rates.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-06-12T22:35:44+00:00","modified":"2024-06-12T23:20:06+00:00","when":"2024-06-12T22:35:44+00:00","text":"Summary: Vertex AI Online Prediction, Dialogflow CX, and Agent Assist are experiencing elevated error rates in multiple regions.\nDescription: We are experiencing an intermittent issue with Vertex AI Online Prediction, Dialogflow CX, and Agent Assist beginning on Wednesday, 2024-06-12 12:06 US/Pacific.\nMitigation work is currently underway by our engineering team. 
However, we do not have an ETA for mitigation at this point.\nWe are closely monitoring mitigation progress and we will provide more information by Wednesday, 2024-06-12 16:30 US/Pacific.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- Customers impacted by this issue may observe 50X errors while executing prediction tasks.\n- Customers may also see canceled requests for any running prediction tasks.\n- User queries are receiving CANCELLED and DEADLINE_EXCEEDED errors or timeouts.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-06-12T22:18:29+00:00","modified":"2024-06-12T22:54:59+00:00","when":"2024-06-12T22:18:29+00:00","text":"Summary: Vertex AI Online Prediction, Dialogflow CX, and Agent Assist are experiencing elevated error rates in multiple regions.\nDescription: We are experiencing an intermittent issue with Vertex AI Online Prediction, Dialogflow CX, and Agent Assist beginning on Wednesday, 2024-06-12 12:06 US/Pacific.\nMitigation work is currently underway by our engineering team. However, we do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-06-12 16:30 US/Pacific.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- Customers impacted by this issue may observe 50X errors while executing prediction tasks.\n- Customers may also see canceled requests for any running prediction tasks.\n- User queries are receiving CANCELLED and DEADLINE_EXCEEDED errors or timeouts.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-06-12T21:48:24+00:00","modified":"2024-06-12T22:18:36+00:00","when":"2024-06-12T21:48:24+00:00","text":"Summary: Vertex AI Online Prediction Experiencing 50X Error or Canceled Requests Intermittently.\nDescription: We are experiencing an intermittent issue with Vertex AI Online Prediction beginning on Wednesday, 2024-06-12 12:06 US/Pacific.\nMitigation work is currently underway by our engineering team. 
However, we do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-06-12 18:00 US/Pacific.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers impacted by this issue may observe 50X errors while executing prediction tasks.\nCustomers may also see canceled requests for any running prediction tasks.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-06-19T20:09:08+00:00","modified":"2024-06-21T13:43:13+00:00","when":"2024-06-19T20:09:08+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 12 June 2024 at 12:06 US/Pacific, Google Vertex AI, Dialogflow, and Agent Assist users experienced elevated errors and product functionality issues in the us-central1, asia-southeast1, europe-west3, europe-west4, us-east1, us-west1, northamerica-northeast1, and us-east4 regions for a duration of 3 hours and 53 minutes. To our customers who were impacted during this disruption, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nBeginning 8 June 2024, a novel form of user request to the GenerateContent API triggered intermittent segmentation faults in the Vertex Serving Prediction API servers. An affected server would restart after the segfault, and load balancers would send user queries to other healthy API servers until the affected server returned to service. The user-visible outage began when the rate of segfault-triggering traffic increased and the number of API servers offline simultaneously was sufficient to affect overall serving capacity relative to load.\n## Remediation and Prevention\nGoogle engineers were alerted to the issue via internal production monitoring on 10 June 2024 at 07:35 US/Pacific. At this time there was no visible customer impact, and Google engineers identified the root cause of the segmentation fault and developed a fix, which they started progressively rolling out. However, on 12 June 2024 at 12:06 US/Pacific, multiple services built on top of Vertex Prediction started to report user requests that were affected by this issue in production. The root cause of the issue was quickly verified to be the same issue discovered on 10 June 2024, and engineers accelerated the in-progress rollout. 
The rollout completed on 12 June 2024 at 15:59 US/Pacific, fully mitigating the issue.\nGoogle is committed to preventing a repeat of the issue in the future and is completing the following actions:\n- Ensuring early signals of server binary issues are captured in production health analysis and investigated in a timely manner.\n- Ensuring release validation of feature changes and updates in production.\n## Detailed Description of Impact\nBetween 12 June 2024 12:06 and 12 June 2024 15:59, some users in regions us-central1, asia-southeast1, europe-west3, europe-west4, us-east1, us-west1, northamerica-northeast1, and us-east4 may have experienced the following:\n**Vertex AI Online Prediction:**\n- Google Vertex AI experienced high latency and elevated 500 and 502 error rates while executing prediction tasks using Predict, RawPredict, GenerateContent, and StreamGenerateContent methods.\n- Customers may also have experienced failures when running prediction requests with “CANCELLED” errors.\n**Dialogflow CX:**\n- Dialogflow CX Generators, Generative Fallback, and ML entities experienced elevated “INTERNAL” and “DEADLINE_EXCEEDED” errors and in some cases timeouts.\n**Agent Assist:**\n- Agent Assist features including (Proactive) Generative Knowledge Assist experienced elevated error rates in LLM Summarization and topic modeling features, including Summarization baseline V2 and Summarization with custom sections (powered by generator).\n**Contact Center AI:**\n- Contact Center AI Insights features including LLM summarization and LLM topic modeling also experienced elevated error rates.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Agent Assist","id":"eUntUKqUrHdbBLNcVVXq"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"},{"title":"Dialogflow CX","id":"BnCicQdHSdxaCv8Ya6Vm"},{"title":"Vertex AI Online Prediction","id":"sdXM79fz1FS6ekNpu37K"}],"uri":"incidents/HcNDMn4YeG57exykghT7","currently_affected_locations":[],"previously_affected_locations":[{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"hAHmS7WwWk83e4ZQ5XUi","number":"6102218050934964879","begin":"2024-06-11T01:07:17+00:00","created":"2024-06-11T01:36:37+00:00","end":"2024-06-11T03:09:07+00:00","modified":"2024-06-26T22:08:25+00:00","external_desc":"Issue with BigQuery Streaming API and Cloud Logging in US Multiregion","updates":[{"created":"2024-06-26T22:08:25+00:00","modified":"2024-06-26T22:08:25+00:00","when":"2024-06-26T22:08:25+00:00","text":"# Incident Report\n## Summary\nBetween Thursday, 30 May 2024 and Thursday, 13 June 2024 at 23:45 US/Pacific, BigQuery customers experienced elevated query latencies across the multiregions:us and multiregions:eu. 
As a result, operations requiring compute resources (queries, loads, extracts) would have also experienced resource contention during the same period, and some customers with queries or operations relying on the BigQuery Autoscaler would have been unable to scale up to the desired limits.\nAdditionally, between 16:46 and 20:00 US/Pacific on Monday, 10 June 2024, BigQuery experienced elevated errors across multiple APIs (Jobs, Query, Load, Extract, Storage Read and Write APIs) in the multiregion:us region.\n## Root Cause\nBigQuery uses a distributed shuffle infrastructure for execution of large and complex joins, aggregations and analytic operations needed for query execution. Shuffle storage is a tiered architecture that optimizes for storing data in memory but uses SSD, then HDD, as backing stores to flush to as aggregate needs increase. The incident was caused by a combination of factors.\n1. Colossus, Google's distributed file system [1], was migrating to a newer version. This migration caused a gradual increase in traffic to the new system per zone across the fleet.\n2. The SSD cache configuration for flushing was not appropriately set up for the newer file system version.\n3. As a result, BigQuery gradually lost portions of its SSD cache in the relevant zones, proportional to the traffic migrated to the new system.\n4. Queries needing to flush to disk experienced increased latency as flushing directly to HDD increasingly dominated.\nDiverting the network traffic from an affected zone is a mitigation usually taken while determining the root cause of a problem. However, an operator error resulted in reduction of capacity in multiple zones simultaneously on Monday, 10 June 2024 at 16:46 US/Pacific. This led to elevated errors across BigQuery APIs until 20:00 US/Pacific the same day, when the impact of excessive traffic redirection was mitigated.\n[1]: https://cloud.google.com/blog/products/storage-data-transfer/a-peek-behind-colossus-googles-file-system\n## Remediation and Prevention\n* At 10:23 US/Pacific on Thursday, 30 May 2024, Google engineers were alerted to occasional increased latency of operations materializing data into storage in one zone in the US region. BigQuery automation redirected traffic out of the zone and investigation started to find the root cause.\n* By Wednesday, 5 June 2024, Google engineers expedited the investigation by coordinating between BigQuery engineering and several GCP infrastructure teams.\n* By Monday, 10 June 2024, other BigQuery zones alerted for similar symptoms, with automation being prevented from redirecting traffic as mitigation. Customer reports of slowness began accumulating.\n* **[Wider Impact Starts]** At 16:46 on Monday, 10 June 2024, an excessive redirection of traffic erroneously occurred. At 18:07 US/Pacific, the incident escalated further with multiple infrastructure, BigQuery engineering, and incident and customer management teams involved.\n* At 18:28 the excessive traffic redirection was rectified. Google engineers then took several actions to quickly absorb the incoming traffic by adding front end capacity and shifting traffic between zones while internal caches recovered.\n* **[Wider Impact Fully Mitigated]** At 20:00 on Monday, 10 June 2024, traffic returned to the baseline observed before the major sub-incident. 
Elevated incident management continued in order to root-cause the original symptoms of storage slowness.\n* Due to the nature of the root cause, zone traffic redirection, which is the fastest and most reliable mitigation for users impacted by a zone slowness, caused the problem to shift to other BigQuery customers elsewhere. This unfortunately complicated investigation and remediation for users and extended the duration of impact until the true root cause and trigger were identified.\n* **[Incident Fully Mitigated]** At 21:00 on Thursday, 13 June 2024, the fundamental root cause of the gradual loss of SSD caching, and its secondary and tertiary impacts on shuffle flush performance and query latency, was confirmed. Google engineers rectified the SSD cache configuration (the size of which was already increased in many zones as mitigation) and the incident was fully mitigated.\nWe apologize for the length and severity of this incident. We are taking immediate steps to prevent a recurrence and improve reliability in the future.\n* **Enhanced Detection:** - Enhance BigQuery’s telemetry to detect anomalies in SSD cache utilization faster and more efficiently.\n* Increase the SSD cache capacity for BigQuery Shuffle flushing across the fleet and rectify the SSD cache configuration to restore shuffle flushing performance to shorten mitigation time for any future occurrences.\n* **Preventive Action Items:** - Increase the safeguards against excessive traffic redirection by any means (manual and automatic). - Improve BigQuery’s resilience to sudden increases in traffic for the Streaming APIs to recover faster.\n## Detailed Description of Impact\n**BigQuery:**\n* A subset of customers would have experienced 500 errors while executing calls to insertAll and storage write APIs in the US Multi-region. Additionally, some customers may have experienced system errors using the Jobs and Query API.\n* BigQuery customers would have also experienced periods of reduced query performance and longer latencies. As a side effect, resource contention within user reservations would have also increased.\n* Some BigQuery customers experienced periods of inability to scale up resources using the BigQuery Autoscaler consistently.\n**Cloud Logging:** Ingestion delays to analytics buckets with location global. Logs Explorer queries were not affected.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-11T16:11:12+00:00","modified":"2024-06-26T22:08:25+00:00","when":"2024-06-11T16:11:12+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support .\n(All Times US/Pacific)\n**Incident Start:** 10 June 2024 16:45\n**Incident End:** 10 June 2024 20:00\n**Duration:** 3 hours and 15 minutes\n**Affected Services and Features:**\nGoogle BigQuery and Google Cloud Logging\n**Regions/Zones:** Multi-regions: US\n**Description:**\nBigQuery experienced elevated errors across multiple APIs in the US Multi-region due to the concurrent mitigation of simultaneous degradations, which impacted the BigQuery projects hosted in two degraded clusters. 
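The impact description above mentions 500 errors on insertAll and Storage Write calls. As an illustrative sketch only, and assuming the google-cloud-bigquery and google-api-core client libraries (which the incident report does not reference), a caller could cushion such transient 5xx errors with the libraries' built-in retry wrapper; `table_id` is a placeholder:

```python
from google.api_core import exceptions, retry
from google.cloud import bigquery

client = bigquery.Client()
table_id = "my-project.my_dataset.my_table"  # placeholder table reference

# Retry transient 5xx errors from the streaming-insert path with
# exponential backoff, as a client-side cushion during incidents
# like the one described above.
transient_retry = retry.Retry(
    predicate=retry.if_exception_type(
        exceptions.InternalServerError,  # HTTP 500
        exceptions.BadGateway,           # HTTP 502
        exceptions.ServiceUnavailable,   # HTTP 503
    ),
    initial=1.0,
    maximum=60.0,
    multiplier=2.0,
)

def insert_rows(rows):
    # insert_rows_json reports per-row problems in its return value;
    # transport-level 5xx failures raise and are retried by the wrapper.
    errors = transient_retry(client.insert_rows_json)(table_id, rows)
    if errors:
        raise RuntimeError(f"row-level insert errors: {errors}")
```

Client-side retries smooth brief error spikes but cannot compensate for a multi-hour capacity loss; for that, the report's own remediation items (safeguards on traffic redirection, larger SSD cache) are the relevant fixes.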
Google will complete a full incident report in the following days that will provide a full root cause.\n**Customer Impact:**\nBigQuery: A subset of customers would have experienced HTTP 500 errors while executing calls to insertAll and storage write APIs in the US Multi-region. Additionally, some customers may have experienced system errors using the Jobs and Query API.\nCloud Logging: Cloud Logging customers using Log Analytics faced log ingestion delays of up to 2 hours for their analytics buckets if they ingested logs via Cloud regions us-central1 or us-central2. Logs Explorer queries in the Google Cloud console were not impacted.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-11T03:09:07+00:00","modified":"2024-06-11T16:11:12+00:00","when":"2024-06-11T03:09:07+00:00","text":"The issue with Cloud Logging, Google BigQuery has been resolved for all affected users as of Monday, 2024-06-10 20:00 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-11T02:06:40+00:00","modified":"2024-06-11T03:09:10+00:00","when":"2024-06-11T02:06:40+00:00","text":"Summary: Issue with BigQuery Streaming API and Cloud Logging in US Multiregion\nDescription: Google engineers are working on mitigating the problem and we are observing the error rate for the impacted APIs dropping. We are closely monitoring the progress of the problem mitigation.\nWe will provide more information by Monday, 2024-06-10 20:30 US/Pacific.\nDiagnosis: Google BigQuery: Customers impacted by this issue may see 500 errors while executing BigQuery statements.\nCloud Logging: Ingestion delays to analytics buckets with location global. Logs Explorer queries are not affected.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-11T01:36:34+00:00","modified":"2024-06-11T02:06:40+00:00","when":"2024-06-11T01:36:34+00:00","text":"Summary: Issue with BigQuery Streaming API and Cloud Logging\nDescription: We are experiencing an issue with Google BigQuery Streaming API, Cloud Logging beginning on Monday, 2024-06-10 16:46 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-06-10 18:59 US/Pacific with current details.\nDiagnosis: Google BigQuery: Customers impacted by this issue may see 500 errors while executing BigQuery statements.\nCloud Logging: Ingestion delays to analytics buckets with location global. Logs Explorer queries are not affected.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-06-26T22:08:25+00:00","modified":"2024-06-26T22:08:25+00:00","when":"2024-06-26T22:08:25+00:00","text":"# Incident Report\n## Summary\nBetween Thursday, 30 May 2024 and Thursday, 13 June 2024 at 23:45 US/Pacific, BigQuery customers experienced elevated query latencies across the multiregions:us and multiregions:eu. 
As a result, operations requiring compute resources (queries, loads, extracts) would have also experienced resource contention during the same period, and some customers with queries or operations relying on the BigQuery Autoscaler would have been unable to scale up to the desired limits.\nAdditionally, between 16:46 and 20:00 US/Pacific on Monday, 10 June 2024, BigQuery experienced elevated errors across multiple APIs (Jobs, Query, Load, Extract, Storage Read and Write APIs) in the multiregion:us region.\n## Root Cause\nBigQuery uses a distributed shuffle infrastructure for execution of large and complex joins, aggregations and analytic operations needed for query execution. Shuffle storage is a tiered architecture that optimizes for storing data in memory but uses SSD, then HDD, as backing stores to flush to as aggregate needs increase. The incident was caused by a combination of factors.\n1. Colossus, Google's distributed file system [1], was migrating to a newer version. This migration caused a gradual increase in traffic to the new system per zone across the fleet.\n2. The SSD cache configuration for flushing was not appropriately set up for the newer file system version.\n3. As a result, BigQuery gradually lost portions of its SSD cache in the relevant zones, proportional to the traffic migrated to the new system.\n4. Queries needing to flush to disk experienced increased latency as flushing directly to HDD increasingly dominated.\nDiverting the network traffic from an affected zone is a mitigation usually taken while determining the root cause of a problem. However, an operator error resulted in reduction of capacity in multiple zones simultaneously on Monday, 10 June 2024 at 16:46 US/Pacific. This led to elevated errors across BigQuery APIs until 20:00 US/Pacific the same day, when the impact of excessive traffic redirection was mitigated.\n[1]: https://cloud.google.com/blog/products/storage-data-transfer/a-peek-behind-colossus-googles-file-system\n## Remediation and Prevention\n* At 10:23 US/Pacific on Thursday, 30 May 2024, Google engineers were alerted to occasional increased latency of operations materializing data into storage in one zone in the US region. BigQuery automation redirected traffic out of the zone and investigation started to find the root cause.\n* By Wednesday, 5 June 2024, Google engineers expedited the investigation by coordinating between BigQuery engineering and several GCP infrastructure teams.\n* By Monday, 10 June 2024, other BigQuery zones alerted for similar symptoms, with automation being prevented from redirecting traffic as mitigation. Customer reports of slowness began accumulating.\n* **[Wider Impact Starts]** At 16:46 on Monday, 10 June 2024, an excessive redirection of traffic erroneously occurred. At 18:07 US/Pacific, the incident escalated further with multiple infrastructure, BigQuery engineering, and incident and customer management teams involved.\n* At 18:28 the excessive traffic redirection was rectified. Google engineers then took several actions to quickly absorb the incoming traffic by adding front end capacity and shifting traffic between zones while internal caches recovered.\n* **[Wider Impact Fully Mitigated]** At 20:00 on Monday, 10 June 2024, traffic returned to the baseline observed before the major sub-incident. 
Elevated incident management continued in order to root-cause the original symptoms of storage slowness.\n* Due to the nature of the root cause, zone traffic redirection, which is the fastest and most reliable mitigation for users impacted by a zone slowness, caused the problem to shift to other BigQuery customers elsewhere. This unfortunately complicated investigation and remediation for users and extended the duration of impact until the true root cause and trigger were identified.\n* **[Incident Fully Mitigated]** At 21:00 on Thursday, 13 June 2024, the fundamental root cause of the gradual loss of SSD caching, and its secondary and tertiary impacts on shuffle flush performance and query latency, was confirmed. Google engineers rectified the SSD cache configuration (the size of which was already increased in many zones as mitigation) and the incident was fully mitigated.\nWe apologize for the length and severity of this incident. We are taking immediate steps to prevent a recurrence and improve reliability in the future.\n* **Enhanced Detection:** - Enhance BigQuery’s telemetry to detect anomalies in SSD cache utilization faster and more efficiently.\n* Increase the SSD cache capacity for BigQuery Shuffle flushing across the fleet and rectify the SSD cache configuration to restore shuffle flushing performance to shorten mitigation time for any future occurrences.\n* **Preventive Action Items:** - Increase the safeguards against excessive traffic redirection by any means (manual and automatic). - Improve BigQuery’s resilience to sudden increases in traffic for the Streaming APIs to recover faster.\n## Detailed Description of Impact\n**BigQuery:**\n* A subset of customers would have experienced 500 errors while executing calls to insertAll and storage write APIs in the US Multi-region. Additionally, some customers may have experienced system errors using the Jobs and Query API.\n* BigQuery customers would have also experienced periods of reduced query performance and longer latencies. As a side effect, resource contention within user reservations would have also increased.\n* Some BigQuery customers experienced periods of inability to scale up resources using the BigQuery Autoscaler consistently.\n**Cloud Logging:** Ingestion delays to analytics buckets with location global. 
Logs Explorer queries were not affected.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Logging","id":"PuCJ6W2ovoDhLcyvZ1xa"},{"title":"Google BigQuery","id":"9CcrhHUcFevXPSVaSxkf"},{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"}],"uri":"incidents/hAHmS7WwWk83e4ZQ5XUi","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"chEUMpNnBDBeMD8cy533","number":"11442483043828797801","begin":"2024-06-10T18:02:13+00:00","created":"2024-06-10T20:44:05+00:00","end":"2024-06-10T22:14:32+00:00","modified":"2024-06-10T22:14:35+00:00","external_desc":"Multi Region US: High normalization/parsing delay in the US region","updates":[{"created":"2024-06-10T22:14:32+00:00","modified":"2024-06-10T22:14:36+00:00","when":"2024-06-10T22:14:32+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Monday, 2024-06-10 15:13 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.\nThank you for choosing us!","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-10T20:44:01+00:00","modified":"2024-06-10T22:14:35+00:00","when":"2024-06-10T20:44:01+00:00","text":"Summary: Multi Region US: High normalization/parsing delay in the US region\nDescription: We've received a report of an issue with Chronicle Security as of Monday, 2024-06-10 08:00 US/Pacific.\nOur engineering team continues to investigate the issue. Some mitigation is in place and metrics are improving. We do not have an ETA for resolution at this time.\nWe will provide an update by Monday, 2024-06-10 15:30 US/Pacific with current details.\nDiagnosis: Customers may experience a delay in data normalization, which may result in commensurate delays in rule detections.\nWorkaround: There is no workaround at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-06-10T22:14:32+00:00","modified":"2024-06-10T22:14:36+00:00","when":"2024-06-10T22:14:32+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Monday, 2024-06-10 15:13 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.\nThank you for choosing us!","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/chEUMpNnBDBeMD8cy533","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"RAwbQ9t1TVg9ZxC62iuk","number":"17171479791044799595","begin":"2024-06-07T22:18:47+00:00","created":"2024-06-07T23:10:46+00:00","end":"2024-06-08T02:55:32+00:00","modified":"2024-06-08T03:01:57+00:00","external_desc":"Issue with Google Cloud VMWare Engine Private Cloud Provisioning.","updates":[{"created":"2024-06-08T02:55:32+00:00","modified":"2024-06-08T03:01:57+00:00","when":"2024-06-08T02:55:32+00:00","text":"The issue with VMWare engine has been resolved for all affected users as of Friday, 2024-06-07 19:25 US/Pacific.\nWe thank you for your patience while we worked on resolving the 
issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-08T01:05:14+00:00","modified":"2024-06-08T02:55:35+00:00","when":"2024-06-08T01:05:14+00:00","text":"Summary: Issue with Google Cloud VMWare Engine Private Cloud Provisioning.\nDescription: We are experiencing an issue with the VMWare engine beginning on Friday, 2024-06-07 02:00 US/Pacific.\nOur engineering team continues to investigate the issue in collaboration with our backend service to ascertain a mitigation strategy.\nWe will provide an update by Friday, 2024-06-07 21:00 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may experience delays in creating any new Private Cloud as well as expansion of existing Private Clouds.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-06-07T23:10:42+00:00","modified":"2024-06-08T01:05:14+00:00","when":"2024-06-07T23:10:42+00:00","text":"Summary: Issue with Google Cloud VMWare Engine Private Cloud Provisioning.\nDescription: We are experiencing an issue with the VMWare engine beginning on Friday, 2024-06-07 02:00 US/Pacific.\nOur engineering team continues to investigate the issue in collaboration with our backend service to ascertain a mitigation strategy.\nWe will provide an update by Friday, 2024-06-07 18:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may experience delays in creating any new Private Cloud as well as expansion of existing Private Clouds.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto 
(northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]}],"most_recent_update":{"created":"2024-06-08T02:55:32+00:00","modified":"2024-06-08T03:01:57+00:00","when":"2024-06-08T02:55:32+00:00","text":"The issue with VMWare engine has been resolved for all affected users as of Friday, 2024-06-07 19:25 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"VMWare engine","id":"9H6gWUHvb2ZubeoxzQ1Y"}],"uri":"incidents/RAwbQ9t1TVg9ZxC62iuk","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"id":"eeym8FntbT9cyvr8Jv5f","number":"8567720924743522001","begin":"2024-06-07T15:56:00+00:00","created":"2024-06-07T16:47:39+00:00","end":"2024-06-07T17:17:00+00:00","modified":"2024-06-07T21:26:04+00:00","external_desc":"Dialogflow CX and Agent Assist are experiencing elevated error rates and timeouts with Generative features in multiple regions","updates":[{"created":"2024-06-07T21:25:46+00:00","modified":"2024-06-07T21:26:04+00:00","when":"2024-06-07T21:25:46+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support .\n(All Times US/Pacific)\n**Incident Start:** 7 June 2024 08:56\n**Incident End:** 7 June 2024 10:17\n**Duration:** 1 hour, 21 minutes\n**Affected Services and Features:**\nAgent Assist\nDialogflow CX\n**Regions/Zones:** Multi-Regional\n**Description:**\nDialogflow CX and Agent Assist experienced elevated error rates and timeouts with Generative features in multiple regions for a duration of 1 hour and 21 minutes. From preliminary analysis, the root cause has been identified as a specific workload that overloaded a backend service. All services were mitigated by limiting the specific workload, decreasing utilization, which resulted in recovery.\n**Customer Impact:**\nDuring the incident timeframe, impacted customers experienced:\nDialogflow CX and Agent Assist users observed increased error rates, long latency for queries with generative features enabled, and timeouts.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-07T17:34:21+00:00","modified":"2024-06-07T21:25:46+00:00","when":"2024-06-07T17:34:21+00:00","text":"The issue with Agent Assist, Dialogflow CX has been resolved for all affected users as of Friday, 2024-06-07 10:34 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-07T17:15:54+00:00","modified":"2024-06-07T17:34:25+00:00","when":"2024-06-07T17:15:54+00:00","text":"Summary: Dialogflow CX and Agent Assist are experiencing elevated error rates and timeouts with Generative features in multiple regions\nDescription: Our engineering team has identified the cause and performed mitigation actions. 
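The mini report above credits recovery to limiting a specific overloading workload. As an illustrative sketch only (the report does not describe Google's actual server-side mechanism), a token bucket is the usual shape of such a cap: requests are admitted while tokens remain and shed once the bucket drains, which bounds the load reaching the backend.

```python
import threading
import time

# Minimal token-bucket limiter; rates and names are illustrative.
class TokenBucket:
    def __init__(self, rate_per_sec: float, burst: int):
        self.rate = rate_per_sec
        self.capacity = burst
        self.tokens = float(burst)
        self.stamp = time.monotonic()
        self.lock = threading.Lock()

    def allow(self) -> bool:
        with self.lock:
            now = time.monotonic()
            # Refill proportionally to elapsed time, capped at burst size.
            self.tokens = min(self.capacity,
                              self.tokens + (now - self.stamp) * self.rate)
            self.stamp = now
            if self.tokens >= 1:
                self.tokens -= 1
                return True
            return False

# e.g. cap a generative-query workload at 5 QPS with bursts of 10:
limiter = TokenBucket(rate_per_sec=5, burst=10)
if limiter.allow():
    pass  # forward the query to the backend
else:
    pass  # shed or queue it until tokens refill
```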
Our internal monitoring indicates the error rate subsided.\nOur engineers are continuing to monitor our systems closely.\nWe will provide an update by Friday, 2024-06-07 10:50 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- Dialogflow CX is experiencing elevated error rates and timeouts with Generative features enabled.\n- Agent Assist's Knowledge Assist and Knowledge Search features are also impacted.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Global","id":"global"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-06-07T16:47:35+00:00","modified":"2024-06-07T17:15:57+00:00","when":"2024-06-07T16:47:35+00:00","text":"Summary: Dialogflow CX and Agent Assist are experiencing elevated error rates and timeouts with Generative features in multiple regions\nDescription: We are experiencing an issue with Dialogflow CX, Agent Assist beginning at Friday, 2024-06-07 08:56 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-06-07 10:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- Dialogflow CX is experiencing elevated error rates and timeouts with Generate Text requests.\n- Agent Assist's Knowledge Assist and Knowledge Search features are also impacted.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Global","id":"global"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2024-06-07T21:25:46+00:00","modified":"2024-06-07T21:26:04+00:00","when":"2024-06-07T21:25:46+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support .\n(All Times US/Pacific)\n**Incident Start:** 7 June 2024 08:56\n**Incident End:** 7 June 2024 10:17\n**Duration:** 1 hour, 21 minutes\n**Affected Services and Features:**\nAgent Assist\nDialogflow CX\n**Regions/Zones:** Multi-Regional\n**Description:**\nDialogflow CX and Agent Assist experienced elevated error rates and timeouts with Generative features in multiple regions for a duration of 1 hour and 21 minutes. From preliminary analysis, the root cause has been identified as a specific workload that overloaded a backend service. All services were mitigated by limiting the specific workload, decreasing utilization, which resulted in recovery.\n**Customer Impact:**\nDuring the incident timeframe, impacted customers experienced:\nDialogflow CX and Agent Assist users observed increased error rates, long latency for queries with generative features enabled, and timeouts.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Agent Assist","id":"eUntUKqUrHdbBLNcVVXq"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"},{"title":"Dialogflow CX","id":"BnCicQdHSdxaCv8Ya6Vm"}],"uri":"incidents/eeym8FntbT9cyvr8Jv5f","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Global","id":"global"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"tJSctv9JBC8tdGCty6T9","number":"10366592314155301825","begin":"2024-06-06T11:27:58+00:00","created":"2024-06-06T11:51:16+00:00","end":"2024-06-06T13:47:23+00:00","modified":"2024-06-06T16:10:28+00:00","external_desc":"Apigee Edge customers may observe 5xx errors in us-central1 region","updates":[{"created":"2024-06-06T16:10:28+00:00","modified":"2024-06-06T16:10:28+00:00","when":"2024-06-06T16:10:28+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 6 June 2024 00:00\n**Incident End:** 6 June 2024 06:00\n**Duration:** 6 hours\n**Affected Services and Features:**\nApigee Edge Public Cloud\n**Regions/Zones:** us-central1\n**Description:**\nApigee Edge Public Cloud customers located in the us-central1 region observed errors and their traffic may have been interrupted. 
From preliminary analysis, the root cause of the issue is a misconfiguration of the Apigee Edge NATs (Network Address Translation) that caused the expected Elastic IPs not to be present on the NAT nodes.\n**Customer Impact:**\nApigee Edge Public Cloud customers located in the us-central1 region experienced:\nErrors for all traffic routed through the affected NAT instances\nTraffic interruptions\n**Additional details:**\nElastic IPs attached to NAT instances in us-central1 were unintentionally removed. A manual fix was applied to reattach the correct Elastic IPs.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-06T13:47:23+00:00","modified":"2024-06-06T16:10:28+00:00","when":"2024-06-06T13:47:23+00:00","text":"The issue with Apigee Edge has been resolved for all affected users as of Thursday, 2024-06-06 06:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-06T13:15:30+00:00","modified":"2024-06-06T13:47:34+00:00","when":"2024-06-06T13:15:30+00:00","text":"Summary: Apigee Edge customers may observe 5xx errors in us-central1 region\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-06-06 07:00 US/Pacific.\nDiagnosis: Apigee Edge customers located in us-central1 region may observe 5xx errors and their traffic may be interrupted\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-06-06T12:20:41+00:00","modified":"2024-06-06T13:15:30+00:00","when":"2024-06-06T12:20:41+00:00","text":"Summary: Apigee Edge customers may observe 5xx errors in us-central1 region\nDescription: We are experiencing an issue with Apigee Edge beginning on Thursday, 2024-06-06 00:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-06-06 06:30 US/Pacific with current details.\nDiagnosis: Apigee Edge customers located in us-central1 region may observe 5xx errors and their traffic may be interrupted\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-06-06T11:53:21+00:00","modified":"2024-06-06T12:20:41+00:00","when":"2024-06-06T11:53:21+00:00","text":"Summary: Apigee Edge customers may observe 5xx errors in us-central1 region\nDescription: We are experiencing an issue with Apigee Edge beginning on Thursday, 2024-06-06 00:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-06-06 05:30 US/Pacific with current details.\nDiagnosis: Apigee Edge customers located in us-central1 region may observe 5xx errors and their traffic may be interrupted\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-06-06T11:50:55+00:00","modified":"2024-06-06T11:53:21+00:00","when":"2024-06-06T11:50:55+00:00","text":"Summary: Apigee customers may observe 5xx errors in us-central1 region\nDescription: We are experiencing an issue with Apigee beginning on Thursday, 2024-06-06 00:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-06-06 05:30 US/Pacific with current details.\nDiagnosis: Apigee customers located 
in us-central1 region may observe 5xx errors and their traffic may be interrupted\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-06-06T16:10:28+00:00","modified":"2024-06-06T16:10:28+00:00","when":"2024-06-06T16:10:28+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 6 June 2024 00:00\n**Incident End:** 6 June 2024 06:00\n**Duration:** 6 hours\n**Affected Services and Features:**\nApigee Edge Public Cloud\n**Regions/Zones:** us-central1\n**Description:**\nApigee Edge Public Cloud customers located in the us-central1 region observed errors and their traffic may have been interrupted. From preliminary analysis, the root cause of the issue is a misconfiguration of the Apigee Edge NATs (Network Address Translation) that caused the expected Elastic IPs not to be present on the NAT nodes.\n**Customer Impact:**\nApigee Edge Public Cloud customers located in the us-central1 region experienced:\nErrors for all traffic routed through the affected NAT instances\nTraffic interruptions\n**Additional details:**\nElastic IPs attached to NAT instances in us-central1 were unintentionally removed. A manual fix was applied to reattach the correct Elastic IPs.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"9Y13BNFy4fJydvjdsN3X","service_name":"Apigee","affected_products":[{"title":"Apigee","id":"9Y13BNFy4fJydvjdsN3X"}],"uri":"incidents/tJSctv9JBC8tdGCty6T9","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"Hrepehs4bLtgxEefSzSw","number":"15460437067707160221","begin":"2024-06-04T17:29:00+00:00","created":"2024-06-04T21:13:56+00:00","end":"2024-06-04T21:15:00+00:00","modified":"2024-06-17T07:27:28+00:00","external_desc":"Google Cloud Shell - Connectivity Issues","updates":[{"created":"2024-06-14T06:33:37+00:00","modified":"2024-06-17T07:27:28+00:00","when":"2024-06-14T06:33:37+00:00","text":"# Incident Report\n## Summary\nCloud Shell enables all Google Cloud users to access and manage their cloud resources directly from their web browser, using a command-line interface. This service eliminates the need for users to install the Google Cloud Software Development Kits (SDK) or other tools on their local machines. Cloud Shell is implemented as a container running on a Virtual Machine (VM) in multi-tenant Google-owned projects.\nTo improve scalability and availability, Cloud Shell runs multiple Google Kubernetes Engine (GKE) clusters per region, and runs in multiple regions. These clusters are updated weekly.\nOn 4 June 2024, some users attempting to connect to Cloud Shell may have encountered connectivity issues for a duration of 3 hours 46 minutes. 
We apologize for the inconvenience this service disruption has caused.\n## Root Cause\nDuring a routine weekly update, the Cloud Shell engineering team deployed a script that had been tested and validated in the pre-production environment. However, when executed across production GKE clusters supporting Cloud Shell, the script unintentionally enabled GKE API-side validation for custom Kubernetes resources. This validation caused GKE to reject updates to these custom resources, ultimately leading to provisioning errors in Cloud Shell. Affected customers would have received the following error message: \"Cloud Shell is experiencing some issues provisioning a VM to you. Please try again in a few minutes.\"\n## Remediation and Prevention\nGoogle engineers were alerted to the issue via an internal monitoring alert on 4 June 2024 at 10:29 US/Pacific and immediately started an investigation. Once the nature and scope of the issue were identified, engineers rapidly developed and deployed a code fix to all affected Cloud Shell GKE clusters. This fix ensured that Cloud Shell's custom resources were correctly validated, thus resolving the provisioning errors. By 14:29 on the same day, the issue was fully mitigated for all impacted users.\nGoogle is committed to preventing a repeat of this issue by implementing stricter change control through more comprehensive policies and guidelines for manual patching to the Cloud Shell environment.\nWe apologize for the impact of this issue and are taking steps to address the scope and duration of this incident as well as the root cause itself. We thank you for your business.\n## Detailed Description of Impact\nStarting on 4 June 2024 at 10:29 US/Pacific, some customers attempting to connect to Cloud Shell may have encountered connectivity issues for a duration of 3 hours 46 minutes. Customers affected by this issue may have received the following error on Cloud Shell: “Cloud Shell is experiencing some issues provisioning a VM to you. Please try again in a few minutes.”","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-05T05:56:12+00:00","modified":"2024-06-14T06:33:37+00:00","when":"2024-06-05T05:56:12+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage has caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support .\n**(All Times US/Pacific)**\n**Incident Start:** 4 June 2024 10:29\n**Incident End:** 4 June 2024 14:15\n**Duration:** 3 hours 46 minutes\n**Affected Services and Features:** Cloud Shell\n**Regions/Zones:** Global\n**Description:**\nStarting on 4 June 2024 10:29, customers attempting to connect to Cloud Shell may have encountered connectivity issues for a duration of 3 hours 46 minutes. Based on the preliminary analysis, the root cause was identified as a recent change performed in GKE (Google Kubernetes Engine) that resulted in an API validation error. Google will complete a full incident report in the following days that will provide a full root cause.\n**Customer Impact:**\nCustomers affected by this issue would have encountered the following error on Cloud Shell: “Cloud Shell is experiencing some issues provisioning a VM to you. 
Please try again in a few minutes.”","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-04T21:33:51+00:00","modified":"2024-06-05T05:56:12+00:00","when":"2024-06-04T21:33:51+00:00","text":"The issue with Google Cloud Shell has been resolved for all affected users as of Tuesday, 2024-06-04 14:29 US/Pacific.\nWe understand that this issue impacted your ability to access and use our services; we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-04T21:13:51+00:00","modified":"2024-06-04T21:33:54+00:00","when":"2024-06-04T21:13:51+00:00","text":"Summary: Google Cloud Shell - Connectivity Issues\nDescription: We are experiencing an issue with Google Cloud Shell beginning on Tuesday, 2024-06-04 11:14 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-06-04 14:45 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers trying to connect to Cloud Shell may not be able to do so.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[]}],"most_recent_update":{"created":"2024-06-14T06:33:37+00:00","modified":"2024-06-17T07:27:28+00:00","when":"2024-06-14T06:33:37+00:00","text":"# Incident Report\n## Summary\nCloud Shell enables all Google Cloud users to access and manage their cloud resources directly from their web browser, using a command-line interface. This service eliminates the need for users to install the Google Cloud Software Development Kit (SDK) or other tools on their local machines. Cloud Shell is implemented as a container running on a Virtual Machine (VM) in multi-tenant Google-owned projects.\nTo improve scalability and availability, Cloud Shell runs multiple Google Kubernetes Engine (GKE) clusters per region, and runs in multiple regions. These clusters are updated weekly.\nOn 4 June 2024, some users attempting to connect to Cloud Shell may have encountered connectivity issues for a duration of 3 hours 46 minutes. We apologize for the inconvenience this service disruption has caused.\n## Root Cause\nDuring a routine weekly update, the Cloud Shell engineering team deployed a script that had been tested and validated in the pre-production environment. However, when executed across production GKE clusters supporting Cloud Shell, the script unintentionally enabled GKE API-side validation for custom Kubernetes resources. This validation caused GKE to reject updates to these custom resources, ultimately leading to provisioning errors in Cloud Shell. Affected customers would have received the following error message: \"Cloud Shell is experiencing some issues provisioning a VM to you. Please try again in a few minutes.\"\n## Remediation and Prevention\nGoogle engineers were alerted to the issue via an internal monitoring alert on 4 June 2024 at 10:29 US/Pacific and immediately started an investigation. Once the nature and scope of the issue were identified, engineers rapidly developed and deployed a code fix to all affected Cloud Shell GKE clusters. This fix ensured that Cloud Shell's custom resources were correctly validated, thus resolving the provisioning errors. 
By 14:29 on the same day, the issue was fully mitigated for all impacted users.\nGoogle is committed to preventing a repeat of this issue by implementing stricter change control through more comprehensive policies and guidelines for manual patching of the Cloud Shell environment.\nWe apologize for the impact of this issue and are taking steps to address the scope and duration of this incident as well as the root cause itself. We thank you for your business.\n## Detailed Description of Impact\nStarting on 4 June 2024 at 10:29 US/Pacific, some customers attempting to connect to Cloud Shell may have encountered connectivity issues for a duration of 3 hours 46 minutes. Customers affected by this issue may have received the following error on Cloud Shell: “Cloud Shell is experiencing some issues provisioning a VM to you. Please try again in a few minutes.”","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"Wdsr1n5vyDvCt78qEifm","service_name":"Google Cloud Console","affected_products":[{"title":"Google Cloud Console","id":"Wdsr1n5vyDvCt78qEifm"}],"uri":"incidents/Hrepehs4bLtgxEefSzSw","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"RUuXnDMJ8umNQMFqHpa7","number":"3089702292998495295","begin":"2024-06-04T03:43:00+00:00","created":"2024-06-04T06:01:45+00:00","end":"2024-06-04T08:27:00+00:00","modified":"2024-06-04T18:22:48+00:00","external_desc":"Google Cloud Tasks and Cloud Scheduler are experiencing elevated errors while executing the task APIs in the us-central1 region.","updates":[{"created":"2024-06-04T18:21:16+00:00","modified":"2024-06-04T18:22:48+00:00","when":"2024-06-04T18:21:16+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 3 June, 2024 20:43\n**Incident End:** 4 June, 2024 01:27\n**Duration:** 4 hours, 44 minutes\n**Affected Services and Features:**\nGoogle Cloud Tasks\nCloud Scheduler\n**Regions/Zones:** us-central1\n**Description:**\nGoogle Cloud Tasks and Cloud Scheduler experienced an increased error rate in the us-central1 region for a duration of 4 hours, 44 minutes. The issues affected Tasks API usage, Task execution, and scheduled job execution, resulting in task processing delays or failures. This may have caused delays or failures in other dependent processes. While the exact cause is still being investigated, initial findings point to a change activity causing an unexpected impact.\n**Customer Impact:**\nIncreased error rate when using the Tasks API and for Task execution\nIncreased failure rate for some Scheduler job executions\n**Additional details:**\nThere is no immediate risk of the issue recurring. 
Additional preventative action items will be identified once the root cause analysis is complete.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-04T08:29:00+00:00","modified":"2024-06-04T18:21:16+00:00","when":"2024-06-04T08:29:00+00:00","text":"The issue with Google Cloud Scheduler and Google Cloud Tasks has been resolved for all affected users as of Tuesday, 2024-06-04 01:28 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-04T07:27:24+00:00","modified":"2024-06-04T08:29:11+00:00","when":"2024-06-04T07:27:24+00:00","text":"Summary: Google Cloud Tasks and Cloud Scheduler are experiencing elevated errors while executing the task APIs in the us-central1 region.\nDescription: Our engineering teams are actively working to implement the mitigation fix.\n**The mitigation has been completed for Google Cloud Scheduler; customers should see service recovery.**\n**Our initial mitigation efforts have successfully restored partial functionality of Google Cloud Tasks for most customers. We anticipate full recovery by Tuesday, 2024-06-04 01:30 US/Pacific as we complete the remaining mitigation rollouts.**\nWe will provide more information by Tuesday, 2024-06-04 02:00 US/Pacific.\nDiagnosis: The impacted customers in the us-central1 region would experience increased errors when using the Tasks API and for Task execution. This may result in tasks not being processed as expected, leading to delays or failures in dependent processes.\nAdditionally, there were failures for some Scheduler job executions, which are not yet mitigated.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-06-04T06:38:43+00:00","modified":"2024-06-04T07:27:28+00:00","when":"2024-06-04T06:38:43+00:00","text":"Summary: Google Cloud Tasks and Cloud Scheduler are experiencing elevated errors while executing the task APIs in the us-central1 region.\nDescription: Our engineering teams are actively working to implement the mitigation fix.\n**The mitigation has been completed for Google Cloud Scheduler; customers should see service recovery.**\nThe mitigation for Google Cloud Tasks is underway and expected to be completed by Tuesday, 2024-06-04 00:15 US/Pacific.\nWe will provide more information by Tuesday, 2024-06-04 00:45 US/Pacific.\nDiagnosis: The impacted customers in the us-central1 region would experience increased errors when using the Tasks API and for Task execution. 
This may result in tasks not being processed as expected, leading to delays or failures in dependent processes.\nAdditionally, there were failures for some Scheduler job executions, which are not yet mitigated.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-06-04T06:01:42+00:00","modified":"2024-06-04T06:38:47+00:00","when":"2024-06-04T06:01:42+00:00","text":"Summary: Google Cloud Tasks is experiencing elevated errors while executing the task APIs in the us-central1 region.\nDescription: Our engineering teams are actively working to implement the mitigation fix.\nThe mitigation fix is expected to be completed by Tuesday, 2024-06-04 00:00 US/Pacific.\nWe will provide more information by Tuesday, 2024-06-04 00:30 US/Pacific.\nDiagnosis: The impacted customers in the us-central1 region would experience increased errors when using the Tasks API and for Task execution. This may result in tasks not being processed as expected, leading to delays or failures in dependent processes.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-06-04T18:21:16+00:00","modified":"2024-06-04T18:22:48+00:00","when":"2024-06-04T18:21:16+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 3 June, 2024 20:43\n**Incident End:** 4 June, 2024 01:27\n**Duration:** 4 hours, 44 minutes\n**Affected Services and Features:**\nGoogle Cloud Tasks\nCloud Scheduler\n**Regions/Zones:** us-central1\n**Description:**\nGoogle Cloud Tasks and Cloud Scheduler experienced an increased error rate in the us-central1 region for a duration of 4 hours, 44 minutes. The issues affected Tasks API usage, Task execution, and scheduled job execution, resulting in task processing delays or failures. This may have caused delays or failures in other dependent processes. While the exact cause is still being investigated, initial findings point to a change activity causing an unexpected impact.\n**Customer Impact:**\nIncreased error rate when using the Tasks API and for Task execution\nIncreased failure rate for some Scheduler job executions\n**Additional details:**\nThere is no immediate risk of the issue recurring. 
Additional preventative action items will be identified once the root cause analysis is complete.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Cloud Scheduler","id":"Y9fKAQ6BVTQUYomrNN9A"},{"title":"Google Cloud Tasks","id":"tMWyzhyKK4rAzAf7x62h"}],"uri":"incidents/RUuXnDMJ8umNQMFqHpa7","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"8L8sTkLJ4ZQ3Ro9t9mcS","number":"11755089611509900766","begin":"2024-06-03T21:40:05+00:00","created":"2024-06-03T22:10:20+00:00","end":"2024-06-03T23:46:13+00:00","modified":"2024-06-03T23:46:16+00:00","external_desc":"Google Distributed Cloud Edge: We are seeing issues with management traffic between GDC Connected Zones and Google.","updates":[{"created":"2024-06-03T23:46:13+00:00","modified":"2024-06-03T23:46:17+00:00","when":"2024-06-03T23:46:13+00:00","text":"The issue with Google Distributed Cloud Edge is believed to be affecting a very small number of customers and our Engineering Team is working on it.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nNo further updates will be provided here.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-03T23:31:58+00:00","modified":"2024-06-03T23:46:16+00:00","when":"2024-06-03T23:31:58+00:00","text":"Summary: Google Distributed Cloud Edge: We are seeing issues with management traffic between GDC Connected Zones and Google.\nDescription: Upon investigation, our engineers have determined that this issue does not have a global impact. The issue has been confirmed to be impacting the following regions only:\neurope-north1\neurope-west1\neurope-west3\neurope-west9\nnorthamerica-northeast1\nnorthamerica-northeast2\nus-central1\nus-east1\nus-west1\nMitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2024-06-03 18:00 US/Pacific.\nDiagnosis: Customers impacted by this issue may be unable to create new clusters or upgrade GDC Connected versions on existing clusters.\nThis affects some system management tasks and cluster provisioning operations. 
Customer workloads are unaffected.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-06-03T22:35:28+00:00","modified":"2024-06-03T23:32:01+00:00","when":"2024-06-03T22:35:28+00:00","text":"Summary: Google Distributed Cloud Edge: We are seeing issues with management traffic between GDC Connected Zones and Google.\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Friday, 2024-05-31 07:48 US/Pacific.\nOur engineering team continues to investigate the issue to ascertain a mitigation strategy.\nWe will provide an update by Monday, 2024-06-03 17:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers impacted by this issue may be unable to create new clusters or upgrade GDC Connected versions on existing clusters.\nThis affects some system management tasks and cluster provisioning operations. Customer workloads are unaffected.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-06-03T22:10:15+00:00","modified":"2024-06-03T22:35:30+00:00","when":"2024-06-03T22:10:15+00:00","text":"Summary: Google Distributed Cloud Edge: We are seeing issues with management traffic between GDC Connected Zones and Google.\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on 
Friday, 2024-05-31 07:48 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-06-03 16:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers impacted by this issue may be unable to create new clusters or upgrade GDC Connected versions on existing clusters.\nThis affects some system management tasks and cluster provisioning operations. Customer workloads are unaffected.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-06-03T23:46:13+00:00","modified":"2024-06-03T23:46:17+00:00","when":"2024-06-03T23:46:13+00:00","text":"The issue with Google Distributed Cloud Edge is believed to be affecting a very small number of customers and our Engineering Team is working on it.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nNo further updates will be provided here.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"JKyM3LJTqgETjRCvSK6w","service_name":"Google Distributed Cloud Edge","affected_products":[{"title":"Google Distributed Cloud Edge","id":"JKyM3LJTqgETjRCvSK6w"}],"uri":"incidents/8L8sTkLJ4ZQ3Ro9t9mcS","currently_affected_locations":[],"previously_affected_locations":[{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"BuY95M3s4h7KVrEsTKTe","number":"16443732016849890186","begin":"2024-06-03T21:23:52+00:00","created":"2024-06-03T21:54:35+00:00","end":"2024-06-04T00:05:16+00:00","modified":"2024-06-04T00:05:18+00:00","external_desc":"We've received a report of an issue with Spectrum Access System.","updates":[{"created":"2024-06-04T00:05:16+00:00","modified":"2024-06-04T00:05:19+00:00","when":"2024-06-04T00:05:16+00:00","text":"The issue with Spectrum Access System is believed to be affecting a very small number of customers and our Engineering Team is working on it.\nIf, as a customer, you are seeing high latency while using your Citizens Broadband Radio Service Devices (CBSDs) or Domain Proxies, we recommend that you close and reopen TCP connections to Google SAS.\nIf you have any further questions, please open a case with the Google Cloud Support Team and we will work with you until your queries are resolved.\nWe thank you for your patience.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-06-03T22:44:36+00:00","modified":"2024-06-04T00:05:18+00:00","when":"2024-06-03T22:44:36+00:00","text":"Summary: We've received a report of an issue with Spectrum Access System.\nDescription: We are experiencing an issue with the Spectrum Access System beginning around Friday, 2024-05-24 21:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-06-03 17:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this 
issue are experiencing elevated CBSD API latency on their CBSDs.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-06-03T22:31:17+00:00","modified":"2024-06-03T22:44:40+00:00","when":"2024-06-03T22:31:17+00:00","text":"Summary: We've received a report of an issue with Spectrum Access System.\nDescription: We are experiencing an issue with the Spectrum Access System beginning on Friday, 2024-05-31 12:10 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-06-03 17:00 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue are experiencing elevated CBSD API latency on their CBSDs.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]},{"created":"2024-06-03T21:54:28+00:00","modified":"2024-06-03T22:31:17+00:00","when":"2024-06-03T21:54:28+00:00","text":"Summary: We've received a report of an issue with Spectrum Access System.\nDescription: We are experiencing an issue with the Spectrum Access System beginning on Friday, 2024-05-31 12:10 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-06-03 15:23 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue are experiencing a low timeout on their requests.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[]}],"most_recent_update":{"created":"2024-06-04T00:05:16+00:00","modified":"2024-06-04T00:05:19+00:00","when":"2024-06-04T00:05:16+00:00","text":"The issue with Spectrum Access System is believed to be affecting a very small number of customers and our Engineering Team is working on it.\nIf, as a customer, you are seeing high latency while using your Citizens Broadband Radio Service Devices (CBSDs) or Domain Proxies, we recommend that you close and reopen TCP connections to Google SAS.\nIf you have any further questions, please open a case with the Google Cloud Support Team and we will work with you until your queries are resolved.\nWe thank you for your patience.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"mvbUB33MdKYrS5tawfzM","service_name":"Spectrum Access System","affected_products":[{"title":"Spectrum Access System","id":"mvbUB33MdKYrS5tawfzM"}],"uri":"incidents/BuY95M3s4h7KVrEsTKTe","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"AzpZZehsqqWatRGVqnvN","number":"4928440150956239615","begin":"2024-05-29T20:52:49+00:00","created":"2024-05-29T21:19:30+00:00","end":"2024-05-29T22:10:23+00:00","modified":"2024-05-29T22:10:26+00:00","external_desc":"Regional outage in northamerica-northeast1 and us-west4 for Google Cloud NetApp Volumes","updates":[{"created":"2024-05-29T22:10:23+00:00","modified":"2024-05-29T22:10:27+00:00","when":"2024-05-29T22:10:23+00:00","text":"The issue with Google Cloud NetApp Volumes has been resolved for all affected projects as of Wednesday, 2024-05-29 15:04 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-29T21:19:23+00:00","modified":"2024-05-29T22:10:26+00:00","when":"2024-05-29T21:19:23+00:00","text":"Summary: Regional outage in northamerica-northeast1 and us-west4 for Google Cloud NetApp Volumes\nDescription: We are experiencing a regional outage in 
northamerica-northeast1 and us-west4 for Google Cloud NetApp Volumes beginning on Wednesday, 2024-05-29 11:06 US/Pacific.\nWe are working with our Partner Engineering team to renew SSL certificates in impacted regions.\nWe will provide an update by Wednesday, 2024-05-29 16:00 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may not be able to create, list or update resources in the impacted regions. This applies to all GCNV resources, including but not limited to Storage Pools, Volumes, Backups, BackupVaults, and Replications.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-05-29T22:10:23+00:00","modified":"2024-05-29T22:10:27+00:00","when":"2024-05-29T22:10:23+00:00","text":"The issue with Google Cloud NetApp Volumes has been resolved for all affected projects as of Wednesday, 2024-05-29 15:04 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"vtzMUyQ4z9CbA1x6z85s","service_name":"Google Cloud NetApp Volumes","affected_products":[{"title":"Google Cloud NetApp Volumes","id":"vtzMUyQ4z9CbA1x6z85s"}],"uri":"incidents/AzpZZehsqqWatRGVqnvN","currently_affected_locations":[],"previously_affected_locations":[{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"kfPPiGH2SorcnAeB1UyH","number":"12320010141960623450","begin":"2024-05-28T00:44:36+00:00","created":"2024-05-28T01:07:40+00:00","end":"2024-05-28T03:53:33+00:00","modified":"2024-05-28T03:53:36+00:00","external_desc":"Some Google Cloud NetApp Volumes customers experiencing UI errors","updates":[{"created":"2024-05-28T03:53:33+00:00","modified":"2024-05-28T03:53:37+00:00","when":"2024-05-28T03:53:33+00:00","text":"The issue with Google Cloud NetApp Volumes has been resolved for all affected users as of Monday, 2024-05-27 20:37 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-28T03:07:54+00:00","modified":"2024-05-28T03:53:36+00:00","when":"2024-05-28T03:07:54+00:00","text":"Summary: Some Google Cloud NetApp Volumes customers experiencing UI errors\nDescription: Our engineering team continues to investigate the issue in collaboration with NetApp engineering.\nWe will provide more information by Monday, 2024-05-27 22:30 US/Pacific.\nDiagnosis: Customers impacted by this issue may encounter an error message on the UI page (https://console.cloud.google.com/netapp/cloud-volumes/volumes) that says: “There was an error while loading /netapp/cloud-volumes?project=g1p-sre-consumer-05\u0026supportedpurview=project. 
Please try again”.\nThis issue only impacts customers who have not migrated to the Cloud NetApp Volumes User Interface and are still using the NetApp Cloud Volumes Service UI.\nIf you are using the 1P GCNV UI link (https://console.cloud.google.com/netapp/volumes/), you can ignore this message.\nWorkaround: There is no workaround at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-05-28T01:07:33+00:00","modified":"2024-05-28T03:07:54+00:00","when":"2024-05-28T01:07:33+00:00","text":"Summary: Google Cloud NetApp Volumes\nDescription: We've received a report of an issue with Google Cloud NetApp Volumes as of Monday, 2024-05-27 17:44 US/Pacific.\nOur engineering team continues to investigate the issue.\nThis issue only impacts customers who have not migrated to the Cloud NetApp Volumes User Interface and are still using the NetApp Cloud Volumes Service UI.\nWe will provide more information by Monday, 2024-05-27 20:15 US/Pacific.\nDiagnosis: Customers impacted by this issue may encounter an error message on the UI page (https://console.cloud.google.com/netapp/cloud-volumes/volumes) that says: “There was an error while loading /netapp/cloud-volumes?project=g1p-sre-consumer-05\u0026supportedpurview=project. 
Please try again”.\nIf you are using the 1P GCNV UI link (https://console.cloud.google.com/netapp/volumes/), you can ignore this message\nWorkaround: There is no workaround at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-05-28T03:53:33+00:00","modified":"2024-05-28T03:53:37+00:00","when":"2024-05-28T03:53:33+00:00","text":"The issue with Google Cloud NetApp Volumes has been resolved for all affected users as of Monday, 2024-05-27 20:37 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"vtzMUyQ4z9CbA1x6z85s","service_name":"Google Cloud NetApp Volumes","affected_products":[{"title":"Google Cloud NetApp Volumes","id":"vtzMUyQ4z9CbA1x6z85s"}],"uri":"incidents/kfPPiGH2SorcnAeB1UyH","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"ncjBLad658bKo25mRPS1","number":"11987473090564497149","begin":"2024-05-24T16:33:44+00:00","created":"2024-05-24T17:27:00+00:00","end":"2024-05-24T21:20:06+00:00","modified":"2024-05-24T21:20:09+00:00","external_desc":"Failure in loading of one of the pages in Chronicle Security.","updates":[{"created":"2024-05-24T21:20:06+00:00","modified":"2024-05-24T21:20:09+00:00","when":"2024-05-24T21:20:06+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Friday, 2024-05-24 14:12 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-24T20:54:03+00:00","modified":"2024-05-24T21:20:09+00:00","when":"2024-05-24T20:54:03+00:00","text":"Summary: Failure in loading of one of the pages in Chronicle Security.\nDescription: We are experiencing an issue with 
Chronicle Security beginning on Friday, 2024-05-24 07:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-05-24 16:00 US/Pacific with current details.\nDiagnosis: Customers may see failures when loading the Curated Detections page. Customers in the impacted region may also experience delays in curated rules and elevated latency in retrieving detections.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-05-24T19:38:09+00:00","modified":"2024-05-24T20:54:03+00:00","when":"2024-05-24T19:38:09+00:00","text":"Summary: Failure in loading of one of the pages in Chronicle Security.\nDescription: We are experiencing an issue with Chronicle Security beginning on Friday, 2024-05-24 07:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-05-24 14:00 US/Pacific with current details.\nDiagnosis: Customers may see failures when loading the Curated Detections page. Customers in the impacted region may also experience delays in curated rules and elevated latency in retrieving detections.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-05-24T18:22:12+00:00","modified":"2024-05-24T19:38:09+00:00","when":"2024-05-24T18:22:12+00:00","text":"Summary: Failure in loading of one of the pages in Chronicle Security\nDescription: We are experiencing an issue with Chronicle Security beginning on Friday, 2024-05-24 07:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-05-24 12:45 US/Pacific with current details.\nDiagnosis: Customers may see failures when loading the Curated Detections page. 
Customers in the impacted region may also experience delays in curated rules and elevated latency in retrieving detections.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-05-24T17:46:08+00:00","modified":"2024-05-24T18:22:12+00:00","when":"2024-05-24T17:46:08+00:00","text":"Summary: Failure in loading of one of the pages in Chronicle Security\nDescription: We are experiencing an issue with Chronicle Security beginning on Friday, 2024-05-24 07:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-05-24 11:45 US/Pacific with current details.\nDiagnosis: Customers may see failures when loading the Curated Detections page.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-05-24T17:26:55+00:00","modified":"2024-05-24T17:46:13+00:00","when":"2024-05-24T17:26:55+00:00","text":"Summary: Failure in loading of one of the pages in Chronicle Security\nDescription: We are experiencing an issue with Chronicle Security beginning on Friday, 2024-05-24 07:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-05-24 11:30 US/Pacific with current details.\nDiagnosis: Customers may see failures when loading the Curated Detections page.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-05-24T21:20:06+00:00","modified":"2024-05-24T21:20:09+00:00","when":"2024-05-24T21:20:06+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Friday, 2024-05-24 14:12 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/ncjBLad658bKo25mRPS1","currently_affected_locations":[],"previously_affected_locations":[{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"id":"g1qS6Fj8LihRWYQmCjDJ","number":"3592887658658343689","begin":"2024-05-24T10:53:00+00:00","created":"2024-05-24T11:16:49+00:00","end":"2024-05-24T16:30:00+00:00","modified":"2024-05-24T21:05:36+00:00","external_desc":"EdgeContainer API cannot create and upgrade clusters","updates":[{"created":"2024-05-24T21:04:48+00:00","modified":"2024-05-24T21:05:36+00:00","when":"2024-05-24T21:04:48+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 24 May, 2024 03:53\n**Incident End:** 24 May, 2024 09:30\n**Duration:** 5 hours, 36 minutes\n**Affected Services and Features:** Google Distributed Cloud Edge\n**Regions/Zones:** Global\n**Description:**\nGoogle Distributed Cloud Edge users were unable to execute cluster creation and cluster upgrade globally for a duration of 5 hours, 36 minutes.\nFrom preliminary analysis, the root cause of the issue was activation of “safe mode” in a backend system that GDCE depends upon for initiation of certain management operations. The management operations blocked by the safe mode activation include certain components of the cluster creation and upgrade workflows.\n**Customer Impact:**\nDuring the incident timeframe, users in GDC connected regions were unable to:\n- Execute cluster creation\n- Upgrade clusters\nThe incident only prevented the operations listed above from being executed; it did not affect existing clusters or normal cluster operations.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-24T16:36:40+00:00","modified":"2024-05-24T21:04:48+00:00","when":"2024-05-24T16:36:40+00:00","text":"The issue with Google Distributed Cloud Edge has been resolved for all affected users as of Friday, 2024-05-24 09:36 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-24T15:47:50+00:00","modified":"2024-05-24T16:36:44+00:00","when":"2024-05-24T15:47:50+00:00","text":"Summary: EdgeContainer API cannot create and upgrade clusters\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Friday, 2024-05-24 03:38 US/Pacific.\nMitigation work is still underway by our engineering team.\nWe will provide an update by Friday, 2024-05-24 11:00 US/Pacific with current details.\nDiagnosis: Inability to exercise cluster creation and cluster upgrade in all regions of GDC connected\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo 
(southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-05-24T14:51:21+00:00","modified":"2024-05-24T15:47:50+00:00","when":"2024-05-24T14:51:21+00:00","text":"Summary: EdgeContainer API cannot create and upgrade clusters\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Friday, 2024-05-24 03:38 US/Pacific.\nMitigation work is still underway by our engineering team.\nWe will provide an update by Friday, 2024-05-24 09:00 US/Pacific with current details.\nDiagnosis: Inability to exercise cluster creation and cluster upgrade in all regions of GDC connected\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-05-24T14:24:41+00:00","modified":"2024-05-24T14:51:21+00:00","when":"2024-05-24T14:24:41+00:00","text":"Summary: EdgeContainer API cannot create and upgrade clusters\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Friday, 2024-05-24 03:38 US/Pacific.\nMitigation work is still underway by our engineering team.\nWe will provide an update by Friday, 2024-05-24 08:00 US/Pacific with current details.\nDiagnosis: Inability to exercise cluster creation and cluster upgrade in all regions of GDC connected\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi 
(asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-05-24T13:50:47+00:00","modified":"2024-05-24T14:24:41+00:00","when":"2024-05-24T13:50:47+00:00","text":"Summary: EdgeContainer API cannot create and upgrade clusters\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Friday, 2024-05-24 03:38 US/Pacific.\nMitigation work is still underway by our engineering team.\nWe will provide an update by Friday, 2024-05-24 07:30 US/Pacific with current details.\nDiagnosis: Inability to exercise cluster creation and cluster upgrade in all regions of GDC connected\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles 
(us-west2)","id":"us-west2"}]},{"created":"2024-05-24T13:21:09+00:00","modified":"2024-05-24T13:50:47+00:00","when":"2024-05-24T13:21:09+00:00","text":"Summary: EdgeContainer API cannot create and upgrade clusters\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Friday, 2024-05-24 03:38 US/Pacific.\nMitigation work is still underway by our engineering team.\nWe will provide an update by Friday, 2024-05-24 07:00 US/Pacific with current details.\nDiagnosis: Inability to exercise cluster creation and cluster upgrade in all regions of GDC connected\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-05-24T12:49:26+00:00","modified":"2024-05-24T13:21:09+00:00","when":"2024-05-24T12:49:26+00:00","text":"Summary: EdgeContainer API cannot create and upgrade clusters\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Friday, 2024-05-24 03:38 US/Pacific.\nMitigation work is still underway by our engineering team.\nWe will provide an update by Friday, 2024-05-24 06:30 US/Pacific with current details.\nDiagnosis: Inability to exercise cluster creation and cluster upgrade in all regions of GDC connected\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland 
(europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-05-24T12:21:03+00:00","modified":"2024-05-24T12:49:26+00:00","when":"2024-05-24T12:21:03+00:00","text":"Summary: EdgeContainer API cannot create and upgrade clusters\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Friday, 2024-05-24 03:38 US/Pacific.\nMitigation work is still underway by our engineering team.\nWe will provide an update by Friday, 2024-05-24 06:00 US/Pacific with current details.\nDiagnosis: Inability to exercise cluster creation and cluster upgrade in all regions of GDC connected\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-05-24T11:49:52+00:00","modified":"2024-05-24T12:21:03+00:00","when":"2024-05-24T11:49:52+00:00","text":"Summary: EdgeContainer API cannot create and upgrade clusters\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on 
Friday, 2024-05-24 03:38 US/Pacific.\nMitigation work is still underway by our engineering team.\nWe will provide an update by Friday, 2024-05-24 05:30 US/Pacific with current details.\nDiagnosis: Inability to exercise cluster creation and cluster upgrade in all regions of GDC connected\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-05-24T11:16:32+00:00","modified":"2024-05-24T11:49:52+00:00","when":"2024-05-24T11:16:32+00:00","text":"Summary: EdgeContainer API cannot create and upgrade clusters\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Friday, 2024-05-24 03:38 US/Pacific.\nMitigation work is still underway by our engineering team.\nWe will provide an update by Friday, 2024-05-24 05:00 US/Pacific with current details.\nDiagnosis: Inability to exercise cluster creation and cluster upgrade in all regions of GDC connected\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich 
(europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]}],"most_recent_update":{"created":"2024-05-24T21:04:48+00:00","modified":"2024-05-24T21:05:36+00:00","when":"2024-05-24T21:04:48+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 24 May, 2024 03:53\n**Incident End:** 24 May, 2024 09:30\n**Duration:** 5 hours, 36 minutes\n**Affected Services and Features:** Google Distributed Cloud Edge\n**Regions/Zones:** Global\n**Description:**\nGoogle Distributed Cloud Edge users were unable to execute cluster creation and cluster upgrade globally for a duration of 5 hours, 36 minutes.\nFrom preliminary analysis, the root cause of the issue was activation of “safe mode” in a backend system that GDCE depends upon for initiation of certain management operations. 
These management operations that are blocked by the safe mode activation include certain components of the workflows corresponding to cluster creation and upgrade.\n**Customer Impact:**\nDuring the incident timeframe, users in GDC connected regions were unable to:\n- Execute cluster creation\n- Upgrade clusters\nThe incident only prevented the operations listed above from being executed; it did not affect existing clusters or normal cluster operations.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"JKyM3LJTqgETjRCvSK6w","service_name":"Google Distributed Cloud Edge","affected_products":[{"title":"Google Distributed Cloud Edge","id":"JKyM3LJTqgETjRCvSK6w"}],"uri":"incidents/g1qS6Fj8LihRWYQmCjDJ","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"id":"ucDmnkU6HncGBiXCYFp4","number":"5070357295694252014","begin":"2024-05-24T06:42:30+00:00","created":"2024-05-24T06:42:35+00:00","end":"2024-05-24T06:44:34+00:00","modified":"2024-05-24T06:44:37+00:00","external_desc":"GCP Support Case Creation failure","updates":[{"created":"2024-05-24T06:44:34+00:00","modified":"2024-05-24T06:44:38+00:00","when":"2024-05-24T06:44:34+00:00","text":"The issue with Google Cloud Support has been resolved for all affected users as of Thursday, 2024-05-23 23:00 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-24T06:42:32+00:00","modified":"2024-05-24T06:44:37+00:00","when":"2024-05-24T06:42:32+00:00","text":"Summary: GCP Support Case Creation failure\nDescription: We experienced an issue with Google Cloud Support beginning on Thursday, 2024-05-23 22:20 US/Pacific. 
A small set of GCP customers were unable to create support cases between 22:20 and 23:00 US/Pacific.\nOur engineering team investigated and mitigated the issue on Thursday, 2024-05-23 23:00 US/Pacific.\nWe will provide an update by Friday, 2024-05-24 00:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers impacted by this issue would have observed failures while creating support cases during this period. Affected customers can now retry case creation.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-05-24T06:44:34+00:00","modified":"2024-05-24T06:44:38+00:00","when":"2024-05-24T06:44:34+00:00","text":"The issue with Google Cloud Support has been resolved for all affected users as of Thursday, 2024-05-23 23:00 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"bGThzF7oEGP5jcuDdMuk","service_name":"Google Cloud Support","affected_products":[{"title":"Google Cloud Support","id":"bGThzF7oEGP5jcuDdMuk"}],"uri":"incidents/ucDmnkU6HncGBiXCYFp4","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"qNhiGBTnMwW5L1iX6rbY","number":"13602242455244178377","begin":"2024-05-22T13:05:21+00:00","created":"2024-05-22T13:22:53+00:00","end":"2024-05-23T16:04:27+00:00","modified":"2024-05-23T16:04:35+00:00","external_desc":"[Vertex AI Search] structData may not be returned for some documents in multi regions: EU, US and also Globally","updates":[{"created":"2024-05-23T16:04:27+00:00","modified":"2024-05-23T16:04:36+00:00","when":"2024-05-23T16:04:27+00:00","text":"The issue with Vertex AI Search structData has been resolved for all affected users as of Thursday, 2024-05-23 08:56 US/Pacific. There may be some indexing delays, and customers are advised to send search requests to verify if the response contains structData.\nWe thank you for your patience while we worked on resolving the issue. 
If you have questions or are impacted, please open a case with the Support Team.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-23T15:50:18+00:00","modified":"2024-05-23T16:04:35+00:00","when":"2024-05-23T15:50:18+00:00","text":"Summary: [Vertex AI Search] structData may not be returned for some documents in multi regions: EU, US and also Globally\nDescription: Our engineering team is still validating the fix and has determined that additional time is needed to mitigate the issue.\nWe don't have a firm mitigation ETA at this point but will continue to communicate any change in status.\nWe will provide an update by Thursday, 2024-05-23 13:00 US/Pacific with current details.\nDiagnosis: structData may not be returned for some documents in multi regions: EU, US and also Globally\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-05-23T03:46:14+00:00","modified":"2024-05-23T15:50:18+00:00","when":"2024-05-23T03:46:14+00:00","text":"Summary: [Vertex AI Search] structData may not be returned for some documents in multi regions: EU, US and also Globally\nDescription: Our engineering team is still validating the fix and has determined that additional time is needed to mitigate the issue.\nWe don't have a firm mitigation ETA at this point but will continue to communicate any change in status.\nWe will provide an update by Thursday, 2024-05-23 09:00 US/Pacific with current details.\nDiagnosis: structData may not be returned for some documents in multi regions: EU, US and also Globally\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-05-22T23:47:46+00:00","modified":"2024-05-23T03:46:14+00:00","when":"2024-05-22T23:47:46+00:00","text":"Summary: [Vertex AI Search] structData may not be returned for some documents in multi regions: EU, US and also Globally\nDescription: Our engineering team is still validating the fix and has determined that additional time is needed to mitigate the issue.\nWe don't have a firm mitigation ETA at this point but will continue to communicate any change in status.\nWe will provide an update by Wednesday, 2024-05-22 23:00 US/Pacific with current details.\nDiagnosis: structData may not be returned for some documents in multi regions: EU, US and also Globally\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-05-22T18:55:38+00:00","modified":"2024-05-22T23:47:46+00:00","when":"2024-05-22T18:55:38+00:00","text":"Summary: [Vertex AI Search] structData may not be returned for some documents in multi regions: EU, US and also Globally\nDescription: Our engineering team is still validating the fix and has determined that additional time is needed to mitigate the issue.\nWe don't have a firm mitigation ETA at this point but will continue to communicate any change in status.\nWe will provide an update by Wednesday, 2024-05-22 17:00 US/Pacific with current details.\nDiagnosis: structData may not be returned for some documents in multi regions: EU, US and also Globally\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: 
eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-05-22T15:24:09+00:00","modified":"2024-05-22T18:55:38+00:00","when":"2024-05-22T15:24:09+00:00","text":"Summary: [Vertex AI Search] structData may not be returned for some documents in multi regions: EU, US and also Globally\nDescription: Our engineering team has identified and is validating a fix in production.\nWe don't have a firm mitigation ETA at this point but will continue to communicate any changes in status.\nWe will provide more information by Wednesday, 2024-05-22 13:00 US/Pacific.\nDiagnosis: structData may not be returned for some documents in multi regions: EU, US and also Globally\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-05-22T14:03:33+00:00","modified":"2024-05-22T15:24:09+00:00","when":"2024-05-22T14:03:33+00:00","text":"Summary: [Vertex AI Search] structData may not be returned for some documents in multi regions: EU, US and also Globally\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-05-22 09:00 US/Pacific.\nDiagnosis: structData may not be returned for some documents in multi regions: EU, US and also Globally\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-05-22T13:22:41+00:00","modified":"2024-05-22T14:03:33+00:00","when":"2024-05-22T13:22:41+00:00","text":"Summary: [Vertex AI Search] structData may not be returned for some documents in multi regions: EU, US and also Globally\nDescription: We are experiencing an issue with Vertex AI Search.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-05-22 08:00 US/Pacific with current details.\nDiagnosis: structData may not be returned for some documents in multi regions: EU, US and also Globally\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-05-23T16:04:27+00:00","modified":"2024-05-23T16:04:36+00:00","when":"2024-05-23T16:04:27+00:00","text":"The issue with Vertex AI Search structData has been resolved for all affected users as of Thursday, 2024-05-23 08:56 US/Pacific. There may be some indexing delays, and customers are advised to send search requests to verify if the response contains structData.\nWe thank you for your patience while we worked on resolving the issue. 
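The resolution above advises affected customers to verify recovery by sending search requests and checking that structData is present in the response. A minimal sketch of that spot check, assuming the google-cloud-discoveryengine Python client; PROJECT_ID, DATA_STORE_ID, and the query string are placeholders, not values from the incident record:

```python
# Spot check that search responses contain structData again, as advised above.
# Assumes: pip install google-cloud-discoveryengine; IDs are placeholders.
from google.cloud import discoveryengine_v1 as discoveryengine

client = discoveryengine.SearchServiceClient()

# Full resource name of the serving config for the data store being verified.
serving_config = (
    "projects/PROJECT_ID/locations/global/collections/default_collection/"
    "dataStores/DATA_STORE_ID/servingConfigs/default_config"
)

response = client.search(
    discoveryengine.SearchRequest(
        serving_config=serving_config,
        query="test query",  # any query expected to match structured documents
        page_size=10,
    )
)

# Each search result carries the matched document; struct_data holds its
# structured payload and is empty for documents still missing structData.
missing = [r.document.id for r in response if not dict(r.document.struct_data)]
print("documents still missing structData:", missing or "none")
```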
If you have questions or are impacted, please open a case with the Support Team.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"vNncXxtSVvqyhvSkQ6PJ","service_name":"Vertex AI Search","affected_products":[{"title":"Vertex AI Search","id":"vNncXxtSVvqyhvSkQ6PJ"}],"uri":"incidents/qNhiGBTnMwW5L1iX6rbY","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Global","id":"global"},{"title":"Multi-region: us","id":"us"}]},{"id":"cHcPpKJUMW1LReqkJmvT","number":"14378761328175642276","begin":"2024-05-22T05:56:00+00:00","created":"2024-05-22T06:12:20+00:00","end":"2024-05-22T07:41:55+00:00","modified":"2024-05-22T07:41:58+00:00","external_desc":"Redirect errors for customers in multiple regions while using Chronicle Security","updates":[{"created":"2024-05-22T07:41:55+00:00","modified":"2024-05-22T07:41:59+00:00","when":"2024-05-22T07:41:55+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Wednesday, 2024-05-22 00:08 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-22T07:12:02+00:00","modified":"2024-05-22T07:41:58+00:00","when":"2024-05-22T07:12:02+00:00","text":"Summary: Redirect errors for customers in multiple regions while using Chronicle Security\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-05-22 01:30 US/Pacific.\nDiagnosis: Impacted users may observe issues while logging into their Google Security Operations instances and may experience redirect errors.\nWorkaround: Impacted users can use the following URL to access their instance. 
Please replace the \\","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-05-22T06:12:13+00:00","modified":"2024-05-22T07:12:02+00:00","when":"2024-05-22T06:12:13+00:00","text":"Summary: Redirect errors for customers in multiple regions while using Chronicle Security\nDescription: We are experiencing an intermittent issue with Chronicle Security beginning at Tuesday, 2024-05-21 20:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-05-22 00:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted users may observe issues while logging into their Google Security Operations instances and may experience redirect errors.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]}],"most_recent_update":{"created":"2024-05-22T07:41:55+00:00","modified":"2024-05-22T07:41:59+00:00","when":"2024-05-22T07:41:55+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Wednesday, 2024-05-22 00:08 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/cHcPpKJUMW1LReqkJmvT","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"id":"fbQKxdKPwfW2KEitwv2K","number":"13456313037257458755","begin":"2024-05-18T16:52:22+00:00","created":"2024-05-18T17:06:01+00:00","end":"2024-05-18T18:08:51+00:00","modified":"2024-05-18T18:08:55+00:00","external_desc":"Increased 
latency for create task calls in us-east1 for Google Cloud Tasks","updates":[{"created":"2024-05-18T18:08:51+00:00","modified":"2024-05-18T18:09:00+00:00","when":"2024-05-18T18:08:51+00:00","text":"The issue with Google Cloud Tasks has been resolved for all affected projects as of Saturday, 2024-05-18 10:36 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-18T17:50:41+00:00","modified":"2024-05-18T18:08:55+00:00","when":"2024-05-18T17:50:41+00:00","text":"Summary: Increased latency for create task calls in us-east1 for Google Cloud Tasks\nDescription: We are experiencing an issue with Google Cloud Tasks beginning on Saturday, 2024-05-18 07:44 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2024-05-18 11:30 US/Pacific with current details.\nDiagnosis: Customers using Google Cloud Tasks can observe increased latency while using it in us-east1 region\nWorkaround: Move Google Cloud Tasks queues to a different region","status":"SERVICE_INFORMATION","affected_locations":[{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"created":"2024-05-18T17:24:50+00:00","modified":"2024-05-18T17:50:41+00:00","when":"2024-05-18T17:24:50+00:00","text":"Summary: Increased latency for create task calls in us-east1 for Google Cloud Tasks\nDescription: We are experiencing an issue with Google Cloud Tasks beginning on Saturday, 2024-05-18 07:44 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2024-05-18 11:00 US/Pacific with current details.\nDiagnosis: Customers using Google Cloud Tasks can observe increased latency while using it in us-east1 region\nWorkaround: Move Google Cloud Tasks queues to a different region","status":"SERVICE_INFORMATION","affected_locations":[{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"created":"2024-05-18T17:05:44+00:00","modified":"2024-05-18T17:24:50+00:00","when":"2024-05-18T17:05:44+00:00","text":"Summary: Increased latency for create task calls in us-east1 for Google Cloud Tasks\nDescription: We are experiencing an issue with Google Cloud Tasks beginning on Saturday, 2024-05-18 07:44 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2024-05-18 10:30 US/Pacific with current details.\nDiagnosis: Customers using Google Cloud Tasks can observe increased latency while using it in us-east1 region\nWorkaround: Move Google Cloud Tasks queues to a different region","status":"SERVICE_INFORMATION","affected_locations":[{"title":"South Carolina (us-east1)","id":"us-east1"}]}],"most_recent_update":{"created":"2024-05-18T18:08:51+00:00","modified":"2024-05-18T18:09:00+00:00","when":"2024-05-18T18:08:51+00:00","text":"The issue with Google Cloud Tasks has been resolved for all affected projects as of Saturday, 2024-05-18 10:36 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"tMWyzhyKK4rAzAf7x62h","service_name":"Google Cloud Tasks","affected_products":[{"title":"Google Cloud Tasks","id":"tMWyzhyKK4rAzAf7x62h"}],"uri":"incidents/fbQKxdKPwfW2KEitwv2K","currently_affected_locations":[],"previously_affected_locations":[{"title":"South Carolina 
(us-east1)","id":"us-east1"}]},{"id":"4h6FaocYdygFkaAU6xZS","number":"2346213432762351209","begin":"2024-05-17T08:30:01+00:00","created":"2024-05-17T08:50:26+00:00","end":"2024-05-17T09:23:33+00:00","modified":"2024-05-17T09:23:42+00:00","external_desc":"Increased latency for create task calls in us-east1 for Google Cloud Tasks","updates":[{"created":"2024-05-17T09:23:33+00:00","modified":"2024-05-17T09:23:47+00:00","when":"2024-05-17T09:23:33+00:00","text":"The issue with Google Cloud Tasks has been resolved for all affected projects as of Friday, 2024-05-17 01:45 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-17T08:50:09+00:00","modified":"2024-05-17T09:23:42+00:00","when":"2024-05-17T08:50:09+00:00","text":"Summary: Increased latency for create task calls in us-east1 for Google Cloud Tasks\nDescription: We are experiencing an issue with Google Cloud Tasks beginning on Thursday, 2024-05-16 23:38 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-05-17 02:30 US/Pacific with current details.\nDiagnosis: Customers using Google Cloud Tasks can observe increased latency while using it in us-east1 region.\nWorkaround: Move Google Cloud Tasks queues to a different region.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"South Carolina (us-east1)","id":"us-east1"}]}],"most_recent_update":{"created":"2024-05-17T09:23:33+00:00","modified":"2024-05-17T09:23:47+00:00","when":"2024-05-17T09:23:33+00:00","text":"The issue with Google Cloud Tasks has been resolved for all affected projects as of Friday, 2024-05-17 01:45 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"tMWyzhyKK4rAzAf7x62h","service_name":"Google Cloud Tasks","affected_products":[{"title":"Google Cloud Tasks","id":"tMWyzhyKK4rAzAf7x62h"}],"uri":"incidents/4h6FaocYdygFkaAU6xZS","currently_affected_locations":[],"previously_affected_locations":[{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"id":"xVSEV3kVaJBmS7SZbnre","number":"5427664776422626960","begin":"2024-05-16T22:22:00+00:00","created":"2024-05-16T23:24:05+00:00","end":"2024-05-17T01:10:00+00:00","modified":"2024-05-23T01:55:56+00:00","external_desc":"Multiple cloud products are experiencing network connectivity issues. New instances of several cloud products will come up without a network in many regions.","updates":[{"created":"2024-05-22T16:44:37+00:00","modified":"2024-05-23T01:55:56+00:00","when":"2024-05-22T16:44:37+00:00","text":"# Incident Report\n## Summary\nOn 16 May 2024, the Google Infrastructure team was executing a routine maintenance action to shut down an unused Virtual Private Cloud (VPC) controller in a single Google Cloud zone. Unfortunately, a bug in the automation caused the component to be shut down in all zones where it was still in use. This resulted in networking connectivity issues and/or service disruptions for multiple Google Cloud products. The majority of the impact lasted 2 hours and 48 minutes.\nSome products took longer to fully recover based on the failures they experienced as outlined below in the impact section. To our Google Cloud customers, whose services were impacted, we sincerely apologize. 
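For the two Google Cloud Tasks incidents above, the recorded workaround is to move queues to a different region. Queues cannot be relocated in place, so in practice this means creating a parallel queue in an unaffected region and pointing task producers at it. A minimal sketch with the google-cloud-tasks Python client; PROJECT_ID, the region, and the queue name are placeholders, not values from the incident record:

```python
# Create a replacement Cloud Tasks queue in an unaffected region, per the
# recorded workaround. PROJECT_ID, region, and queue name are placeholders.
from google.cloud import tasks_v2

client = tasks_v2.CloudTasksClient()

# us-central1 stands in for "any region other than us-east1".
parent = client.common_location_path("PROJECT_ID", "us-central1")
queue_name = client.queue_path("PROJECT_ID", "us-central1", "my-queue-uscentral1")

queue = client.create_queue(
    parent=parent,
    queue=tasks_v2.Queue(name=queue_name),
)
print("created", queue.name)  # producers should now enqueue tasks here
```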
This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThe root cause was a bug in maintenance automation intended to shut down an unused VPC Controller in a single zone. A parameter specifying the zone for maintenance operations was modified during a refactor of the automation software earlier in the year. This modification resulted in the target zone parameter being ignored and the shut down operation taking effect on VPC controllers in all cloud zones. The test environment caught this failure mode, but the error was misinterpreted as an expected failure in the test environment. As a result, the change made it to production.\nShutdowns are a routine operational practice that we perform to maintain our systems without customer impact. The automation has a safeguard in place to limit the scope of maintenance operations. This safeguard was inadvertently disabled during a migration to a new version of the automation framework, resulting in the shutdown operation taking effect on VPC controllers in all cloud zones.\nA separate instance of the VPC Controller is launched into every Google Cloud zone. The jobs running in a given zone are responsible for programming the hosts in that zone (such as GCE VM hosts) so that the Endpoints running on those machines are able to reach other Endpoints in their VPC networks, reach on-prem destinations via VPN / Interconnect, and reach the Internet.\nWithin each zone, the VPC Control Plane is sharded into different components, each of which is responsible for programming a subset of traffic paths. The intent behind this architecture is to isolate outages affecting control plane jobs, in order to limit the blast radius. Outages affecting control plane jobs within a given cluster are expected to only affect the local cluster, and, ideally, only a subset of traffic within the local cluster.\nThe VPC Network architecture follows a fail static design. If control plane jobs are not available, the data plane continues using the last programmed state until the control plane jobs provide an update. This design reduced the impact of this outage to network activity involving interaction with the control plane, detailed below.\nThe VPC controller shutdown affected a subset of Cloud VPC control plane jobs in all zones, impacting control plane operations such as creating new VMs, auto scaler operations, applying configuration changes to customer projects and accurately reflecting changes in VM health state. The data plane impact included packet loss during VM live migration and network congestion in some zones.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage through internal monitoring alerts at 15:41 US/Pacific and immediately started an investigation. Maintenance operations were immediately paused to prevent a recurrence. In parallel, affected VPC Controllers were brought back to a serving state. Once the operations were completed, VPC controllers in most of the zones were operational by 17:30 US/Pacific.\nBy 18:09 US/Pacific, all VPC controllers were restored, and the backlog of applying changes to the dataplane cleared up by 18:40 US/Pacific, mitigating impact for most of the products. Some products took longer to mitigate as outlined below in the impact section.\nGoogle Cloud is committed to preventing a repeat of this issue in the future and is completing the following actions:\n1. 
Implement additional safety measures to validate maintenance operations have appropriately scoped zone information.\n2. Extend defense in depth by improving safety in underlying tools to reject maintenance operations without zones specified.\n3. Increase on-demand access control to limit the ability of maintenance operations to work across multiple zones in a single invocation.\n4. Ensure we have the required safety checks, appropriate testing and validation processes for maintenance operations including positive and negative test cases for the safety measures 1, 2 and 3 above to prevent regression.\nWe apologize for the impact of this issue and are taking steps to address the scope and duration of this incident as well as the root cause itself. We thank you for your business.\n## Detailed Description of Impact\n**AlloyDB for PostgreSQL:**\n- From 15:30 to 19:00 US/Pacific, new AlloyDB VM creation may have failed in the europe-west1, europe-west3, europe-north1, europe-north2, asia-east1 and asia-northeast1 regions. All of these regions encountered \u003c0.5% creation failure while the asia-northeast1 region encountered around 1% instance creation failures.\n- There were about 10% auto-failovers triggered in us-central1 and less than 1% auto-failovers triggered in other regions.\n**Apigee:**\n- From 15:22 to 18:10 US/Pacific, some Apigee customers may have experienced network latencies, timeouts, or 500 errors in their runtimes.\n**Apigee Edge Public Cloud:**\n- From 15:22 to 18:10 US/Pacific, some Apigee Edge customers using GCP infrastructure may have experienced network latencies, timeouts, or 500 errors in their runtimes.\n- Additionally, some customers encountered errors during proxy deployments, resulting in partial deployments.\n- The impact was mostly resolved automatically as the network recovered. However, some cases of partial proxy deployments required manual intervention and were recovered by 17 May 2024 01:00 US/Pacific.\n**Batch:**\n- From 15:50 to 18:00 US/Pacific, batch jobs experienced elevated “NO_VM_REPORT” failure rates globally.\n**Cloud Build:**\n- From 15:39 to 17:28 US/Pacific, build jobs were experiencing elevated scheduling latency or timeouts globally.\n**Cloud Composer:**\n- From 15:40 to 17:30 US/Pacific, environment creation and update operations were failing across all regions.\n - 80% of environment creation operations failed.\n - Several environments that started an image upgrade operation during the disruption were irrecoverably broken.\n- ~2.5% of Airflow tasks in running environments have failed globally due to a direct impact on connectivity of Composer components or outage impact on services that tasks have operated on.\n**Cloud Data Fusion:**\n- From 15:50 to 16:50 US/Pacific, new instance creation experienced failures globally.\n- Error rates for new instance creation remained elevated until 17:46 PDT.\n- Instance creation requests submitted during this period may have timed out (failed) or succeeded after a longer-than-usual delay.\n**Cloud Filestore:**\n- From 15:40 to 18:00 US/Pacific, Filestore instance operations such as Creation and Deletion were degraded or failing in most regions and zones.\n- Some existing instances were unable to report metrics or start operations during this period, and due to the degraded VPC performance, customer VMs may not have been able to access existing instances.\n**Cloud Firewall:**\n- From 15:20 to 18:40 US/Pacific, all new firewall rules and firewall updates were not propagated. 
This affected firewall modifications for all VM instances in all GCE Zones.\n**Cloud IDS:**\n- From 15:45 to 18:40 US/Pacific, any calls to alter or create IDS Endpoints would have failed.\n- Packet Mirroring to existing Cloud IDS Endpoints continued working as intended.\n**Cloud Interconnect:**\n- From 15:45 to 18:09 US/Pacific, customers were unable to make any changes to their Cloud Interconnect resources because the control/management plane was down.\n- Most existing Interconnect attachments were unaffected, except that changes to learned BGP routes did not propagate during the outage.\n- A small number of attachments (1%) experienced dataplane packet loss triggered by maintenance operations (e.g. VM migration or attachment dataplane machine maintenance).\n**Cloud Load Balancing:**\n- From 16:00 to 18:20 US/Pacific, customers were unable to make changes to load balancer configurations that involved network programming as a dependency.\n- Some customers experienced data plane impact as well, manifested as 500 errors; this was either because certain workloads could not be autoscaled due to absence of network programming or programming for their network was not complete before the service disruption started.\n**Cloud NAT:**\n- From 15:45 to 18:20 US/Pacific, customers were unable to make any changes to their Cloud NAT resources because the control/management plane was down.\n- Cloud NAT Dynamic Port Allocation (DPA) experienced allocation failures.\n- Most existing NAT configurations were unaffected, although a small number (\u003c1%) saw dataplane loss. In addition, a small number (\u003c1%) of NAT configurations took up to two additional hours for control plane changes to take effect.\n**Cloud NGFW Enterprise:**\n- From 15:45 to 18:40 US/Pacific, any calls to alter or create firewall-endpoints, firewall-endpoint-associations, security-profiles, or security-profile-groups would have failed.\n- Packet Inspection through “Proceed to L7 Inspection” Firewall Rules continued to work as intended.\n**Cloud Router:**\n- From 15:45 to 18:20 US/Pacific, customers were unable to make any changes to their Cloud Router resources because the control/management plane was down.\n- Changes to Border Gateway Protocol (BGP) routes or any new learned routes advertised by customers, or any changes to route health triggered by unhealthy Interconnects / VPN tunnels, would not have been applied to the dataplane. Most existing BGP sessions stayed up during this event.\n**Cloud Run:**\n- From 15:35 to 18:06 US/Pacific, customers using Direct VPC Egress for Cloud Run were unable to deploy new services. Customers with existing services using Direct VPC Egress were unable to scale up, including from 0.\n- Customers using existing VPC Access Connectors may have experienced network slowdown due to VPC Access Connectors not being able to scale up. Customers were also unable to deploy new VPC Access Connectors.\n**Cloud Security Command Center:**\n- From 16:40 to 17:30 US/Pacific, 25% of scheduled Attack Path Simulations failed globally, affecting 7% of onboarded organizations.\n- The attack exposure scores of findings were not updated and new attack paths were not created during this time for affected customers.\n- Customers could still view older attack exposure scores and attack paths during this time.\n**Cloud Shell:**\n- From 15:50 to 16:50 US/Pacific, Cloud Shell sessions failed to start globally. 
~15% of Cloud Shell sessions failed to start up.\n**Cloud VPN:**\n- From 15:45 to 18:40 US/Pacific, customers were unable to make any changes to their Cloud VPN resources because the control/management plane was down.\n- Most existing tunnels were not affected, except that changes to learned BGP routes did not propagate during the outage.\n- Routine VPN maintenance operations which occurred during this time window broke the dataplane for a small number (1%) of customer tunnels, which should have recovered along with the control plane.\n**Cloud Workstations:**\n- From 15:50 to 16:50 US/Pacific, Workstation startup failed primarily across US regions. Around 25% of Workstations failed to start up.\n**Colab Enterprise:**\n- From 15:50 to 16:50 US/Pacific, Colab Enterprise NotebookRuntime operations: AssignNotebookRuntime, StartNotebookRuntime, UpgradeNotebookRuntime failed globally.\n**Firebase Test Lab:**\n- Between 15:50 and 17:50 PDT, new Cloud VM creations failed globally.\n- Around 17% of the total VM creation requests globally at the time encountered issues.\n- 100% of customer executions failed on x86 emulators, impacting external customer CI tests, causing a complete pause of executions on those devices.\n**Google App Engine Flexible:**\n- From 15:35 to 18:06 US/Pacific, customers were unable to create App Engine Flex deployments and App Engine Flex deployments were unable to scale up.\n**Google Cloud Dataflow:**\n- From 15:44 to 18:52 US/Pacific, Batch and streaming jobs became unhealthy in all regions:\n - New Dataflow jobs were not initialized successfully.\n - Already running Dataflow batch jobs experienced prolonged execution times.\n - Already running streaming jobs experienced elevated watermarks.\n**Google Cloud Dataproc:**\n- From 15:34 to 18:16 US/Pacific, cluster creations failed globally.\n- Some cluster deletion workflows may have failed, causing the corresponding VMs to not be entirely deleted. Our engineers have identified such VMs and performed a cleanup after the service disruption was mitigated.\n**Google Cloud Deploy:**\n- From 15:39 to 17:28 US/Pacific, Cloud Deploy customers were experiencing significant latency or timeouts for renders and deploys globally.\n**Google Cloud Functions:**\n- From 15:35 to 18:06 US/Pacific, customers using Cloud Functions were unable to deploy globally.\n- Customers using existing VPC Access Connectors may have experienced network slowdown due to VPC Access Connectors not being able to scale up. Customers were also unable to deploy new VPC Access Connectors.\n**Google Cloud SQL:**\n- From 15:47 to 16:30 US/Pacific, all create, clone, restore operations were failing along with some patch \u0026 update operations. Some instances were impacted due to operational failures.\n- From 16:30 to 19:00 US/Pacific, recovery was started with 90% of the impacted instances restored or recovered by 19:00. Failure rates were still elevated at about 10%.\n- All remaining instances were restored by 22:30 US/Pacific.\n**Google Compute Engine:**\n- From 15:45 to 16:45 US/Pacific, most of the networking requests experienced elevated failure rates globally.\n- From 16:00 to 17:15 US/Pacific, instance deletion and instance update operations experienced elevated failure rates globally.\n- From 15:45 to 18:15 US/Pacific, instance group creation and deletion operations experienced elevated failure rates globally.\n**Google Kubernetes Engine:**\n- From 15:45 to 17:30 US/Pacific, GKE operations that created or updated a VM failed. 
These operations are cluster creation and deletion, node pool creation and deletion, node upgrades, scaling up existing node pools, and network configuration changes.\n**Infrastructure Manager:**\n- From 15:40 to 17:15 US/Pacific, deployment create/update/delete/unlock and preview create/delete operations degraded globally with increased latency and failures.\n- Customers could still view and export deployments, previews, revisions and resources.\n**Looker:**\n- From 15:50 to 17:30 US/Pacific, \u003c0.2% of Looker instances went down due to Cloud SQL DB connection failure.\n**Memorystore for Memcached:**\n- From 15:45 to 17:15 US/Pacific, Create, Update and Delete Memcache instance operations were failing across all regions.\n- Around 0.8% of existing nodes restarted with a downtime of approximately 50 minutes, possibly because these VMs underwent live migration.\n**Memorystore for Redis:**\n- From 15:40 to 18:40 US/Pacific, newly started Redis instance VM nodes were unreachable for networking. These instances could have been newly created by the customer, created indirectly through a mutating change such as resource scaling or addition of a new replica, or created automatically due to routine system operations. Impacts would have manifested as Basic (unreplicated) instances being unusable, and on Standard instances it could have appeared as reduced reader capacity or outright instance unavailability. Some Redis instances experienced long-tail issues and eventually recovered at 20:05 US/Pacific.\n- In addition, mutating operations such as CreateInstance failed during this window.\n**Vertex AI Workbench:**\n- From 15:40 to 17:30 US/Pacific, customers could not provision new instances. Existing instances were not affected.\n**Virtual Private Cloud:**\n- From 15:20 to 18:40 US/Pacific, all new endpoints (e.g., VMs, VPN gateways) and services (load balancers) received no network programming and observed packet loss to a majority of destinations.\n- 0.07% of VMs that were migrated during the outage also experienced connectivity loss for the duration of the service disruption.\n- Long-running VMs were also affected due to data plane overload in a few locations. Peak loss was 3% in us-east1-c and average loss was 0.05% across all Cloud zones.\n- Access to Google Services was degraded in a few GCE zones; a fraction (up to 25% in us-east4-c) of TCP connections were blackholed. Applications that retry with a different TCP source port would have experienced limited outages even in these Cloud zones.\n--------------------","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-17T06:02:35+00:00","modified":"2024-05-22T16:44:37+00:00","when":"2024-05-17T06:02:35+00:00","text":"# Mini Incident Report\nWe extend our sincerest apologies for the service interruption incurred as a result of this service outage. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
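The Virtual Private Cloud impact notes above observe that applications retrying with a different TCP source port saw only limited impact from blackholed connections. Opening a fresh socket per attempt is what yields a new ephemeral source port; a minimal client-side sketch of that pattern, where the host, port, and backoff values are illustrative rather than taken from the incident record:

```python
# Retry a TCP connect with a brand-new socket on every attempt. Because each
# new socket is assigned a fresh ephemeral source port by the kernel, a
# blackholed 5-tuple is not reused on the next attempt. Values illustrative.
import socket
import time

def connect_with_retries(host: str, port: int, attempts: int = 3,
                         timeout: float = 5.0) -> socket.socket:
    last_err = None
    for attempt in range(attempts):
        try:
            # create_connection builds a new socket, yielding a new source port.
            return socket.create_connection((host, port), timeout=timeout)
        except OSError as err:  # connection refused, unreachable, or timeout
            last_err = err
            time.sleep(2 ** attempt)  # simple exponential backoff
    raise ConnectionError(f"all {attempts} connect attempts failed") from last_err

if __name__ == "__main__":
    conn = connect_with_retries("example.com", 443)
    conn.close()
```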
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 16 May, 2024 15:22\n**Incident End:** 16 May, 2024 18:10\n**Duration:** 2 hours, 48 minutes\n**Affected Services and Features:**\n* AlloyDB for PostgreSQL\n* Apigee\n* Backup and DR\n* Batch\n* Cloud Build\n* Cloud Data Fusion\n* Cloud Developer Tools\n* Cloud Filestore\n* Cloud Load Balancing\n* Cloud Machine Learning\n* Cloud Memorystore\n* Cloud NAT\n* Cloud Run\n* Cloud Workstations\n* Colab Enterprise\n* Contact Center AI Platform\n* Dataproc Metastore\n* Firebase Test Lab\n* Google App Engine\n* Google Cloud Composer\n* Google Cloud Dataflow\n* Google Cloud Dataproc\n* Google Cloud Deploy\n* Google Cloud Networking\n* Google Cloud SQL\n* Google Compute Engine (GCE)\n* Google Kubernetes Engine (GKE)\n* Hybrid Connectivity\n* Memorystore for Memcached\n* Memorystore for Redis\n* Vertex AI Workbench User Managed Notebooks\n* Virtual Private Cloud (VPC)\n**Regions/Zones:** Global\n**Description:**\nMultiple Google Cloud products experienced network connectivity issues and service outages of varying impact for a duration of up to 2 hours and 48 minutes. Preliminary findings are that a bug in maintenance automation intended to shut down an unused network control component in a single location instead caused the component to be shut down in many locations where it was still in use. Google engineers restarted the affected component, restoring normal operation.\nGoogle engineers have identified the automation that was responsible for this change and have terminated it until appropriate safeguards are put in place. There is no risk of a recurrence of this outage at the moment.\nGoogle will complete an Incident Report in the following days that will provide a full root cause.\n**Customer Impact:**\nDuring the impact timeframe, Google Cloud Networking exhibited the following degradations:\n* New VM instances were provisioned without network connectivity and were hence unable to establish network connections.\n* Migrated/Restarted VMs lost network connectivity.\n* Virtual networking configurations could not be updated (e.g. 
firewalls, network load balancers).\n* Partial packet loss for certain VPC network flows was observed in us-central1 and us-east1.\n* Cloud NAT Dynamic Port Allocation (DPA) experienced allocation failures.\n* Creation of new GKE nodes and nodepools experienced failures.\nAdditionally, other Google products that depended on GCE VM creation or network configuration updates were not able to successfully complete operations during this time.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-17T01:40:17+00:00","modified":"2024-05-17T06:02:35+00:00","when":"2024-05-17T01:40:17+00:00","text":"The issue with Network programming of Apigee, Backup and DR, Cloud Build, Cloud Data Fusion, Cloud Filestore, Cloud Load Balancing, Cloud NAT, Cloud Run, Cloud Workstations, Colab Enterprise, Contact Center AI Platform, Dataproc Metastore, Firebase Test Lab, Google App Engine, Google Cloud Composer, Google Cloud Dataflow, Google Cloud Dataproc, Google Cloud Deploy, Google Cloud Networking, Google Compute Engine, Google Kubernetes Engine, Hybrid Connectivity, Memorystore for Memcached, Memorystore for Redis, Vertex AI Workbench Instances, Virtual Private Cloud (VPC) has been resolved for all affected projects as of Thursday, 2024-05-16 18:10 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-17T01:37:22+00:00","modified":"2024-05-17T01:40:24+00:00","when":"2024-05-17T01:37:22+00:00","text":"Summary: Multiple cloud products are experiencing network connectivity issues. New instances of several cloud products will come up without a network in many regions.\nDescription: We are seeing recovery for most of the affected products across all regions.\nCloud Functions, Cloud Run, and App Engine Flex are recovered as of 18:06 US/Pacific\nGoogle engineers are actively working to fully restore control plane functionality for all affected products and regions.\nWe do not have an ETA for complete mitigation at this point\nWe will provide an update by Thursday, 2024-05-16 18:55 US/Pacific with current details.\nDiagnosis:\n- Customers impacted by this issue may see slow programming in the Cloud Networking control plane. 
New VMs or newly migrated VMs may have delayed network programming.\n- New connections via Google Cloud Load Balancer may also fail to establish.\n- For GKE cluster creation and deletion, node pool creation and deletion, scale ups, upgrades, as well as changes to networking configuration are impacted.\n- For Cloud Run DirectVPC customers, Cloud Run scaling, including from 0 will not work.\n- Serverless VPC Connectors cannot be created or scale.\n- App Engine Flex deployments cannot be created or scale.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-05-17T00:59:03+00:00","modified":"2024-05-17T01:37:27+00:00","when":"2024-05-17T00:59:03+00:00","text":"Summary: Multiple cloud products are experiencing network connectivity issues. 
New instances of several cloud products will come up without a network in many regions.\nDescription: We are experiencing an issue with Google Cloud products that use our network infrastructure beginning on Thursday, 2024-05-16 14:44 US/Pacific.\nWe are seeing considerable recovery on most of the affected products across all regions.\nGoogle engineers are actively working to fully restore control plane functionality for all affected products and regions.\nWe do not have an ETA for complete mitigation at this point.\nWe will provide an update by Thursday, 2024-05-16 18:20 US/Pacific with current details.\nDiagnosis:\n- Customers impacted by this issue may see slow programming in the Cloud Networking control plane. New VMs or newly migrated VMs may have delayed network programming.\n- New connections via Google Cloud Load Balancer may also fail to establish.\n- GKE cluster creation and deletion, node pool creation and deletion, scale-ups, upgrades, and changes to networking configuration are impacted.\n- For Cloud Run DirectVPC customers, Cloud Run scaling, including from 0, will not work.\n- Serverless VPC Connectors cannot be created and cannot scale.\n- App Engine Flex deployments cannot be created and cannot scale.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City 
(us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-05-17T00:44:43+00:00","modified":"2024-05-17T00:59:08+00:00","when":"2024-05-17T00:44:43+00:00","text":"Summary: Multiple cloud products are experiencing network connectivity issues. New instances of several cloud products will come up without a network in many regions.\nDescription: We are experiencing an issue with Google Cloud products that use our network infrastructure beginning on Thursday, 2024-05-16 14:44 US/Pacific.\nWe are seeing considerable recovery on most of the affected products across all regions.\nGoogle engineers are actively working to fully restore control plane functionality for all affected products and regions.\nWe do not have an ETA for complete mitigation at this point.\nWe will provide an update by Thursday, 2024-05-16 18:15 US/Pacific with current details.\nDiagnosis:\n- Customers impacted by this issue may see slow programming in the Cloud Networking control plane. New VMs or newly migrated VMs may have delayed network programming.\n- New connections via Google Cloud Load Balancer may also fail to establish.\n- GKE cluster creation and deletion, node pool creation and deletion, scale-ups, upgrades, and changes to networking configuration are impacted.\n- For Cloud Run DirectVPC customers, Cloud Run scaling, including from 0, will not work.\n- Serverless VPC Connectors cannot be created and cannot scale.\n- App Engine Flex deployments cannot be created and cannot scale.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina 
(us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-05-17T00:12:29+00:00","modified":"2024-05-17T00:44:47+00:00","when":"2024-05-17T00:12:29+00:00","text":"Summary: Multiple cloud products are experiencing network connectivity issues. New instances of several cloud products will come up without a network in many regions.\nDescription: Mitigation work is currently underway by our engineering team. Mitigation actions in us-central1 are completed.\nGoogle engineers are actively working to restore control plane functionality to remaining affected regions.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-05-16 17:45 US/Pacific.\nDiagnosis:\n- Customers impacted by this issue may see slow programming in the Cloud Networking control plane. New VMs or newly migrated VMs may have delayed network programming.\n- New connections via Google Cloud Load Balancer may also fail to establish.\n- GKE cluster and node pool creation, as well as changes to networking configuration, are impacted.\n- For Cloud Run DirectVPC customers, Cloud Run scaling, including from 0, will not work.\n- Serverless VPC Connectors cannot be created and cannot scale.\n- App Engine Flex deployments cannot be created and cannot scale.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago 
(southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-05-16T23:24:02+00:00","modified":"2024-05-17T00:12:33+00:00","when":"2024-05-16T23:24:02+00:00","text":"Summary: Programming failures for New Virtual Private Cloud endpoints globally affecting New GCE VM network programming and Cloud Run using Direct VPC.\nDescription: We are experiencing an issue with Virtual Private Cloud (VPC) beginning on Thursday, 2024-05-16 14:44 US/Pacific.\nOur engineering team continues to investigate the issue\nWe will provide an update by Thursday, 2024-05-16 16:57 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see slow programming in the Cloud Networking control plane. New VMs or newly migrated VMs may have delayed network programming.\nFor Cloud Run DirectVPC customers, Cloud Run scaling, including from 0 may not work.\nServerless VPC Connectors cannot be created or scale.\nApp Engine Flex deployments cannot be created or scale.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon 
(us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-05-22T16:44:37+00:00","modified":"2024-05-23T01:55:56+00:00","when":"2024-05-22T16:44:37+00:00","text":"# Incident Report\n## Summary\nOn 16 May 2024, the Google Infrastructure team was executing a routine maintenance action to shut down an unused Virtual Private Cloud (VPC) controller in a single Google Cloud zone. Unfortunately, a bug in the automation caused the component to be shut down in all zones where it was still in use. This resulted in networking connectivity issues and/or service disruptions for multiple Google Cloud products. The majority of the impact lasted 2 hours and 48 minutes.\nSome products took longer to fully recover based on the failures they experienced as outlined below in the impact section. To our Google Cloud customers, whose services were impacted, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThe root cause was a bug in maintenance automation intended to shut down an unused VPC Controller in a single zone. A parameter specifying the zone for maintenance operations was modified during a refactor of the automation software earlier in the year. This modification resulted in the target zone parameter being ignored and the shut down operation taking effect on VPC controllers in all cloud zones. The test environment caught this failure mode, but the error was misinterpreted as an expected failure in the test environment. As a result, the change made it to production.\nShutdowns are a routine operational practice that we perform to maintain our systems without customer impact. The automation has a safeguard in place to limit the scope of maintenance operations. This safeguard was inadvertently disabled during a migration to a new version of the automation framework, resulting in the shutdown operation taking effect on VPC controllers in all cloud zones.\nA separate instance of the VPC Controller is launched into every Google Cloud zone. The jobs running in a given zone are responsible for programming the hosts in that zone (such as GCE VM hosts) so that the Endpoints running on those machines are able to reach other Endpoints in their VPC networks, reach on-prem destinations via VPN / Interconnect, and reach the Internet.\nWithin each zone, the VPC Control Plane is sharded into different components, each of which is responsible for programming a subset of traffic paths. The intent behind this architecture is to isolate outages affecting control plane jobs, in order to limit the blast radius. Outages affecting control plane jobs within a given cluster are expected to only affect the local cluster, and, ideally, only a subset of traffic within the local cluster.\nThe VPC Network architecture follows a fail static design. If control plane jobs are not available, the data plane continues using the last programmed state until the control plane jobs provide an update. 
This fail-static design reduced the impact of this outage to network activity involving interaction with the control plane, detailed below.\nThe VPC controller shutdown affected a subset of Cloud VPC control plane jobs in all zones, impacting control plane operations such as creating new VMs, auto scaler operations, applying configuration changes to customer projects and accurately reflecting changes in VM health state. The data plane impact included packet loss during VM live migration and network congestion in some zones.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage through internal monitoring alerts at 15:41 US/Pacific and immediately started an investigation. Maintenance operations were immediately paused to prevent a recurrence. In parallel, affected VPC Controllers were brought back to a serving state. Once the operations were completed, VPC controllers in most of the zones were operational by 17:30 US/Pacific.\nBy 18:09 US/Pacific, all VPC controllers were restored, and the backlog of applying changes to the dataplane cleared up by 18:40 US/Pacific, mitigating impact for most of the products. Some products took longer to mitigate as outlined below in the impact section.\nGoogle Cloud is committed to preventing a repeat of this issue in the future and is completing the following actions:\n1. Implement additional safety measures to validate maintenance operations have appropriately scoped zone information.\n2. Extend defense in depth by improving safety in underlying tools to reject maintenance operations without zones specified.\n3. Increase on-demand access control to limit the ability of maintenance operations to work across multiple zones in a single invocation.\n4. Ensure we have the required safety checks and appropriate testing and validation processes for maintenance operations, including positive and negative test cases for the safety measures 1, 2 and 3 above, to prevent regression.\nWe apologize for the impact of this issue and are taking steps to address the scope and duration of this incident as well as the root cause itself. We thank you for your business.\n## Detailed Description of Impact\n**AlloyDB for PostgreSQL:**\n- From 15:30 to 19:00 US/Pacific, new AlloyDB VM creation may have failed in the europe-west1, europe-west3, europe-north1, europe-north2, asia-east1 and asia-northeast1 regions. The affected regions encountered \u003c0.5% creation failure rates, except the asia-northeast1 region, which encountered around 1% instance creation failures.\n- There were about 10% auto failovers triggered in us-central1 and less than 1% auto failovers triggered in other regions.\n**Apigee:**\n- From 15:22 to 18:10 US/Pacific, some Apigee customers may have experienced network latencies, timeouts, or 500 errors in their runtimes.\n**Apigee Edge Public Cloud:**\n- From 15:22 to 18:10 US/Pacific, some Apigee Edge customers using GCP infrastructure may have experienced network latencies, timeouts, or 500 errors in their runtimes.\n- Additionally, some customers encountered errors during proxy deployments, resulting in partial deployments.\n- The impact was mostly resolved automatically as the network recovered. 
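Stepping briefly outside the impact list: the remediation items above all center on zone scoping of maintenance automation. As a purely illustrative, hypothetical Python sketch (invented names; not the actual tooling), a guard in the spirit of items 1-3 would fail closed whenever the zone target is missing, wildcarded, or plural:

```python
from typing import Iterable, List, Optional

class UnsafeMaintenanceError(Exception):
    """Raised when a maintenance request is not scoped to a single explicit zone."""

def validate_zone_scope(operation: str,
                        target_zones: Optional[Iterable[str]]) -> List[str]:
    zones = list(target_zones or [])
    if not zones:
        # Item 2: a missing zone is rejected, never defaulted to "all zones".
        raise UnsafeMaintenanceError(
            f"{operation}: no target zone specified; refusing to act on all zones")
    if any(z in ("*", "all") for z in zones):
        # Item 1: the scope must be explicit, not a wildcard.
        raise UnsafeMaintenanceError(f"{operation}: wildcard zone scope rejected")
    if len(zones) > 1:
        # Item 3: multi-zone invocations require separate, on-demand authorization.
        raise UnsafeMaintenanceError(
            f"{operation}: multi-zone scope requires elevated approval")
    return zones

validate_zone_scope("shutdown-vpc-controller", ["us-central1-a"])  # passes
try:
    # The failure mode described in the root cause: a refactor drops the zone argument.
    validate_zone_scope("shutdown-vpc-controller", None)
except UnsafeMaintenanceError as err:
    print(err)  # fails closed instead of shutting down controllers in every zone
```

Item 4 amounts to pinning both the passing and failing paths above with positive and negative test cases, so a later refactor cannot silently disable the guard again.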
- However, some cases of partial proxy deployments required manual intervention and were recovered by 17 May 2024 01:00 US/Pacific.\n**Batch:**\n- From 15:50 to 18:00 US/Pacific, batch jobs experienced elevated “NO_VM_REPORT” failure rates globally.\n**Cloud Build:**\n- From 15:39 to 17:28 US/Pacific, build jobs were experiencing elevated scheduling latency or timeouts globally.\n**Cloud Composer:**\n- From 15:40 to 17:30 US/Pacific, environment creation and update operations were failing across all regions.\n- 80% of environment creation operations failed.\n- Several environments that started an image upgrade operation during the disruption were irrecoverably broken.\n- ~2.5% of Airflow tasks in running environments failed globally due to a direct impact on the connectivity of Composer components or the outage's impact on services that the tasks operated on.\n**Cloud Data Fusion:**\n- From 15:50 to 16:50 US/Pacific, new instance creation experienced failures globally.\n- Error rates for new instance creation remained elevated until 17:46 US/Pacific.\n- Instance creation requests submitted during this period may have timed out (failed) or succeeded after a longer-than-usual delay.\n**Cloud Filestore:**\n- From 15:40 to 18:00 US/Pacific, Filestore instance operations such as creation and deletion were degraded or failing in most regions and zones.\n- Some existing instances were unable to report metrics or start operations during this period, and due to the degraded VPC performance, customer VMs may not have been able to access existing instances.\n**Cloud Firewall:**\n- From 15:20 to 18:40 US/Pacific, all new firewall rules and firewall updates were not propagated. This affected firewall modifications for all VM instances in all GCE Zones.\n**Cloud IDS:**\n- From 15:45 to 18:40 US/Pacific, any calls to alter or create IDS Endpoints would have failed.\n- Packet Mirroring to existing Cloud IDS Endpoints continued working as intended.\n**Cloud Interconnect:**\n- From 15:45 to 18:09 US/Pacific, customers were unable to make any changes to their Cloud Interconnect resources because the control/management plane was down.\n- Most existing Interconnect attachments were unaffected, except that changes to learned BGP routes did not propagate during the outage.\n- A small number of attachments (1%) experienced dataplane packet loss triggered by maintenance operations (e.g. VM migration or attachment dataplane machine maintenance).\n**Cloud Load Balancing:**\n- From 16:00 to 18:20 US/Pacific, customers were unable to make changes to load balancer configurations that involved network programming as a dependency.\n- Some customers experienced data plane impact as well, manifested as 500 errors; this was either because certain workloads could not be autoscaled due to the absence of network programming, or because programming for their network was not complete before the service disruption started.\n**Cloud NAT:**\n- From 15:45 to 18:20 US/Pacific, customers were unable to make any changes to their Cloud NAT resources because the control/management plane was down.\n- Cloud NAT Dynamic Port Allocation (DPA) experienced allocation failures.\n- Most existing NAT configurations were unaffected, although a small number (\u003c1%) saw dataplane loss. 
In addition, a small number (\u003c1%) of NAT configurations took up to two additional hours for control plane changes to take effect.\n**Cloud NGFW Enterprise:**\n- From 15:45 to 18:40 US/Pacific, any calls to alter or create firewall-endpoints, firewall-endpoint-associations, security-profiles, or security-profile-groups would have failed.\n- Packet Inspection through “Proceed to L7 Inspection” Firewall Rules continued to work as intended.\n**Cloud Router:**\n- From 15:45 to 18:20 US/Pacific, customers were unable to make any changes to their Cloud Router resources because the control/management plane was down.\n- Changes to Border Gateway Protocol (BGP) routes or any new learned routes advertised by customers, or any changes to route health triggered by unhealthy Interconnects / VPN tunnels, would not have been applied to the dataplane. Most existing BGP sessions stayed up during this event.\n**Cloud Run:**\n- From 15:35 to 18:06 US/Pacific, customers using Direct VPC Egress for Cloud Run were unable to deploy new services. Customers with existing services using Direct VPC Egress were unable to scale up, including from 0.\n- Customers using existing VPC Access Connectors may have experienced network slowdown due to VPC Access Connectors not being able to scale up. Customers were also unable to deploy new VPC Access Connectors.\n**Cloud Security Command Center:**\n- From 16:40 to 17:30 US/Pacific, 25% of scheduled Attack Path Simulations failed globally, affecting 7% of onboarded organizations.\n- The attack exposure scores of findings were not updated and new attack paths were not created during this time for affected customers.\n- Customers could still view older attack exposure scores and attack paths during this time.\n**Cloud Shell:**\n- From 15:50 to 16:50 US/Pacific, Cloud Shell sessions failed to start globally. ~15% of Cloud Shell sessions failed to start.\n**Cloud VPN:**\n- From 15:45 to 18:40 US/Pacific, customers were unable to make any changes to their Cloud VPN resources because the control/management plane was down.\n- Most existing tunnels were not affected, except that changes to learned BGP routes did not propagate during the outage.\n- Routine VPN maintenance operations which occurred during this time window broke the dataplane for a small number (1%) of customer tunnels, which should have recovered along with the control plane.\n**Cloud Workstations:**\n- From 15:50 to 16:50 US/Pacific, Workstation startup failed, primarily across US regions. Around 25% of Workstations failed to start.\n**Colab Enterprise:**\n- From 15:50 to 16:50 US/Pacific, the Colab Enterprise NotebookRuntime operations AssignNotebookRuntime, StartNotebookRuntime, and UpgradeNotebookRuntime failed globally.\n**Firebase Test Lab:**\n- Between 15:50 and 17:50 US/Pacific, new Cloud VM creations failed globally.\n- Around 17% of the total VM creation requests globally at the time encountered issues.\n- 100% of customer executions failed on x86 emulators, impacting external customer CI tests and causing a complete pause of executions on those devices.\n**Google App Engine Flexible:**\n- From 15:35 to 18:06 US/Pacific, customers were unable to create App Engine Flex deployments and App Engine Flex deployments were unable to scale up.\n**Google Cloud Dataflow:**\n- From 15:44 to 18:52 US/Pacific, batch and streaming jobs became unhealthy in all regions:\n- New Dataflow jobs were not initialized successfully. 
- Already running Dataflow batch jobs experienced prolonged execution times.\n- Already running streaming jobs experienced elevated watermarks.\n**Google Cloud Dataproc:**\n- From 15:34 to 18:16 US/Pacific, cluster creations failed globally.\n- Some cluster deletion workflows may have failed, causing the corresponding VMs to not be entirely deleted. Our engineers have identified such VMs and performed a cleanup after the service disruption was mitigated.\n**Google Cloud Deploy:**\n- From 15:39 to 17:28 US/Pacific, Cloud Deploy customers were experiencing significant latency or timeouts for renders and deploys globally.\n**Google Cloud Functions:**\n- From 15:35 to 18:06 US/Pacific, customers using Cloud Functions were unable to deploy globally.\n- Customers using existing VPC Access Connectors may have experienced network slowdown due to VPC Access Connectors not being able to scale up. Customers were also unable to deploy new VPC Access Connectors.\n**Google Cloud SQL:**\n- From 15:47 to 16:30 US/Pacific, all create, clone, and restore operations were failing, along with some patch \u0026 update operations. Some instances were impacted due to operational failures.\n- From 16:30 to 19:00 US/Pacific, recovery was started, with 90% of the impacted instances restored or recovered by 19:00. Failure rates were still elevated at about 10%.\n- All remaining instances were restored by 22:30 US/Pacific.\n**Google Compute Engine:**\n- From 15:45 to 16:45 US/Pacific, most of the networking requests experienced elevated failure rates globally.\n- From 16:00 to 17:15 US/Pacific, instance deletion and instance update operations experienced elevated failure rates globally.\n- From 15:45 to 18:15 US/Pacific, instance group creation and deletion operations experienced elevated failure rates globally.\n**Google Kubernetes Engine:**\n- From 15:45 to 17:30 US/Pacific, GKE operations that created or updated a VM failed. These operations are cluster creation and deletion, node pool creation and deletion, node upgrades, scaling up existing node pools, and network configuration changes.\n**Infrastructure Manager:**\n- From 15:40 to 17:15 US/Pacific, deployment create/update/delete/unlock and preview create/delete operations degraded globally with increased latency and failures.\n- Customers could still view and export deployments, previews, revisions and resources.\n**Looker:**\n- From 15:50 to 17:30 US/Pacific, \u003c0.2% of Looker instances went down due to Cloud SQL DB connection failure.\n**Memorystore for Memcached:**\n- From 15:45 to 17:15 US/Pacific, Create, Update and Delete Memcache instance operations were failing across all regions.\n- Around 0.8% of existing nodes restarted with a downtime of approximately 50 minutes, possibly because these VMs underwent live migration.\n**Memorystore for Redis:**\n- From 15:40 to 18:40 US/Pacific, newly started Redis instance VM nodes were unreachable for networking. These instances could have been newly created by the customer, created indirectly through a mutating change such as resource scaling or addition of a new replica, or created automatically due to routine system operations. Impacts would have manifested as Basic (unreplicated) instances being unusable; on Standard instances it could have appeared as reduced reader capacity or outright instance unavailability. 
Some Redis instances experienced some long tail issues and eventually recovered at 20:05 US/Pacific.\n- In addition, mutating operations such as CreateInstance failed during this window.\n**Vertex AI Workbench:**\n- From 15:40 to 17:30 US/Pacific, customers could not provision new instances. Existing instances were not affected.\n**Virtual Private Cloud:**\n- From 15:20 to 18:40 US/Pacific, all new endpoints (e.g., VMs, VPN gateways) and services (load balancers) received no network programming and observed packet loss to a majority of destinations.\n- 0.07% of VMs that were migrated during the outage also experienced connectivity loss for the duration of the service disruption.\n- Long-running VMs were also affected due to data plane overload in a few locations. Peak loss was 3% in us-east1-c and average loss was 0.05% across all Cloud zones.\n- Access to Google Services was degraded in a few GCE zones; a fraction (up to 25% in us-east4-c) of TCP connections were blackholed. Applications that retry with a different TCP source port would have experienced limited outages even in these Cloud zones.\n--------------------","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"AlloyDB for PostgreSQL","id":"fPovtKbaWN9UTepMm3kJ"},{"title":"Apigee","id":"9Y13BNFy4fJydvjdsN3X"},{"title":"Backup and DR","id":"2wwSnezzV5o8a8JouBch"},{"title":"Batch","id":"8XjnU88URVtZrAL8KRvA"},{"title":"Cloud Build","id":"fw8GzBdZdqy4THau7e1y"},{"title":"Cloud Data Fusion","id":"rLKDHeeaBiXTeutF1air"},{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"},{"title":"Cloud Filestore","id":"jog4nyYkquiLeSK5s26q"},{"title":"Cloud Load Balancing","id":"ix7u9beT8ivBdjApTif3"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"},{"title":"Cloud Memorystore","id":"LGPLu3M5pcUAKU1z6eP3"},{"title":"Cloud NAT","id":"hCNpnTQHkUCCGxJy35Yq"},{"title":"Cloud Run","id":"9D7d2iNBQWN24zc1VamE"},{"title":"Cloud Workstations","id":"5UUXCiH1vfFHXmbDixrB"},{"title":"Colab Enterprise","id":"7Nbc1kZUvPLiihodettN"},{"title":"Contact Center AI Platform","id":"eSAGSSEKoxh8tTJucdYg"},{"title":"Dataproc Metastore","id":"PXZh68NPz9auRyo4tVfy"},{"title":"Firebase Test Lab","id":"rn3e8YyTQJ3fTNTfJ41k"},{"title":"Google App Engine","id":"kchyUtnkMHJWaAva8aYc"},{"title":"Google Cloud Composer","id":"YxkG5FfcC42cQmvBCk4j"},{"title":"Google Cloud Dataflow","id":"T9bFoXPqG8w8g1YbWTKY"},{"title":"Google Cloud Dataproc","id":"yjXrEg3Yvy26BauMwr69"},{"title":"Google Cloud Deploy","id":"6z5SnvJrJMJQSdJmUQjH"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Google Cloud SQL","id":"hV87iK5DcEXKgWU2kDri"},{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"},{"title":"Hybrid Connectivity","id":"5x6CGnZvSHQZ26KtxpK1"},{"title":"Memorystore for Memcached","id":"paC6vmsvnjCHsBkp4Wva"},{"title":"Memorystore for Redis","id":"3yFciKa9NQH7pmbnUYUs"},{"title":"Vertex AI Workbench User Managed Notebooks","id":"GewfNoG6JvJ1kjnL9jb8"},{"title":"Virtual Private Cloud (VPC)","id":"BSGtCUnz6ZmyajsjgTKv"}],"uri":"incidents/xVSEV3kVaJBmS7SZbnre","currently_affected_locations":[],"previously_affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo 
(asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"j6rYCkFSaV4ZCzNtYgnM","number":"13802583263996125309","begin":"2024-05-10T08:54:26+00:00","created":"2024-05-10T09:20:27+00:00","end":"2024-05-10T17:04:02+00:00","modified":"2024-05-10T17:04:11+00:00","external_desc":"Chronicle Security: Issues Viewing Curated Rules (and Curated Detections)","updates":[{"created":"2024-05-10T17:04:02+00:00","modified":"2024-05-10T17:04:12+00:00","when":"2024-05-10T17:04:02+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Friday, 2024-05-10 08:05 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-10T14:03:25+00:00","modified":"2024-05-10T17:04:11+00:00","when":"2024-05-10T14:03:25+00:00","text":"Summary: Chronicle Security: Issues Viewing Curated Rules (and Curated Detections)\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to be completed by Friday, 2024-05-10 10:00 US/Pacific.\nWe will provide more information by Friday, 2024-05-10 10:30 US/Pacific.\nDiagnosis: Impacted users may experience the following:\n** Various APIs that are related to Curated Rules will return an error, such as GetCuratedRuleSet, ListCuratedRuleSets, ListCountCuratedRuleSetDetections.\n** In the UI, the \"Curated Detections\" will not load, and will show \"There was an 
error.\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-05-10T13:54:12+00:00","modified":"2024-05-10T14:03:25+00:00","when":"2024-05-10T13:54:12+00:00","text":"Summary: Chronicle Security: Issues Viewing Curated Rules (and Curated Detections)\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Friday, 2024-05-10 09:00 US/Pacific.\nDiagnosis: Impacted users may experience the following:\n** Various APIs that are related to Curated Rules will return an error, such as GetCuratedRuleSet, ListCuratedRuleSets, ListCountCuratedRuleSetDetections.\n** In the UI, the \"Curated Detections\" will not load, and will show \"There was an error.\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-05-10T11:12:49+00:00","modified":"2024-05-10T13:54:12+00:00","when":"2024-05-10T11:12:49+00:00","text":"Summary: Chronicle Security: Issues Viewing Curated Rules (and Curated Detections)\nDescription: Our engineering team has determined that further investigation is required to mitigate the issue.\nWe will provide an update by Friday, 2024-05-10 07:00 US/Pacific with current details.\nDiagnosis: Impacted users may experience the following:\n** Various APIs that are related to Curated Rules will return an error, such as GetCuratedRuleSet, ListCuratedRuleSets, ListCountCuratedRuleSetDetections.\n** In the UI, the \"Curated Detections\" will not load, and will show \"There was an error.\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto 
(northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-05-10T09:50:16+00:00","modified":"2024-05-10T11:12:49+00:00","when":"2024-05-10T09:50:16+00:00","text":"Summary: Chronicle Security: Issues Viewing Curated Rules (and Curated Detections)\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Friday, 2024-05-10 05:00 US/Pacific.\nDiagnosis: Impacted users may experience the following:\n** Various APIs that are related to Curated Rules will return an error, such as GetCuratedRuleSet, ListCuratedRuleSets, ListCountCuratedRuleSetDetections.\n** In the UI, the \"Curated Detections\" will not load, and will show \"There was an error.\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-05-10T09:20:13+00:00","modified":"2024-05-10T09:50:16+00:00","when":"2024-05-10T09:20:13+00:00","text":"Summary: Chronicle Security: Issues Viewing Curated Rules (and Curated Detections)\nDescription: We are experiencing an issue with Chronicle Security.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-05-10 04:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted users may experience failures for the below APIs\nListCuratedRuleSets\nListCuratedRules\nCountAllCuratedRuleSetDetections\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]}],"most_recent_update":{"created":"2024-05-10T17:04:02+00:00","modified":"2024-05-10T17:04:12+00:00","when":"2024-05-10T17:04:02+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Friday, 2024-05-10 08:05 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/j6rYCkFSaV4ZCzNtYgnM","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai 
(asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"id":"5feV12qHeQoD3VdD8byK","number":"978879062657371275","begin":"2024-05-09T02:00:00+00:00","created":"2024-05-09T03:29:48+00:00","end":"2024-05-09T05:28:53+00:00","modified":"2024-05-21T22:51:25+00:00","external_desc":"Multiple services impacted in australia-southeast1.","updates":[{"created":"2024-05-21T22:51:25+00:00","modified":"2024-05-21T22:51:25+00:00","when":"2024-05-21T22:51:25+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 8 May 2024, multiple Google Cloud services experienced a partial service outage in australia-southeast1-a for varying durations of up to 2 hours and 55 minutes. The full list of impacted products and services is detailed below.\nTo our Google Cloud customers whose businesses were impacted during this outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer you.\n## Root Cause\nOn 8 May, at 18:44 US/Pacific, a public utility power issue resulted in an undervoltage condition followed by power loss that affected a portion of Google’s third-party data center in Sydney. As a result of this issue, the operating current exceeded the trip settings of the automatic transfer switch (ATS) units.\nATS units have trip settings to protect the load from electrical faults. Additionally, ATS units are configured in pairs to provide a redundant power path to the critical load.\nIn this case, both ATS units feeding the affected rows exceeded their trip settings due to overcurrent. Further investigation into the ATS units determined that they were configured with trip settings that were not in accordance with the site design.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage via internal monitoring on Wednesday, 8 May at 18:55 US/Pacific and immediately started an investigation. On-site data center operations were engaged at 19:00 US/Pacific, and the scope of the power loss was confirmed at 19:22 US/Pacific.\nOn-site engineers restored power to the affected rows at 19:00 US/Pacific by manually closing breakers for both of the ATS units.\nOn Wednesday, 8 May at19:42 US/Pacific, network connectivity for the affected racks began recovering. 
All services had recovered by 21:55 US/Pacific with the exception of a very small percentage of Persistent Disk devices which required manual intervention.\nOn Thursday, 9 May at 07:47 US/Pacific, the public utility power issue was resolved.\nAll power had been switched back to utility feeds on Thursday, 9 May at 09:26 US/Pacific.\nGoogle is committed preventing a repeat of this issue in the future and is completing the following actions:\n* A case has been opened with the utility provider to determine the cause of the power event that led to the undervoltage condition.\n* Audit and update all ATS device settings if needed based on current site load.\n* Complete a full audit of all operational procedures for the Sydney third-party data center location.\n## Detailed Description of Impact\nOn Wednesday 8 May, from 18:45 to 21:40 US/Pacific, multiple Google Cloud services experienced a partial service outage in the australia-southeast1-a zone.\n**Persistent Disk:**\n* From 18:45 to 21:45 US/Pacific, approximately 0.4% of Persistent Disk devices in australia-southeast1-a experienced failures for disk operations, including snapshots, clones, and attachments.\n* Approximately 0.05% of PD devices experienced extended impact and required manual intervention.\n**Google Cloud Dataflow:**\n* From 18:45 to 21:55 US/Pacific, customers experienced increased latency for affected streaming jobs.\n* From 19:45 to 20:45 US/Pacific, some batch jobs took longer than normal to execute.\n* From 18:49 to 19:15 US/Pacific, a small number of new job submissions failed.\n**Google Cloud Pub/Sub:**\n* For a total of 13 minutes between 18:46 to 19:19 US/Pacific, affected customers experienced intermittent request error rates up to 1% and elevated message delivery latency.\n**Google BigQuery:**\n* From 18:45 to 21:50 US/Pacific, 0.8% of job failures for Job API and 5 minutes of failures for Metadata API.\n* From 19:00 to 19:20: 17% of projects experienced slower than normal performance.\n* From 21:25 to 22:25: over 10% of projects experienced slower than normal performance.\n**Google Compute Engine:**\n* From 18:45 to 19:30 US/Pacific, 11.8% of VMs in australia-southeast1-a were paused and restarted, and another 4.0% experienced pauses without restarting.\n**Cloud Filestore:**\n* From 18:45 to 21:43 US/Pacific, affected customers were unable to access their NFS filestore in the australia-southeast1-a zone.\n**Virtual Private Cloud (VPC):**\n* From 18:45 to 21:31 US/Pacific, affected customers experienced delays while creating new VMs and packet loss / unreachability for existing VMs. The VMs in australia-southeast1-a which went down could have faced delayed programming upon recreation. Roughly half of the traffic to australia-southeast1-a VMs was dropped.\n**Cloud SQL:**\n* From 18:45 to 21:45 US/Pacific, affected customers were unable to access their Cloud SQL instances in australia-southeast1-a. High Availability instances successfully failed over to other zones and recovered in 1-4 minutes. 
The majority of affected zonal instances were inaccessible for 20-30 minutes, with a few experiencing extended recovery times of up to 3 hours.\n**Cloud Logging:**\n* From 18:45 to 19:05 US/Pacific, affected customers may have experienced a small increase in error rates for inflight requests in this zone.\n* Cloud Logging is a regional service so the vast majority of the requests in the australia-southeast1 region were not affected.\n**Cloud Bigtable:**\n* From 18:45 to 19:10 US/Pacific, affected customers would have experienced high error rates in australia-southeast1-a.\n**Cloud Apigee:**\n* From 18:45 to 19:15 US/Pacific, there were multiple periods of impact ranging from 5 to 30 minutes, with error rates between 5% and 38% respectively, due to nodes restarting.\n* During the impact periods, customers may have experienced a “GKE cluster is currently undergoing repair\" error.\n**Memorystore for Redis**\n* From 18:46 to 21:31 US/Pacific, a subset of basic tier instances in australia-southeast1-a zone would have been unavailable.\n* Affected standard tier instances may have experienced brief unavailability as they failed over to replicas.\n**Dialogflow**\n* From 18:45 to 19:15 US/Pacific, affected customers would have experienced up to a 3% error rate in australia-southeast1.\n**Google Kubernetes Engine**\n* From 18:45 to approximately 19:45 US/Pacific, 14% of GKE clusters in australia-southeast1 were unavailable.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-09T17:58:34+00:00","modified":"2024-05-21T22:51:25+00:00","when":"2024-05-09T17:58:34+00:00","text":"# Mini Incident Report\nWe apologize for any inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 8 May, 2024 18:45\n**Incident End:** 8 May, 2024 21:40\n**Duration:** 2 hrs 55 minutes\n**Affected Services and Features:**\n- Persistent Disk,\n- Google Cloud Dataflow\n- Google Cloud Pub/Sub\n- Google Big Query\n- Google Compute Engine\n- Cloud Filestore\n- Virtual Private Cloud (VPC)\n- Cloud SQL\n- Cloud Logging\n- Cloud Bigtable\n- Cloud Apigee\n**Regions/Zones:** australia-southeast1\n**Description:**\nMultiple Google Cloud products experienced service disruptions of varying impact and duration, with the longest lasting being 2 hours and 55 minutes in the australia-southeast1 region. From preliminary analysis, the root cause of this incident is currently believed to be an unplanned power event caused by a power failover due to a utility company outage. 
Google will complete a full Incident Report in the following days that will provide a detailed root cause.\n**Customer Impact:**\n- Persistent Disk - impacted users experienced slow or unavailable devices.\n- Google Cloud Dataflow - impacted users experienced increased watermarks for streaming jobs in the australia-southeast1-a zone for a duration of 30 minutes.\n- Google Cloud Pub/Sub - users experienced an increased error rate for “Publish requests” for a duration of about 35 minutes.\n- Google Big Query - impacted users experienced failures for BigQuery jobs in the australia-southeast1 region.\n- Google Compute Engine - impacted VMs went into repair mode for about 45 minutes.\n- Cloud Filestore - multiple Filestore instances in australia-southeast1-a were unavailable and had missing metrics for a duration of 2 hours 55 minutes, with the last impacted instance confirmed to have recovered at 21:43 PT.\n- Virtual Private Cloud (VPC) - the impacted users experienced packet loss, unavailability of existing VMs and delays while creating new VMs.\n- Cloud SQL - impacted users experienced errors when accessing their Cloud SQL database instances in the australia-southeast1-a zone.\n- Cloud Logging - Cloud Logging experienced a minor increase in ingestion errors in australia-southeast1 for a duration of 15 minutes.\n- Cloud Bigtable - users experienced a high error rate in the impacted region for a duration of about 25 minutes.\n- Cloud Apigee - impacted users received 5XX and 2XX errors for a duration of 30 minutes.\n**Additional details:**\nAfter service mitigation and full closure of the incident, continued Persistent Disk impact was identified for a narrow group of customers. This has since been resolved with no further isolated impact.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-09T05:28:53+00:00","modified":"2024-05-09T17:58:34+00:00","when":"2024-05-09T05:28:53+00:00","text":"The issue with Apigee, Cloud Filestore, Cloud Logging, Google BigQuery, Google Cloud Bigtable, Google Cloud Dataflow, Google Cloud Pub/Sub, Google Cloud SQL, Google Compute Engine, Persistent Disk, Virtual Private Cloud (VPC) has been resolved for all affected users as of Wednesday, 2024-05-08 21:40 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-09T04:32:41+00:00","modified":"2024-05-09T05:28:58+00:00","when":"2024-05-09T04:32:41+00:00","text":"Summary: Multiple services impacted in australia-southeast1.\nDescription: We are experiencing an issue with Persistent Disk, Google Cloud Dataflow, Google Cloud Pub/Sub, Google BigQuery, Google Compute Engine, Cloud Filestore, Virtual Private Cloud (VPC), Cloud Logging, Cloud SQL, Cloud Bigtable, and Apigee beginning at Wednesday, 2024-05-08 18:45 US/Pacific.\nA mitigation strategy has been identified, and the services are now recovering.\nWe will provide an update by Wednesday, 2024-05-08 23:00 US/Pacific with current details.\nDiagnosis: Multiple GCP services are experiencing issues in the australia-southeast1 region.\n**Persistent Disk:** While most devices have restored their functionality, some users might encounter slow or unavailable devices.\n**Google Cloud Dataflow:** Users experienced issues for streaming jobs with watermarks increasing. 
The issue with Google Cloud Dataflow was mitigated at 2024-05-08 19:47:27 PDT.\n**Google Cloud Pub/Sub:** The Pub/Sub impact is mitigated.\n**Google BigQuery:** The impacted users experienced failures with BigQuery jobs in the australia-southeast1 region. The issue with Google BigQuery has been resolved for all the affected users as of Wednesday, 2024-05-08 21:13 US/Pacific.\n**Google Compute Engine:** VMs went into repair for around 45 minutes and have started recovering.\n**Cloud Filestore:** Filestore is partially recovered. However, a small subset of users was unable to access the NFS filestore in the australia-southeast1-a zone.\n**Virtual Private Cloud (VPC):** The impacted users may face delays while creating new VMs and packet loss / unreachability for existing VMs.\n**Cloud SQL:** A subset of the Cloud SQL users are experiencing errors when accessing their Cloud SQL database instances in the australia-southeast1-a zone.\n**Cloud Logging:** All requests are failing at the send request step. The issue with Cloud Logging has been resolved for all the affected users as of Wednesday, 2024-05-08 21:16:07 US/Pacific.\n**Cloud Bigtable:** Cloud Bigtable experienced a high error rate for 25 minutes in australia-southeast1-a due to a power event. The issue with Cloud Bigtable has been resolved for all the affected users as of Wednesday, 2024-05-08 20:08:30 US/Pacific.\n**Apigee:** There was a minor outage due to a GKE error which caused all of the nodes to restart. The GKE cluster is currently undergoing repair. This resulted in a 30-minute outage for customers. The issue with Apigee has been resolved for all the affected users as of Wednesday, 2024-05-08 20:34:47 US/Pacific.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"}]},{"created":"2024-05-09T03:29:43+00:00","modified":"2024-05-09T04:32:45+00:00","when":"2024-05-09T03:29:43+00:00","text":"Summary: Multiple services impacted in australia-southeast1.\nDescription: We are experiencing an issue with BigQuery, Google Filestore, and Cloud Pub/Sub beginning at Wednesday, 2024-05-08 18:45 US/Pacific.\nA mitigation strategy has been identified, and the services are now recovering.\nWe will provide an update by Wednesday, 2024-05-08 21:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Multiple GCP services are experiencing issues in the australia-southeast1 region.\n**Persistent Disk:** While most devices have restored their functionality, some users might encounter slow or unavailable devices.\n**Google Cloud Dataflow:** Users experienced issues for streaming jobs with watermarks increasing. The issue with Google Cloud Dataflow was mitigated at 2024-05-08 19:47:27 PDT.\n**Google Cloud Pub/Sub:** The Pub/Sub impact is mitigated.\n**Google BigQuery:** The impacted users may experience failures with BigQuery jobs in the australia-southeast1 region.\n**Google Compute Engine:** VMs went into repair for around 45 minutes and have started recovering. 
The issue with Compute Engine was mitigated at 2024-05-08 19:43:43 PDT.\n**Cloud Filestore:** The impacted customers are unable to access the NFS Filestores in the australia-southeast1-a zone.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"}]}],"most_recent_update":{"created":"2024-05-21T22:51:25+00:00","modified":"2024-05-21T22:51:25+00:00","when":"2024-05-21T22:51:25+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 8 May 2024, multiple Google Cloud services experienced a partial service outage in australia-southeast1-a for varying durations of up to 2 hours and 55 minutes. The full list of impacted products and services is detailed below.\nTo our Google Cloud customers whose businesses were impacted during this outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer you.\n## Root Cause\nOn 8 May, at 18:44 US/Pacific, a public utility power issue resulted in an undervoltage condition followed by power loss that affected a portion of Google’s third-party data center in Sydney. As a result of this issue, the operating current exceeded the trip settings of the automatic transfer switch (ATS) units.\nATS units have trip settings to protect the load from electrical faults. Additionally, ATS units are configured in pairs to provide a redundant power path to the critical load.\nIn this case, both ATS units feeding the affected rows exceeded their trip settings due to overcurrent. Further investigation into the ATS units determined that they were configured with trip settings that were not in accordance with the site design.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage via internal monitoring on Wednesday, 8 May at 18:55 US/Pacific and immediately started an investigation. On-site data center operations were engaged at 19:00 US/Pacific, and the scope of the power loss was confirmed at 19:22 US/Pacific.\nOn-site engineers restored power to the affected rows at 19:00 US/Pacific by manually closing breakers for both of the ATS units.\nOn Wednesday, 8 May at 19:42 US/Pacific, network connectivity for the affected racks began recovering. 
All services had recovered by 21:55 US/Pacific with the exception of a very small percentage of Persistent Disk devices which required manual intervention.\nOn Thursday, 9 May at 07:47 US/Pacific, the public utility power issue was resolved.\nAll power had been switched back to utility feeds on Thursday, 9 May at 09:26 US/Pacific.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n* A case has been opened with the utility provider to determine the cause of the power event that led to the undervoltage condition.\n* Audit and update all ATS device settings if needed based on current site load.\n* Complete a full audit of all operational procedures for the Sydney third-party data center location.\n## Detailed Description of Impact\nOn Wednesday 8 May, from 18:45 to 21:40 US/Pacific, multiple Google Cloud services experienced a partial service outage in the australia-southeast1-a zone.\n**Persistent Disk:**\n* From 18:45 to 21:45 US/Pacific, approximately 0.4% of Persistent Disk devices in australia-southeast1-a experienced failures for disk operations, including snapshots, clones, and attachments.\n* Approximately 0.05% of PD devices experienced extended impact and required manual intervention.\n**Google Cloud Dataflow:**\n* From 18:45 to 21:55 US/Pacific, customers experienced increased latency for affected streaming jobs.\n* From 19:45 to 20:45 US/Pacific, some batch jobs took longer than normal to execute.\n* From 18:49 to 19:15 US/Pacific, a small number of new job submissions failed.\n**Google Cloud Pub/Sub:**\n* For a total of 13 minutes between 18:46 and 19:19 US/Pacific, affected customers experienced intermittent request error rates up to 1% and elevated message delivery latency.\n**Google BigQuery:**\n* From 18:45 to 21:50 US/Pacific, 0.8% of jobs failed on the Job API, and the Metadata API saw 5 minutes of failures.\n* From 19:00 to 19:20 US/Pacific, 17% of projects experienced slower than normal performance.\n* From 21:25 to 22:25 US/Pacific, over 10% of projects experienced slower than normal performance.\n**Google Compute Engine:**\n* From 18:45 to 19:30 US/Pacific, 11.8% of VMs in australia-southeast1-a were paused and restarted, and another 4.0% experienced pauses without restarting.\n**Cloud Filestore:**\n* From 18:45 to 21:43 US/Pacific, affected customers were unable to access their NFS filestore in the australia-southeast1-a zone.\n**Virtual Private Cloud (VPC):**\n* From 18:45 to 21:31 US/Pacific, affected customers experienced delays while creating new VMs and packet loss / unreachability for existing VMs. The VMs in australia-southeast1-a which went down could have faced delayed programming upon recreation. Roughly half of the traffic to australia-southeast1-a VMs was dropped.\n**Cloud SQL:**\n* From 18:45 to 21:45 US/Pacific, affected customers were unable to access their Cloud SQL instances in australia-southeast1-a. High Availability instances successfully failed over to other zones and recovered in 1-4 minutes. 
The majority of affected zonal instances were inaccessible for 20-30 minutes, with a few experiencing extended recovery times of up to 3 hours.\n**Cloud Logging:**\n* From 18:45 to 19:05 US/Pacific, affected customers may have experienced a small increase in error rates for inflight requests in this zone.\n* Cloud Logging is a regional service, so the vast majority of the requests in the australia-southeast1 region were not affected.\n**Cloud Bigtable:**\n* From 18:45 to 19:10 US/Pacific, affected customers would have experienced high error rates in australia-southeast1-a.\n**Apigee:**\n* From 18:45 to 19:15 US/Pacific, there were multiple periods of impact ranging from 5 to 30 minutes, with error rates between 5% and 38%, due to nodes restarting.\n* During the impact periods, customers may have experienced a “GKE cluster is currently undergoing repair” error.\n**Memorystore for Redis:**\n* From 18:46 to 21:31 US/Pacific, a subset of basic tier instances in the australia-southeast1-a zone would have been unavailable.\n* Affected standard tier instances may have experienced brief unavailability as they failed over to replicas.\n**Dialogflow:**\n* From 18:45 to 19:15 US/Pacific, affected customers would have experienced up to a 3% error rate in australia-southeast1.\n**Google Kubernetes Engine:**\n* From 18:45 to approximately 19:45 US/Pacific, 14% of GKE clusters in australia-southeast1 were unavailable.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Apigee","id":"9Y13BNFy4fJydvjdsN3X"},{"title":"Cloud Filestore","id":"jog4nyYkquiLeSK5s26q"},{"title":"Cloud Logging","id":"PuCJ6W2ovoDhLcyvZ1xa"},{"title":"Google BigQuery","id":"9CcrhHUcFevXPSVaSxkf"},{"title":"Google Cloud Bigtable","id":"LfZSuE3xdQU46YMFV5fy"},{"title":"Google Cloud Dataflow","id":"T9bFoXPqG8w8g1YbWTKY"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Google Cloud Pub/Sub","id":"dFjdLh2v6zuES6t9ADCB"},{"title":"Google Cloud SQL","id":"hV87iK5DcEXKgWU2kDri"},{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"},{"title":"Persistent Disk","id":"SzESm2Ux129pjDGKWD68"},{"title":"Virtual Private Cloud (VPC)","id":"BSGtCUnz6ZmyajsjgTKv"}],"uri":"incidents/5feV12qHeQoD3VdD8byK","currently_affected_locations":[],"previously_affected_locations":[{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"}]},{"id":"92DZXgeiWQUKG16qhPHt","number":"4031284577467947132","begin":"2024-05-08T23:41:50+00:00","created":"2024-05-09T00:29:20+00:00","end":"2024-05-09T10:44:07+00:00","modified":"2024-05-09T10:44:17+00:00","external_desc":"Apigee API Platform - Service Issues","updates":[{"created":"2024-05-09T10:44:07+00:00","modified":"2024-05-09T10:44:21+00:00","when":"2024-05-09T10:44:07+00:00","text":"The issue with Apigee has been resolved for all affected users as of Thursday, 2024-05-09 03:40 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-09T04:40:20+00:00","modified":"2024-05-09T10:44:17+00:00","when":"2024-05-09T04:40:20+00:00","text":"Summary: Apigee API Platform - Service Issues\nDescription: Starting Wednesday, 2024-05-08 16:41 US/Pacific, customers making use of caching policies in their API proxies are experiencing issues with Apigee API Platform services.\nThe mitigation rollout is 
progressing region by region, and the rollout will continue until all regions have received the update. We expect this to be completed within the next 8 hours, but we do not have a precise ETA for completion of mitigation at this time.\nWe will provide an update by Thursday, 2024-05-09 08:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience latency in the completion of their API calls.\nWorkaround: There is no workaround at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-05-09T03:36:31+00:00","modified":"2024-05-09T04:40:20+00:00","when":"2024-05-09T03:36:31+00:00","text":"Summary: Apigee API Platform - Service Issues\nDescription: Starting Wednesday, 2024-05-08 16:41 US/Pacific, customers making use of caching policies in their API proxies are experiencing issues with Apigee API Platform services.\nThe mitigation rollout is in progress; we do not have an ETA for completion of mitigation at this time.\nWe will provide an update by Wednesday, 2024-05-08 23:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience latency in the completion of their API calls.\nWorkaround: There is no workaround at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo 
(asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-05-09T00:29:18+00:00","modified":"2024-05-09T03:36:31+00:00","when":"2024-05-09T00:29:18+00:00","text":"Summary: Apigee API Platform - Service Issues\nDescription: Starting Wednesday, 2024-05-08 16:41 US/Pacific, customers making use of caching policies in their API proxies are experiencing issues with Apigee API Platform services.\nOur engineering team has identified a mitigation for the issue and is working on creating an implementation strategy to roll it out. 
We do not have an ETA for completion of mitigation at this time.\nWe will provide an update by Wednesday, 2024-05-08 21:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience latency in the completion of their API calls.\nWorkaround: There is no workaround at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-05-09T10:44:07+00:00","modified":"2024-05-09T10:44:21+00:00","when":"2024-05-09T10:44:07+00:00","text":"The issue with Apigee has been resolved for all affected users as of Thursday, 2024-05-09 03:40 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"9Y13BNFy4fJydvjdsN3X","service_name":"Apigee","affected_products":[{"title":"Apigee","id":"9Y13BNFy4fJydvjdsN3X"}],"uri":"incidents/92DZXgeiWQUKG16qhPHt","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore 
(asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"rErZ4r752o4rt4qaotfL","number":"13400700893373178731","begin":"2024-05-03T18:25:38+00:00","created":"2024-05-03T19:36:15+00:00","end":"2024-05-03T22:37:24+00:00","modified":"2024-05-03T22:37:26+00:00","external_desc":"Firebase authentication failures for a few customers.","updates":[{"created":"2024-05-03T22:37:24+00:00","modified":"2024-05-03T22:37:27+00:00","when":"2024-05-03T22:37:24+00:00","text":"The issue with Firebase Authentication, Identity Platform has been resolved for all affected users as of Friday, 2024-05-03 15:27 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-03T21:29:51+00:00","modified":"2024-05-03T22:37:26+00:00","when":"2024-05-03T21:29:51+00:00","text":"Summary: Firebase authentication failures for a few customers.\nDescription: We are experiencing an issue with Identity Platform, Firebase Authentication as of Friday, 2024-05-03 11:20 US/Pacific.\nOur engineering team has identified a mitigation strategy and is currently in the process of implementing it in production. 
We do not have an ETA for completion of mitigation at this time.\nWe will provide more information by Friday, 2024-05-03 16:30 US/Pacific.\nDiagnosis: A few customers with the sign-in redirect flow may see sign-in errors in the browser.\nWorkaround: Customers are advised to follow the best practices as noted here: https://firebase.google.com/docs/auth/web/redirect-best-practices","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-05-03T20:24:17+00:00","modified":"2024-05-03T21:29:51+00:00","when":"2024-05-03T20:24:17+00:00","text":"Summary: Firebase authentication failures for a few customers.\nDescription: We are experiencing an issue with Identity Platform, Firebase Authentication.\nOur engineering teams are actively working on a mitigation and are currently validating the changes.\nWe will provide an update by Friday, 2024-05-03 14:30 US/Pacific with current details.\nDiagnosis: A few customers with the sign-in redirect flow may see sign-in errors in the browser.\nWorkaround: Customers are advised to follow the best practices as noted here: https://firebase.google.com/docs/auth/web/redirect-best-practices","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-05-03T19:36:06+00:00","modified":"2024-05-03T20:24:17+00:00","when":"2024-05-03T19:36:06+00:00","text":"Summary: Firebase authentication failures in the Chrome browser\nDescription: We are experiencing an issue with Identity Platform, Firebase Authentication.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-05-03 13:30 US/Pacific with current details.\nDiagnosis: Customers may see sign-in errors in the Chrome browser.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-05-03T22:37:24+00:00","modified":"2024-05-03T22:37:27+00:00","when":"2024-05-03T22:37:24+00:00","text":"The issue with Firebase Authentication, Identity Platform has been resolved for all affected users as of Friday, 2024-05-03 15:27 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Firebase Authentication","id":"uAWwkob8rYyzVKgQ8jCH"},{"title":"Identity Platform","id":"LE1X2BHYANNsHtG1NM1M"},{"title":"Identity and Access Management","id":"adnGEDEt9zWzs8uF1oKA"}],"uri":"incidents/rErZ4r752o4rt4qaotfL","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"8xe5wtseE3Wc5PoMb7Re","number":"8704997251240886106","begin":"2024-05-02T15:23:07+00:00","created":"2024-05-02T15:26:59+00:00","end":"2024-05-07T17:34:09+00:00","modified":"2024-05-07T17:34:11+00:00","external_desc":"Cloud Build: Cross-project trigger creation failing","updates":[{"created":"2024-05-07T17:34:09+00:00","modified":"2024-05-07T17:34:12+00:00","when":"2024-05-07T17:34:09+00:00","text":"The issue with Cloud Build is believed to be affecting a very small number of projects and our Engineering Team is working on it with expected full mitigation by Friday, 2024-05-10 17:00 US/Pacific.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nAs a workaround, affected customers can 
grant tokenAccessor permissions on the project directly, rather than on the connection using the following:\ngcloud projects add-iam-policy-binding {PROJECT-ID} --member={Service-Account} --role=\"roles/cloudbuild.tokenAccessor\"","status":"AVAILABLE","affected_locations":[]},{"created":"2024-05-06T12:38:56+00:00","modified":"2024-05-07T17:34:11+00:00","when":"2024-05-06T12:38:56+00:00","text":"Summary: Cloud Build: Cross-project trigger creation failing\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Friday, 2024-05-10 10:00 US/Pacific.\nDiagnosis: Customers attempting to create a cross-project trigger will experience an unexpected permission error.\nWorkaround: Customers can grant tokenAccessor permissions on the project directly, rather than on the connection.\ngcloud projects add-iam-policy-binding {PROJECT-ID} --member={Service-Account} --role=\"roles/cloudbuild.tokenAccessor\"","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-05-02T19:19:58+00:00","modified":"2024-05-06T12:38:56+00:00","when":"2024-05-02T19:19:58+00:00","text":"Summary: Cloud Build: Cross-project trigger creation failing\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more 
information by Monday, 2024-05-06 10:00 US/Pacific.\nDiagnosis: Customers attempting to create a cross-project trigger will experience an unexpected permission error.\nWorkaround: Customers can grant tokenAccessor permissions on the project directly, rather than on the connection.\ngcloud projects add-iam-policy-binding {PROJECT-ID} --member={Service-Account} --role=\"roles/cloudbuild.tokenAccessor\"","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-05-02T17:33:20+00:00","modified":"2024-05-02T19:19:58+00:00","when":"2024-05-02T17:33:20+00:00","text":"Summary: Cloud Build: Cross-project trigger creation failing\nDescription: We are experiencing an issue with Cloud Build.\nOur engineering team has identified a mitigation and is currently working on a rollout plan.\nWe will provide an update by Thursday, 2024-05-02 13:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers attempting to create a cross-project trigger will experience an unexpected permission error.\nWorkaround: Customers can grant tokenAccessor permissions on the project directly, rather than on the connection.\ngcloud projects add-iam-policy-binding {PROJECT-ID} --member={Service-Account} 
--role=\"roles/cloudbuild.tokenAccessor\"","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-05-02T16:20:11+00:00","modified":"2024-05-02T17:33:20+00:00","when":"2024-05-02T16:20:11+00:00","text":"Summary: Cloud Build: Cross-project trigger creation failing\nDescription: We are experiencing an issue with Cloud Build.\nOur engineering team has identified a mitigation and is currently working on a rollout plan.\nWe will provide an update by Thursday, 2024-05-02 11:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers attempting to create a cross-project trigger will experience an unexpected permission error.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney 
(australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-05-02T15:26:55+00:00","modified":"2024-05-02T16:20:11+00:00","when":"2024-05-02T15:26:55+00:00","text":"Summary: Cloud Build: Cross-project trigger creation failing\nDescription: We are experiencing an issue with Cloud Build.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-05-02 09:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers attempting to create a cross-project trigger will experience an unexpected permission error.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich 
(europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-05-07T17:34:09+00:00","modified":"2024-05-07T17:34:12+00:00","when":"2024-05-07T17:34:09+00:00","text":"The issue with Cloud Build is believed to be affecting a very small number of projects and our Engineering Team is working on it with expected full mitigation by Friday, 2024-05-10 17:00 US/Pacific.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nAs a workaround, affected customers can grant tokenAccessor permissions on the project directly, rather than on the connection using the following:\ngcloud projects add-iam-policy-binding {PROJECT-ID} --member={Service-Account} --role=\"roles/cloudbuild.tokenAccessor\"","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Build","id":"fw8GzBdZdqy4THau7e1y"},{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"}],"uri":"incidents/8xe5wtseE3Wc5PoMb7Re","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel 
Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"UMy7btMrvSjDLd9zkMRK","number":"15409551107479513441","begin":"2024-04-25T17:30:00+00:00","created":"2024-04-25T21:19:59+00:00","end":"2024-04-25T21:00:00+00:00","modified":"2024-04-26T20:08:05+00:00","external_desc":"Google Cloud Monitoring experiencing intermittent issues with metric availability for GCP resources in US regions.","updates":[{"created":"2024-04-26T20:05:24+00:00","modified":"2024-04-26T20:08:05+00:00","when":"2024-04-26T20:05:24+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 25 April 2024 10:30\n**Incident End:** 25 April 2024 14:00\n**Duration:** 3 hours, 30 minutes\n**Affected Services and Features:**\n- Google Bigtable - Monitoring metrics\n- Cloud Spanner - Monitoring metrics\n**Regions/Zones:** US regions\n**Description:**\nGoogle Cloud Monitoring experienced issues with metric data availability impacting metric data for Cloud Bigtable and Cloud Spanner for a duration of approximately 3 hours, 30 minutes. The issue was limited to Cloud Bigtable and Cloud Spanner resources in US regions.\nFrom preliminary analysis, the root cause of the issue is a code change in Google Cloud Monitoring that caused delays in processing metric data and the timely delivery of those metrics to Google Cloud Customers.\nThe issue was fully mitigated once our engineers rolled back the code change.\n**Customer Impact:**\n- Customers may have experienced telemetry delays and in some cases metric gaps in their monitoring dashboards for the affected timeline. GCP and third-party services consuming these metrics on Google Cloud Monitoring API may also have experienced metric gaps, including those dependent on metrics for autoscaling.\n---","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-25T22:50:34+00:00","modified":"2024-04-26T20:05:24+00:00","when":"2024-04-25T22:50:34+00:00","text":"After deep-dive investigations, our engineers determined that the impact was limited to Bigtable and Cloud Spanner metrics.\nFrom our preliminary analysis, the suspected root cause of the issue is a recent rollout. 
This was rolled back by our engineers to the last known stable version.\nThe rollback has mitigated all known issues with Cloud Monitoring as of Thursday, 2024-04-25 12:35 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.\nThank you for choosing us!","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-25T21:58:10+00:00","modified":"2024-04-25T22:50:36+00:00","when":"2024-04-25T21:58:10+00:00","text":"Summary: Google Cloud Monitoring experiencing intermittent issues with metric availability for GCP resources in US regions.\nDescription: We are experiencing an issue with Cloud Monitoring beginning at Thursday, 2024-04-25 12:30 US/Pacific.\nUpon further investigation, we believe the impact is limited to metrics of GCP resources in US regions.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-04-25 16:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: - Customers impacted by this issue may experience delays and in some cases may not see the metric data in their monitoring dashboard.\n- GCP and third-party products and services that consume these metrics via the Cloud Monitoring API may experience the issues stated above.\n- This includes products that rely on metrics for autoscaling.\n- Alerting mechanisms reliant on this metric data may experience the issue too.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-04-25T21:19:56+00:00","modified":"2024-04-25T21:58:10+00:00","when":"2024-04-25T21:19:56+00:00","text":"Summary: Google Cloud Monitoring experiencing intermittent issues with metric availability\nDescription: We are experiencing an issue with Cloud Monitoring beginning at Thursday, 2024-04-25 12:30 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-04-25 15:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Multiple cloud services experiencing intermittent issues with metric availability\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-04-26T20:05:24+00:00","modified":"2024-04-26T20:08:05+00:00","when":"2024-04-26T20:05:24+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 25 April 2024 10:30\n**Incident End:** 25 April 2024 14:00\n**Duration:** 3 hours, 30 minutes\n**Affected Services and Features:**\n- Google Bigtable - Monitoring metrics\n- Cloud Spanner - Monitoring metrics\n**Regions/Zones:** US regions\n**Description:**\nGoogle Cloud Monitoring experienced issues with metric data availability impacting metric data for Cloud Bigtable and Cloud Spanner for a duration of approximately 3 hours, 30 minutes. 
The issue was limited to Cloud Bigtable and Cloud Spanner resources in US regions.\nFrom preliminary analysis, the root cause of the issue is a code change in Google Cloud Monitoring that caused delays in processing metric data and the timely delivery of those metrics to Google Cloud Customers.\nThe issue was fully mitigated once our engineers rolled back the code change.\n**Customer Impact:**\n- Customers may have experienced telemetry delays and in some cases metric gaps in their monitoring dashboards for the affected timeline. GCP and third-party services consuming these metrics on Google Cloud Monitoring API may also have experienced metric gaps, including those dependent on metrics for autoscaling.\n---","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Monitoring","id":"3zaaDb7antc73BM1UAVT"},{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"}],"uri":"incidents/UMy7btMrvSjDLd9zkMRK","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"8nARuhS5t4wLSNUuqq7j","number":"7991235696029805853","begin":"2024-04-25T04:21:35+00:00","created":"2024-04-25T04:51:12+00:00","end":"2024-04-25T17:38:20+00:00","modified":"2024-04-25T17:38:27+00:00","external_desc":"Ingestion delays for a small number of Third Party API Feeds while using Chronicle Security in US Multi-regions","updates":[{"created":"2024-04-25T17:38:20+00:00","modified":"2024-04-25T17:38:27+00:00","when":"2024-04-25T17:38:20+00:00","text":"The issue with Chronicle Security has been resolved as of Thursday, 2024-04-25 10:37 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-25T16:57:59+00:00","modified":"2024-04-25T17:38:27+00:00","when":"2024-04-25T16:57:59+00:00","text":"Summary: Ingestion delays for a small number of Third Party API Feeds while using Chronicle Security in US Multi-regions\nDescription: Mitigation work is taking more time than expected and is currently underway by our engineering team.\nWe will provide more information by Thursday, 2024-04-25 11:30 US/Pacific.\nDiagnosis: Some Chronicle Security customers in the US may notice ingestion delays for Third Party API Feeds. Our investigation suggests it is impacting a very small number of feeds.\nThe delays in ingested data may surface as delays in detections and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-25T15:30:58+00:00","modified":"2024-04-25T16:57:59+00:00","when":"2024-04-25T15:30:58+00:00","text":"Summary: Ingestion delays for a small number of Third Party API Feeds while using Chronicle Security in US Multi-regions\nDescription: Mitigation work is taking more time than expected and is currently underway by our engineering team.\nWe will provide more information by Thursday, 2024-04-25 10:00 US/Pacific.\nDiagnosis: Some Chronicle Security customers in the US may notice ingestion delays for Third Party API Feeds. 
Our investigation suggests it is impacting a very small number of feeds.\nThe delays in ingested data may surface as delays in detections and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-25T13:52:19+00:00","modified":"2024-04-25T15:30:58+00:00","when":"2024-04-25T13:52:19+00:00","text":"Summary: Ingestion delays for a small number of Third Party API Feeds while using Chronicle Security in US Multi-regions\nDescription: Mitigation work is taking more time than expected and is currently underway by our engineering team.\nWe will provide more information by Thursday, 2024-04-25 08:30 US/Pacific.\nDiagnosis: Some Chronicle Security customers in the US may notice ingestion delays for Third Party API Feeds. Our investigation suggests it is impacting a very small number of feeds.\nThe delays in ingested data may surface as delays in detections and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-25T10:50:30+00:00","modified":"2024-04-25T13:52:19+00:00","when":"2024-04-25T10:50:30+00:00","text":"Summary: Ingestion delays for a small number of Third Party API Feeds while using Chronicle Security in US Multi-regions\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Thursday, 2024-04-25 07:00 US/Pacific.\nWe will provide more information by Thursday, 2024-04-25 07:00 US/Pacific.\nDiagnosis: Some Chronicle Security customers in the US may notice ingestion delays for Third Party API Feeds. Our investigation suggests it is impacting a very small number of feeds.\nThe delays in ingested data may surface as delays in detections and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-25T07:01:34+00:00","modified":"2024-04-25T10:50:30+00:00","when":"2024-04-25T07:01:34+00:00","text":"Summary: Ingestion delays for a small number of Third Party API Feeds while using Chronicle Security in US Multi-regions\nDescription: We are experiencing an issue with Chronicle Security beginning at Wednesday, 2024-04-24 17:39 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-04-25 04:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Some Chronicle Security customers in the US may notice ingestion delays for Third Party API Feeds. 
Our investigation suggests it is impacting a very small number of feeds.\nThe delays in ingested data may surface as delays in detections and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-25T06:25:43+00:00","modified":"2024-04-25T07:01:34+00:00","when":"2024-04-25T06:25:43+00:00","text":"Summary: Ingestion delays for Third Party API Feeds while using Chronicle Security in US Multi-regions\nDescription: We are experiencing an issue with Chronicle Security beginning at Wednesday, 2024-04-24 17:39 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-04-25 04:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Some Chronicle Security customers in the US may notice ingestion delays for Third Party API Feeds. The delays in ingested data may surface as delays in detections and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-25T04:51:08+00:00","modified":"2024-04-25T06:27:14+00:00","when":"2024-04-25T04:51:08+00:00","text":"Summary: Ingestion delays for Third Party API Feeds for Chronicle customers in the US\nDescription: We are experiencing an issue with Chronicle Security beginning at Wednesday, 2024-04-24 17:39 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-04-24 23:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Some Chronicle customers in the US may notice ingestion delays for Third Party API Feeds. 
The delays in ingested data may surface as delays in detections and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-04-25T17:38:20+00:00","modified":"2024-04-25T17:38:27+00:00","when":"2024-04-25T17:38:20+00:00","text":"The issue with Chronicle Security has been resolved as of Thursday, 2024-04-25 10:37 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/8nARuhS5t4wLSNUuqq7j","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"d36j7unm1PYkiKYBsLj6","number":"7219027735350535734","begin":"2024-04-24T17:19:17+00:00","created":"2024-04-24T17:46:17+00:00","end":"2024-04-24T21:43:30+00:00","modified":"2024-04-24T21:43:34+00:00","external_desc":"Cloud Source Repositories repo administration operations (SourceRepo API) are unavailable","updates":[{"created":"2024-04-24T21:43:30+00:00","modified":"2024-04-24T21:43:35+00:00","when":"2024-04-24T21:43:30+00:00","text":"The issue with Cloud Source Repositories has been resolved for all affected users as of Wednesday, 2024-04-24 14:34 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-24T20:12:46+00:00","modified":"2024-04-24T21:43:34+00:00","when":"2024-04-24T20:12:46+00:00","text":"Summary: Cloud Source Repositories repo administration operations (SourceRepo API) are unavailable\nDescription: Mitigation work is currently underway by our engineering team and they are working on pushing out a fix to production.\nThe mitigation is expected to complete by Wednesday, 2024-04-24 15:00 US/Pacific.\nWe will provide more information by Wednesday, 2024-04-24 15:30 US/Pacific.\nDiagnosis: Cloud Source Repository users who use the SourceRepo API are unable to make changes to repository metadata (create/update/delete).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-04-24T18:51:06+00:00","modified":"2024-04-24T20:12:46+00:00","when":"2024-04-24T18:51:06+00:00","text":"Summary: Cloud Source Repositories repo administration operations (SourceRepo API) are unavailable\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-04-24 13:00 US/Pacific.\nDiagnosis: Cloud Source Repository users who use the SourceRepo API are unable to make changes to repository metadata (create/update/delete).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-04-24T18:24:37+00:00","modified":"2024-04-24T18:51:06+00:00","when":"2024-04-24T18:24:37+00:00","text":"Summary: Cloud Source Repositories repo administration operations (SourceRepo API) are unavailable\nDescription: We are experiencing an issue with Cloud Source Repositories.\nOur engineering team continues to investigate the issue to determine a mitigation strategy.\nWe will provide more information by Wednesday, 2024-04-24 
12:00 US/Pacific.\nDiagnosis: Cloud Source Repository users who use the SourceRepo API are unable to make changes to repository metadata (create/update/delete).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-04-24T18:06:20+00:00","modified":"2024-04-24T18:24:37+00:00","when":"2024-04-24T18:06:20+00:00","text":"Summary: Cloud Source Repository (SourceRepo API) is unavailable\nDescription: We are experiencing an issue with Cloud Source Repositories.\nOur engineering team is investigating the issue to determine a mitigation strategy.\nWe will provide more information by Wednesday, 2024-04-24 11:45 US/Pacific.\nDiagnosis: Cloud Source Repository users who use the SourceRepo API/RepositoryControlService are unable to make changes to metadata (create/update/delete).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-04-24T17:46:12+00:00","modified":"2024-04-24T18:06:20+00:00","when":"2024-04-24T17:46:12+00:00","text":"Summary: Cloud Source Repository (SourceRepo API) is unavailable\nDescription: We are experiencing an issue with Cloud Source Repositories API.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-04-24 11:20 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Cloud Source Repository users who use the SourceRepo API/RepositoryControlService are unable to make changes to metadata (create/update/delete).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-04-24T21:43:30+00:00","modified":"2024-04-24T21:43:35+00:00","when":"2024-04-24T21:43:30+00:00","text":"The issue with Cloud Source Repositories has been resolved for all affected users as of Wednesday, 2024-04-24 14:34 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"},{"title":"Cloud Source Repositories","id":"B2EfFwmTPqzAqHmnpHvm"}],"uri":"incidents/d36j7unm1PYkiKYBsLj6","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"v3Jve6VgYqC3iqNFYrDb","number":"6781612415568866031","begin":"2024-04-23T20:24:45+00:00","created":"2024-04-23T21:24:42+00:00","end":"2024-04-23T22:53:41+00:00","modified":"2024-04-23T22:53:43+00:00","external_desc":"Some customers may receive errors while trying to access the development Apigee portal.","updates":[{"created":"2024-04-23T22:53:41+00:00","modified":"2024-04-23T22:53:45+00:00","when":"2024-04-23T22:53:41+00:00","text":"The issue with Apigee has been resolved for all affected customers as of Tuesday, 2024-04-23 15:15 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-23T21:24:35+00:00","modified":"2024-04-23T22:53:43+00:00","when":"2024-04-23T21:24:35+00:00","text":"Summary: Some customers may receive errors while trying to access the development Apigee portal.\nDescription: We are experiencing an issue with Apigee beginning at Tuesday, 2024-04-23 12:25 US/Pacific.\nOur engineering 
team has mitigated the issue and is currently monitoring the situation for any recurrence.\nWe will provide an update by Tuesday, 2024-04-23 16:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may receive HTML errors instead of correct data while trying to access the Apigee portal.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-04-23T22:53:41+00:00","modified":"2024-04-23T22:53:45+00:00","when":"2024-04-23T22:53:41+00:00","text":"The issue with Apigee has been resolved for all affected customers as of Tuesday, 2024-04-23 15:15 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"9Y13BNFy4fJydvjdsN3X","service_name":"Apigee","affected_products":[{"title":"Apigee","id":"9Y13BNFy4fJydvjdsN3X"}],"uri":"incidents/v3Jve6VgYqC3iqNFYrDb","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi
(asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"AessYzCE2FaA6yGCHgyw","number":"17015898597561278069","begin":"2024-04-23T15:46:00+00:00","created":"2024-04-23T18:55:47+00:00","end":"2024-04-24T07:23:00+00:00","modified":"2024-04-25T00:41:32+00:00","external_desc":"Chronicle Security - Service Issues","updates":[{"created":"2024-04-25T00:40:55+00:00","modified":"2024-04-25T00:41:32+00:00","when":"2024-04-25T00:40:55+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 23 April 2024 08:46\n**Incident End:** 24 April 2024 00:23\n**Duration:** 15 hours, 37 minutes\n**Affected Services and Features:** Chronicle SIEM\n**Regions/Zones:** US multi-region\n**Description:**\nSome Chronicle customers in the US may have noticed ingestion delays for Third Party Feeds. Delays in the ingested data surfaced as delays in detections and impacted other downstream product features.
The impact lasted for 15 hours, 37 minutes.\nFrom preliminary analysis, the root cause of the issue is that the service responsible for API transfers was rendered partially unavailable due to a combination of crashes and longer cache refresh times caused by overload.\n**Customer Impact:**\n* Some Chronicle customers in the US may have noticed ingestion delays for Third Party API Feeds [1]\n* Any systematic detections dependent on these logs would also be delayed proportionately.\n* The data would automatically catch up post mitigation.\n**Reference(s):**\n[1] - https://cloud.google.com/chronicle/docs/reference/feed-management-api#api-log-types","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-24T07:29:18+00:00","modified":"2024-04-25T00:40:55+00:00","when":"2024-04-24T07:29:18+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Wednesday, 2024-04-24 00:15 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-23T22:47:55+00:00","modified":"2024-04-24T07:29:21+00:00","when":"2024-04-23T22:47:55+00:00","text":"Summary: Chronicle Security - Service Issues\nDescription: Our engineering team has rolled out a mitigation to all impacted systems after which the ingestion backlog has been processed fully. Any remaining downstream impact is expected to be resolved thereafter.\nThe team is currently monitoring the mitigation applied to ensure extended stability.\nWe will provide an update by Wednesday, 2024-04-24 00:30 US/Pacific with current details.\nDiagnosis: Some Chronicle customers in the US may have noticed ingestion delays for Third Party API Feeds.\nThe delays in ingested data may have surfaced as delays in detection and other downstream product features.\nWorkaround: None required.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-23T21:39:26+00:00","modified":"2024-04-23T22:47:55+00:00","when":"2024-04-23T21:39:26+00:00","text":"Summary: We are experiencing an issue with Chronicle Security.\nDescription: Upon further investigation, our engineering team has identified a mitigation strategy which is being rolled out to impacted systems.\nWe will provide an update by Tuesday, 2024-04-23 16:00 US/Pacific with current details.\nDiagnosis: Some Chronicle customers in the US may notice ingestion delays for Third Party API Feeds.\nThe delays in ingested data may surface as delays in detection and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-23T20:30:51+00:00","modified":"2024-04-23T21:39:26+00:00","when":"2024-04-23T20:30:51+00:00","text":"Summary: We are experiencing an issue with Chronicle Security.\nDescription: Our engineering team has determined that further investigation is required to identify the root cause and mitigate the issue.\nWe will provide an update by Tuesday, 2024-04-23 14:30 US/Pacific with current details.\nDiagnosis: Some Chronicle customers in the US may notice ingestion delays for certain Feeds.
The delays in ingested data may surface as delays in detection and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-23T20:29:41+00:00","modified":"2024-04-23T20:31:00+00:00","when":"2024-04-23T20:29:41+00:00","text":"Summary: We are experiencing an issue with Chronicle Security.\nDescription: Our engineering team has determined that further investigation is required to mitigate the issue.\nWe will provide an update by Tuesday, 2024-04-23 14:30 US/Pacific with current details.\nDiagnosis: Some Chronicle customers in the US may notice ingestion delays for certain Feeds. The delays in ingested data may surface as delays in detection and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-23T19:37:29+00:00","modified":"2024-04-23T20:29:41+00:00","when":"2024-04-23T19:37:29+00:00","text":"Summary: We are experiencing an issue with Chronicle Security.\nDescription: Our engineering team has determined that further investigation is required to mitigate the issue.\nWe will provide an update by Tuesday, 2024-04-23 13:30 US/Pacific with current details.\nDiagnosis: Some Chronicle customers in the US may notice ingestion delays for certain Feeds. The delays in ingested data may surface as delays in detection and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-23T19:07:33+00:00","modified":"2024-04-23T19:37:29+00:00","when":"2024-04-23T19:07:33+00:00","text":"Summary: We are experiencing an issue with Chronicle Security.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2024-04-23 13:09 US/Pacific.\nDiagnosis: Some Chronicle customers in the US may notice ingestion delays for certain Feeds. The delays in ingested data may surface as delays in detection and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-04-23T18:55:40+00:00","modified":"2024-04-23T19:07:33+00:00","when":"2024-04-23T18:55:40+00:00","text":"Summary: We are experiencing an issue with Chronicle Security.\nDescription: We are experiencing an issue with Chronicle Security.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-04-23 12:51 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Some Chronicle customers in the US may notice ingestion delays for certain Feeds. The delays in ingested data may surface as delays in detection and other downstream product features.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-04-25T00:40:55+00:00","modified":"2024-04-25T00:41:32+00:00","when":"2024-04-25T00:40:55+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. 
Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 23 April 2024 08:46\n**Incident End:** 24 April 2024 00:23\n**Duration:** 15 hours, 37 minutes\n**Affected Services and Features:** Chronicle SIEM\n**Regions/Zones:** US multi-region\n**Description:**\nSome Chronicle customers in the US may have noticed ingestion delays for Third Party Feeds. Delays in the ingested data surfaced as delays in detections and impacted other downstream product features. The impact lasted for 15 hours, 37 minutes.\nFrom preliminary analysis, the root cause of the issue is that the service responsible for API transfers was rendered partially unavailable due to a combination of crashes and longer cache refresh times caused by overload.\n**Customer Impact:**\n* Some Chronicle customers in the US may have noticed ingestion delays for Third Party API Feeds [1]\n* Any systematic detections dependent on these logs would also be delayed proportionately.\n* The data would automatically catch up post mitigation.\n**Reference(s):**\n[1] - https://cloud.google.com/chronicle/docs/reference/feed-management-api#api-log-types","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/AessYzCE2FaA6yGCHgyw","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"P43dHoyBajdZ82B1kubv","number":"13492242596738600854","begin":"2024-04-22T16:00:00+00:00","created":"2024-04-22T21:11:41+00:00","end":"2024-04-23T05:18:00+00:00","modified":"2024-04-23T21:48:30+00:00","external_desc":"Batch - Service Issues in us-central1","updates":[{"created":"2024-04-23T21:48:07+00:00","modified":"2024-04-23T21:48:30+00:00","when":"2024-04-23T21:48:07+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 22 April 2024 09:00\n**Incident End:** 22 April 2024 22:18\n**Duration:** 13 hours, 18 minutes\n**Affected Services and Features:**\nGoogle Batch (new batch job creation, scheduling of queued jobs)\n**Regions/Zones:** us-central1\n**Description:**\nGoogle Batch experienced an issue where almost all new incoming jobs were stuck in the Queued state in the us-central1 region for a period of 8 hours, 37 minutes.\nFrom our preliminary analysis, the root cause of the issue is increased transaction contention, resulting in a significant latency increase. This effect accumulated and further slowed down the system's processing and resulted in the jobs remaining in Queued status instead of progressing to Scheduled status.\nGoogle engineers were alerted by our internal monitoring and immediately started an investigation.
Once the nature of impact was clear, our engineering team mitigated the impact by disabling new job scheduling, and cleared the stuck jobs by restarting them.\n**Customer Impact:**\nCustomers from the affected region trying to use Batch experienced delays while changing the status of their batch jobs from Queued to Scheduled. As a workaround, impacted customers were advised to try submitting their jobs from another region wherever possible.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-23T05:23:52+00:00","modified":"2024-04-23T21:48:07+00:00","when":"2024-04-23T05:23:52+00:00","text":"The issue with Batch has been resolved for all affected users as of Monday, 2024-04-22 22:18 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-23T04:24:35+00:00","modified":"2024-04-23T07:40:12+00:00","when":"2024-04-23T04:24:35+00:00","text":"Summary: Batch - Service Issues in us-central1\nDescription: We've received a report of an issue with Batch as of Monday, 2024-04-22 09:00 US/Pacific.\nThis issue is impacting all new incoming requests for batch jobs in the 'us-central1' region.\nOur engineering team continues to work on the mitigation strategy identified. There is no ETA for the completion of the mitigation activities.\nWe will provide more information by Monday, 2024-04-22 23:30 US/Pacific.\nDiagnosis: Customers trying to use Batch would experience delays while changing the status of their batch jobs from Queued to Scheduled.\nWorkaround: Customers could try submitting their jobs from another region as a workaround.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-04-23T01:16:57+00:00","modified":"2024-04-23T07:39:53+00:00","when":"2024-04-23T01:16:57+00:00","text":"Summary: Batch - Service Issues in us-central1\nDescription: We've received a report of an issue with Batch as of Monday, 2024-04-22 09:00 US/Pacific.\nThis issue is impacting all new incoming requests for batch jobs in us-central1.\n**Our engineering team continues to work on the mitigation strategy identified. There is no ETA for the completion of the mitigation activities.**\n**We will provide more information by Monday, 2024-04-22 21:30 US/Pacific.**\nDiagnosis: Customers trying to use Batch would experience delays while changing the status of their batch jobs from Queued to Scheduled.\nWorkaround: Customers could try submitting their jobs from another region as a workaround.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-04-22T23:24:33+00:00","modified":"2024-04-23T07:39:27+00:00","when":"2024-04-22T23:24:33+00:00","text":"Summary: Batch - Service Issues in us-central1\nDescription: We've received a report of an issue with Batch as of Monday, 2024-04-22 09:00 US/Pacific.\nThis issue is impacting all new incoming requests for batch jobs in us-central1.\n**Our engineering team has identified a mitigation strategy. 
The mitigation work is currently underway.**\nWe will provide more information by Monday, 2024-04-22 18:30 US/Pacific.\nDiagnosis: Customers trying to use Batch would experience delays while changing the status of their batch jobs from Queued to Scheduled.\nWorkaround: Customers could try submitting their jobs from another region as a workaround.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-04-22T22:09:57+00:00","modified":"2024-04-23T07:39:02+00:00","when":"2024-04-22T22:09:57+00:00","text":"Summary: Batch - Service Issues in us-central1\nDescription: We've received a report of an issue with Batch as of Monday, 2024-04-22 09:00 US/Pacific.\nThis issue is impacting all new incoming requests for batch jobs in us-central1.\nOur engineering team continues to investigate the issue at hand to ascertain a mitigation plan.\nWe will provide more information by Monday, 2024-04-22 16:30 US/Pacific.\nDiagnosis: Customers trying to use Batch would experience delays while changing the status of their batch jobs from Queued to Scheduled.\nWorkaround: Customers could try submitting their jobs from another region as a workaround.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-04-22T21:11:38+00:00","modified":"2024-04-23T07:38:39+00:00","when":"2024-04-22T21:11:38+00:00","text":"Summary: Batch - Service Issues in us-central1\nDescription: We've received a report of an issue with Batch as of Monday, 2024-04-22 09:00 US/Pacific.\nThis issue is impacting all new incoming requests for batch jobs in us-central1.\nOur engineering team is investigating the issue at hand to ascertain a mitigation plan.\nWe will provide more information by Monday, 2024-04-22 15:15 US/Pacific\nDiagnosis: Customers trying to use Batch would experience delays while changing the status of their batch jobs from Queued to Scheduled.\nWorkaround: Customers could try submitting their jobs from another region as a workaround.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-04-23T21:48:07+00:00","modified":"2024-04-23T21:48:30+00:00","when":"2024-04-23T21:48:07+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 22 April 2024 09:00\n**Incident End:** 22 April 2024 22:18\n**Duration:** 13 hours, 18 minutes\n**Affected Services and Features:**\nGoogle Batch (new batch job creation, scheduling of queued jobs)\n**Regions/Zones:** us-central1\n**Description:**\nGoogle Batch experienced an issue where almost all new incoming jobs were stuck in the Queued state in the us-central1 region for a period of 8 hours, 37 minutes.\nFrom our preliminary analysis, the root cause of the issue is increased transaction contention, resulting in a significant latency increase.
This effect accumulated and further slowed down the system's processing and resulted in the jobs remaining in Queued status instead of progressing to Scheduled status.\nGoogle engineers were alerted by our internal monitoring and immediately started an investigation. Once the nature of impact was clear, our engineering team mitigated the impact by disabling new job scheduling, and cleared the stuck jobs by restarting them.\n**Customer Impact:**\nCustomers from the affected region trying to use Batch experienced delays while changing the status of their batch jobs from Queued to Scheduled. As a workaround, impacted customers were advised to try submitting their jobs from another region wherever possible.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"8XjnU88URVtZrAL8KRvA","service_name":"Batch","affected_products":[{"title":"Batch","id":"8XjnU88URVtZrAL8KRvA"}],"uri":"incidents/P43dHoyBajdZ82B1kubv","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"4zoFRGwdwyDHxHVGwMUX","number":"18036169777503085015","begin":"2024-04-19T19:39:46+00:00","created":"2024-04-19T20:54:52+00:00","end":"2024-04-19T22:03:09+00:00","modified":"2024-04-19T22:03:16+00:00","external_desc":"Google Distributed Cloud Edge customers may have experienced failures with creation, update, and deletion of\nLocal Control Plane clusters and Nodepool lifecycle operations.","updates":[{"created":"2024-04-19T22:03:09+00:00","modified":"2024-04-19T22:03:17+00:00","when":"2024-04-19T22:03:09+00:00","text":"The issue with Google Distributed Cloud Edge has been resolved as of Friday, 2024-04-19 14:58 US/Pacific.\nWe understand that this issue impacted your ability to access and use our services; we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-19T20:54:45+00:00","modified":"2024-04-19T22:03:16+00:00","when":"2024-04-19T20:54:45+00:00","text":"Summary: Google Distributed Cloud Edge customers may experience failures with creation, update, and deletion of\nLocal Control Plane clusters and Nodepool lifecycle operations.\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Friday, 2024-04-19 15:00 US/Pacific.\nWe will provide more information by Friday, 2024-04-19 16:00 US/Pacific.\nDiagnosis: Existing clusters are unaffected, but they currently cannot be updated via the Edge Container API.\nExisting workloads should not be affected, and LCP clusters can still be interacted with via the Kubernetes API without any issues.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]}],"most_recent_update":{"created":"2024-04-19T22:03:09+00:00","modified":"2024-04-19T22:03:17+00:00","when":"2024-04-19T22:03:09+00:00","text":"The issue with Google Distributed Cloud Edge has been resolved as of Friday, 2024-04-19 14:58 US/Pacific.\nWe understand that this issue impacted your ability to access and use our services; we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nThank you for choosing
us.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"JKyM3LJTqgETjRCvSK6w","service_name":"Google Distributed Cloud Edge","affected_products":[{"title":"Google Distributed Cloud Edge","id":"JKyM3LJTqgETjRCvSK6w"}],"uri":"incidents/4zoFRGwdwyDHxHVGwMUX","currently_affected_locations":[],"previously_affected_locations":[{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"id":"xTu9uZq6y7nLPdxAwTkE","number":"6568892905384083767","begin":"2024-04-18T15:32:33+00:00","created":"2024-04-18T16:02:58+00:00","end":"2024-04-18T17:01:06+00:00","modified":"2024-04-18T17:02:33+00:00","external_desc":"Google Container Analysis API customers may experience errors while provisioning new instances from Chronicle SOAR","updates":[{"created":"2024-04-18T17:01:06+00:00","modified":"2024-04-18T17:01:11+00:00","when":"2024-04-18T17:01:06+00:00","text":"The issue with Chronicle SOAR has been resolved for all affected users as of Thursday, 2024-04-18 09:58 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-18T16:02:44+00:00","modified":"2024-04-18T17:02:33+00:00","when":"2024-04-18T16:02:44+00:00","text":"Summary: Google Container Analysis API customers may experience errors while provisioning new instances from Chronicle SOAR\nDescription: We are experiencing an issue with Chronicle SOAR beginning at Thursday, 2024-04-18 08:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-04-18 10:14 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers may get permission denied error when calling Container Analysis API.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"}]}],"most_recent_update":{"created":"2024-04-18T17:01:06+00:00","modified":"2024-04-18T17:01:11+00:00","when":"2024-04-18T17:01:06+00:00","text":"The issue with Chronicle SOAR has been resolved for all affected users as of Thursday, 2024-04-18 09:58 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"GTT16Lf72XZKWArC9VxA","service_name":"Chronicle SOAR","affected_products":[{"title":"Chronicle
SOAR","id":"GTT16Lf72XZKWArC9VxA"}],"uri":"incidents/xTu9uZq6y7nLPdxAwTkE","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"id":"CDhjPFgeK6vHanTDaNpj","number":"8864037395439842190","begin":"2024-04-17T02:52:06+00:00","created":"2024-04-17T03:25:19+00:00","end":"2024-04-17T04:46:31+00:00","modified":"2024-04-17T04:46:33+00:00","external_desc":"AppSheet automations/bots are not sending AMP emails, and the preview of embedded views in the Editor is displaying errors.","updates":[{"created":"2024-04-17T04:46:31+00:00","modified":"2024-04-17T04:46:34+00:00","when":"2024-04-17T04:46:31+00:00","text":"The issue with AppSheet has been resolved for all affected users as of Tuesday, 2024-04-16 21:15 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-17T04:17:34+00:00","modified":"2024-04-17T04:46:33+00:00","when":"2024-04-17T04:17:34+00:00","text":"Summary: AppSheet automations/bots are not sending AMP emails, and the preview of embedded views in the Editor is displaying errors.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2024-04-16 22:30 US/Pacific.\nDiagnosis: This affects customers using AMP emails (see https://support.google.com/appsheet/answer/11511240?hl=en). Non-AMP emails are unaffected.\nUsers are getting an error in the Automation bot, which leads to the AMP email not being sent, while previewing the embedded view in AppSheet gives the below error:\nError: Unable to load DLL 'ClearScriptV8.win-x64.dll' or one of its dependencies: The specified module could not be found.
(0x8007007E)\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-04-17T03:25:15+00:00","modified":"2024-04-17T04:17:34+00:00","when":"2024-04-17T03:25:15+00:00","text":"Summary: AppSheet automations/bots are not sending AMP emails, and the preview of embedded views in the Editor is displaying errors.\nDescription: We are experiencing an issue with AppSheet beginning at Tuesday, 2024-04-16 13:30 US/Pacific.\nMitigation work is currently underway by our engineering team.\nWe will provide an update by Tuesday, 2024-04-16 21:25 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: This affects customers using AMP emails (see https://support.google.com/appsheet/answer/11511240?hl=en). Non-AMP emails are unaffected.\nUsers are getting an error in the Automation bot, which leads to the AMP email not being sent, while previewing the embedded view in AppSheet gives the below error:\nError: Unable to load DLL 'ClearScriptV8.win-x64.dll' or one of its dependencies: The specified module could not be found. (0x8007007E)\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2024-04-17T04:46:31+00:00","modified":"2024-04-17T04:46:34+00:00","when":"2024-04-17T04:46:31+00:00","text":"The issue with AppSheet has been resolved for all affected users as of Tuesday, 2024-04-16 21:15 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FWjKi5U7KX4FUUPThHAJ","service_name":"AppSheet","affected_products":[{"title":"AppSheet","id":"FWjKi5U7KX4FUUPThHAJ"}],"uri":"incidents/CDhjPFgeK6vHanTDaNpj","currently_affected_locations":[],"previously_affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"SPSqWfXYYSGzBmQCXNA4","number":"5212456933930716916","begin":"2024-04-16T09:20:00+00:00","created":"2024-04-17T08:45:53+00:00","end":"2024-04-17T10:40:00+00:00","modified":"2024-04-19T17:21:21+00:00","external_desc":"Creation and Upgrades are failing for some Environments while using Cloud Composer 2","updates":[{"created":"2024-04-19T17:21:21+00:00","modified":"2024-04-19T17:21:21+00:00","when":"2024-04-19T17:21:21+00:00","text":"# Incident Report\n## Summary\nBetween 16 and 17 April 2024, Cloud Composer users experienced an elevated failure rate when creating, resizing or upgrading to newer versions of Cloud Composer 2 Environments with “Private IP” configuration for a duration of 1 day, 1 hour and 20 minutes.\nExisting Private IP environments continued to operate normally if they were not upgraded or resized.\nTo our Cloud Composer customers whose businesses were impacted during this disruption: we sincerely apologize.
This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThe root cause was an issue introduced by a recent change to the latest stable Container Operating System (COS) image used by Cloud Composer in one of its workloads.\nThe new version of the COS image (M113) moved from the iptables-legacy package to iptables-nft as the default, which broke the iptables handling of Konlet (the system that executes containers).\n## Remediation and Prevention\nGoogle engineers were alerted to the outage via our monitoring tools on 16 April at 09:17 US/Pacific and immediately started an investigation. Once the nature and scope of the issue became clear, Google engineers reverted the recently introduced rollout.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n- Improve our monitoring systems to ensure that, in similar scenarios in the future, we would be notified more quickly, which could help resolve an issue earlier.\n- Cloud Composer team will modify the approach of ingesting new container base images to regionalize it and be independent of the ongoing rollouts of the base image versions.\n- The process of intake and testing of container base images will be extended to make the testing more extensive.\n## Detailed Description of Impact\nFrom 16 April 2024 02:20 to 17 April 2024 03:40 US/Pacific, impacted customers might have experienced issues with:\n- Creating new Composer Environments with Private IP configuration failed. In total we observed \u003c 200 customer projects where creations failed due to the issue.\n- Upgrades and resize operations for existing Composer Environments in the Private IP configuration failed. In total we observed \u003c 20 environments with failed upgrades due to the issue.\n- Environments in Private IP configuration might have encountered issues with scaling for increased database-intensive workloads.\n- Existing Composer 2 Environments, if not modified, functioned correctly.\n- Customers who triggered an upgrade during the outage were left with an unhealthy Composer environment, including workloads with failed upgrades, which could have caused ongoing performance issues.\n- Once the issue was mitigated, the impacted environments returned to a healthy state.\nDuring the outage, customers were asked to refrain from performing upgrade operations until mitigation had been confirmed.\n**Additional Information for Customers:**\n- Fewer than 20 of the customer environments encountered upgrade failures and all of the customer environments with “Private IP” configuration that experienced upgrade failures during the incident will now upgrade successfully.\nIf you are one of the customers that experienced upgrade failure during the incident and still continue to have issues with further upgrades, please reach out to Google Cloud Support using https://cloud.google.com/support for assistance with recovery.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-17T16:39:34+00:00","modified":"2024-04-19T17:21:21+00:00","when":"2024-04-17T16:39:34+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below.
Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 16 April 2024 02:20\n**Incident End:** 17 April 2024 03:40\n**Duration:** 1 day, 1 hour, 20 minutes\n**Affected Services and Features:**\nGoogle Cloud Composer\n**Regions/Zones:**\nGlobal\n**Description:**\nGoogle Cloud Composer users experienced an elevated failure rate when creating, resizing, or upgrading to newer versions of Cloud Composer 2 Environments with “Private IP” configuration. This was due to an inadvertent issue introduced by a recent change to the latest stable Container Operating System (COS) image used by Cloud Composer in one of its workloads.\nExisting Private IP environments continued to operate normally if they were not upgraded or resized.\nGoogle engineers executed a rollback of the change to mitigate the issue on 17 April 2024 at 03:40 US/Pacific.\nGoogle will complete a full Incident Report in the following days that will provide a detailed root cause.\n**Customer Impact:**\n- Impacted users experienced issues with creating new Composer Environments, upgrades, and resize operations for existing Composer Environments in Private IP configuration.\n- Composer environments with failed upgrades were unhealthy and workloads could fail or experience performance issues\n- Environments might have encountered issues with scaling for increased database-intensive workloads\n**Additional details:**\n- Less than 0.25% of the customer environments encountered upgrade failures and some of the customer environments with “Private IP” configuration that experienced upgrade failures during the incident will now upgrade successfully.\n- A few environments may still experience failures on further upgrades. These environments are still functioning normally and will continue serving workloads without any issues. Our engineers are working internally to identify these environments and take additional actions so that they can be further upgraded successfully. These actions are expected to be completed by early next week.\nIf you are one of the customers that experienced upgrade failure during the incident and still continue to have issues with further upgrades, please reach out to Google Cloud Support using https://cloud.google.com/support for assistance with recovery. Alternatively, you can recreate these environments.\n--------","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-17T11:41:59+00:00","modified":"2024-04-17T16:39:34+00:00","when":"2024-04-17T11:41:59+00:00","text":"The issue with Google Cloud Composer has been resolved for all affected users as of Wednesday, 2024-04-17 04:09 US/Pacific.\nUsers are now able to create new Composer Environments and upgrade existing ones. Some failed upgrades during the duration of the incident may have been automatically recovered.
If you're still experiencing issues, please contact us via a customer support case for our repair procedure.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-17T10:46:28+00:00","modified":"2024-04-17T11:42:03+00:00","when":"2024-04-17T10:46:28+00:00","text":"Summary: Creation and Upgrades are failing for some Environments while using Cloud Composer 2\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-04-17 05:30 US/Pacific.\nDiagnosis: Impacted customers may experience issues with creating new Composer Environments and upgrades for existing Composer Environments. Existing Composer 2 Environments, if not modified, should function correctly.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-04-17T08:45:50+00:00","modified":"2024-04-17T10:46:28+00:00","when":"2024-04-17T08:45:50+00:00","text":"Summary: Creation and Upgrades are failing for some Environments while using Cloud Composer 2\nDescription: Customers might experience issues with creating or upgrading to newer versions of Cloud Composer 2 Environments. The problem exists for the “Private IP” Composer Environment.\nWe have identified the root cause of the above issue, and are working on a fix.\nUsers are requested to refrain from performing upgrade operations until mitigation has been confirmed.\nWe will provide more information by Wednesday, 2024-04-17 04:00 US/Pacific.\nDiagnosis: Impacted customers may experience issues with creating new Composer Environments and upgrades for existing Composer Environments.
Existing Composer 2 Environments, if not modified, should function correctly.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-04-19T17:21:21+00:00","modified":"2024-04-19T17:21:21+00:00","when":"2024-04-19T17:21:21+00:00","text":"# Incident Report\n## Summary\nBetween 16 and 17 April 2024, Cloud Composer users experienced an elevated failure rate when creating, resizing or upgrading to newer versions of Cloud Composer 2 Environments with “Private IP” configuration for a duration of 1 day, 1 hour and 20 minutes.\nExisting Private IP environments continued to operate normally if they were not upgraded or resized.\nTo our Cloud Composer customers whose businesses were impacted during this disruption: we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThe root cause was an issue introduced by a recent change to the latest stable Container Operating System (COS) image used by Cloud Composer in one of its workloads.\nThe new version of the COS image (M113) moved from the iptables-legacy package to iptables-nft as the default, which broke the iptables handling of Konlet (the system that executes containers).\n## Remediation and Prevention\nGoogle engineers were alerted to the outage via our monitoring tools on 16 April at 09:17 US/Pacific and immediately started an investigation.
Once the nature and scope of the issue became clear, Google engineers reverted the recently introduced rollout.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n- Improve our monitoring systems to ensure that, in similar scenarios in the future, we would be notified more quickly, which could help resolve an issue earlier.\n- Cloud Composer team will modify the approach of ingesting new container base images to regionalize it and be independent of the ongoing rollouts of the base image versions.\n- The process of intake and testing of container base images will be extended to make the testing more extensive.\n## Detailed Description of Impact\nFrom 16 April 2024 02:20 to 17 April 2024 03:40 US/Pacific, impacted customers might have experienced issues with:\n- Creating new Composer Environments with Private IP configuration failed. In total we observed \u003c 200 customer projects where creations failed due to the issue.\n- Upgrades and resize operations for existing Composer Environments in the Private IP configuration failed. In total we observed \u003c 20 environments with failed upgrades due to the issue.\n- Environments in Private IP configuration might have encountered issues with scaling for increased database-intensive workloads.\n- Existing Composer 2 Environments, if not modified, functioned correctly.\n- Customers who triggered an upgrade during the outage were left with an unhealthy Composer environment, including workloads with failed upgrades, which could have caused ongoing performance issues.\n- Once the issue was mitigated, the impacted environments returned to a healthy state.\nDuring the outage, customers were asked to refrain from performing upgrade operations until mitigation had been confirmed.\n**Additional Information for Customers:**\n- Fewer than 20 of the customer environments encountered upgrade failures and all of the customer environments with “Private IP” configuration that experienced upgrade failures during the incident will now upgrade successfully.\nIf you are one of the customers that experienced upgrade failure during the incident and still continue to have issues with further upgrades, please reach out to Google Cloud Support using https://cloud.google.com/support for assistance with recovery.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"YxkG5FfcC42cQmvBCk4j","service_name":"Google Cloud Composer","affected_products":[{"title":"Google Cloud Composer","id":"YxkG5FfcC42cQmvBCk4j"}],"uri":"incidents/SPSqWfXYYSGzBmQCXNA4","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London
(europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"6krbBsY91cACxDxuZwsP","number":"17264442431972761440","begin":"2024-04-15T20:00:00+00:00","created":"2024-04-17T20:58:57+00:00","end":"2024-04-15T20:35:00+00:00","modified":"2024-04-18T17:23:29+00:00","external_desc":"Cloud Pub/Sub customers in europe-north1, southamerica-west1, us-west2 may have experienced permission denied errors for all API methods, including methods used to publish and subscribe.","updates":[{"created":"2024-04-18T17:23:29+00:00","modified":"2024-04-18T17:23:29+00:00","when":"2024-04-18T17:23:29+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 15 April 2024 13:00\n**Incident End:** 15 April 2024 13:35\n**Duration:** 35 minutes\n**Affected Services and Features:**\nCloud Pub/Sub\n**Regions/Zones:** Finland (europe-north1), Santiago (southamerica-west1), Los Angeles (us-west2)\n**Description:**\nCloud Pub/Sub customers in europe-north1, southamerica-west1, us-west2 experienced permission denied errors for all API methods, including methods used to publish and subscribe for a duration of 35 minutes. From preliminary analysis, the root cause of the issue was a bad configuration change which was identified by our internal monitoring tools. The change was immediately rolled back and the impact was fully mitigated at 13:35.\n**Customer Impact:**\nCustomers in the affected regions may have observed permission denied errors for all API methods.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-17T21:00:02+00:00","modified":"2024-04-18T17:23:29+00:00","when":"2024-04-17T21:00:02+00:00","text":"We experienced an issue with Cloud Pub/Sub beginning at Monday, 2024-04-15 13:00 US/Pacific.\nDuring the issue, customers publishing and subscribing in the affected regions may have been impacted. Publishing traffic redirected to the affected regions due to Message Storage Policies may have been impacted.
Control operations may have been affected also.\nThe issue was caused by a bad configuration change and the change was rolled back resolving the issue for all affected users as of Monday, 2024-04-15 13:35 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-17T20:58:54+00:00","modified":"2024-04-17T21:00:08+00:00","when":"2024-04-17T20:58:54+00:00","text":"Summary: Cloud Pub/Sub customers in europe-north1, southamerica-west1, us-west2 may have experienced permission denied errors for all API methods, including methods used to publish and subscribe.\nDescription: We are investigating a potential issue with Google Cloud Pub/Sub.\nWe will provide more information by Wednesday, 2024-04-17 14:15 US/Pacific.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]}],"most_recent_update":{"created":"2024-04-18T17:23:29+00:00","modified":"2024-04-18T17:23:29+00:00","when":"2024-04-18T17:23:29+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 15 April 2024 13:00\n**Incident End:** 15 April 2024 13:35\n**Duration:** 35 minutes\n**Affected Services and Features:**\nCloud Pub/Sub\n**Regions/Zones:** Finland (europe-north1), Santiago (southamerica-west1), Los Angeles (us-west2)\n**Description:**\nCloud Pub/Sub customers in europe-north1, southamerica-west1, us-west2 experienced permission denied errors for all API methods, including methods used to publish and subscribe for a duration of 35 minutes. From preliminary analysis, the root cause of the issue was a bad configuration change which was identified by our internal monitoring tools. 
The change was immediately rolled back and the impact was fully mitigated at 13:35.\n**Customer Impact:**\nCustomers in the affected regions may have observed permission denied errors for all API methods.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"dFjdLh2v6zuES6t9ADCB","service_name":"Google Cloud Pub/Sub","affected_products":[{"title":"Google Cloud Pub/Sub","id":"dFjdLh2v6zuES6t9ADCB"}],"uri":"incidents/6krbBsY91cACxDxuZwsP","currently_affected_locations":[],"previously_affected_locations":[{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"id":"chBKHSir2hAG2qZ2XTWK","number":"11904483107122872306","begin":"2024-04-12T13:54:00+00:00","created":"2024-04-12T14:33:12+00:00","end":"2024-04-12T14:41:00+00:00","modified":"2024-04-12T18:38:01+00:00","external_desc":"Apigee Portals Failing to Load","updates":[{"created":"2024-04-12T18:37:07+00:00","modified":"2024-04-12T18:38:01+00:00","when":"2024-04-12T18:37:07+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 12 April 2024 06:54\n**Incident End:** 12 April 2024 07:41\n**Duration:** 47 minutes\n**Affected Services and Features:**\nApigee - Portals\n**Regions/Zones:** Global\n**Description:**\nApigee portals experienced unavailability and elevated error rates globally for a duration of 47 minutes. From preliminary analysis, the root cause was a transient issue and Google engineers will continue to investigate the full root cause.\nWe will provide an update once the investigation completes.\n**Customer Impact:**\nDuring the incident, customers may have experienced 405 errors and were unable to access their portal.\n---","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-12T14:44:52+00:00","modified":"2024-04-12T18:37:07+00:00","when":"2024-04-12T14:44:52+00:00","text":"The issue with Apigee has been resolved for all affected projects as of Friday, 2024-04-12 07:44 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-12T14:32:54+00:00","modified":"2024-04-12T14:44:56+00:00","when":"2024-04-12T14:32:54+00:00","text":"Summary: We've received a report of an issue with Apigee\nDescription: We are experiencing an issue with Apigee.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-04-12 08:15 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[]}],"most_recent_update":{"created":"2024-04-12T18:37:07+00:00","modified":"2024-04-12T18:38:01+00:00","when":"2024-04-12T18:37:07+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. 
Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 12 April 2024 06:54\n**Incident End:** 12 April 2024 07:41\n**Duration:** 47 minutes\n**Affected Services and Features:**\nApigee - Portals\n**Regions/Zones:** Global\n**Description:**\nApigee portals experienced unavailability and elevated error rates globally for a duration of 47 minutes. From preliminary analysis, the root cause was a transient issue and Google engineers will continue to investigate the full root cause.\nWe will provide an update once the investigation completes.\n**Customer Impact:**\nDuring the incident, customers may have experienced 405 errors and were unable to access their portal.\n---","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"9Y13BNFy4fJydvjdsN3X","service_name":"Apigee","affected_products":[{"title":"Apigee","id":"9Y13BNFy4fJydvjdsN3X"}],"uri":"incidents/chBKHSir2hAG2qZ2XTWK","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas 
(us-west4)","id":"us-west4"}]},{"id":"oK6h8kUWHne2CEycXGVs","number":"14317187444338044722","begin":"2024-04-10T14:39:36+00:00","created":"2024-04-10T15:26:06+00:00","end":"2024-04-10T18:35:32+00:00","modified":"2024-04-10T18:35:35+00:00","external_desc":"We are experiencing an issue with Chronicle Security","updates":[{"created":"2024-04-10T18:35:32+00:00","modified":"2024-04-10T18:35:35+00:00","when":"2024-04-10T18:35:32+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Wednesday, 2024-04-10 11:35 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-10T17:57:04+00:00","modified":"2024-04-10T18:35:35+00:00","when":"2024-04-10T17:57:04+00:00","text":"Summary: We are experiencing an issue with Chronicle Security\nDescription: Mitigation work is ongoing and Engineering is observing some recovery.\nWe do not have an ETA for mitigation at this point and will continue to provide updates on full recovery.\nWe will provide more information by Wednesday, 2024-04-10 12:30 US/Pacific.\nDiagnosis: Chronicle may be processing ingested data slower than normal and detections on recent data may be impacted, causing some customers to experience data freshness issues.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-04-10T17:22:27+00:00","modified":"2024-04-10T17:57:04+00:00","when":"2024-04-10T17:22:27+00:00","text":"Summary: We are experiencing an issue with Chronicle Security\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-04-10 11:15 US/Pacific.\nDiagnosis: Chronicle may be processing ingested data slower than normal and detections on recent data may be impacted, causing some customers to experience data freshness issues.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-04-10T16:18:46+00:00","modified":"2024-04-10T17:22:27+00:00","when":"2024-04-10T16:18:46+00:00","text":"Summary: We are experiencing an issue with Chronicle Security\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-04-10 11:00 US/Pacific.\nDiagnosis: Chronicle may be processing ingested data 
slower than normal and detections on recent data may be impacted, causing some customers to experience data freshness issues.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-04-10T15:26:02+00:00","modified":"2024-04-10T16:18:46+00:00","when":"2024-04-10T15:26:02+00:00","text":"Summary: We are experiencing an issue with Chronicle Security\nDescription: We are experiencing an issue with Chronicle Security.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-04-10 09:21 US/Pacific with current details.\nDiagnosis: Chronicle may be processing ingested data slower than normal and detections on recent data may be impacted, causing some customers to experience data freshness issues.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]}],"most_recent_update":{"created":"2024-04-10T18:35:32+00:00","modified":"2024-04-10T18:35:35+00:00","when":"2024-04-10T18:35:32+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Wednesday, 2024-04-10 11:35 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/oK6h8kUWHne2CEycXGVs","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"id":"NcBsiFKsKXLSpHWU2VXK","number":"13148312295244569958","begin":"2024-04-04T20:15:00+00:00","created":"2024-04-04T22:17:29+00:00","end":"2024-04-04T22:15:00+00:00","modified":"2024-04-05T20:14:01+00:00","external_desc":"AppSheet Databases UI experienced elevated errors when loading the AppSheet Database UI via web 
browsers.","updates":[{"created":"2024-04-05T20:03:50+00:00","modified":"2024-04-05T20:14:01+00:00","when":"2024-04-05T20:03:50+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 4 April, 2024 13:15\n**Incident End:** 4 April, 2024 15:15\n**Duration:** 2 hours\n**Affected Services and Features:**\nAppSheet Databases\n**Regions/Zones:** Global\n**Description:**\nAppSheet experienced increased errors when loading the AppSheet Database UI via web browser globally for a duration of 2 hours. The impact was only at the UI level, applications using the AppSheet as a datasource were not impacted. From preliminary analysis, the root cause of the issue is due to a recent change released in the AppSheet product that prevented users from accessing and managing their data from the UI via web browsers.\nGoogle engineers executed a rollback of the change that introduced the issue which was completed by 15:15 US/Pacific completely resolved the issue.\n**Customer Impact:**\n* All the customers using AppSheet Databases encountered an error “Something has gone wrong” when attempting to access or manage their database data from the UI via web browsers.\n* The impact was only at the UI level. There was no impact on the Apps using AppSheet Databases as their datasource.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-04T22:55:44+00:00","modified":"2024-04-05T20:03:50+00:00","when":"2024-04-04T22:55:44+00:00","text":"The issue with AppSheet has been resolved for all affected projects as of Thursday, 2024-04-04 15:33 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-04T22:17:26+00:00","modified":"2024-04-04T22:55:48+00:00","when":"2024-04-04T22:17:26+00:00","text":"Summary: AppSheet Databases UI is experiencing elevated errors when loading the AppSheet Database UI via web browsers.\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Thursday, 2024-04-04 16:30 US/Pacific.\nWe will provide more information by Thursday, 2024-04-04 17:00 US/Pacific.\nDiagnosis: All the customers using AppSheet Databases would encounter an error “Something has gone wrong” and are unable to manage their database data. However, there is no impact on the Apps using AppSheet Databases as their datasource.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2024-04-05T20:03:50+00:00","modified":"2024-04-05T20:14:01+00:00","when":"2024-04-05T20:03:50+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. 
Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 4 April 2024 13:15\n**Incident End:** 4 April 2024 15:15\n**Duration:** 2 hours\n**Affected Services and Features:**\nAppSheet Databases\n**Regions/Zones:** Global\n**Description:**\nAppSheet experienced increased errors when loading the AppSheet Database UI via web browser globally for a duration of 2 hours. The impact was only at the UI level; applications using AppSheet as a datasource were not impacted. From preliminary analysis, the root cause of the issue was a recent change released in the AppSheet product that prevented users from accessing and managing their data from the UI via web browsers.\nGoogle engineers executed a rollback of the change that introduced the issue; the rollback was completed by 15:15 US/Pacific and completely resolved the issue.\n**Customer Impact:**\n* All the customers using AppSheet Databases encountered an error “Something has gone wrong” when attempting to access or manage their database data from the UI via web browsers.\n* The impact was only at the UI level. There was no impact on the Apps using AppSheet Databases as their datasource.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"FWjKi5U7KX4FUUPThHAJ","service_name":"AppSheet","affected_products":[{"title":"AppSheet","id":"FWjKi5U7KX4FUUPThHAJ"}],"uri":"incidents/NcBsiFKsKXLSpHWU2VXK","currently_affected_locations":[],"previously_affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"TJJTRtYHPtya879vqqvJ","number":"11339241587371214357","begin":"2024-04-04T11:36:53+00:00","created":"2024-04-04T11:50:14+00:00","end":"2024-04-04T12:40:47+00:00","modified":"2024-04-04T12:40:57+00:00","external_desc":"Cloud Billing Issue: Increased error rate for Cloud Billing API calls","updates":[{"created":"2024-04-04T12:40:47+00:00","modified":"2024-04-04T12:41:02+00:00","when":"2024-04-04T12:40:47+00:00","text":"The issue with Cloud Billing has been resolved for all affected users as of Thursday, 2024-04-04 05:10 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},{"created":"2024-04-04T11:50:01+00:00","modified":"2024-04-04T12:40:57+00:00","when":"2024-04-04T11:50:01+00:00","text":"Summary: Cloud Billing Issue: Increased error rate for Cloud Billing API calls\nDescription: We are experiencing an issue with Cloud Billing beginning on Thursday, 2024-04-04 02:18 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-04-04 06:00 US/Pacific with current details.\nDiagnosis: Impacted customers may be unable to view the cost details in the Cloud Console Billing pages and may experience a high error rate for Billing API calls.\nWorkaround: None at this 
time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-04-04T12:40:47+00:00","modified":"2024-04-04T12:41:02+00:00","when":"2024-04-04T12:40:47+00:00","text":"The issue with Cloud Billing has been resolved for all affected users as of Thursday, 2024-04-04 05:10 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Billing","id":"oLCqDYkE9NFWQVgctQTL"},{"title":"Google Cloud Console","id":"Wdsr1n5vyDvCt78qEifm"}],"uri":"incidents/TJJTRtYHPtya879vqqvJ","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"k2PVGkU8WDvQUHAiET2f","number":"11216070837893101187","begin":"2024-03-29T17:26:15+00:00","created":"2024-03-29T17:29:14+00:00","end":"2024-03-29T18:01:13+00:00","modified":"2024-03-29T18:01:14+00:00","external_desc":"Google Cloud Support Delayed Case Handling","updates":[{"created":"2024-03-29T18:01:12+00:00","modified":"2024-03-29T18:01:16+00:00","when":"2024-03-29T18:01:12+00:00","text":"The issue with Google Cloud Support has been resolved for all affected projects as of Friday, 2024-03-29 11:01 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-03-29T17:29:12+00:00","modified":"2024-03-29T17:29:17+00:00","when":"2024-03-29T17:29:12+00:00","text":"Summary: Google Cloud Support Delayed Case Handling\nDescription: We are experiencing an issue with Google Cloud Support.\nMitigation work is currently underway by our engineering team.\nWe will provide an update by Friday, 2024-03-29 12:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Google Cloud customers may experience delays in case handling.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-03-29T18:01:12+00:00","modified":"2024-03-29T18:01:16+00:00","when":"2024-03-29T18:01:12+00:00","text":"The issue with Google Cloud Support has been resolved for all affected projects as of Friday, 2024-03-29 11:01 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"bGThzF7oEGP5jcuDdMuk","service_name":"Google Cloud Support","affected_products":[{"title":"Google Cloud Support","id":"bGThzF7oEGP5jcuDdMuk"}],"uri":"incidents/k2PVGkU8WDvQUHAiET2f","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"8cyevJX5zmqCoP8gqqRG","number":"11074182206360329850","begin":"2024-03-29T17:00:00+00:00","created":"2024-03-29T22:33:44+00:00","end":"2024-03-30T12:27:00+00:00","modified":"2024-04-02T03:44:24+00:00","external_desc":"Chronicle Delays in Data Availability","updates":[{"created":"2024-04-02T03:34:44+00:00","modified":"2024-04-02T03:44:24+00:00","when":"2024-04-02T03:34:44+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. 
Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support or to Google Workspace Support using help article https://support.google.com/a/answer/1047213.\n**(All Times US/Pacific)**\n**Incident Start:** 29 March 2024 10:00\n**Incident End:** 30 March 2024 05:27\n**Duration:** 19 hours, 27 minutes\n**Affected Services and Features:**\nChronicle Security\n**Regions/Zones:**\nGlobal\n**Description:**\nOn 29 March 2024 at 10:00, Chronicle Security experienced delays in data processing for Entity Graph, BigQuery Export, User and Entity Behavior Analytics (UEBA) features including metrics and entity risk scores, and Log Export. From preliminary analysis, the root cause was related to a recent rollout that improperly configured an access transparency check for some of Chronicle Security’s back-end data processing pipelines.\nGoogle engineers completed a rollback of the change that introduced the issue at 18:09, which allowed the affected data pipelines to run. All of the affected data pipelines had successfully completed by 30 March 2024 at 05:26, fully resolving impact.\n**Customer Impact:**\nCustomers experienced delays in data availability in all regions for multiple Chronicle system components and product features, including Entity Graph, BigQuery Export, UEBA Simple Analytics, and Log Export.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-03-30T12:27:03+00:00","modified":"2024-03-30T12:27:05+00:00","when":"2024-03-30T12:27:03+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Saturday, 2024-03-30 05:26 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-03-30T05:57:14+00:00","modified":"2024-03-30T12:22:24+00:00","when":"2024-03-30T05:57:14+00:00","text":"Summary: Chronicle Delays in Data Availability\nDescription: The issue with Chronicle Security is partially resolved. 
The issue is fully resolved in all Chronicle regions except the US, where it is partially resolved.\nThe remaining impact includes: some legacy UDM event exports to BigQuery/Looker are delayed in the US for some customers, and the Entity Graph table/explore is delayed in the US.\nWe will provide more information by Saturday, 2024-03-30 08:00 US/Pacific.\nDiagnosis: Customers may experience residual delays in data freshness in some regions for log export, Entity Graph, and User and Entity Behavior Analytics (UEBA) Simple Analytics.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-03-30T01:24:35+00:00","modified":"2024-03-30T01:24:37+00:00","when":"2024-03-30T01:24:35+00:00","text":"Summary: Chronicle Delays in Data Availability\nDescription: We believe the issue with Chronicle Security is partially resolved.\nThe issue has been mitigated in most regions. Some regions have rolled back but not fully completed dependent processing for Entity Graph and some tables/explores for BigQuery and Looker.\nWe will provide more information by Friday, 2024-03-29 23:30 US/Pacific.\nDiagnosis: Customers may experience residual delays in data freshness in some regions for log export, Entity Graph, and UEBA Simple Analytics.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-03-30T00:45:23+00:00","modified":"2024-03-30T00:45:25+00:00","when":"2024-03-30T00:45:23+00:00","text":"Summary: Chronicle Delays in Data Availability\nDescription: Mitigation work is currently underway by our engineering team.\nWe have mitigated UEBA metrics in all regions except Americas, Entity Graph in all regions except Americas and Europe, and BigQuery data export for UDM aggregates, IOCs, and Entity Graph in most regions. 
Mitigations are under way for the remaining regions.\nWe will provide more information by Friday, 2024-03-29 18:30 US/Pacific.\nDiagnosis: Customers may experience delays in data availability in all regions for multiple Chronicle system components and product features (Entity Graph, UEBA Simple Analytics, Log Export, Indexing).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-03-30T00:13:02+00:00","modified":"2024-03-30T00:13:05+00:00","when":"2024-03-30T00:13:02+00:00","text":"Summary: Chronicle Delays in Data Availability\nDescription: Mitigation work is currently underway by our engineering team.\nWe have globally mitigated UEBA risk score. We have mitigated UEBA metrics, Entity Graph and BigQuery data export for UDM aggregates, IOCs, and Entity Graph in most regions. Mitigations are also under way for indexing metrics including prevalence and first seen (which were delayed in the Entity Graph).\nWe will provide more information by Friday, 2024-03-29 17:45 US/Pacific.\nDiagnosis: Customers may experience delays in data availability in all regions for multiple Chronicle system components and product features (Entity Graph, UEBA Simple Analytics, Log Export, Indexing).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-03-29T23:43:00+00:00","modified":"2024-03-29T23:43:02+00:00","when":"2024-03-29T23:43:00+00:00","text":"Summary: Chronicle Delays in Data Availability\nDescription: Mitigation work is currently underway by our engineering team.\nWe have globally mitigated BigQuery data export for rule detections (for the legacy Looker connector, external Looker connector, and legacy BigQuery direct access). Mitigations are underway for additional BigQuery data exports (UDM aggregates, IOCs, Entity Graph). 
Mitigations are under way for Entity Graph and Log Export to GCS.\nWe will provide more information by Friday, 2024-03-29 17:00 US/Pacific.\nDiagnosis: Customers may experience delays in data availability in all regions for multiple Chronicle system components and product features (Entity Graph, UEBA Simple Analytics, Log Export).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"created":"2024-03-29T22:38:15+00:00","modified":"2024-04-02T03:37:00+00:00","when":"2024-03-29T22:38:15+00:00","text":"Summary: Chronicle Delays in Data Availability\nDescription: We are experiencing an issue with Chronicle Security beginning at Friday, 2024-03-29 10:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-03-29 17:35 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers may experience delays in data availability in all regions for multiple Chronicle system components and product features (Entity Graph, BigQuery Export, UEBA Simple Analytics, Log Export).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]}],"most_recent_update":{"created":"2024-04-02T03:34:44+00:00","modified":"2024-04-02T03:44:24+00:00","when":"2024-04-02T03:34:44+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support or to Google Workspace Support using help article https://support.google.com/a/answer/1047213.\n**(All Times US/Pacific)**\n**Incident Start:** 29 March 2024 10:00\n**Incident End:** 30 March 2024 05:27\n**Duration:** 19 hours, 27 minutes\n**Affected Services and Features:**\nChronicle Security\n**Regions/Zones:**\nGlobal\n**Description:**\nOn 29 March 2024 at 10:00, Chronicle Security experienced delays in data processing for Entity Graph, BigQuery Export, User and Entity Behavior Analytics (UEBA) features including metrics and entity risk scores, and Log Export. 
From preliminary analysis, the root cause was related to a recent rollout that improperly configured an access transparency check for some of Chronicle Security’s back-end data processing pipelines.\nGoogle engineers completed a rollback of the change that introduced the issue at 18:09, which allowed the affected data pipelines to run. All of the affected data pipelines had successfully completed by 30 March 2024 at 05:26, fully resolving impact.\n**Customer Impact:**\nCustomers experienced delays in data availability in all regions for multiple Chronicle system components and product features, including Entity Graph, BigQuery Export, UEBA Simple Analytics, and Log Export.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/8cyevJX5zmqCoP8gqqRG","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"}]},{"id":"zzizj3QAAaGqyqvwKcrT","number":"11984151550178413233","begin":"2024-03-28T16:48:00+00:00","created":"2024-03-28T17:35:01+00:00","end":"2024-03-28T20:20:00+00:00","modified":"2024-03-28T20:21:09+00:00","external_desc":"Cloud Customers may experience minimal service disruption in us-east5-c","updates":[{"created":"2024-03-28T18:52:24+00:00","modified":"2024-03-28T19:14:00+00:00","when":"2024-03-28T18:52:24+00:00","text":"Summary: Cloud Customers may experience minimal service disruption in us-east5-c\nDescription: The issue with Google Compute Engine, Persistent Disk, Virtual Private Cloud (VPC) and Google Kubernetes Engine is believed to be affecting a very small number of projects, and our Engineering Team has taken appropriate measures to mitigate the issue and is continuing to closely monitor the environment.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nNo further updates will be provided here.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Columbus (us-east5)","id":"us-east5"}]},{"created":"2024-03-28T18:13:16+00:00","modified":"2024-03-28T18:15:30+00:00","when":"2024-03-28T18:13:16+00:00","text":"Summary: Cloud Customers may 
experience minimal service disruption in us-east5-c\nDescription: We are experiencing an issue with Google Compute Engine, Persistent Disk, and Google Kubernetes Engine.\nThe maintenance activity in us-east5-c is now complete and telemetry is not showing any visible impact. Our engineers are continuing to monitor, and full mitigation is expected to complete by Thursday, 2024-03-28 12:45 US/Pacific.\nWe will provide an update by Thursday, 2024-03-28 11:45 US/Pacific with current details.\nDiagnosis:\n- Customers may experience increased latency or packet loss.\n- TPU instances may experience network performance degradation.\n- Newly created VMs will be served via other zones in the us-east5 region.\n- Existing workloads should not be impacted. The impact may be limited to a fraction of ingress, egress, and VM-to-VM traffic.\nWorkaround: Customers can use other zones without impact at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Columbus (us-east5)","id":"us-east5"}]},{"created":"2024-03-28T17:34:55+00:00","modified":"2024-03-28T18:15:49+00:00","when":"2024-03-28T17:34:55+00:00","text":"Summary: Cloud Customers may experience minimal service disruption in us-east5-c\nDescription: We are experiencing an issue with Google Compute Engine, Persistent Disk, and Google Kubernetes Engine. Our engineers are working on performing maintenance in the us-east5-c zone to minimize impact.\nWe will provide an update by Thursday, 2024-03-28 11:05 US/Pacific with current details.\nDiagnosis:\n- Customers may experience increased latency or packet loss.\n- Newly created VMs will be served via other zones in the us-east5 region.\n- The current impact is limited to existing VMs, which may experience elevated network latencies and packet loss.\n- Existing workloads should not be impacted. 
The impact should be limited to VM-to-VM traffic.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Columbus (us-east5)","id":"us-east5"}]}],"most_recent_update":{"created":"2024-03-28T18:52:24+00:00","modified":"2024-03-28T19:14:00+00:00","when":"2024-03-28T18:52:24+00:00","text":"Summary: Cloud Customers may experience minimal service disruption in us-east5-c\nDescription: The issue with Google Compute Engine, Persistent Disk, Virtual Private Cloud (VPC) and Google Kubernetes Engine is believed to be affecting a very small number of projects, and our Engineering Team has taken appropriate measures to mitigate the issue and is continuing to closely monitor the environment.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nNo further updates will be provided here.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Columbus (us-east5)","id":"us-east5"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Virtual Private Cloud (VPC)","id":"BSGtCUnz6ZmyajsjgTKv"},{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"},{"title":"Persistent Disk","id":"SzESm2Ux129pjDGKWD68"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"}],"uri":"incidents/zzizj3QAAaGqyqvwKcrT","currently_affected_locations":[],"previously_affected_locations":[{"title":"Columbus (us-east5)","id":"us-east5"}]},{"id":"TZMCWaQRtAmX9ZDVwYUx","number":"14822853631540204442","begin":"2024-03-26T19:50:00+00:00","created":"2024-03-26T23:14:08+00:00","end":"2024-03-27T00:30:00+00:00","modified":"2024-03-27T18:16:44+00:00","external_desc":"Cloud Workstations service issues for us-central1 and us-west1 regions.","updates":[{"created":"2024-03-27T18:16:18+00:00","modified":"2024-03-27T18:16:18+00:00","when":"2024-03-27T18:16:18+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below.\nPlease note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support or to Google Workspace Support using the help article https://support.google.com/a/answer/1047213.\n(All Times US/Pacific)\n**Incident Start:** 26 March 2024 12:50\n**Incident End:** 26 March 2024 17:30\n**Duration:** 4 hours, 40 minutes\n**Affected Services and Features:**\nCloud Workstations\n**Regions/Zones:**\nus-west1, us-central1\n**Description:**\nGoogle Cloud Workstations in us-west1 and us-central1 experienced 500 internal errors and slow load times in the Cloud Console for a duration of 4 hours 40 minutes. 
Our preliminary analysis shows the root cause of the issue was a transient overload of our API servers.\nThe traffic overload was due to a configuration change which was quickly identified and rolled back.\nGoogle engineers mitigated the issue by throttling traffic as required to normalize resource utilization and serve all requests.\n**Customer Impact:**\n- Impacted customers would have observed 500 internal errors.\n- Impacted customers would have experienced slow load times in the Cloud Console.","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-27T00:35:14+00:00","modified":"2024-03-27T00:35:16+00:00","when":"2024-03-27T00:35:14+00:00","text":"The issue with Cloud Workstations has been resolved for all affected users as of Tuesday, 2024-03-26 17:20 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-27T00:10:54+00:00","modified":"2024-03-27T00:10:57+00:00","when":"2024-03-27T00:10:54+00:00","text":"Summary: Cloud Workstations service issues for us-central1 and us-west1 regions.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2024-03-26 18:15 US/Pacific.\nDiagnosis: Impacted customers may receive errors or experience slow load times viewing workstations in the Cloud Console.\nWorkaround: Customers may reload the page in the Cloud Console.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-26T23:14:06+00:00","modified":"2024-03-26T23:14:10+00:00","when":"2024-03-26T23:14:06+00:00","text":"Summary: Cloud Workstations service issues for us-central1 and us-west1 regions.\nDescription: We are experiencing an issue with Cloud Workstations.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-03-26 17:15 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may receive a 500 internal error while trying to access Cloud Workstations.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2024-03-27T18:16:18+00:00","modified":"2024-03-27T18:16:18+00:00","when":"2024-03-27T18:16:18+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below.\nPlease note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support or to Google Workspace Support using the help article https://support.google.com/a/answer/1047213.\n(All Times US/Pacific)\n**Incident Start:** 26 March 2024 12:50\n**Incident End:** 26 March 2024 17:30\n**Duration:** 4 hours, 40 minutes\n**Affected Services and Features:**\nCloud Workstations\n**Regions/Zones:**\nus-west1, us-central1\n**Description:**\nGoogle Cloud Workstations in us-west1 and us-central1 experienced 500 internal errors and slow load times in the Cloud Console for a duration of 4 hours 40 minutes. Our preliminary analysis shows the root cause of the issue was a transient overload of our API servers.\nThe traffic overload was due to a configuration change which was quickly identified and rolled back.\nGoogle engineers mitigated the issue by throttling traffic as required to normalize resource utilization and serve all requests.\n**Customer Impact:**\n- Impacted customers would have observed 500 internal errors.\n- Impacted customers would have experienced slow load times in the Cloud Console.","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"5UUXCiH1vfFHXmbDixrB","service_name":"Cloud Workstations","affected_products":[{"title":"Cloud Workstations","id":"5UUXCiH1vfFHXmbDixrB"}],"uri":"incidents/TZMCWaQRtAmX9ZDVwYUx","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"rPi4kCYpkABwgrGAL5vE","number":"1602095486238380946","begin":"2024-03-26T15:20:56+00:00","created":"2024-03-26T15:49:16+00:00","end":"2024-03-27T02:06:57+00:00","modified":"2024-03-27T02:06:57+00:00","external_desc":"[Cloud Build] Builds failing for new projects during default service account initialization","updates":[{"created":"2024-03-27T02:06:55+00:00","modified":"2024-03-27T02:06:58+00:00","when":"2024-03-27T02:06:55+00:00","text":"The issue with Cloud Build has been resolved for all affected users as of Tuesday, 2024-03-26 18:39 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands 
(europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-03-26T22:00:21+00:00","modified":"2024-03-26T22:00:25+00:00","when":"2024-03-26T22:00:21+00:00","text":"Summary: [Cloud Build] Builds failing for new projects during default service account initialization\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation work is taking more time than expected.\nWe will provide more information by Tuesday, 2024-03-26 21:30 US/Pacific.\nDiagnosis: Users may experience Builds failing in new projects that does not have a default compute service account and haven't successfully completed a prior GCB build\nWorkaround: ** If the project does not have a compute service account (i.e. PROJECT_NUM-compute@developer.gserviceaccount.com), the user may enable GCE API to add the service account and builds should work\n** The user can run builds using logging option CLOUD_LOGGING_ONLY.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal 
(northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-03-26T16:47:06+00:00","modified":"2024-03-26T16:47:10+00:00","when":"2024-03-26T16:47:06+00:00","text":"Summary: [Cloud Build] Builds failing for new projects during default service account initialization\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Tuesday, 2024-03-26 15:00 US/Pacific.\nWe will provide more information by Tuesday, 2024-03-26 15:30 US/Pacific.\nDiagnosis: Users may experience Builds failing in new projects that does not have a default compute service account and haven't successfully completed a prior GCB build\nWorkaround: ** If the project does not have a compute service account (i.e. PROJECT_NUM-compute@developer.gserviceaccount.com), the user may enable GCE API to add the service account and builds should work\n** The user can run builds using logging option CLOUD_LOGGING_ONLY.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina 
(us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-03-26T16:39:29+00:00","modified":"2024-03-26T16:39:32+00:00","when":"2024-03-26T16:39:29+00:00","text":"Summary: [Cloud Build] Builds failing for new projects during default service account initialization\nDescription: We are experiencing an issue with Cloud Build beginning on Tuesday, 2024-03-19 12:06 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-03-26 11:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Users may experience Builds failing in new projects that does not have a default compute service account and haven't successfully completed a prior GCB build\nWorkaround: ** If the project does not have a compute service account (i.e. PROJECT_NUM-compute@developer.gserviceaccount.com), the user may enable GCE API to add the service account and builds should work\n** The user can run builds using logging option CLOUD_LOGGING_ONLY.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles 
(us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-03-26T15:49:07+00:00","modified":"2024-03-26T15:49:22+00:00","when":"2024-03-26T15:49:07+00:00","text":"Summary: [Cloud Build] Builds failing for new projects during default service account initialization\nDescription: We are experiencing an issue with Cloud Build beginning on Tuesday, 2024-03-19 12:06 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-03-26 10:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Users may experience Builds failing in new projects that does not have a default compute service account and haven't successfully completed a prior GCB build\nWorkaround: ** If the project does not have a compute service account (i.e. PROJECT_NUM-compute@developer.gserviceaccount.com), the user may enable GCE API to add the service account and builds should work\n** The user can run builds using logging option CLOUD_LOGGING_ONLY.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-03-27T02:06:55+00:00","modified":"2024-03-27T02:06:58+00:00","when":"2024-03-27T02:06:55+00:00","text":"The issue 
with Cloud Build has been resolved for all affected users as of Tuesday, 2024-03-26 18:39 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"},{"title":"Cloud Build","id":"fw8GzBdZdqy4THau7e1y"}],"uri":"incidents/rPi4kCYpkABwgrGAL5vE","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland 
(europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"TZ5k2iyxxrGHwjGm6cta","number":"6009275808744292929","begin":"2024-03-25T21:25:20+00:00","created":"2024-03-25T21:53:16+00:00","end":"2024-03-26T01:01:06+00:00","modified":"2024-03-26T01:01:06+00:00","external_desc":"High normalization latency in the US multi-region.","updates":[{"created":"2024-03-26T01:01:04+00:00","modified":"2024-03-26T01:01:07+00:00","when":"2024-03-26T01:01:04+00:00","text":"The issue with Chronicle Security has been resolved for all affected customers as of Monday, 2024-03-25 17:54 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-03-25T23:18:19+00:00","modified":"2024-03-25T23:18:23+00:00","when":"2024-03-25T23:18:19+00:00","text":"Summary: High normalization latency in the US multi-region.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2024-03-25 21:00 US/Pacific.\nDiagnosis: Customers are unable to view the latest ingested logs on the Chronicle UI. The customer will observe a delay of approximately 1 hour for normalization and logs will be only visible after the normalization.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-03-25T22:37:07+00:00","modified":"2024-03-25T22:43:10+00:00","when":"2024-03-25T22:37:07+00:00","text":"Summary: High normalization latency in the US multi-region.\nDescription: We are experiencing an issue with Chronicle Security beginning on Monday, 2024-03-25 07:54 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-03-25 16:30 US/Pacific with current details.\nDiagnosis: Customers are unable to view the latest ingested logs on the Chronicle UI. 
The customer will observe a delay of approximately 1 hour for normalization and logs will be only visible after the normalization.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-03-25T21:56:35+00:00","modified":"2024-03-25T21:56:41+00:00","when":"2024-03-25T21:56:35+00:00","text":"Summary: High normalization latency in the US multi-region.\nDescription: We are experiencing an issue with Chronicle Security beginning on Monday, 2024-03-25 07:54 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-03-25 16:00 US/Pacific with current details.\nDiagnosis: The impacted customers would experience latency in viewing their ingested events within the Chronicle security UI.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-03-25T21:53:10+00:00","modified":"2024-03-25T21:53:17+00:00","when":"2024-03-25T21:53:10+00:00","text":"Summary: High normalization latency in the US multi-region.\nDescription: We are experiencing an issue with Chronicle Security beginning on Monday, 2024-03-25 07:54 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-03-25 15:00 US/Pacific with current details.\nDiagnosis: The impacted customers would experience latency in viewing their ingested events within the Chronicle security UI.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-03-26T01:01:04+00:00","modified":"2024-03-26T01:01:07+00:00","when":"2024-03-26T01:01:04+00:00","text":"The issue with Chronicle Security has been resolved for all affected customers as of Monday, 2024-03-25 17:54 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/TZ5k2iyxxrGHwjGm6cta","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"BEeKvKrrEY8KEScsH5zF","number":"17140378054666768392","begin":"2024-03-24T09:59:00+00:00","created":"2024-03-25T10:17:45+00:00","end":"2024-03-24T18:07:00+00:00","modified":"2024-03-25T11:08:54+00:00","external_desc":"Multiple instances unavailable due to database connection errors","updates":[{"created":"2024-03-25T10:28:41+00:00","modified":"2024-03-25T10:28:49+00:00","when":"2024-03-25T10:28:41+00:00","text":"We experienced an issue with Contact Center AI Platform beginning at Sunday, 2024-03-24 02:59 US/Pacific.\nThe issue has been resolved for all affected users as of Sunday, 2024-03-24 11:07 US/Pacific.\nWhen the issue was occurring, the customers were getting \"application errors\" when trying to contact the Contact Center and were not being redirected to the IVR. 
When trying to log in to CCAI, users would default to “offline status” and appear unavailable.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-25T10:17:33+00:00","modified":"2024-03-25T10:26:30+00:00","when":"2024-03-25T10:17:33+00:00","text":"Summary: CCAI Platform instances are partially or fully unavailable\nDescription: We are experiencing an issue with Contact Center AI Platform.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2024-03-25 04:56 US/Pacific with current details.\nDiagnosis: Customers are getting \"application errors\" when trying to contact the Contact Center and are not being redirected to the IVR. When trying to log in to CCAI, users default to “offline status” and appear unavailable\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2024-03-25T10:28:41+00:00","modified":"2024-03-25T10:28:49+00:00","when":"2024-03-25T10:28:41+00:00","text":"We experienced an issue with Contact Center AI Platform beginning at Sunday, 2024-03-24 02:59 US/Pacific.\nThe issue has been resolved for all affected users as of Sunday, 2024-03-24 11:07 US/Pacific.\nWhen the issue was occurring, the customers were getting \"application errors\" when trying to contact the Contact Center and were not being redirected to the IVR. 
When trying to log in to CCAI, users would default to “offline status” and appear unavailable.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"eSAGSSEKoxh8tTJucdYg","service_name":"Contact Center AI Platform","affected_products":[{"title":"Contact Center AI Platform","id":"eSAGSSEKoxh8tTJucdYg"}],"uri":"incidents/BEeKvKrrEY8KEScsH5zF","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"6QzHbYapW8ofPgmatadh","number":"9303654817046568654","begin":"2024-03-22T15:09:00+00:00","created":"2024-03-22T16:57:54+00:00","end":"2024-03-22T17:29:00+00:00","modified":"2024-03-22T21:40:20+00:00","external_desc":"We are experiencing an issue with Chronicle Security Dashboards","updates":[{"created":"2024-03-22T21:39:55+00:00","modified":"2024-03-22T21:39:55+00:00","when":"2024-03-22T21:39:55+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 22 March 2024 08:09\n**Incident End:** 22 March 2024 10:29\n**Duration:** 2 hours, 20 minutes\n**Affected Services and Features:**\nGoogle Cloud Platform - Chronicle SIEM\n**Regions/Zones:** Multi-regions: us, eu\n**Description:**\nChronicle SIEM experienced an issue with Chronicle Security Dashboards for a duration of 2 hours, 20 minutes. From preliminary analysis, the root cause of the issue was related to a recent product change that was deployed to production. 
Google engineers rolled back the change, which mitigated the issue at 10:29 US/Pacific.\n**Customer Impact:**\n* The Chronicle Security Dashboards failed to load.\n* 100% failure rate for related API calls.\n* Affected customers would have experienced an error when accessing dashboards: \"An error occurred while loading dashboards. Please try refreshing the page in a few moments.\"","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-03-22T17:39:04+00:00","modified":"2024-03-22T17:39:08+00:00","when":"2024-03-22T17:39:04+00:00","text":"The issue with Chronicle Security dashboards has been resolved for all affected users as of Friday, 2024-03-22 10:29 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-03-22T17:16:10+00:00","modified":"2024-03-22T17:16:13+00:00","when":"2024-03-22T17:16:10+00:00","text":"Summary: We are experiencing an issue with Chronicle Security Dashboards\nDescription: Our engineering team has identified the root cause and has initiated a rollback.\nThe mitigation is expected to complete by Friday, 2024-03-22 11:30 US/Pacific.\nWe will provide more information by Friday, 2024-03-22 12:00 US/Pacific.\nDiagnosis: Customers will receive the following error:\n\"An error occurred while loading dashboards. Please try refreshing the page in a few moments.\"\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: europe","id":"europe"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-03-22T16:57:47+00:00","modified":"2024-03-22T16:57:56+00:00","when":"2024-03-22T16:57:47+00:00","text":"Summary: We are experiencing an issue with Chronicle Security Dashboards\nDescription: We are experiencing an issue with Chronicle Security dashboard beginning at Friday, 2024-03-22 08:09 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Friday, 2024-03-22 10:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers will receive the following error:\n\"An error occurred while loading dashboards. Please try refreshing the page in a few moments.\"\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-03-22T21:39:55+00:00","modified":"2024-03-22T21:39:55+00:00","when":"2024-03-22T21:39:55+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 22 March 2024 08:09\n**Incident End:** 22 March 2024 10:29\n**Duration:** 2 hours, 20 minutes\n**Affected Services and Features:**\nGoogle Cloud Platform - Chronicle SIEM\n**Regions/Zones:** Multi-regions: us, eu\n**Description:**\nChronicle SIEM experienced an issue with Chronicle Security Dashboards for a duration of 2 hours, 20 minutes. 
From preliminary analysis, the root cause of the issue was related to a recent product change that was deployed to production. Google engineers rolled back the change, which mitigated the issue at 10:29 US/Pacific.\n**Customer Impact:**\n* The Chronicle Security Dashboards failed to load.\n* 100% failure rate for related API calls.\n* Affected customers would have experienced an error when accessing dashboards: \"An error occurred while loading dashboards. Please try refreshing the page in a few moments.\"","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/6QzHbYapW8ofPgmatadh","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: europe","id":"europe"},{"title":"Multi-region: us","id":"us"}]},{"id":"mjD4KVELWrBef7hEcQJ2","number":"16704950800264312872","begin":"2024-03-21T19:44:25+00:00","created":"2024-03-21T21:01:02+00:00","end":"2024-03-21T22:00:27+00:00","modified":"2024-03-21T22:00:29+00:00","external_desc":"We are experiencing an issue with Chronicle SIEM","updates":[{"created":"2024-03-21T22:00:26+00:00","modified":"2024-03-21T22:00:29+00:00","when":"2024-03-21T22:00:26+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Thursday, 2024-03-21 14:43 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Multi-region: us","id":"us"}]},{"created":"2024-03-21T21:00:57+00:00","modified":"2024-03-21T21:01:04+00:00","when":"2024-03-21T21:00:57+00:00","text":"Summary: We are experiencing an issue with Chronicle SIEM\nDescription: We've received a report of an issue with Chronicle Security as of Thursday, 2024-03-21 12:44 US/Pacific.\nWe will provide more information by Thursday, 2024-03-21 16:30 US/Pacific.\nDiagnosis: Customers may experience Raw Log Search not showing log sources.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-03-21T22:00:26+00:00","modified":"2024-03-21T22:00:29+00:00","when":"2024-03-21T22:00:26+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Thursday, 2024-03-21 14:43 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt 
(europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Multi-region: us","id":"us"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/mjD4KVELWrBef7hEcQJ2","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Multi-region: us","id":"us"}]},{"id":"h4gHfwp3N5EtqocKqKnx","number":"17983315295502507375","begin":"2024-03-21T16:29:25+00:00","created":"2024-03-21T17:27:29+00:00","end":"2024-03-25T15:32:58+00:00","modified":"2024-03-25T15:32:59+00:00","external_desc":"Google Distributed Cloud (GDC) Edge customers are experiencing issues with creating new Google Kubernetes Engine (GKE) clusters.","updates":[{"created":"2024-03-25T15:32:46+00:00","modified":"2024-03-25T15:33:04+00:00","when":"2024-03-25T15:32:46+00:00","text":"The issue with Google Distributed Cloud Edge has been resolved for all affected users as of Saturday, 2024-03-23 18:43 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles 
(us-west2)","id":"us-west2"}]},{"created":"2024-03-21T23:25:26+00:00","modified":"2024-03-21T23:25:28+00:00","when":"2024-03-21T23:25:26+00:00","text":"Summary: Google Distributed Cloud (GDC) Edge customers are experiencing issues with creating new Google Kubernetes Engine (GKE) clusters.\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Wednesday, 2024-03-20.\nOur engineering team has identified the fix for the issue and mitigation work is currently underway by our engineering team.\nWe will provide an update by Monday, 2024-03-25 17:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- Customers attempting to create a new GKE cluster in a GDC Edge Zone may encounter timeout or internal errors , leading to cluster creation failures.\n- Existing clusters are not affected by the issue.\nWorkaround: Affected customers can run the following command\n- gcloud projects add-iam-policy-binding ${PROJ} --member serviceAccount:service-${PROJ}@gcp-sa-edgecontainercluster.iam.gserviceaccount.com --role roles/viewer\nPlease substitute ${PROJ} with appropriate PROJECT_NUMBER","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-03-21T21:08:01+00:00","modified":"2024-03-21T21:08:03+00:00","when":"2024-03-21T21:08:01+00:00","text":"Summary: Google Distributed Cloud (GDC) Edge customers are experiencing issues with creating new Google Kubernetes Engine (GKE) clusters.\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Wednesday, 2024-03-20.\nOur engineering team has identified the fix for the issue and working on identifying effective fix roll out plan.\nWe will provide an update by Thursday, 2024-03-21 16:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- Customers attempting to create a new GKE cluster in a GDC Edge 
Zone may encounter timeout or internal errors, leading to cluster creation failures.\n- Existing clusters are not affected by the issue.\nWorkaround: Affected customers can run the following command\n- gcloud projects add-iam-policy-binding ${PROJ} --member serviceAccount:service-${PROJ}@gcp-sa-edgecontainercluster.iam.gserviceaccount.com --role roles/viewer\nPlease substitute ${PROJ} with the appropriate PROJECT_NUMBER","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-03-21T18:58:29+00:00","modified":"2024-03-21T18:58:31+00:00","when":"2024-03-21T18:58:29+00:00","text":"Summary: Google Distributed Cloud (GDC) Edge customers are experiencing issues with creating new Google Kubernetes Engine (GKE) clusters.\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Wednesday, 2024-03-20.\nOur engineering team has identified the root cause and is working on a mitigation plan.\nWe will provide an update by Thursday, 2024-03-21 14:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- Customers attempting to create a new GKE cluster in a GDC Edge Zone may encounter timeout or internal errors, leading to cluster creation failures.\n- Existing clusters are not affected by the issue.\nWorkaround: Affected customers can run the following command\n- gcloud projects add-iam-policy-binding ${PROJ} --member serviceAccount:service-${PROJ}@gcp-sa-edgecontainercluster.iam.gserviceaccount.com --role roles/viewer\nPlease substitute ${PROJ} with the appropriate PROJECT_NUMBER","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi 
(asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-03-21T17:27:28+00:00","modified":"2024-03-21T17:27:30+00:00","when":"2024-03-21T17:27:28+00:00","text":"Summary: Google Distributed Cloud (GDC) Edge customers are experiencing issues with creating new Google Kubernetes Engine (GKE) clusters.\nDescription: We are experiencing an issue with Google Distributed Cloud Edge beginning on Wednesday, 2024-03-20.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-03-21 12:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- Customers attempting to create a new GKE cluster in a GDC Edge Zone may encounter timeout or internal errors , leading to cluster creation failures.\n- Existing clusters are not affected by the issue.\nWorkaround: Affected customers can run the following command\n- gcloud projects add-iam-policy-binding ${PROJ} --member serviceAccount:service-${PROJ}@gcp-sa-edgecontainercluster.iam.gserviceaccount.com --role roles/viewer\nPlease substitute ${PROJ} with appropriate PROJECT_NUMBER","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv 
(me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]}],"most_recent_update":{"created":"2024-03-25T15:32:46+00:00","modified":"2024-03-25T15:33:04+00:00","when":"2024-03-25T15:32:46+00:00","text":"The issue with Google Distributed Cloud Edge has been resolved for all affected users as of Saturday, 2024-03-23 18:43 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"JKyM3LJTqgETjRCvSK6w","service_name":"Google Distributed Cloud Edge","affected_products":[{"title":"Google Distributed Cloud Edge","id":"JKyM3LJTqgETjRCvSK6w"}],"uri":"incidents/h4gHfwp3N5EtqocKqKnx","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Belgium 
(europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"id":"vf8XEwxDCrtJ2DCEjiJT","number":"17145921820636574993","begin":"2024-03-20T19:41:56+00:00","created":"2024-03-20T19:42:00+00:00","end":"2024-03-20T19:55:21+00:00","modified":"2024-03-20T19:55:21+00:00","external_desc":"Issue with Dialogflow CX, Dialogflow ES, Agent Assist","updates":[{"created":"2024-03-20T19:55:19+00:00","modified":"2024-03-20T19:55:22+00:00","when":"2024-03-20T19:55:19+00:00","text":"We experienced issues with Agent Assist, Dialogflow CX, Dialogflow ES beginning at Wednesday, 2024-03-20 09:58 US/Pacific.\nThe issue impacted streaming data plane traffic (StreamingAnalyzeContent / StreamingDetectIntent) are returning internal or unavailable errors.\nThe issue has been resolved for all affected users as of Wednesday, 2024-03-20 11:40 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-20T19:41:58+00:00","modified":"2024-03-20T19:42:01+00:00","when":"2024-03-20T19:41:58+00:00","text":"Summary: Issue with Dialogflow CX, Dialogflow ES, Agent Assist\nDescription: We are investigating a potential issue with Agent Assist, Dialogflow CX, Dialogflow ES.\nWe will provide more information by Wednesday, 2024-03-20 13:30 US/Pacific.\nDiagnosis: Customers impacted by this issue may see may see internal or unavailable errors while using streaming APIs (StreamingAnalyzeContent, StreamingDetectIntent).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal 
(northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2024-03-20T19:55:19+00:00","modified":"2024-03-20T19:55:22+00:00","when":"2024-03-20T19:55:19+00:00","text":"We experienced issues with Agent Assist, Dialogflow CX, Dialogflow ES beginning at Wednesday, 2024-03-20 09:58 US/Pacific.\nThe issue impacted streaming data plane traffic (StreamingAnalyzeContent / StreamingDetectIntent) are returning internal or unavailable errors.\nThe issue has been resolved for all affected users as of Wednesday, 2024-03-20 11:40 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Dialogflow CX","id":"BnCicQdHSdxaCv8Ya6Vm"},{"title":"Agent Assist","id":"eUntUKqUrHdbBLNcVVXq"},{"title":"Dialogflow ES","id":"sQqrYvhjMT5crPHKWJFY"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"}],"uri":"incidents/vf8XEwxDCrtJ2DCEjiJT","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"sdhbD1BRUpn3XFN5oam3","number":"12542426652389272334","begin":"2024-03-20T18:12:48+00:00","created":"2024-03-20T18:27:37+00:00","end":"2024-03-20T18:48:09+00:00","modified":"2024-03-20T18:48:09+00:00","external_desc":"We are experiencing an issue with Speech-to-Text","updates":[{"created":"2024-03-20T18:47:16+00:00","modified":"2024-03-20T18:48:10+00:00","when":"2024-03-20T18:47:16+00:00","text":"The issue with Speech-to-Text has been resolved for all affected users as of Wednesday, 2024-03-20 11:47 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London 
(europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-20T18:27:35+00:00","modified":"2024-03-20T18:27:39+00:00","when":"2024-03-20T18:27:35+00:00","text":"Summary: We are experiencing an issue with Speech-to-Text\nDescription: We are experiencing an issue with Speech-to-Text.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-03-20 12:27 US/Pacific with current details.\nDiagnosis: Customer may see internal errors\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2024-03-20T18:47:16+00:00","modified":"2024-03-20T18:48:10+00:00","when":"2024-03-20T18:47:16+00:00","text":"The issue with Speech-to-Text has been resolved for all affected users as of Wednesday, 2024-03-20 11:47 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Speech-to-Text","id":"5f5oET9B3whnSFHfwy4d"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"}],"uri":"incidents/sdhbD1BRUpn3XFN5oam3","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal 
(northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"3MTbJCdH8swURGCvrkhe","number":"6703733452262114321","begin":"2024-03-20T16:58:00+00:00","created":"2024-03-20T21:33:04+00:00","end":"2024-03-20T21:45:00+00:00","modified":"2024-03-22T21:56:39+00:00","external_desc":"We experienced issues with Agent Assist, Dialogflow CX, Dialogflow ES, \u0026 Cloud Speech-to-Text","updates":[{"created":"2024-03-22T21:28:17+00:00","modified":"2024-03-22T21:56:39+00:00","when":"2024-03-22T21:28:17+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support .\n(All Times US/Pacific)\n**First Impact Window Start:** 20 March 2024 09:58\n**First Impact Window end:** 20 March 2024 11:40\n**Duration:** 1 hour, 42 minutes\n**Second Impact Window Start:** 20 March 2024 14:15\n**Second Impact Window end:** 20 March 2024 14:45\n**Duration:** 30 minutes\nCumulative impact duration: 2 hours, 12 minutes\n**Affected Services and Features:**\n* Dialogflow ES\n* Dialogflow CX\n* Cloud Speech-to-Text\n* Agent Assist\n**Regions/Zones:**\n* Global\n* Regions: us-east1, us-central1, us-west1, northamerica-northeast1 [1]\n**Description:**\nDialogflow ES, Dialogflow CX, Cloud Speech-to-Text, and Agent Assist experienced two periods of elevated errors for streaming data plane traffic for durations of 1 hours, 42 minutes and 30 minutes, respectively. From preliminary analysis, the root cause of the issue was due to a recent configuration change for an internal critical dependency that serves as a backend gateway for the affected products.\n**Customer Impact:**\nImpacted customers encountered multiple periods of internal or unavailable errors for streaming API actions (StreamingAnalyzeContent, StreamingDetectIntent).\n* 20 March 2024 09:58 - 11:40 US/Pacific (1 hour, 42 minutes)\n* 20 March 2024 14:15 - 14:45 US/Pacific (30 minutes)\n**Additional details:**\nOn 20 March 2024, at 11:40 US/Pacific, Google engineers reversed the configuration change to the internal critical dependency, which temporarily alleviated the impact while a permanent solution was being developed.\nAt 14:15 US/Pacific, a previously scheduled rollout for the dependency service completed, unintentionally reverting the temporary mitigation that engineers had put in place. 
Engineers reapplied the temporary mitigation at 14:45 US/Pacific.\nOn 21 March 2024, at 16:31 US/Pacific, engineers effectively implemented a new version of the dependency service that includes the necessary mitigation measures, successfully preventing any further regressions.\nReference\n* [1] https://cloud.google.com/dialogflow/cx/docs/concept/region#avail","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-21T23:46:18+00:00","modified":"2024-03-21T23:46:21+00:00","when":"2024-03-21T23:46:18+00:00","text":"The issue with Agent Assist, Dialogflow CX, Dialogflow ES, Speech-to-Text has been resolved for all affected users as of Thursday, 2024-03-21 16:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-21T22:11:56+00:00","modified":"2024-03-21T22:11:59+00:00","when":"2024-03-21T22:11:56+00:00","text":"Summary: Issues with Agent Assist, Dialogflow CX, Dialogflow ES, \u0026 Cloud Speech-to-Text\nDescription: A temporary mitigation is currently in place. 
Currently, our engineers are rolling out a permanent fix.\nWe will provide an update by Thursday, 2024-03-21 18:00 US/Pacific with current details.\nWe apologize for any continued inconvenience this may cause.\nDiagnosis: The impacted customers would encounter internal or unavailable errors while using streaming APIs (StreamingAnalyzeContent, StreamingDetectIntent).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-21T17:23:58+00:00","modified":"2024-03-21T17:24:07+00:00","when":"2024-03-21T17:23:58+00:00","text":"Summary: Issues with Agent Assist, Dialogflow CX, Dialogflow ES, \u0026 Cloud Speech-to-Text\nDescription: A temporary mitigation is currently in place. In the meanwhile, our engineers continue to investigate a permanent fix.\nWe will provide an update by Thursday, 2024-03-21 15:30 US/Pacific with current details.\nWe apologize for any continued inconvenience this may cause.\nDiagnosis: The impacted customers would encounter internal or unavailable errors while using streaming APIs (StreamingAnalyzeContent, StreamingDetectIntent).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-21T16:47:44+00:00","modified":"2024-03-21T16:47:47+00:00","when":"2024-03-21T16:47:44+00:00","text":"Summary: Issues with Agent Assist, Dialogflow CX, Dialogflow ES, \u0026 Cloud Speech-to-Text\nDescription: A temporary mitigation is currently in place. 
In the meanwhile, our engineers are still working on a permanent fix.\nWe will provide an update by Thursday, 2024-03-21 11:30 US/Pacific with current details.\nWe apologize for any continued inconvenience this may cause.\nDiagnosis: The impacted customers would encounter internal or unavailable errors while using streaming APIs (StreamingAnalyzeContent, StreamingDetectIntent).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-21T13:36:57+00:00","modified":"2024-03-21T13:37:13+00:00","when":"2024-03-21T13:36:57+00:00","text":"Summary: Issues with Agent Assist, Dialogflow CX, Dialogflow ES, \u0026 Cloud Speech-to-Text\nDescription: A temporary mitigation is currently in place. In the meanwhile, our engineers are still working on a permanent fix.\nWe will provide an update by Thursday, 2024-03-21 10:00 US/Pacific with current details.\nWe apologize for any continued inconvenience this may cause.\nDiagnosis: The impacted customers would encounter internal or unavailable errors while using streaming APIs (StreamingAnalyzeContent, StreamingDetectIntent).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-20T22:54:12+00:00","modified":"2024-03-20T22:54:15+00:00","when":"2024-03-20T22:54:12+00:00","text":"Summary: Issues with Agent Assist, Dialogflow CX, Dialogflow ES, \u0026 Cloud Speech-to-Text\nDescription: Our engineering team has implemented a temporary mitigation that will require periodic manual intervention over the next 12 hours to maintain service stability.\nWe will provide an update by Thursday, 2024-03-21 07:00 US/Pacific with current details.\nWe apologize for any continued inconvenience this may cause.\nDiagnosis: The impacted customers would encounter internal or unavailable errors while using streaming APIs (StreamingAnalyzeContent, StreamingDetectIntent).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore 
(asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-20T21:59:49+00:00","modified":"2024-03-20T21:59:53+00:00","when":"2024-03-20T21:59:49+00:00","text":"Summary: Issues with Agent Assist, Dialogflow CX, Dialogflow ES, \u0026 Cloud Speech-to-Text\nDescription: Our engineering team identified the issue and implemented a fix. Error rates are down significantly, and we continue to monitor for stability.\nWe will provide an update by Wednesday, 2024-03-20 16:15 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: The impacted customers would encounter internal or unavailable errors while using streaming APIs (StreamingAnalyzeContent, StreamingDetectIntent).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-03-20T21:33:02+00:00","modified":"2024-03-20T21:38:03+00:00","when":"2024-03-20T21:33:02+00:00","text":"Summary: We are experiencing issues with Agent Assist, Dialogflow CX, Dialogflow ES \u0026 Cloud Speech-to-Text\nDescription: We are experiencing an issue with Dialogflow CX, Dialogflow ES, Agent Assist \u0026 Cloud Speech-to-Text beginning at Wednesday, 2024-03-20 14:15 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-03-20 15:15 US/Pacific with current details.\nDiagnosis: The impacted customers would encounter internal or unavailable errors while using streaming APIs (StreamingAnalyzeContent, StreamingDetectIntent).\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon 
(us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2024-03-22T21:28:17+00:00","modified":"2024-03-22T21:56:39+00:00","when":"2024-03-22T21:28:17+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support .\n(All Times US/Pacific)\n**First Impact Window Start:** 20 March 2024 09:58\n**First Impact Window end:** 20 March 2024 11:40\n**Duration:** 1 hour, 42 minutes\n**Second Impact Window Start:** 20 March 2024 14:15\n**Second Impact Window end:** 20 March 2024 14:45\n**Duration:** 30 minutes\nCumulative impact duration: 2 hours, 12 minutes\n**Affected Services and Features:**\n* Dialogflow ES\n* Dialogflow CX\n* Cloud Speech-to-Text\n* Agent Assist\n**Regions/Zones:**\n* Global\n* Regions: us-east1, us-central1, us-west1, northamerica-northeast1 [1]\n**Description:**\nDialogflow ES, Dialogflow CX, Cloud Speech-to-Text, and Agent Assist experienced two periods of elevated errors for streaming data plane traffic for durations of 1 hours, 42 minutes and 30 minutes, respectively. From preliminary analysis, the root cause of the issue was due to a recent configuration change for an internal critical dependency that serves as a backend gateway for the affected products.\n**Customer Impact:**\nImpacted customers encountered multiple periods of internal or unavailable errors for streaming API actions (StreamingAnalyzeContent, StreamingDetectIntent).\n* 20 March 2024 09:58 - 11:40 US/Pacific (1 hour, 42 minutes)\n* 20 March 2024 14:15 - 14:45 US/Pacific (30 minutes)\n**Additional details:**\nOn 20 March 2024, at 11:40 US/Pacific, Google engineers reversed the configuration change to the internal critical dependency, which temporarily alleviated the impact while a permanent solution was being developed.\nAt 14:15 US/Pacific, a previously scheduled rollout for the dependency service completed, unintentionally reverting the temporary mitigation that engineers had put in place. 
Engineers reapplied the temporary mitigation at 14:45 US/Pacific.\nOn 21 March 2024, at 16:31 US/Pacific, engineers effectively implemented a new version of the dependency service that includes the necessary mitigation measures, successfully preventing any further regressions.\nReference\n* [1] https://cloud.google.com/dialogflow/cx/docs/concept/region#avail","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Speech-to-Text","id":"5f5oET9B3whnSFHfwy4d"},{"title":"Dialogflow CX","id":"BnCicQdHSdxaCv8Ya6Vm"},{"title":"Agent Assist","id":"eUntUKqUrHdbBLNcVVXq"},{"title":"Dialogflow ES","id":"sQqrYvhjMT5crPHKWJFY"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"}],"uri":"incidents/3MTbJCdH8swURGCvrkhe","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"Tju36qhNez1jaeU1RjrP","number":"7163123401525948460","begin":"2024-03-12T22:41:56+00:00","created":"2024-03-12T22:42:04+00:00","end":"2024-03-12T23:29:40+00:00","modified":"2024-03-12T23:29:40+00:00","external_desc":"Batch - Service Issues","updates":[{"created":"2024-03-12T23:29:39+00:00","modified":"2024-03-12T23:29:42+00:00","when":"2024-03-12T23:29:39+00:00","text":"The issue with Batch has been resolved as of Tuesday, 2024-03-12 16:28 US/Pacific.\nWe understand that this issue impacted your ability to access and use our services, we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nWe thank you for your patience while we worked on resolving the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore 
(asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-03-12T22:41:57+00:00","modified":"2024-03-12T22:42:06+00:00","when":"2024-03-12T22:41:57+00:00","text":"Summary: Batch - Service Issues\nDescription: At this time, Nvidia has fixed the underlying network connectivity issues experienced by GKE VMs to download the GPU drivers.\nThe support teams are checking to see if there are any other issues which may still be hampering customers.\nWe will get back with a current update by Tuesday, 2024-03-12 16:45 US/Pacific\nDiagnosis: Impacted customers with their VMs using Containerized Operating System (COS) would have been unable to download required NVIDIA GPU drivers.\nWorkaround: Customers could try a restart of services.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin 
(europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-03-12T23:29:39+00:00","modified":"2024-03-12T23:29:42+00:00","when":"2024-03-12T23:29:39+00:00","text":"The issue with Batch has been resolved as of Tuesday, 2024-03-12 16:28 US/Pacific.\nWe understand that this issue impacted your ability to access and use our services, we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nWe thank you for your patience while we worked on resolving the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago 
(southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"8XjnU88URVtZrAL8KRvA","service_name":"Batch","affected_products":[{"title":"Batch","id":"8XjnU88URVtZrAL8KRvA"}],"uri":"incidents/Tju36qhNez1jaeU1RjrP","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"e6Fv5caQJ4MvSF68RdSv","number":"10940167850288764884","begin":"2024-03-12T20:33:44+00:00","created":"2024-03-12T20:59:27+00:00","end":"2024-03-13T00:13:18+00:00","modified":"2024-03-13T00:13:19+00:00","external_desc":"Customers may experience elevated latencies in Apigee Sense.","updates":[{"created":"2024-03-13T00:13:17+00:00","modified":"2024-03-13T00:13:20+00:00","when":"2024-03-13T00:13:17+00:00","text":"The issue with Apigee Sense has been resolved for all affected projects as of Tuesday, 2024-03-12 16:18 
US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-03-12T22:54:31+00:00","modified":"2024-03-12T22:54:37+00:00","when":"2024-03-12T22:54:31+00:00","text":"Summary: Customers may experience elevated latencies in Apigee Sense.\nDescription: Mitigation work is still underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2024-03-12 17:00 US/Pacific.\nDiagnosis: Impacted customers may experience delays in bot detection, which may in turn affect blocking rules.\nThe Apigee Sense Protection feature is working as intended.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw 
(europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-03-12T21:50:15+00:00","modified":"2024-03-12T21:50:21+00:00","when":"2024-03-12T21:50:15+00:00","text":"Summary: Customers may experience elevated latencies in Apigee Sense.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2024-03-12 16:00 US/Pacific.\nDiagnosis: Impacted customers may experience delays in bot detection, which may in turn affect blocking rules.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago 
(southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-03-12T20:59:24+00:00","modified":"2024-03-12T20:59:28+00:00","when":"2024-03-12T20:59:24+00:00","text":"Summary: Customers may experience elevated latencies in Apigee Sense API.\nDescription: We are experiencing an issue with Apigee Sense beginning at Tuesday, 2024-03-12 10:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-03-12 15:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience delays in bot detection, which may in turn affect blocking rules.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-03-13T00:13:17+00:00","modified":"2024-03-13T00:13:20+00:00","when":"2024-03-13T00:13:17+00:00","text":"The issue with Apigee Sense has been resolved for all affected projects as of Tuesday, 2024-03-12 16:18 US/Pacific.\nWe thank you 
for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"9Y13BNFy4fJydvjdsN3X","service_name":"Apigee","affected_products":[{"title":"Apigee","id":"9Y13BNFy4fJydvjdsN3X"}],"uri":"incidents/e6Fv5caQJ4MvSF68RdSv","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich 
(europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"aRSt8sTQLKMTVgdbbK6P","number":"11266531992261318449","begin":"2024-03-12T15:00:00+00:00","created":"2024-03-12T21:06:20+00:00","end":"2024-03-12T21:55:00+00:00","modified":"2024-03-13T18:11:19+00:00","external_desc":"Customers using Container-Optimized OS (COS) in Google Kubernetes Engine (GKE) were not able to fetch specific NVIDIA GPU drivers.","updates":[{"created":"2024-03-13T18:10:58+00:00","modified":"2024-03-13T18:10:58+00:00","when":"2024-03-13T18:10:58+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service outage may have caused. We would like to provide some information about this incident below. Please note that Google worked with the appropriate partner to resolve the underlying issue. This is the final version of the report and no further information will be provided here. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 12 March 2024 08:00\n**Incident End:** 12 March 2024 14:55\n**Duration:** 6 hours, 55 minutes\n**Affected Services and Features:**\nGoogle Kubernetes Engine (GKE)\n**Regions/Zones:** [All GPU regions and zones](https://cloud.google.com/compute/docs/gpus/gpu-regions-zones)\n**Description:**\nGoogle Kubernetes Engine experienced elevated errors due to failures in downloading some NVIDIA GPU drivers for use with Container-Optimized OS (COS) for a duration of 6 hours, 55 minutes. These failures in downloading the GPU drivers led to node unavailability in some cases and impacted customers using T4, L4, H100 80GB and A100 GPUs, [COS milestone](https://cloud.google.com/container-optimized-os/docs/release-notes) 105 or above, and those who were attempting to install GPU driver versions R525 and above.\nFrom the preliminary analysis, the root cause of the issue was an access issue to the storage bucket required for driver downloads. This is owned by our partner that supplies these GPU drivers. To limit the impact, Google Cloud took swift actions while the issue was happening, by halting automatic node recreations (which attempt GPU driver downloads) until the issue was mitigated. Other GKE features continued to operate normally without disruption.\n**Customer Impact:**\n* GKE users encountered an error \"Failed to download GPU driver installer, status: 403 Forbidden\" on the GPU node when installing affected GPU drivers using COS. 
In some cases, the GPU driver download failures led to node unavailability.\n* GPU driver downloads for GPU models P4, P100, V100, K80 were unaffected.\n--------","status":"AVAILABLE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-03-12T22:39:13+00:00","modified":"2024-03-12T22:51:00+00:00","when":"2024-03-12T22:39:13+00:00","text":"The issue with Google Kubernetes Engine affecting customers using NVIDIA GPU drivers on Container-Optimized OS (COS) has been resolved as of Tuesday, 2024-03-12 15:09 US/Pacific.\nCustomers who have disabled auto-repair may need to recreate or restart the affected nodes to regain functionality.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi 
(asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-03-12T21:06:19+00:00","modified":"2024-03-12T21:30:33+00:00","when":"2024-03-12T21:06:19+00:00","text":"Summary: Customers using Container-Optimized OS (COS) in Google Kubernetes Engine (GKE) may not be able to fetch specific NVIDIA GPU drivers\nDescription: We are investigating an issue with Google Kubernetes Engine affecting customers using NVIDIA GPU drivers on Container-Optimized OS (COS). Affected Nodes that are newly created or recreated have non-functional GPU drivers, preventing workloads that use the drivers from functioning. Drivers for some GPU models (P4, P100, V100, K80) are unaffected.\nOur engineering team continues to work towards resolving the driver fetching issue.\nWe will provide more information by Tuesday, 2024-03-12 18:00 US/Pacific.\nWe apologize to all who are affected by the disruption.\nDiagnosis: GKE users will see error messages of this nature on the GPU node when installing the GPU driver: \"Failed to download GPU driver installer, status: 403 Forbidden\".\nWorkaround: None at this time. However, the issue can be mitigated by avoiding recreation of existing Nodes running GPUs. 
Note GCP has halted automatic Node recreation as a partial mitigation.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-03-13T18:10:58+00:00","modified":"2024-03-13T18:10:58+00:00","when":"2024-03-13T18:10:58+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service outage may have caused. We would like to provide some information about this incident below. Please note that Google worked with the appropriate partner to resolve the underlying issue. This is the final version of the report and no further information will be provided here. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 12 March 2024 08:00\n**Incident End:** 12 March 2024 14:55\n**Duration:** 6 hours, 55 minutes\n**Affected Services and Features:**\nGoogle Kubernetes Engine (GKE)\n**Regions/Zones:** [All GPU regions and zones](https://cloud.google.com/compute/docs/gpus/gpu-regions-zones)\n**Description:**\nGoogle Kubernetes Engine experienced elevated errors due to failures in downloading some NVIDIA GPU drivers for use with Container-Optimized OS (COS) for a duration of 6 hours, 55 minutes. These failures in downloading the GPU drivers led to node unavailability in some cases and impacted customers using T4, L4, H100 80GB and A100 GPUs, [COS milestone](https://cloud.google.com/container-optimized-os/docs/release-notes) 105 or above, and those who were attempting to install GPU driver versions R525 and above.\nPreliminary analysis indicates the root cause was an access problem affecting the storage bucket required for driver downloads; the bucket is owned by the partner that supplies these GPU drivers. To limit the impact, Google Cloud took swift action while the issue was ongoing, halting automatic node recreations (which attempt GPU driver downloads) until the issue was mitigated. Other GKE features continued to operate normally without disruption.\n**Customer Impact:**\n* GKE users encountered an error \"Failed to download GPU driver installer, status: 403 Forbidden\" on the GPU node when installing affected GPU drivers using COS. In some cases, the GPU driver download failures led to node unavailability.\n* GPU driver downloads for GPU models P4, P100, V100, K80 were unaffected.\n--------","status":"AVAILABLE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São 
Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"LCSbT57h59oR4W98NHuz","service_name":"Google Kubernetes Engine","affected_products":[{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"}],"uri":"incidents/aRSt8sTQLKMTVgdbbK6P","currently_affected_locations":[],"previously_affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"ZBsbPWZsVpyuWnSBwFKP","number":"6896606308029484072","begin":"2024-03-05T21:39:42+00:00","created":"2024-03-05T22:10:23+00:00","end":"2024-03-06T04:52:31+00:00","modified":"2024-03-06T04:52:31+00:00","external_desc":"Some customers might experience elevated latency in UDM search for Chronicle 
security.","updates":[{"created":"2024-03-06T04:52:30+00:00","modified":"2024-03-06T04:52:32+00:00","when":"2024-03-06T04:52:30+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Tuesday, 2024-03-05 18:00 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-03-05T22:57:52+00:00","modified":"2024-03-05T22:57:54+00:00","when":"2024-03-05T22:57:52+00:00","text":"Summary: Some customers might experience elevated latency in UDM search for Chronicle security.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2024-03-05 21:30 US/Pacific.\nDiagnosis: Customers may see a degraded performance or it could completely error out.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-03-05T22:10:17+00:00","modified":"2024-03-05T22:10:24+00:00","when":"2024-03-05T22:10:17+00:00","text":"Summary: Some customers might experience elevated latency in UDM search for Chronicle security.\nDescription: We are experiencing an issue with Chronicle Security beginning at Monday, 2024-03-04 06:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-03-05 15:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers may see a degraded performance or it could completely error out.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-03-06T04:52:30+00:00","modified":"2024-03-06T04:52:32+00:00","when":"2024-03-06T04:52:30+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Tuesday, 2024-03-05 18:00 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/ZBsbPWZsVpyuWnSBwFKP","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"M2qQyWWU9FKvVc2FfcaH","number":"184633653096742397","begin":"2024-02-29T07:50:00+00:00","created":"2024-02-29T11:54:37+00:00","end":"2024-02-29T17:50:00+00:00","modified":"2024-02-29T20:53:47+00:00","external_desc":"Chronicle Security and Chronicle SOAR experiencing high Remote Procedure Calls (RPC) error rate in Multiregion/us","updates":[{"created":"2024-02-29T20:53:28+00:00","modified":"2024-02-29T20:53:28+00:00","when":"2024-02-29T20:53:28+00:00","text":"A mini incident report has been posted on the Google\nCloud Service Health Dashboard at https://status.cloud.google.com/incidents/JAxa64hv45yMYzCpqx1T","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-02-29T14:29:30+00:00","modified":"2024-02-29T14:29:38+00:00","when":"2024-02-29T14:29:30+00:00","text":"The issue with Chronicle Security, Chronicle SOAR has been resolved for all affected users as of Thursday, 2024-02-29 06:00 US/Pacific.\nWe thank you for your patience while we worked 
on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-02-29T13:26:49+00:00","modified":"2024-02-29T13:26:57+00:00","when":"2024-02-29T13:26:49+00:00","text":"Summary: Chronicle Security and Chronicle SOAR experiencing high Remote Procedure Calls (RPC) error rate in Multiregion/us\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-02-29 07:00 US/Pacific.\nDiagnosis: Impacted users may experience high failure rate for Backstory API calls\nWorkaround: None at this time","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-02-29T12:06:10+00:00","modified":"2024-02-29T12:06:16+00:00","when":"2024-02-29T12:06:10+00:00","text":"Summary: Chronicle Security and Chronicle SOAR experiencing high Remote Procedure Calls (RPC) error rate in Multiregion/us\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-02-29 05:40 US/Pacific.\nDiagnosis: Impacted users may experience high failure rate for Backstory API calls\nWorkaround: None at this time","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-02-29T11:54:33+00:00","modified":"2024-02-29T14:20:00+00:00","when":"2024-02-29T11:54:33+00:00","text":"Summary: High Remote Procedure Calls (RPC) error rate in Multiregion/us\nDescription: We are experiencing an issue with Chronicle Security and Chronicle SOAR connectors beginning on Wednesday, 2024-02-28 23:50 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-02-29 04:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted users may experience high failure rate for Backstory API calls\nWorkaround: None at this time","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-02-29T20:53:28+00:00","modified":"2024-02-29T20:53:28+00:00","when":"2024-02-29T20:53:28+00:00","text":"A mini incident report has been posted on the Google\nCloud Service Health Dashboard at https://status.cloud.google.com/incidents/JAxa64hv45yMYzCpqx1T","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"},{"title":"Chronicle SOAR","id":"GTT16Lf72XZKWArC9VxA"}],"uri":"incidents/M2qQyWWU9FKvVc2FfcaH","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"JAxa64hv45yMYzCpqx1T","number":"2264297634052042910","begin":"2024-02-29T07:50:00+00:00","created":"2024-02-29T15:34:33+00:00","end":"2024-02-29T17:50:00+00:00","modified":"2024-02-29T20:51:47+00:00","external_desc":"Chronicle Security, Chronicle SOAR. High Remote Procedure Calls (RPC) error rate in Multiregion/us","updates":[{"created":"2024-02-29T20:51:19+00:00","modified":"2024-02-29T20:51:19+00:00","when":"2024-02-29T20:51:19+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption may have caused. 
We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 28 February 2024 23:50\n**Incident End:** 29 February 2024 09:50\n**Duration:** 10 hours\n**Affected Services and Features:**\n* Chronicle Security - Security Information and Event Management (SIEM)\n* Chronicle Security Orchestration, Automation and Response (SOAR)\n**Regions/Zones:** US/multi-region\n**Description:**\nChronicle Security SIEM and Chronicle SOAR experienced elevated Remote Procedure Calls (RPC) error rate for Backstory API calls in US/multi-region for a total duration of 10 hours. A newly activated service generated additional traffic and caused resource contention, which impacted overall RPC performance, latency, and error rate.\nOur engineers identified a subset of the root cause processes and eliminated them on Thursday, 29 February 2024 06:18 US/Pacific. However, some of these processes were missed in the initial analysis, causing additional traffic and resource contention at 07:13. Engineers took additional steps, further mitigating the issue at 09:50.\nAt this time, we do not believe any additional actions are needed to prevent recurrence of this issue.\n**Customer Impact:**\n* Backstory API calls would have failed with Remote Procedure Calls (RPC) errors.\n------","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-02-29T16:21:00+00:00","modified":"2024-02-29T16:21:08+00:00","when":"2024-02-29T16:21:00+00:00","text":"The issue with Chronicle Security, Chronicle SOAR has been resolved for all affected projects as of Thursday, 2024-02-29 08:20 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-02-29T15:34:29+00:00","modified":"2024-02-29T15:34:36+00:00","when":"2024-02-29T15:34:29+00:00","text":"Summary: Chronicle Security, Chronicle SOAR. High Remote Procedure Calls (RPC) error rate in Multiregion/us\nDescription: We are experiencing an issue with Chronicle Security, Chronicle SOAR.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-02-29 09:00 US/Pacific with current details.\nDiagnosis: Impacted users may experience high failure rate for Backstory API calls\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-02-29T20:51:19+00:00","modified":"2024-02-29T20:51:19+00:00","when":"2024-02-29T20:51:19+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 28 February 2024 23:50\n**Incident End:** 29 February 2024 09:50\n**Duration:** 10 hours\n**Affected Services and Features:**\n* Chronicle Security - Security Information and Event Management (SIEM)\n* Chronicle Security Orchestration, Automation and Response (SOAR)\n**Regions/Zones:** US/multi-region\n**Description:**\nChronicle Security SIEM and Chronicle SOAR experienced elevated Remote Procedure Calls (RPC) error rate for Backstory API calls in US/multi-region for a total duration of 10 hours. A newly activated service generated additional traffic and caused resource contention, which impacted overall RPC performance, latency, and error rate.\nOur engineers identified a subset of the root cause processes and eliminated them on Thursday, 29 February 2024 06:18 US/Pacific. However, some of these processes were missed in the initial analysis, causing additional traffic and resource contention at 07:13. Engineers took additional steps, further mitigating the issue at 09:50.\nAt this time, we do not believe any additional actions are needed to prevent recurrence of this issue.\n**Customer Impact:**\n* Backstory API calls would have failed with Remote Procedure Calls (RPC) errors.\n------","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"},{"title":"Chronicle SOAR","id":"GTT16Lf72XZKWArC9VxA"}],"uri":"incidents/JAxa64hv45yMYzCpqx1T","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"NcJn1aK8CTHXMvqwaXGJ","number":"4661024780644636711","begin":"2024-02-28T22:53:07+00:00","created":"2024-02-28T23:05:58+00:00","end":"2024-02-29T12:58:42+00:00","modified":"2024-02-29T12:58:43+00:00","external_desc":"Chronicle security is experiencing issues with logs being uploaded through some forwarders.","updates":[{"created":"2024-02-29T12:58:41+00:00","modified":"2024-02-29T12:58:48+00:00","when":"2024-02-29T12:58:41+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Thursday, 2024-02-29 04:58 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-02-29T10:07:09+00:00","modified":"2024-02-29T10:07:23+00:00","when":"2024-02-29T10:07:09+00:00","text":"Summary: Chronicle security is experiencing issues with logs being uploaded through some forwarders.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-02-29 06:00 US/Pacific.\nDiagnosis: A small subset of customers would encounter error “rpc error: code = Unknown desc = Unknown Error” while ingesting logs within the forwarder which prevents logs from being uploaded to Chronicle through some forwarders.\nWorkaround: None at this moment.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-02-29T05:39:45+00:00","modified":"2024-02-29T05:39:49+00:00","when":"2024-02-29T05:39:45+00:00","text":"Summary: Chronicle 
security is experiencing issues with logs being uploaded through some forwarders.\nDescription:\nWe are experiencing an issue with Chronicle Security beginning on Wednesday, 2024-02-28 03:00 US/Pacific.\nOur engineering team continues to investigate the issue and is actively working to determine the mitigation plan.\nWe will provide an update by Thursday, 2024-02-29 06:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: A small subset of customers would encounter error “rpc error: code = Unknown desc = Unknown Error” while ingesting logs within the forwarder which prevents logs from being uploaded to Chronicle through some forwarders.\nWorkaround: None at this moment.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-02-29T01:57:51+00:00","modified":"2024-02-29T01:57:55+00:00","when":"2024-02-29T01:57:51+00:00","text":"Summary: Chronicle security is experiencing issues with logs being uploaded through some forwarders.\nDescription:\nWe are experiencing an issue with Chronicle Security beginning on Wednesday, 2024-02-28 03:00 US/Pacific.\nOur engineering team continues to investigate the issue and is actively working to determine the mitigation plan.\nWe will provide an update by Wednesday, 2024-02-28 23:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: A small subset of customers would encounter error “rpc error: code = Unknown desc = Unknown Error” while ingesting logs within the forwarder which prevents logs from being uploaded to Chronicle through some forwarders.\nWorkaround: None at this moment.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-02-29T00:41:14+00:00","modified":"2024-02-29T00:41:19+00:00","when":"2024-02-29T00:41:14+00:00","text":"Summary: Chronicle security is experiencing issues with logs being uploaded through some forwarders.\nDescription: We are experiencing an issue with Chronicle Security beginning on Wednesday, 2024-02-28 03:00 US/Pacific.\nOur engineering team continues to investigate the issue and is actively working to determine the mitigation plan.\nWe will provide an update by Wednesday, 2024-02-28 19:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: A small subset of customers would encounter error “rpc error: code = Unknown desc = Unknown Error” while ingesting logs within the forwarder which prevents logs from being uploaded to Chronicle through some forwarders.\nWorkaround: None at this moment.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2024-02-28T23:05:55+00:00","modified":"2024-02-28T23:05:58+00:00","when":"2024-02-28T23:05:55+00:00","text":"Summary: Chronicle security is experiencing issues with logs being uploaded through some forwarders.\nDescription: We are experiencing an issue with Chronicle Security beginning on Wednesday, 2024-02-28 03:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-02-28 17:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: A small subset of customers would encounter error “rpc error: code = Unknown desc = Unknown Error” while ingesting logs within the forwarder which prevents logs from being uploaded to Chronicle through some forwarders.\nWorkaround: None 
at this moment.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-02-29T12:58:41+00:00","modified":"2024-02-29T12:58:48+00:00","when":"2024-02-29T12:58:41+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Thursday, 2024-02-29 04:58 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/NcJn1aK8CTHXMvqwaXGJ","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"cMASqwHSYhdvPDqDNcR4","number":"11906408633447940320","begin":"2024-02-27T18:56:43+00:00","created":"2024-02-28T14:31:39+00:00","end":"2024-02-29T19:19:14+00:00","modified":"2024-02-29T19:19:14+00:00","external_desc":"Issue with Google Cloud Console, Cloud Workstation, Cloud Shell, GKE and Persistent Disk.","updates":[{"created":"2024-02-29T19:19:12+00:00","modified":"2024-02-29T19:19:15+00:00","when":"2024-02-29T19:19:12+00:00","text":"Our Engineering team has mitigated the issue after we rolled back the recent change. For a few customers who are still observing the impact, we are working closely with them on a workaround.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nNo further updates will be provided here.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo 
(southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-02-29T18:02:16+00:00","modified":"2024-02-29T18:02:20+00:00","when":"2024-02-29T18:02:16+00:00","text":"Summary: Issue with Google Cloud Console, Cloud Workstation, Cloud Shell, GKE and Persistent Disk.\nDescription: Mitigation work is currently underway by our engineering team. Our engineering team has identified a workaround and are in the process of implementing them for Cloud Workstation.\nWe do not have an ETA for full mitigation at this point.\nWe will provide more information by Thursday, 2024-02-29 12:00 US/Pacific.\nDiagnosis: **Cloud Console:** Some customers might be unable to use Cloud Shell.\n**Cloud Workstation:** The impacted customers are experiencing intermittent delays or failure with starting Workstation instances.\n**Persistent Disk:** Customers may experience issues while re-attaching and mounting Regional Persistent Disk to VMs and issuing I/Os.\n**GKE :** Customers may experience issues while attaching Regional Persistent Disk backed Persistent Volume to GKE Nodes\nWorkaround:\n**Cloud Shell:** Customers can try to use the ephemeral mode.\n**Cloud Workstation:** Users may retry the start workstation operation or create and start a new workstation.\n**Persistent Disk:** None at this time.\n**GKE:** None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto 
(northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-02-29T04:25:02+00:00","modified":"2024-02-29T04:25:06+00:00","when":"2024-02-29T04:25:02+00:00","text":"Summary: Issue with Google Cloud Console, Cloud Workstation, Cloud Shell, GKE and Persistent Disk.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-02-29 10:00 US/Pacific.\nDiagnosis:\n**Cloud Console:** Some customers might be unable to use Cloud Shell.\n**Cloud Workstation:** The impacted customers are experiencing intermittent delays or failure with starting Workstation instances.\n**Persistent Disk:** Customers may experience issues while attaching Regional Persistent Disk to VMs and issuing I/Os.\n**GKE:** Customers may experience issues while attaching Regional Persistent Disk backed Persistent Volume to GKE Nodes\nWorkaround:\n**Cloud Shell:** Customers can try to use the ephemeral mode.\n**Cloud Workstation:** Users may retry the start workstation operation or create and start a new workstation.\n**Persistent Disk:** None at this time.\n**GKE:** None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto 
(northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-02-29T03:40:02+00:00","modified":"2024-02-29T03:40:06+00:00","when":"2024-02-29T03:40:02+00:00","text":"Summary: Issue with Google Cloud Console, Cloud Workstation, Cloud Shell and Persistent Disk.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-02-29 10:00 US/Pacific.\nDiagnosis: **Cloud Console:** Some customers might be unable to use Cloud Shell.\n**Cloud Workstation:** The impacted customers are experiencing intermittent delays or failure with starting Workstation instances.\n**Persistent Disk:** Customers may experience issues while attaching Regional Persistent Disk to VMs and issuing I/Os.\nWorkaround: **Cloud Shell:** Customers can try to use the ephemeral mode.\n**Cloud Workstation:** Users may retry the start workstation operation or create and start a new workstation.\n**Persistent Disk:** None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa 
(us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-02-29T01:38:29+00:00","modified":"2024-02-29T01:38:32+00:00","when":"2024-02-29T01:38:29+00:00","text":"Summary: Cloud Workstations is experiencing intermittent issues with starting Workstation instances.\nDescription: Our engineering team continues to work on the mitigation steps.\nWe do not have an ETA for mitigation completion at this point.\nWe will provide more information by Wednesday, 2024-02-28 20:00 US/Pacific.\nWe deeply appreciate your patience and understanding while we work towards resolving the issue.\nDiagnosis: The impacted customers are experiencing intermittent delays or failure with starting Workstation instances.\nWorkaround: Users may retry the start workstation operation.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas 
(us-west4)","id":"us-west4"}]},{"created":"2024-02-28T20:52:42+00:00","modified":"2024-02-28T20:52:45+00:00","when":"2024-02-28T20:52:42+00:00","text":"Summary: Cloud Workstations is experiencing intermittent issues with starting Workstation instances.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-02-28 18:00 US/Pacific.\nDiagnosis: Affected customers are experiencing intermittent delays or failure with starting Workstation instances.\nWorkaround: Users may retry the start workstation operation.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2024-02-28T14:50:03+00:00","modified":"2024-02-28T14:50:10+00:00","when":"2024-02-28T14:50:03+00:00","text":"Summary: Cloud Workstations is experiencing intermittent issues with starting Workstation instances\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-02-28 13:00 US/Pacific.\nDiagnosis: Affected customers are experiencing intermittent delays or failure with starting Workstation instances.\nWorkaround: Users may retry the start workstation operation.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2024-02-28T14:31:30+00:00","modified":"2024-02-28T14:31:41+00:00","when":"2024-02-28T14:31:30+00:00","text":"Summary: Cloud Workstations is experiencing intermittent issues with starting Workstation instances\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-02-28 11:00 US/Pacific.\nDiagnosis: Affected customers are experiencing intermittent delays or failure with starting Workstation instances.\nWorkaround: Users may retry the start workstation operation.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium 
(europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"}]}],"most_recent_update":{"created":"2024-02-29T19:19:12+00:00","modified":"2024-02-29T19:19:15+00:00","when":"2024-02-29T19:19:12+00:00","text":"Our Engineering team has mitigated the issue after we rolled back the recent change. For a few customers who are still observing the impact, we are working closely with them on a workaround.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nNo further updates will be provided here.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Workstations","id":"5UUXCiH1vfFHXmbDixrB"},{"title":"Google Compute 
Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"},{"title":"Persistent Disk","id":"SzESm2Ux129pjDGKWD68"},{"title":"Google Cloud Console","id":"Wdsr1n5vyDvCt78qEifm"}],"uri":"incidents/cMASqwHSYhdvPDqDNcR4","currently_affected_locations":[],"previously_affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"PeLsdQ7XaTwTDQbMoRRh","number":"872468119704692812","begin":"2024-02-22T12:31:34+00:00","created":"2024-02-22T12:56:48+00:00","end":"2024-02-24T03:15:16+00:00","modified":"2024-02-24T03:15:17+00:00","external_desc":"Cloud Asset Data Ingestion Issue","updates":[{"created":"2024-02-24T03:15:10+00:00","modified":"2024-02-24T03:15:18+00:00","when":"2024-02-24T03:15:10+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Friday, 2024-02-23 19:15 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London 
(europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},{"created":"2024-02-22T17:57:49+00:00","modified":"2024-02-22T17:58:32+00:00","when":"2024-02-22T17:57:49+00:00","text":"Summary: Cloud Asset Data Ingestion Issue\nDescription: We have identified the cause of the Cloud Asset Metadata logs issue which started on Wednesday, 2024-02-14 02:50 US/Pacific.\nMitigation work is currently underway by our engineering team.\nThe mitigation expected time of completion has been revised to Friday, 2024-02-23 20:00 US/Pacific.\nWe will provide more information by Friday, 2024-02-23 21:00 US/Pacific.\nDiagnosis: Customers are unable to receive new Cloud Asset Metadata logs.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},{"created":"2024-02-22T13:36:21+00:00","modified":"2024-02-22T13:36:28+00:00","when":"2024-02-22T13:36:21+00:00","text":"Summary: Cloud Asset Data Ingestion Issue\nDescription: We have identified the cause of the Cloud Asset Metadata logs issue which started on Wednesday, 2024-02-14 02:50 US/Pacific. Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to be completed by Thursday, 2024-02-22 21:00 US/Pacific.\nWe will provide more information by Thursday, 2024-02-22 21:30 US/Pacific.\nDiagnosis: Customers are unable to receive new Cloud Asset Metadata logs.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},{"created":"2024-02-22T13:00:59+00:00","modified":"2024-02-22T13:01:09+00:00","when":"2024-02-22T13:00:59+00:00","text":"Summary: Cloud Asset Metadata logs Issue\nDescription: Our engineering team has identified the cause of this issue and mitigation work is currently underway.\nThe mitigation is expected to be completed by Thursday, 2024-02-22 21:00 US/Pacific.\nWe will provide more information by Thursday, 2024-02-22 21:30 US/Pacific.\nDiagnosis: Impacted customers are not receiving Cloud Asset Metadata logs\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2024-02-22T12:56:44+00:00","modified":"2024-02-22T12:56:51+00:00","when":"2024-02-22T12:56:44+00:00","text":"Summary: Cloud Asset Metadata logs Issue\nDescription: Our engineering team has identified the cause of this issue and mitigation work is currently underway.\nThe 
mitigation is expected to be completed by Thursday, 2024-02-22 21:00 US/Pacific.\nWe will provide more information by Thursday, 2024-02-22 21:30 US/Pacific.\nDiagnosis: Impacted customers are not receiving Cloud Asset Metadata logs\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]}],"most_recent_update":{"created":"2024-02-24T03:15:10+00:00","modified":"2024-02-24T03:15:18+00:00","when":"2024-02-24T03:15:10+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Friday, 2024-02-23 19:15 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/PeLsdQ7XaTwTDQbMoRRh","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},{"id":"UVqE9hMiLVCpxgt9T9L9","number":"15175611085338028653","begin":"2024-02-15T14:27:25+00:00","created":"2024-02-15T14:54:20+00:00","end":"2024-02-15T21:04:47+00:00","modified":"2024-02-15T21:04:47+00:00","external_desc":"Legacy AutoML Tables training jobs failing","updates":[{"created":"2024-02-15T21:04:40+00:00","modified":"2024-02-15T21:04:48+00:00","when":"2024-02-15T21:04:40+00:00","text":"We now have confirmation that the issue with AutoML Tables has been resolved for all affected projects as of Thursday, 2024-02-15 11:00 US/Pacific, when the mitigation was applied.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa 
(us-central1)","id":"us-central1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-02-15T20:53:45+00:00","modified":"2024-02-15T20:53:49+00:00","when":"2024-02-15T20:53:45+00:00","text":"Summary: Legacy AutoML Tables training jobs failing\nDescription: After further investigation, our engineers has identified that more validation is required in order to confirm the effectiveness of the mitigation that was implemented, considering multiple intricacies.\nPlease be assured that we are continuing to work on this with highest priority.\nWe will provide more information by Thursday, 2024-02-15 15:30 US/Pacific.\nDiagnosis: Legacy AutoML Tables training jobs will fail with \"Internal Error\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-02-15T19:53:31+00:00","modified":"2024-02-15T19:53:34+00:00","when":"2024-02-15T19:53:31+00:00","text":"Summary: Legacy AutoML Tables training jobs failing\nDescription: Our Engineers are still performing validation, and we would be requiring additional time to validate. Please be assured that we are working on this with highest priority.\nWe will provide more information by Thursday, 2024-02-15 13:00 US/Pacific.\nDiagnosis: Legacy AutoML Tables training jobs will fail with \"Internal Error\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-02-15T18:29:14+00:00","modified":"2024-02-15T18:29:26+00:00","when":"2024-02-15T18:29:14+00:00","text":"Summary: Legacy AutoML Tables training jobs failing\nDescription: A fix has been rolled out to production by our Engineering team. 
We are currently in the process of validating the results to ensure the issue has been fully mitigated in production.\nWe will provide more information by Thursday, 2024-02-15 11:30 US/Pacific.\nDiagnosis: Legacy AutoML Tables training jobs will fail with \"Internal Error\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-02-15T15:56:31+00:00","modified":"2024-02-15T15:56:37+00:00","when":"2024-02-15T15:56:31+00:00","text":"Summary: Legacy AutoML Tables training jobs failing\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-02-15 10:30 US/Pacific.\nDiagnosis: Legacy AutoML Tables training jobs will fail with \"Internal Error\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-02-15T14:54:17+00:00","modified":"2024-02-15T14:54:25+00:00","when":"2024-02-15T14:54:17+00:00","text":"Summary: Legacy AutoML Tables training jobs failing\nDescription: We are experiencing an issue with Legacy AutoML Tables.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-02-15 08:05 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Legacy AutoML Tables training jobs will fail with \"Internal Error\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2024-02-15T21:04:40+00:00","modified":"2024-02-15T21:04:48+00:00","when":"2024-02-15T21:04:40+00:00","text":"We now have confirmation that the issue with AutoML Tables has been resolved for all affected projects as of Thursday, 2024-02-15 11:00 US/Pacific, when the mitigation was applied.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"AutoML Tables","id":"khZkkfZshNi7aiJGRoND"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"}],"uri":"incidents/UVqE9hMiLVCpxgt9T9L9","currently_affected_locations":[],"previously_affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa 
(us-central1)","id":"us-central1"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"id":"u6rQ2nNVbhAFqGCcTm58","number":"1117262759556150718","begin":"2024-02-14T17:45:00+00:00","created":"2024-02-14T19:05:44+00:00","end":"2024-02-14T20:52:00+00:00","modified":"2024-02-28T17:05:43+00:00","external_desc":"Multiple Google Cloud Products are experiencing issues in us-west1","updates":[{"created":"2024-02-21T21:39:40+00:00","modified":"2024-02-28T17:05:43+00:00","when":"2024-02-21T21:39:40+00:00","text":"# Incident Report\n## Summary\nOn 14 February 2024 from 09:45 AM to 12:52 PM US/Pacific, Google Cloud customers in us-west1 experienced control plane unavailability because of elevated latencies and errors. In addition, a few services experienced data plane unavailability for the same reason. The full list of impacted products and services are detailed below.\nTo our Google Cloud customers whose businesses were impacted during this outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer you.\n## Root Cause\nMost Google Cloud products and services use a regional metadata store to support their internal operations. The metadata store supports critical functions such as servicing customer requests and handling scale, load balancing, admin operations and for retrieving/storing metadata including server location information.\nThe regional metadata store continuously manages load by automatically adjusting compute capacity in response to changes in demand. When usage increases, additional resources are added and load is also balanced automatically. However, an unexpected spike in demand exceeded the system’s ability to quickly provision additional resources. As a result, multiple Google Cloud products and services in the region experienced elevated latencies and errors until the unexpected load was isolated.\n## Remediation and Prevention/Detection\nGoogle engineers were alerted to this problem by our internal monitoring system and throttled the spiking workloads on the underlying regional metadata store. This allowed Google Cloud products and services to read/write state at a normal rate allowing for healthy servicing of customer requests after the backlog of operations on the regional metadata store were processed.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n- Improve monitoring and alerting for an earlier detection of unexpected spikes in the regional metadata stores.\n- Enhance the ability of regional data stores to automatically throttle workloads more aggressively when experiencing unexpected spikes.\n## Detailed Description of Impact\n### Google Compute Engine\n* From 10:30 AM to 12:30 PM US/Pacific, a number of GCE APIs returned internal or timeout errors in the us-west1 region, across all zones. 
The overall error rate for most APIs remained around 1% with errors affecting around 33% of projects in the region.\n* From 10:00 AM to 12:00 PM US/Pacific, less than 0.004% of VMs running in the region crashed; 80% of the crashed VMs recovered, while 20% experienced delays in rescheduling.\n* From 10:00 AM to 12:00 PM US/Pacific, about 1% of read/write requests in the region for [guest attributes](https://cloud.google.com/compute/docs/metadata/overview#guest_attributes) failed or experienced latency exceeding 1 second.\n### Google Cloud Pub/Sub Lite\n* From 10:05 AM to 12:00 PM US/Pacific, customers may have experienced elevated end-to-end latency and publish request latency in us-west1. At peak impact, 65% of publish requests in the region failed with aborted, canceled, deadline exceeded, or unavailable errors, affecting up to 47% of projects publishing in the region. Resource administrative operations also displayed unavailability in the region.\n### App Engine Standard\n* From 09:45 AM to 11:42 AM US/Pacific, ~6% of customers in the region experienced deployment failures and latency on deployments for Google App Engine apps.\n### Cloud Functions\n* From 09:45 AM to 11:42 AM US/Pacific, ~6% of customers in the region experienced deployment failures and latency on deployments for Cloud Function apps.\n### Cloud Run\n* From 09:45 AM to 11:42 AM US/Pacific, ~2% of customers in the region experienced deployment failures and latency on deployments for Cloud Run apps.\n### Dialogflow CX\n* From 9:45 AM to 11:45 AM US/Pacific, a percentage of Dialogflow requests returned internal or timeout errors in the us-west1 region. The error rate stayed below 5% before peaking at 100% around 11:00 US/Pacific.\n### Vertex AI products:\n* From about 10:15 AM to 11:35 AM US/Pacific, all Vertex AI services that heavily rely on metadata store operations including Online Prediction, Training, and Featurestore, ML Metadata and Notebooks experienced ~50% error rates (spiking to near 100% at times) in the region.\n### Google Cloud Pub/Sub\n* From 09:57 AM to 11:26 AM US/Pacific, Cloud Pub/Sub customers with traffic in us-west1 experienced publish errors and unavailability. The publish error rate peaked at ~99% for customers with publish traffic in us-west1. In addition, backlog stats metrics were unavailable for some customers who did not have publish or subscribe traffic in us-west1.\n### Cloud Memorystore\n* From 10:05 AM to 11:35 AM US/Pacific, customers creating, updating, deleting, importing, or exporting Redis Standalone or Cluster instances in us-west1 may have experienced failures. Around 17% of such requests failed with timeouts or internal errors.\n### Eventarc\n* From 10:30 AM to 11:30 AM US/Pacific, Eventarc customers in us-west1 experienced event delivery delays for up to 50 minutes as we saw event publish errors in our dataplane that peaked at 100%. There were high error ratios and latencies for all control plane long-running operations, with a peak error rate of 100% and a peak latency of 55 minutes.\n### Dataproc Metastore\n* From 10:00 AM to 11:45 AM US/Pacific, all control plane operations were sporadically returning an internal or deadline exceeded error (differing ratios throughout the outage) in the region.
The peak impact was from around 10:30 AM to 11:30 AM US/Pacific where only around 3.33% of operations were completed with OK.\n### Google Cloud Tasks\n* From 10:40 AM to 11:30 AM US/Pacific, Cloud Tasks’s main data operation (CreateTask) returned DEADLINE_EXCEEDED error for all requests in us-west1. This means customers in this region were not able to buffer Tasks and, subsequently, our system was not able to dispatch them.\n### Cloud Build\n* From 9:45 AM to 12:00 PM US/Pacific, using Cloud Build to create or retry builds may have failed in us-west1. ~15-20% of requests failed during the issue.\n### Cloud SQL\n* From 10:06 AM to 11:46 AM US/Pacific, many operations requiring the regional metadata store in us-west1 timed out or failed; this affected Cloud SQL instance creations or any modifications to existing us-west1 instances. 30% of instance creations failed, 10% of export operations failed, and \u003c10% of update operations failed in us-west1.\n### Speech-to-text\n* From 10:30 AM to 11:30 AM US/Pacific, Speech-to-Text (STT) experienced a spike in server errors in us-west1. The issue primarily affected control plane traffic to client STT resources (e.g. CreateRecognizer) which experienced a spike in INTERNAL server errors. At peak, around 10% of Create.* traffic or around 0.5QPS of traffic returned such errors.\n### Cloud Load Balancing\n* From 9:50 AM to 12:52 PM US/Pacific, new Cloud Load Balancer creation was failing for load balancers with backends in the us-west1 region. Also, configuration changes on the same family of products could not be made. The data plane was not affected.\n### Cloud Networking\n* From 10:00 AM to 11:30 AM US/Pacific, Cloud NAT, Cloud Router, Cloud Interconnect, and Cloud VPN users experienced time-outs for add/delete/modify operations in the us-west1 region.\n* Existing programming and forwarding rules were not impacted.\n### Cloud Deploy\n* From 9:45 AM to 11:40 AM US/Pacific, Cloud Deploy releases and rollouts in the region were either delayed or failed due to the inability to create builds with Cloud Build which was also affected by the regional metadata store issue. Whether the release/rollout was delayed or failed depended on whether retrying was successful. We also saw errors creating or updating Cloud Deploy resources due to metadata store RPC errors at the time.\n### Workflows\n* From 10:10 AM to 11:30 AM US/Pacific, Cloud Workflows experienced latency and availability issues in the us-west1 region. This issue impacted ~2% of customer projects, and customers experienced internal errors like deadline_exceeded: metadata store reads could not be validated after transaction function returned error: context deadline exceeded. Example methods that were impacted: CancelExecutions, CreateExecutions, CreateWorkflows and TriggerPubsubExecution.\n### Cloud Logging\n* From 9:45 AM to 12:45 PM US/Pacific, Cloud Logs ingestion storage was delayed in the us-west1 region. This issue impacted ~12.5% of global buckets, but regional buckets do not seem to have been delayed.\n### Dataform\n* From 09:55 AM to 11:40 AM US/Pacific, our business-critical consumer API had its lowest availability of 14.29% in us-west1 during the metadata store outage.\n* The metadata store RPC error ratio peaked at 57.6807%.
This metadata store is used for executing customers' release \u0026 workflow schedules.\n### Certificate Authority Service\n* From 9:50 AM to 11:40 AM US/Pacific, 3% of overall traffic to Certificate Authority Service in us-west1 experienced slowness and errors for control and data plane operations. Customers experienced an error rate of 85% for Create Certificate Revocation List requests, while other operations were affected at a rate between 1% and 15%.\n### VPC and Serverless VPC Access\n* From 9:48 AM to 11:16 AM US/Pacific, Serverless VPC Access customers in us-west1 were unable to create, delete, modify or list Serverless VPC Access Connectors. We saw error rates hovering from 50% to 90% where customers would see DEADLINE_EXCEEDED. Serverless VPC Access Connector proxying functionality was unaffected by this incident.\n### Cloud Dataflow\n* From 10:03 AM to 12:22 PM US/Pacific, Dataflow customers in us-west1 experienced job submission failures peaking at 100%. ~6% of running streaming jobs experienced unusually high system watermarks. Up to 100% of running batch jobs were stuck and failed to make progress during the outage.\n### Cloud Key Management Service\n* From 10:32 AM to 11:24 AM US/Pacific, ~0.0046% of overall traffic for Cloud Key Management Service in us-west1 served errors (INTERNAL, UNAVAILABLE, or DEADLINE_EXCEEDED) for control and crypto operations.\n* Customers experienced an error rate of 0.000189% for Crypto operations (within SLO) due to serving path redundancy with another storage system.\n* Customers experienced an error rate of 0.28% for Control operations; mostly CreateCryptoKey, CreateKeyRing, and DestroyCryptoKeyVersion were affected, though potentially any metadata read/write operation could have been affected as well. Around 0.0219% of resource projects are believed to have been affected during the outage.\n### Persistent Disk\n* From 10:35 AM to 12:10 PM US/Pacific, some Persistent Disk deletion flows were stuck in the us-west1 region. Affected customers would have perceived very long running disk delete operations without any errors. Less than 0.01 percent of projects were affected.\n### Cloud Data Loss Prevention\n* From 10:35 AM to 11:30 AM US/Pacific, around 60% of total requests encountered errors in us-west1.\n### Cloud Dataproc\n* From 10:00 AM to 12:07 PM US/Pacific, Dataproc customers were unable to perform cluster and batch operations in us-west1. At peak impact between 10:30 AM to 11:32 AM US/Pacific, 65% of requests to Dataproc returned errors mostly DEADLINE_EXCEEDED, with some requests like create cluster returning a 100% error rate during this period.\n### Dataplex Catalog\n* From 9:45 AM to 11:35 AM US/Pacific, Dataplex Catalog customers in us-west1 were unable to create, delete, modify, list or search data stored in Dataplex Catalog. Error rates reached up to 90% of requests, with customers seeing a server error and increased latency overall. Customers using other regions were not affected by this incident.\n### Cloud Composer\n* From 9:48 AM to ~11:28 US/Pacific, some Cloud Composer customers in us-west1 experienced issues performing control plane operations like creating/deleting/updating environments; operations requiring the control plane, like snapshots, could have also been impacted.
Composer dataplane (aka Composer environment) was operating fine.\n* The problem was discovered by Composer probers, and it impacted the Composer Control Plane API availability SLO.\n### Instances API\n* From 10:35 AM to 11:50 AM US/Pacific, all Snapshot Schedules for Persistent Disks in us-west1 were not created according to schedule and were created with a delay.\n------\n***Added on 28 Feb 2024***\n### Document AI Warehouse\n* From 11:00 AM to 11:17 AM US/Pacific, the API returned server and client error messages in the US/multi-region.\n* During the impact window, all API requests experienced elevated error rates. The overall error rate intermittently spiked beyond 90%.\n### Google Kubernetes Engine\n* From 10:47 AM to 11:32 AM US/Pacific, customers may have experienced API call failures and in some cases cluster unavailability in the us-west1 region.\n* The API call failure rate peaked at 25% of API calls to the us-west1 region and the cluster unavailability in the us-west1 region peaked at 1% of clusters.\n### GKE Fleet Management\n* From 09:52 AM to 11:40 AM US/Pacific, customers may have experienced API call failures to add GKE clusters into Fleet in the us-west1 region.\n* The API call failure rate peaked at 100% of API calls to the us-west1 region on the Fleet (GKE Hub) services.\n------\nTo summarize, multiple Google Cloud Products experienced unavailability and/or elevated error rates for services in the us-west1 region during this issue. This is the final version of the Incident Report.\n------\n------","status":"AVAILABLE","affected_locations":[{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-02-15T17:40:15+00:00","modified":"2024-02-16T00:05:32+00:00","when":"2024-02-15T17:40:15+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues.
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 14 February 2024 10:30\n**Incident End:** 14 February 2024 13:10\n**Duration:** 2 hours, 40 minutes\n**Affected Services and Features:**\n* Artifact Registry\n* Certificate Authority Service\n* Cloud Build\n* Cloud Healthcare\n* Cloud Key Management Service\n* Cloud Load Balancing\n* Cloud Logging\n* Cloud Memorystore\n* Cloud Run\n* Cloud Spanner\n* Cloud SQL\n* Cloud Workflows\n* Data Catalog\n* Dataproc Metastore\n* Dialogflow CX\n* Eventarc\n* Google Cloud Console\n* Google Cloud Dataflow\n* Google Cloud Dataproc\n* Google Cloud Deploy\n* Google Cloud Networking\n* Google Cloud Pub/Sub\n* Google Cloud Tasks\n* Google Compute Engine\n* Hybrid Connectivity\n* Identity and Access Management\n* Persistent Disk\n* Pub/Sub Lite\n* Vertex AI AutoML Image\n* Vertex AI AutoML Tabular\n* Vertex AI AutoML Text\n* Vertex AI AutoML Video\n* Vertex AI Batch Prediction\n* Vertex AI Data Labeling\n* Vertex AI Explainable AI\n* Vertex AI Feature Store\n* Vertex AI Matching Engine\n* Vertex AI ML Metadata\n* Vertex AI Model Monitoring\n* Vertex AI Model Registry\n* Vertex AI Online Prediction\n* Vertex AI Pipelines\n* Vertex AI Search\n* Vertex AI TensorBoard\n* Vertex AI Training\n* Vertex AI Vizier\n* Vertex AI Workbench Instances\n* Virtual Private Cloud (VPC)\n**Regions/Zones:** us-west1\n**Description:**\nCustomers of multiple Google Cloud products experienced increased latency and error rates in us-west1 for a period of 2 hours, 40 minutes. From preliminary analysis, the root cause of the issue has been narrowed to an internal database resource allocation issue, which caused reduced availability and increased latency for many GCP services in the region.\nOur engineering team mitigated the issue by isolating the problematic traffic and has implemented measures to prevent a recurrence.\nGoogle will complete a full Incident Report in the following days that will provide a detailed root cause.\n**Customer Impact:**\nDuring the time of impact, customers would have experienced high latency and error rates for GCP services in the us-west1 region.","status":"AVAILABLE","affected_locations":[{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-02-14T21:07:00+00:00","modified":"2024-02-14T21:07:05+00:00","when":"2024-02-14T21:07:00+00:00","text":"The core issue affecting Google Cloud Products in us-west1 has been mitigated and all the affected products have full service restoration. We understand the disruption this may have caused and sincerely apologize for any inconvenience.\nThe root cause of the issue was identified to be an overloaded common infrastructure component. Our engineering team has mitigated the issue by isolating the traffic and has implemented measures to prevent a recurrence.\nIf you have questions or are still experiencing issues, please open a case with the Support Team and we will work with you until this issue is resolved.\nWe thank you for your patience while we worked on resolving the issue.
We will publish a preliminary analysis of this incident once we have completed our internal investigation.","status":"AVAILABLE","affected_locations":[{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-02-14T20:24:50+00:00","modified":"2024-02-14T20:24:55+00:00","when":"2024-02-14T20:24:50+00:00","text":"Summary: Multiple Google Cloud Products are experiencing issues in us-west1\nDescription: We are experiencing an issue with multiple Google Cloud Products beginning on Wednesday, 2024-02-14 9:40 US/Pacific.\nOur engineers have identified and mitigated the underlying issue. Most of the affected products have recovered and we expect the remaining products to fully recover in the next 1 to 2 hours.\nThe following services have already recovered:\nGoogle Kubernetes Engine, Cloud Pub/Sub, Virtual Private Cloud, VPC, VPC Serverless Access, Google Compute Engine, Dataplex Catalog, Cloud Interconnect, Cloud Workflows, Cloud Logging, Google Cloud Storage, Eventarc, Cloud SQL, Cloud Key Management Service, Cloud Run, Cloud Dataproc, Cloud Spanner, Dialogflow, Cloud Tasks\nWe will provide an update by Wednesday, 2024-02-14 13:00 US/Pacific with current details.\nDiagnosis: Existing customer load balancers will continue to function. New load balancers or changes to existing load balancers will not propagate configs, and changes to the configurations of load balancers may result in an error.\nConfiguration changes cannot be made to the Regional Internal, Regional External, and Global External Application Load Balancers in the affected region.\nCustomers may see errors when making configuration changes.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-02-14T20:10:00+00:00","modified":"2024-02-14T20:10:04+00:00","when":"2024-02-14T20:10:00+00:00","text":"Summary: Multiple Google Cloud Products are experiencing issues in us-west1\nDescription: We are experiencing an issue with multiple Google Cloud Products beginning on Wednesday, 2024-02-14 9:40 US/Pacific.\nOur engineers have identified a common infrastructure component as the root cause, and we are attempting a mitigation. As the mitigation progresses, some products may see partial recovery.\nThe following services have recovered:\nGoogle Kubernetes Engine, Cloud Pub/Sub, Virtual Private Cloud, VPC, VPC Serverless Access, Google Compute Engine, Dataplex Catalog, Cloud Interconnect, Cloud Workflows, Cloud Logging, Google Cloud Storage, Eventarc, Cloud SQL, Cloud Key Management Service, Cloud Run, Cloud Dataproc\nWe do not have an ETA for mitigation at this point.\nWe will provide an update by Wednesday, 2024-02-14 12:45 US/Pacific with current details.\nDiagnosis: Existing customer load balancers will continue to function.
New load balancers or changes to existing load balancers will not propagate configs, and changes to the configurations of load balancers may result in an error.\nConfiguration changes cannot be made to the Regional Internal, Regional External, and Global External Application Load Balancers in the affected region.\nCustomers may see errors when making configuration changes.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-02-14T19:47:05+00:00","modified":"2024-02-14T19:47:09+00:00","when":"2024-02-14T19:47:05+00:00","text":"Summary: Multiple Google Cloud Products are experiencing issues in us-west1\nDescription: We are experiencing an issue with multiple Google Cloud Products beginning on Wednesday, 2024-02-14 9:40 US/Pacific.\nOur engineers have identified a common infrastructure component as the root cause, and we are attempting a mitigation. As the mitigation progresses, some products may see partial recovery.\nWe do not have an ETA for mitigation at this point.\nWe will provide an update by Wednesday, 2024-02-14 12:20 US/Pacific with current details.\nDiagnosis: Existing customer load balancers will continue to function. New load balancers or changes to existing load balancers will not propagate configs, and changes to the configurations of load balancers may result in an error.\nConfiguration changes cannot be made to the Regional Internal, Regional External, and Global External Application Load Balancers in the affected region.\nCustomers may see errors when making configuration changes.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-02-14T19:29:40+00:00","modified":"2024-02-14T19:29:41+00:00","when":"2024-02-14T19:29:40+00:00","text":"Summary: Multiple Google Cloud Products are experiencing issues in us-west1\nDescription: We are experiencing an issue with multiple Google Cloud Products beginning on Wednesday, 2024-02-14 9:40 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-02-14 12:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Existing customer load balancers will continue to function.
New load balancers or changes to existing load balancers will not propagate configs, and changes to the configurations of load balancers may result in an error.\nConfiguration changes cannot be made to the Regional Internal, Regional External, and Global External Application Load Balancers in the affected region.\nCustomers may see errors when making configuration changes.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-02-14T19:05:26+00:00","modified":"2024-02-14T19:05:46+00:00","when":"2024-02-14T19:05:26+00:00","text":"Summary: We are experiencing an issue with Cloud Load Balancing.\nDescription: We are experiencing an issue with Cloud Load Balancing.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-02-14 12:44 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2024-02-21T21:39:40+00:00","modified":"2024-02-28T17:05:43+00:00","when":"2024-02-21T21:39:40+00:00","text":"# Incident Report\n## Summary\nOn 14 February 2024 from 09:45 AM to 12:52 PM US/Pacific, Google Cloud customers in us-west1 experienced control plane unavailability because of elevated latencies and errors. In addition, a few services experienced data plane unavailability for the same reason. The full list of impacted products and services is detailed below.\nTo our Google Cloud customers whose businesses were impacted during this outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer you.\n## Root Cause\nMost Google Cloud products and services use a regional metadata store to support their internal operations. The metadata store supports critical functions such as servicing customer requests; handling scale, load balancing, and admin operations; and retrieving/storing metadata, including server location information.\nThe regional metadata store continuously manages load by automatically adjusting compute capacity in response to changes in demand. When usage increases, additional resources are added and load is also balanced automatically. However, an unexpected spike in demand exceeded the system’s ability to quickly provision additional resources. As a result, multiple Google Cloud products and services in the region experienced elevated latencies and errors until the unexpected load was isolated.\n## Remediation and Prevention/Detection\nGoogle engineers were alerted to this problem by our internal monitoring system and throttled the spiking workloads on the underlying regional metadata store.
This allowed Google Cloud products and services to read/write state at a normal rate allowing for healthy servicing of customer requests after the backlog of operations on the regional metadata store was processed.\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n- Improve monitoring and alerting for an earlier detection of unexpected spikes in the regional metadata stores.\n- Enhance the ability of regional data stores to automatically throttle workloads more aggressively when experiencing unexpected spikes.\n## Detailed Description of Impact\n### Google Compute Engine\n* From 10:30 AM to 12:30 PM US/Pacific, a number of GCE APIs returned internal or timeout errors in the us-west1 region, across all zones. The overall error rate for most APIs remained around 1% with errors affecting around 33% of projects in the region.\n* From 10:00 AM to 12:00 PM US/Pacific, less than 0.004% of VMs running in the region crashed; 80% of the crashed VMs recovered, while 20% experienced delays in rescheduling.\n* From 10:00 AM to 12:00 PM US/Pacific, about 1% of read/write requests in the region for [guest attributes](https://cloud.google.com/compute/docs/metadata/overview#guest_attributes) failed or experienced latency exceeding 1 second.\n### Google Cloud Pub/Sub Lite\n* From 10:05 AM to 12:00 PM US/Pacific, customers may have experienced elevated end-to-end latency and publish request latency in us-west1. At peak impact, 65% of publish requests in the region failed with aborted, canceled, deadline exceeded, or unavailable errors, affecting up to 47% of projects publishing in the region. Resource administrative operations also displayed unavailability in the region.\n### App Engine Standard\n* From 09:45 AM to 11:42 AM US/Pacific, ~6% of customers in the region experienced deployment failures and latency on deployments for Google App Engine apps.\n### Cloud Functions\n* From 09:45 AM to 11:42 AM US/Pacific, ~6% of customers in the region experienced deployment failures and latency on deployments for Cloud Function apps.\n### Cloud Run\n* From 09:45 AM to 11:42 AM US/Pacific, ~2% of customers in the region experienced deployment failures and latency on deployments for Cloud Run apps.\n### Dialogflow CX\n* From 9:45 AM to 11:45 AM US/Pacific, a percentage of Dialogflow requests returned internal or timeout errors in the us-west1 region. The error rate stayed below 5% before peaking at 100% around 11:00 US/Pacific.\n### Vertex AI products:\n* From about 10:15 AM to 11:35 AM US/Pacific, all Vertex AI services that heavily rely on metadata store operations including Online Prediction, Training, and Featurestore, ML Metadata and Notebooks experienced ~50% error rates (spiking to near 100% at times) in the region.\n### Google Cloud Pub/Sub\n* From 09:57 AM to 11:26 AM US/Pacific, Cloud Pub/Sub customers with traffic in us-west1 experienced publish errors and unavailability. The publish error rate peaked at ~99% for customers with publish traffic in us-west1. In addition, backlog stats metrics were unavailable for some customers who did not have publish or subscribe traffic in us-west1.\n### Cloud Memorystore\n* From 10:05 AM to 11:35 AM US/Pacific, customers creating, updating, deleting, importing, or exporting Redis Standalone or Cluster instances in us-west1 may have experienced failures.
Around 17% of such requests failed with timeouts or internal errors.\n### Eventarc\n* From 10:30 AM to 11:30 AM US/Pacific, Eventarc customers in us-west1 experienced event delivery delays for up to 50 minutes as we saw event publish errors in our dataplane that peaked at 100%. There were high error ratios and latencies for all control plane long-running operations, with a peak error rate of 100% and a peak latency of 55 minutes.\n### Dataproc Metastore\n* From 10:00 AM to 11:45 AM US/Pacific, all control plane operations were sporadically returning an internal or deadline exceeded error (differing ratios throughout the outage) in the region. The peak impact was from around 10:30 AM to 11:30 AM US/Pacific where only around 3.33% of operations were completed with OK.\n### Google Cloud Tasks\n* From 10:40 AM to 11:30 AM US/Pacific, Cloud Tasks’s main data operation (CreateTask) returned DEADLINE_EXCEEDED error for all requests in us-west1. This means customers in this region were not able to buffer Tasks and, subsequently, our system was not able to dispatch them.\n### Cloud Build\n* From 9:45 AM to 12:00 PM US/Pacific, using Cloud Build to create or retry builds may have failed in us-west1. ~15-20% of requests failed during the issue.\n### Cloud SQL\n* From 10:06 AM to 11:46 AM US/Pacific, many operations requiring the regional metadata store in us-west1 timed out or failed; this affected Cloud SQL instance creations or any modifications to existing us-west1 instances. 30% of instance creations failed, 10% of export operations failed, and \u003c10% of update operations failed in us-west1.\n### Speech-to-text\n* From 10:30 AM to 11:30 AM US/Pacific, Speech-to-Text (STT) experienced a spike in server errors in us-west1. The issue primarily affected control plane traffic to client STT resources (e.g. CreateRecognizer) which experienced a spike in INTERNAL server errors. At peak, around 10% of Create.* traffic or around 0.5QPS of traffic returned such errors.\n### Cloud Load Balancing\n* From 9:50 AM to 12:52 PM US/Pacific, new Cloud Load Balancer creation was failing for load balancers with backends in the us-west1 region. Also, configuration changes on the same family of products could not be made. The data plane was not affected.\n### Cloud Networking\n* From 10:00 AM to 11:30 AM US/Pacific, Cloud NAT, Cloud Router, Cloud Interconnect, and Cloud VPN users experienced time-outs for add/delete/modify operations in the us-west1 region.\n* Existing programming and forwarding rules were not impacted.\n### Cloud Deploy\n* From 9:45 AM to 11:40 AM US/Pacific, Cloud Deploy releases and rollouts in the region were either delayed or failed due to the inability to create builds with Cloud Build which was also affected by the regional metadata store issue. Whether the release/rollout was delayed or failed depended on whether retrying was successful. We also saw errors creating or updating Cloud Deploy resources due to metadata store RPC errors at the time.\n### Workflows\n* From 10:10 AM to 11:30 AM US/Pacific, Cloud Workflows experienced latency and availability issues in the us-west1 region. This issue impacted ~2% of customer projects, and customers experienced internal errors like deadline_exceeded: metadata store reads could not be validated after transaction function returned error: context deadline exceeded.
Example methods that were impacted: CancelExecutions, CreateExecutions, CreateWorkflows and TriggerPubsubExecution.\n### Cloud Logging\n* From 9:45 AM to 12:45 PM US/Pacific, Cloud Logs ingestion storage was delayed in the us-west1 region. This issue impacted ~12.5% of global buckets, but regional buckets do not seem to have been delayed.\n### Dataform\n* From 09:55 AM to 11:40 AM US/Pacific, our business-critical consumer API had its lowest availability of 14.29% in us-west1 during the metadata store outage.\n* The metadata store RPC error ratio peaked at 57.6807%. This metadata store is used for executing customers' release \u0026 workflow schedules.\n### Certificate Authority Service\n* From 9:50 AM to 11:40 AM US/Pacific, 3% of overall traffic to Certificate Authority Service in us-west1 experienced slowness and errors for control and data plane operations. Customers experienced an error rate of 85% for Create Certificate Revocation List requests, while other operations were affected at a rate between 1% and 15%.\n### VPC and Serverless VPC Access\n* From 9:48 AM to 11:16 AM US/Pacific, Serverless VPC Access customers in us-west1 were unable to create, delete, modify or list Serverless VPC Access Connectors. We saw error rates hovering from 50% to 90% where customers would see DEADLINE_EXCEEDED. Serverless VPC Access Connector proxying functionality was unaffected by this incident.\n### Cloud Dataflow\n* From 10:03 AM to 12:22 PM US/Pacific, Dataflow customers in us-west1 experienced job submission failures peaking at 100%. ~6% of running streaming jobs experienced unusually high system watermarks. Up to 100% of running batch jobs were stuck and failed to make progress during the outage.\n### Cloud Key Management Service\n* From 10:32 AM to 11:24 AM US/Pacific, ~0.0046% of overall traffic for Cloud Key Management Service in us-west1 served errors (INTERNAL, UNAVAILABLE, or DEADLINE_EXCEEDED) for control and crypto operations.\n* Customers experienced an error rate of 0.000189% for Crypto operations (within SLO) due to serving path redundancy with another storage system.\n* Customers experienced an error rate of 0.28% for Control operations; mostly CreateCryptoKey, CreateKeyRing, and DestroyCryptoKeyVersion were affected, though potentially any metadata read/write operation could have been affected as well. Around 0.0219% of resource projects are believed to have been affected during the outage.\n### Persistent Disk\n* From 10:35 AM to 12:10 PM US/Pacific, some Persistent Disk deletion flows were stuck in the us-west1 region. Affected customers would have perceived very long running disk delete operations without any errors. Less than 0.01 percent of projects were affected.\n### Cloud Data Loss Prevention\n* From 10:35 AM to 11:30 AM US/Pacific, around 60% of total requests encountered errors in us-west1.\n### Cloud Dataproc\n* From 10:00 AM to 12:07 PM US/Pacific, Dataproc customers were unable to perform cluster and batch operations in us-west1. At peak impact between 10:30 AM to 11:32 AM US/Pacific, 65% of requests to Dataproc returned errors mostly DEADLINE_EXCEEDED, with some requests like create cluster returning a 100% error rate during this period.\n### Dataplex Catalog\n* From 9:45 AM to 11:35 AM US/Pacific, Dataplex Catalog customers in us-west1 were unable to create, delete, modify, list or search data stored in Dataplex Catalog. Error rates reached up to 90% of requests, with customers seeing a server error and increased latency overall.
Customers using other regions were not affected by this incident.\n### Cloud Composer\n* From 9:48 AM to ~11:28 US/Pacific, some Cloud Composer customers in us-west1 experienced issues performing control plane operations like creating/deleting/updating environments; operations requiring the control plane, like snapshots, could have also been impacted. Composer dataplane (aka Composer environment) was operating fine.\n* The problem was discovered by Composer probers, and it impacted the Composer Control Plane API availability SLO.\n### Instances API\n* From 10:35 AM to 11:50 AM US/Pacific, all Snapshot Schedules for Persistent Disks in us-west1 were not created according to schedule and were created with a delay.\n------\n***Added on 28 Feb 2024***\n### Document AI Warehouse\n* From 11:00 AM to 11:17 AM US/Pacific, the API returned server and client error messages in the US/multi-region.\n* During the impact window, all API requests experienced elevated error rates. The overall error rate intermittently spiked beyond 90%.\n### Google Kubernetes Engine\n* From 10:47 AM to 11:32 AM US/Pacific, customers may have experienced API call failures and in some cases cluster unavailability in the us-west1 region.\n* The API call failure rate peaked at 25% of API calls to the us-west1 region and the cluster unavailability in the us-west1 region peaked at 1% of clusters.\n### GKE Fleet Management\n* From 09:52 AM to 11:40 AM US/Pacific, customers may have experienced API call failures to add GKE clusters into Fleet in the us-west1 region.\n* The API call failure rate peaked at 100% of API calls to the us-west1 region on the Fleet (GKE Hub) services.\n------\nTo summarize, multiple Google Cloud Products experienced unavailability and/or elevated error rates for services in the us-west1 region during this issue.
This is the final version of the Incident Report.\n------\n------","status":"AVAILABLE","affected_locations":[{"title":"Oregon (us-west1)","id":"us-west1"}]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Vertex AI AutoML Image","id":"1UYAL6urhdrrr6emD8ce"},{"title":"Vertex AI Matching Engine","id":"2iFHW5WNBkTWjX5k3jnZ"},{"title":"Vertex AI AutoML Tabular","id":"5CfpoAfoMxWT4zGzX4yW"},{"title":"Pub/Sub Lite","id":"5DWkcStmv4dFHRHLaRXb"},{"title":"Hybrid Connectivity","id":"5x6CGnZvSHQZ26KtxpK1"},{"title":"Cloud Key Management Service","id":"67cSySTL7dwJZo9JWUGU"},{"title":"Google Cloud Deploy","id":"6z5SnvJrJMJQSdJmUQjH"},{"title":"Cloud Run","id":"9D7d2iNBQWN24zc1VamE"},{"title":"Vertex AI TensorBoard","id":"9TtWTtvEUYnPidW2z4aM"},{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"},{"title":"Virtual Private Cloud (VPC)","id":"BSGtCUnz6ZmyajsjgTKv"},{"title":"Dialogflow CX","id":"BnCicQdHSdxaCv8Ya6Vm"},{"title":"Cloud Workflows","id":"C4P62W9Xc2zZ1Sk52bbw"},{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"},{"title":"Cloud Spanner","id":"EcNGGUgBtBLrtm4mWvqC"},{"title":"Vertex AI Explainable AI","id":"FJ8YR1L7CvooMVxbTSD4"},{"title":"Vertex AI Workbench User Managed Notebooks","id":"GewfNoG6JvJ1kjnL9jb8"},{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Cloud Memorystore","id":"LGPLu3M5pcUAKU1z6eP3"},{"title":"Dataproc Metastore","id":"PXZh68NPz9auRyo4tVfy"},{"title":"Cloud Logging","id":"PuCJ6W2ovoDhLcyvZ1xa"},{"title":"Certificate Authority Service","id":"PvdE3tt1VdxKXzSyd8WF"},{"title":"Artifact Registry","id":"QbBuuiRdsLpMr9WmGwm5"},{"title":"Vertex AI Vizier","id":"SNSm5dv1cRELFSGhuvpc"},{"title":"Persistent Disk","id":"SzESm2Ux129pjDGKWD68"},{"title":"Vertex AI Data Labeling","id":"T3mQNvrm8YJVLM11N4bq"},{"title":"Google Cloud Dataflow","id":"T9bFoXPqG8w8g1YbWTKY"},{"title":"Data Catalog","id":"TFedVRYgKGRGMSJrUpup"},{"title":"Vertex AI Model Registry","id":"U213XEHGhALownYuxpFM"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Google Cloud Console","id":"Wdsr1n5vyDvCt78qEifm"},{"title":"Eventarc","id":"YaFawoMaXnqgY4keUBnW"},{"title":"Identity and Access Management","id":"adnGEDEt9zWzs8uF1oKA"},{"title":"Vertex AI Training","id":"baQeYW2fsPA2vvLCqN93"},{"title":"Google Cloud Pub/Sub","id":"dFjdLh2v6zuES6t9ADCB"},{"title":"Cloud Build","id":"fw8GzBdZdqy4THau7e1y"},{"title":"Vertex AI AutoML Video","id":"gWWqvMgwnZ1a5VQ2kjUw"},{"title":"Vertex AI AutoML Text","id":"h5DuWxu8KdW1qipvwBta"},{"title":"Cloud Load Balancing","id":"ix7u9beT8ivBdjApTif3"},{"title":"Vertex AI Pipelines","id":"jgyDXnasZm6pwoCR1uif"},{"title":"Vertex AI Feature Store","id":"qnhcw8D1c8iMEwP8TzkA"},{"title":"Vertex AI ML Metadata","id":"sWNXmWFmBKpXg2xeTNkH"},{"title":"Vertex AI Online Prediction","id":"sdXM79fz1FS6ekNpu37K"},{"title":"Vertex AI Model Monitoring","id":"t8Vzas4iWLRcCyNYcvpN"},{"title":"Google Cloud Tasks","id":"tMWyzhyKK4rAzAf7x62h"},{"title":"Vertex AI Batch Prediction","id":"yVW8aiPWipjd3j67XzDL"},{"title":"Google Cloud Dataproc","id":"yjXrEg3Yvy26BauMwr69"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"},{"title":"Healthcare and Life Sciences","id":"zgodfdJcHiKkGxQYixiZ"},{"title":"Google Cloud SQL","id":"hV87iK5DcEXKgWU2kDri"},{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"},{"title":"GKE fleet management","id":"4osgZCUJuuh3whY4B8tt"},{"title":"Document AI 
Warehouse","id":"uu5hjNKf41zvsFyn98ry"}],"uri":"incidents/u6rQ2nNVbhAFqGCcTm58","currently_affected_locations":[],"previously_affected_locations":[{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"kY3Qq5EQkhU8n1AaRuDs","number":"7652025231155988815","begin":"2024-02-08T16:10:00+00:00","created":"2024-02-08T20:52:22+00:00","end":"2024-02-08T18:30:00+00:00","modified":"2024-02-08T20:54:09+00:00","external_desc":"Metrics unavailable for External Load Balancer","updates":[{"created":"2024-02-08T20:53:51+00:00","modified":"2024-02-08T20:53:51+00:00","when":"2024-02-08T20:53:51+00:00","text":"We experienced a degradation with Cloud Load Balancing metrics beginning on Thursday, 2024-02-08 08:10 US/Pacific.\nCustomers’ would have observed their load balancer metrics unavailable, potentially leading to their monitoring alerts firing incorrectly.\nThe issue has been resolved for all affected users as of Thursday, 2024-02-08 10:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-02-08T20:52:23+00:00","modified":"2024-02-08T20:52:23+00:00","when":"2024-02-08T20:52:23+00:00","text":"We are investigating a potential issue with Cloud Load Balancing.\nWe will provide more information by Thursday, 2024-02-08 13:00 
US/Pacific.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-02-08T20:53:51+00:00","modified":"2024-02-08T20:53:51+00:00","when":"2024-02-08T20:53:51+00:00","text":"We experienced a degradation with Cloud Load Balancing metrics beginning on Thursday, 2024-02-08 08:10 US/Pacific.\nCustomers’ would have observed their load balancer metrics unavailable, potentially leading to their monitoring alerts firing incorrectly.\nThe issue has been resolved for all affected users as of Thursday, 2024-02-08 10:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney 
(australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"ix7u9beT8ivBdjApTif3","service_name":"Cloud Load Balancing","affected_products":[{"title":"Cloud Load Balancing","id":"ix7u9beT8ivBdjApTif3"}],"uri":"incidents/kY3Qq5EQkhU8n1AaRuDs","currently_affected_locations":[],"previously_affected_locations":[{"title":"Johannesburg (africa-south1)","id":"africa-south1"},{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal 
(northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"McSxWsRNvAPn7SbWGGig","number":"4948956724526989141","begin":"2024-02-07T11:04:00+00:00","created":"2024-02-07T11:39:25+00:00","end":"2024-02-07T13:03:00+00:00","modified":"2024-02-14T21:51:46+00:00","external_desc":"Multiple product outage in europe-west8-b","updates":[{"created":"2024-02-14T21:51:46+00:00","modified":"2024-02-14T21:51:46+00:00","when":"2024-02-14T21:51:46+00:00","text":"# Incident Report\n## Summary\nOn 7 February 2024, multiple Google Cloud services experienced a partial zonal service outage in the europe-west8-b zone for a duration of 1 hour, 8 minutes. To our customers whose services were impacted during this service outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThe europe-west8 region recently received a networking upgrade to increase capacity and improve resilience. In order to minimize the risk, Google executes these as \"make-before-break,\" adding new capacity before decommissioning old capacity. The final step of this upgrade was to remove now-unused fiber connections between zones in the region. Deployment automation creates work orders for onsite technicians to remove unused fiber cabling from network devices and fiber patch panels.\nOn 7 February 2024, between 02:21 and 02:46 US/Pacific, onsite technicians performing this planned network maintenance inadvertently unplugged several fibers that were adjacent to those in the work order, but still in use for production traffic. As a result, a portion of the europe-west8-b zone unintentionally became isolated from a portion of the backbone network at 02:46 US/Pacific.\n## Remediation and Prevention\nGoogle engineers were alerted to the partial outage via internal monitoring on 7 February 2024, 02:56 US/Pacific and immediately started an investigation. Once the nature and scope of the issue became clear, Google engineers began reverting the fiber changes and restoring network capacity at 03:52 US/Pacific.\nSufficient capacity to serve customer traffic was restored by 03:54 US/Pacific, mitigating impact to the affected products. Full capacity was restored by 04:07 US/Pacific.\nGoogle is committed to preventing recurrence of this incident. The following actions have been identified:\n* Google engineers have paused all work of this kind globally, starting on 8 February 2024. This pause will remain in effect until the actions below have been implemented to reduce the risk of recurrence.\n* Complete the rollout of an enhanced physical work safety program, which includes updates to the current process for execution of planned work related to interfaces or devices serving customer traffic. 
The following action items within that program are relevant to this incident:\n* Creation of an automated notification system for the start / end of planned work related to interfaces or devices serving customer traffic.\n* Division of planned work into execution batches.\n* Require operational team supervision and monitoring for planned work.\n* Add multi-step verification of traffic status before fibers are disconnected.\n* Include additional controls as part of working procedures for the execution of critical tasks to ensure compliance with documented processes.\n## Detailed Description of Impact\nOn 7 February 2024, from 02:46 to 03:54 US/Pacific, multiple Google Cloud services experienced a partial zonal service outage for europe-west8-b. Affected services included:\n**Google Kubernetes Engine**\n* GKE clusters in europe-west8-b were unavailable.\n* Customers may also have experienced failures when attempting to create, delete, or modify VMs in the affected zone.\n**Cloud Key Management Service (KMS)**\n* Cloud KMS experienced a partial zonal outage for services in europe-west8-b, including Hardware Security Module (HSM), External Key Manager (EKM), Secret Manager, and Private CA.\n**Google Cloud Bigtable**\n* Customers experienced a service outage with non-high availability instances in europe-west8-b for the duration of the outage.\n**Virtual Private Cloud (VPC)**\n* VPC customers may have experienced increased packet loss in the affected zone.\n**Google Cloud Deploy**\n* Customers in europe-west8-b would have experienced errors creating rollouts and releases.\n**Google Compute Engine**\n* VMs in a subset of europe-west8-b were unreachable for the duration of the outage.\n* VM creations and deletions in europe-west8-c started failing as services failed over to that zone.\n**Persistent Disk (PD)**\n* PD devices in a subset of europe-west8-b would have been unavailable for the duration of the outage.\n* PD services related to snapshots, creation of new disks, and image creation for the affected zone would have experienced failures.\n* A small number of VMs with Persistent Disks in a different zone within the region saw guest errors.\n**Google Cloud Networking**\n* Cloud NAT, Cloud Interconnect, Cloud VPN, Cloud VR were unavailable for europe-west8-b.\n* Cloud Network programming was delayed for all customers in the europe-west8-b zone.\n**Service Directory**\n* Customers in europe-west8-b experienced read and write errors for the duration of the outage.\n**Traffic Director**\n* Stale configurations and load balancing assignments for all customers in europe-west8-b. This would have appeared as configuration updates not propagating and load balancing assignments not reacting to changes in load.\n* All newly restarted clients would have been unable to load configuration and receive load balancing assignments.\n**Cloud Load Balancing**\n* Approximately 50% of load balancers/target pools in europe-west8-b were unreachable.\n* Customers with Load Balancers configured in this zone would find them inconsistently available. 
If no Load Balancers were configured and available in another zone or region, requests to the customer's project would result in 500 errors.\n**Google Cloud DNS**\n* Customers in europe-west8-b would have been unable to write new DNS records for the duration of the outage.\n* Cloud DNS public name servers were unreachable from europe-west8-b during the outage, and intermittently unreachable from other zones in the region as a result of throttling as traffic moved from europe-west8-b to other zones in the region.\n**Memorystore for Redis**\n* Cloud Redis Standalone instances in europe-west8-b were unavailable for the duration of the outage.\n**Cloud BigQuery**\n* A small number of projects (one customer) may have experienced errors for API calls for a period of approximately 15 minutes for the tabledata.insertAll API. A very low error rate might have been present for jobs.insert and jobs.query operations as well, but these were mitigated much more quickly through automatic recovery mechanisms.\n**Cloud Dataflow**\n* A small number of customer projects may have experienced stuck streaming jobs in europe-west8-b for the duration of the incident.\n**Cloud Build**\n* Builds would have terminated with status “INTERNAL_ERROR” for approximately 20% of builds in europe-west8 for approximately the first 20 minutes of the outage. Intra-region failover healed the user impact thereafter.\n**Cloud SQL**\n* A small number of instance create operations failed in the europe-west8 region.\n* Existing HA instances were moved to a healthy zone to restore connectivity.\n* Existing Zonal instances in europe-west8-b would have remained unavailable for the duration of the outage.\n**Cloud Armor**\n* Propagation of updates to Cloud Armor Security policies in Cloud Console stalled globally.\n**Cloud Filestore**\n* Up to 100% error rates for CreateBackup, CreateInstance, CreateSnapshot, and DeleteInstance operations for the europe-west8-b zone.\n* Zonal instances in the affected zone were unavailable.","status":"AVAILABLE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"}]},{"created":"2024-02-07T19:55:16+00:00","modified":"2024-02-07T19:56:22+00:00","when":"2024-02-07T19:55:16+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 7 Feb 2024 02:46\n**Incident End:** 7 Feb 2024 03:54\n**Duration:** 1 hour, 8 minutes\n**Affected Services and Features:**\nGoogle Kubernetes Engine\nCloud Key Management Service\nGoogle BigQuery\nGoogle Cloud Bigtable\nVirtual Private Cloud (VPC)\nGoogle Cloud Deploy\nGoogle Compute Engine\nPersistent Disk\nGoogle Cloud Networking\nService Directory\nTraffic Director\nCloud Load Balancing\nGoogle Cloud DNS\nMemorystore for Redis\nCloud Dataflow\nCloud Build\nCloud SQL\nCloud Armor\nCloud Filestore\n**Regions/Zones:** europe-west8-b\n**Description:**\nMultiple Google Cloud products experienced service unavailability for 1 hour and 8 minutes in europe-west8-b. The preliminary root cause appears to be a network decommissioning maintenance activity that was not executed as planned. 
Google will complete a full Incident Report in the following days that will provide a detailed root cause.\n**Customer Impact:**\nDuring the impact timeframe:\nMost customer services using this zone were unavailable.","status":"AVAILABLE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"}]},{"created":"2024-02-07T13:03:52+00:00","modified":"2024-02-07T19:21:57+00:00","when":"2024-02-07T13:03:52+00:00","text":"Summary: Multiple product outage in europe-west8-b\nThe multi-product outage in europe-west8-b has been resolved for all affected users as of Wednesday, 2024-02-07 05:02 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"}]},{"created":"2024-02-07T12:58:18+00:00","modified":"2024-02-07T19:27:27+00:00","when":"2024-02-07T12:58:18+00:00","text":"Summary: Multiple product outage in europe-west8-b\nMitigation work is currently underway by our engineering team. The mitigation is expected to complete by Wednesday, 2024-02-07 05:15 US/Pacific.\nWe will provide an update by Wednesday, 2024-02-07 05:15 US/Pacific with current details.\nDiagnosis: Persistent Disk customers are unable to reach their services in europe-west8-b.\nWorkaround: None at this time","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"}]},{"created":"2024-02-07T12:49:11+00:00","modified":"2024-02-07T19:28:24+00:00","when":"2024-02-07T12:49:11+00:00","text":"Summary: Multiple product outage in europe-west8-b\nMitigation work is currently underway by our engineering team. The mitigation is expected to complete by Wednesday, 2024-02-07 05:15 US/Pacific.\nWe will provide an update by Wednesday, 2024-02-07 05:00 US/Pacific with current details.\nDiagnosis: Persistent Disk customers are unable to reach their services in europe-west8-b.\nWorkaround: None at this time","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"}]},{"created":"2024-02-07T12:35:50+00:00","modified":"2024-02-07T12:36:10+00:00","when":"2024-02-07T12:35:50+00:00","text":"Summary: Multiple product outage in europe-west8-b\nDescription: We are experiencing an issue with Persistent Disk, Google Cloud Networking, Service Directory, Traffic Director, Cloud Load Balancing, Google Cloud DNS, Cloud Memorystore, Cloud Dataflow, Cloud Build, Cloud SQL, Cloud Filestore beginning on Wednesday, 2024-02-07 02:46 US/Pacific.\nOther products are likely impacted. Mitigation work is currently underway by our engineering team. 
The mitigation is expected to complete by Wednesday, 2024-02-07 05:00 US/Pacific.\nWe will provide an update by Wednesday, 2024-02-07 05:00 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west8-b.\nWorkaround: None at this time","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]},{"created":"2024-02-07T12:26:37+00:00","modified":"2024-02-07T12:26:59+00:00","when":"2024-02-07T12:26:37+00:00","text":"Summary: Multiple product outage in europe-west8-b\nDescription: We are experiencing an issue with Google Kubernetes Engine, Cloud Key Management Service, Google BigQuery, Google Cloud Bigtable, Virtual Private Cloud (VPC), Google Cloud Deploy, Google Compute Engine, Persistent Disk, Google Cloud Networking, Service Directory, Traffic Director, Cloud Load Balancing, Google Cloud DNS, Cloud Memorystore, Cloud Dataflow, Cloud Build, Cloud SQL, Cloud Filestore, Identity and Access Management beginning on Wednesday, 2024-02-07 02:46 US/Pacific.\nOther products are likely impacted. Mitigation work is currently underway by our engineering team. The mitigation is expected to complete by Wednesday, 2024-02-07 04:45 US/Pacific.\nWe will provide an update by Wednesday, 2024-02-07 04:45 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west8-b.\nWorkaround: None at this time","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]},{"created":"2024-02-07T12:24:14+00:00","modified":"2024-02-07T12:24:35+00:00","when":"2024-02-07T12:24:14+00:00","text":"Summary: Multiple product outage in europe-west8-b\nDescription: We are experiencing an issue with Google Kubernetes Engine, Cloud Key Management Service, Google BigQuery, Google Cloud Bigtable, Virtual Private Cloud (VPC), Google Cloud Deploy, Google Compute Engine, Persistent Disk, Google Cloud Networking, Service Directory, Traffic Director, Cloud Load Balancing, Google Cloud DNS, Cloud Memorystore, Cloud Dataflow, Cloud Build, Cloud SQL, Cloud Filestore, Identity and Access Management beginning on Wednesday, 2024-02-07 02:46 US/Pacific.\nOther products are likely impacted. Mitigation work is currently underway by our engineering team. 
The mitigation is expected to complete by Wednesday, 2024-02-07 04:45 US/Pacific.\nWe will provide an update by Wednesday, 2024-02-07 04:45 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west8-b.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]},{"created":"2024-02-07T12:14:34+00:00","modified":"2024-02-07T12:14:55+00:00","when":"2024-02-07T12:14:34+00:00","text":"Summary: Multiple product outage in europe-west8-b\nDescription: We are experiencing an issue with Google Kubernetes Engine, Cloud Key Management Service, Google BigQuery, Google Cloud Bigtable, Virtual Private Cloud (VPC), Google Cloud Deploy, Google Compute Engine, Persistent Disk, Google Cloud Networking, Service Directory, Traffic Director, Cloud Load Balancing, Google Cloud DNS, Cloud Memorystore, Cloud Dataflow, Cloud Build, Cloud SQL, Cloud Filestore, Identity and Access Management beginning on Wednesday, 2024-02-07 02:46 US/Pacific.\nOther products are likely impacted. Mitigation work is currently underway by our engineering team. The mitigation is expected to complete by Wednesday, 2024-02-07 04:30 US/Pacific.\nWe will provide an update by Wednesday, 2024-02-07 04:30 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west8-b.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]},{"created":"2024-02-07T12:11:23+00:00","modified":"2024-02-07T12:11:40+00:00","when":"2024-02-07T12:11:23+00:00","text":"Summary: Multiple product outage in europe-west8-b\nDescription: We are experiencing an issue with Google Kubernetes Engine, Cloud Key Management Service, Google BigQuery, Google Cloud Bigtable, Virtual Private Cloud (VPC), Google Cloud Deploy, Google Compute Engine, Persistent Disk, Google Cloud Networking, Service Directory, Traffic Director, Cloud Load Balancing, Google Cloud DNS, Cloud Memorystore, Cloud Dataflow, Cloud Build, Cloud SQL, Cloud Filestore beginning on Wednesday, 2024-02-07 02:46 US/Pacific.\nOther products are likely impacted. Mitigation work is currently underway by our engineering team. The mitigation is expected to complete by Wednesday, 2024-02-07 04:30 US/Pacific.\nWe will provide an update by Wednesday, 2024-02-07 04:30 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west8-b.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]},{"created":"2024-02-07T12:04:28+00:00","modified":"2024-02-07T12:04:51+00:00","when":"2024-02-07T12:04:28+00:00","text":"Summary: Multiple product outage in europe-west8-b\nDescription: We are experiencing an issue with Google Kubernetes Engine, Cloud Key Management Service, Google BigQuery, Google Cloud Bigtable, Virtual Private Cloud (VPC), Google Cloud Deploy, Google Compute Engine, Persistent Disk, Google Cloud Networking, Service Directory, Traffic Director, Cloud Load Balancing, Google Cloud DNS, Cloud Memorystore, Cloud Dataflow, Cloud Build, Cloud SQL beginning on Wednesday, 2024-02-07 02:46 US/Pacific.\nOther products are likely impacted. 
Mitigation work is currently underway by our engineering team. We do not have an ETA for mitigation at this point.\nWe will provide an update by Wednesday, 2024-02-07 04:30 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west8-b.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]},{"created":"2024-02-07T12:00:23+00:00","modified":"2024-02-07T12:00:41+00:00","when":"2024-02-07T12:00:23+00:00","text":"Summary: Multiple product outage in europe-west8-b\nDescription: We are experiencing an issue with Cloud Key Management Service, Google BigQuery, Google Cloud Bigtable, Virtual Private Cloud (VPC), Google Cloud Deploy, Google Compute Engine, Persistent Disk, Google Cloud Networking, Service Directory, Traffic Director, Cloud Load Balancing, Google Cloud DNS, Cloud Memorystore, Cloud Dataflow, Cloud Build beginning on Wednesday, 2024-02-07 02:46 US/Pacific.\nOther products are likely impacted. Mitigation work is currently underway by our engineering team. We do not have an ETA for mitigation at this point.\nWe will provide an update by Wednesday, 2024-02-07 04:15 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west8-b.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]},{"created":"2024-02-07T11:58:13+00:00","modified":"2024-02-07T11:58:32+00:00","when":"2024-02-07T11:58:13+00:00","text":"Summary: Multiple product outage in europe-west8-b\nDescription: We are experiencing an issue with Cloud Key Management Service, Google BigQuery, Google Cloud Bigtable, Virtual Private Cloud (VPC), Google Cloud Deploy, Google Compute Engine, Persistent Disk, Google Cloud Networking, Service Directory, Traffic Director, Cloud Load Balancing, Google Cloud DNS, Cloud Memorystore, Cloud Dataflow beginning on Wednesday, 2024-02-07 02:46 US/Pacific.\nOther products are likely impacted. Mitigation work is currently underway by our engineering team. We do not have an ETA for mitigation at this point.\nWe will provide an update by Wednesday, 2024-02-07 04:15 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west8-b.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]},{"created":"2024-02-07T11:54:12+00:00","modified":"2024-02-07T11:54:31+00:00","when":"2024-02-07T11:54:12+00:00","text":"Summary: Multiple product outage in europe-west8-b\nDescription: We are experiencing an issue with Cloud Key Management Service, Google BigQuery, Google Cloud Bigtable, Virtual Private Cloud (VPC), Google Cloud Deploy, Google Compute Engine, Persistent Disk, Google Cloud Networking, Service Directory, Traffic Director, Cloud Load Balancing, Google Cloud DNS, Cloud Memorystore beginning on Wednesday, 2024-02-07 02:46 US/Pacific.\nOther products are likely impacted. 
Our engineering team continues to investigate the issue and identify affected products and services.\nWe will provide an update by Wednesday, 2024-02-07 04:15 US/Pacific with current details.\nDiagnosis: Customers are unable to reach any of the impacted Google Cloud products in europe-west8-b.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]},{"created":"2024-02-07T11:52:22+00:00","modified":"2024-02-07T11:52:40+00:00","when":"2024-02-07T11:52:22+00:00","text":"Summary: Multiple product outage in europe-west8-b\nDescription: We are experiencing an issue with Cloud Key Management Service, Google BigQuery, Google Cloud Bigtable, Virtual Private Cloud (VPC), Google Cloud Deploy, Google Compute Engine, Persistent Disk, Google Cloud Networking, Service Directory, Traffic Director, Cloud Load Balancing, Google Cloud DNS, Cloud Memorystore beginning on Wednesday, 2024-02-07 02:46 US/Pacific.\nOther products are likely impacted. Our engineering team continues to investigate the issue and identify affected products and services.\nWe will provide an update by Wednesday, 2024-02-07 04:15 US/Pacific with current details.\nDiagnosis: Most customers will find that all VMs in europe-west8-b are unreachable.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]},{"created":"2024-02-07T11:39:15+00:00","modified":"2024-02-07T11:39:31+00:00","when":"2024-02-07T11:39:15+00:00","text":"Summary: Multiple product outage in europe-west8-b\nDescription: We are experiencing an issue with Virtual Private Cloud (VPC) beginning on Wednesday, 2024-02-07 02:46 US/Pacific.\nOther products are likely impacted. Our engineering team continues to investigate the issue and identify affected products and services.\nWe will provide an update by Wednesday, 2024-02-07 04:00 US/Pacific with current details.\nDiagnosis: Most customers will find that all VMs in europe-west8-b are unreachable.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-02-14T21:51:46+00:00","modified":"2024-02-14T21:51:46+00:00","when":"2024-02-14T21:51:46+00:00","text":"# Incident Report\n## Summary\nOn 7 February 2024, multiple Google Cloud services experienced a partial zonal service outage in the europe-west8-b zone for a duration of 1 hour, 8 minutes. To our customers whose services were impacted during this service outage, we sincerely apologize. This is not the level of quality and reliability we strive to offer you, and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThe europe-west8 region recently received a networking upgrade to increase capacity and improve resilience. In order to minimize the risk, Google executes these as \"make-before-break,\" adding new capacity before decommissioning old capacity. The final step of this upgrade was to remove now-unused fiber connections between zones in the region. 
Deployment automation creates work orders for onsite technicians to remove unused fiber cabling from network devices and fiber patch panels.\nOn 7 February 2024, between 02:21 and 02:46 US/Pacific, onsite technicians performing this planned network maintenance inadvertently unplugged several fibers that were adjacent to those in the work order, but still in use for production traffic. As a result, a portion of the europe-west8-b zone unintentionally became isolated from a portion of the backbone network at 02:46 US/Pacific.\n## Remediation and Prevention\nGoogle engineers were alerted to the partial outage via internal monitoring on 7 February 2024, 02:56 US/Pacific and immediately started an investigation. Once the nature and scope of the issue became clear, Google engineers began reverting the fiber changes and restoring network capacity at 03:52 US/Pacific.\nSufficient capacity to serve customer traffic was restored by 03:54 US/Pacific, mitigating impact to the affected products. Full capacity was restored by 04:07 US/Pacific.\nGoogle is committed to preventing recurrence of this incident. The following actions have been identified:\n* Google engineers have paused all work of this kind globally, starting on 8 February 2024. This pause will remain in effect until the actions below have been implemented to reduce the risk of recurrence.\n* Complete the rollout of an enhanced physical work safety program, which includes updates to the current process for execution of planned work related to interfaces or devices serving customer traffic. The following action items within that program are relevant to this incident:\n* Creation of an automated notification system for the start / end of planned work related to interfaces or devices serving customer traffic.\n* Division of planned work into execution batches.\n* Require operational team supervision and monitoring for planned work.\n* Add multi-step verification of traffic status before fibers are disconnected.\n* Include additional controls as part of working procedures for the execution of critical tasks to ensure compliance with documented processes.\n## Detailed Description of Impact\nOn 7 February 2024, from 02:46 to 03:54 US/Pacific, multiple Google Cloud services experienced a partial zonal service outage for europe-west8-b. 
Affected services included:\n**Google Kubernetes Engine**\n* GKE clusters in europe-west8-b were unavailable.\n* Customers may also have experienced failures when attempting to create, delete, or modify VMs in the affected zone.\n**Cloud Key Management Service (KMS)**\n* Cloud KMS experienced a partial zonal outage for services in europe-west8-b, including Hardware Security Module (HSM), External Key Manager (EKM), Secret Manager, and Private CA.\n**Google Cloud Bigtable**\n* Customers experienced a service outage with non-high availability instances in europe-west8-b for the duration of the outage.\n**Virtual Private Cloud (VPC)**\n* VPC customers may have experienced increased packet loss in the affected zone.\n**Google Cloud Deploy**\n* Customers in europe-west8-b would have experienced errors creating rollouts and releases.\n**Google Compute Engine**\n* VMs in a subset of europe-west8-b were unreachable for the duration of the outage.\n* VM creations and deletions in europe-west8-c started failing as services failed over to that zone.\n**Persistent Disk (PD)**\n* PD devices in a subset of europe-west8-b would have been unavailable for the duration of the outage.\n* PD services related to snapshots, creation of new disks, and image creation for the affected zone would have experienced failures.\n* A small number of VMs with Persistent Disks in a different zone within the region saw guest errors.\n**Google Cloud Networking**\n* Cloud NAT, Cloud Interconnect, Cloud VPN, Cloud VR were unavailable for europe-west8-b.\n* Cloud Network programming was delayed for all customers in the europe-west8-b zone.\n**Service Directory**\n* Customers in europe-west8-b experienced read and write errors for the duration of the outage.\n**Traffic Director**\n* Stale configurations and load balancing assignments for all customers in europe-west8-b. This would have appeared as configuration updates not propagating and load balancing assignments not reacting to changes in load.\n* All newly restarted clients would have been unable to load configuration and receive load balancing assignments.\n**Cloud Load Balancing**\n* Approximately 50% of load balancers/target pools in europe-west8-b were unreachable.\n* Customers with Load Balancers configured in this zone would find them inconsistently available. If no Load Balancers were configured and available in another zone or region, requests to the customer's project would result in 500 errors.\n**Google Cloud DNS**\n* Customers in europe-west8-b would have been unable to write new DNS records for the duration of the outage.\n* Cloud DNS public name servers were unreachable from europe-west8-b during the outage, and intermittently unreachable from other zones in the region as a result of throttling as traffic moved from europe-west8-b to other zones in the region.\n**Memorystore for Redis**\n* Cloud Redis Standalone instances in europe-west8-b were unavailable for the duration of the outage.\n**Cloud BigQuery**\n* A small number of projects (one customer) may have experienced errors for API calls for a period of approximately 15 minutes for the tabledata.insertAll API. 
A very low error rate might have been present for jobs.insert and jobs.query operations as well, but these were mitigated much more quickly through automatic recovery mechanisms.\n**Cloud Dataflow**\n* A small number of customer projects may have experienced stuck streaming jobs in europe-west8-b for the duration of the incident.\n**Cloud Build**\n* Builds would have terminated with status “INTERNAL_ERROR” for approximately 20% of builds in europe-west8 for approximately the first 20 minutes of the outage. Intra-region failover healed the user impact thereafter.\n**Cloud SQL**\n* A small number of instance create operations failed in the europe-west8 region.\n* Existing HA instances were moved to a healthy zone to restore connectivity.\n* Existing Zonal instances in europe-west8-b would have remained unavailable for the duration of the outage.\n**Cloud Armor**\n* Propagation of updates to Cloud Armor Security policies in Cloud Console stalled globally.\n**Cloud Filestore**\n* Up to 100% error rates for CreateBackup, CreateInstance, CreateSnapshot, and DeleteInstance operations for the europe-west8-b zone.\n* Zonal instances in the affected zone were unavailable.","status":"AVAILABLE","affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"}]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"},{"title":"Persistent Disk","id":"SzESm2Ux129pjDGKWD68"},{"title":"Cloud Filestore","id":"jog4nyYkquiLeSK5s26q"},{"title":"Cloud Load Balancing","id":"ix7u9beT8ivBdjApTif3"},{"title":"Cloud Memorystore","id":"LGPLu3M5pcUAKU1z6eP3"},{"title":"Google BigQuery","id":"9CcrhHUcFevXPSVaSxkf"},{"title":"Google Cloud Bigtable","id":"LfZSuE3xdQU46YMFV5fy"},{"title":"Google Cloud Deploy","id":"6z5SnvJrJMJQSdJmUQjH"},{"title":"Google Cloud DNS","id":"TUZUsWSJUVJGW97Jq2sH"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Google Cloud SQL","id":"hV87iK5DcEXKgWU2kDri"},{"title":"Google Kubernetes Engine","id":"LCSbT57h59oR4W98NHuz"},{"title":"Identity and Access Management","id":"adnGEDEt9zWzs8uF1oKA"},{"title":"Service Directory","id":"vmq8TsEZwitKYM6V9BaM"},{"title":"Traffic Director","id":"NroZwL2UMMionesUGP87"},{"title":"Virtual Private Cloud (VPC)","id":"BSGtCUnz6ZmyajsjgTKv"}],"uri":"incidents/McSxWsRNvAPn7SbWGGig","currently_affected_locations":[],"previously_affected_locations":[{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Global","id":"global"}]},{"id":"27uG86K8f5E2L5Zz8WSt","number":"11866285583224557244","begin":"2024-01-31T14:17:00+00:00","created":"2024-01-31T14:37:51+00:00","end":"2024-01-31T14:55:16+00:00","modified":"2024-01-31T14:55:17+00:00","external_desc":"Customers are experiencing increased latencies on SCC APIs and notification delays of up to 4 hours.","updates":[{"created":"2024-01-31T14:55:15+00:00","modified":"2024-01-31T14:55:22+00:00","when":"2024-01-31T14:55:15+00:00","text":"The issue with Cloud Security Command Center is believed to be affecting a very small number of customers and our Engineering Team is working on it.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nWe thank you for your patience while we're working on resolving the 
issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-01-31T14:39:33+00:00","modified":"2024-01-31T14:39:44+00:00","when":"2024-01-31T14:39:33+00:00","text":"Summary: Customers are experiencing an increased latencies on SCC APIs, for notifications up to 4 hours.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2024-01-31 08:00 US/Pacific.\nDiagnosis: Customes may experience an:\n- increased latencies on SCC APIs.\n- increased latencies for notifications up to 4 hours.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-01-31T14:37:49+00:00","modified":"2024-01-31T14:41:12+00:00","when":"2024-01-31T14:37:49+00:00","text":"Summary: Customers are experiencing an increased latencies on SCC APIs, for notifications up to 4 hours.\nDescription: We are experiencing an issue with Cloud Security Command Center.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-01-31 08:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customes may experience an:\n- increased latencies on SCC APIs.\n- increased latencies for notifications up to 4 hours.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-01-31T14:55:15+00:00","modified":"2024-01-31T14:55:22+00:00","when":"2024-01-31T14:55:15+00:00","text":"The issue with Cloud Security Command Center is believed to be affecting a very small number of customers and our Engineering Team is working on it.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"csyyfUYy88hkeqbv23Mc","service_name":"Cloud Security Command Center","affected_products":[{"title":"Cloud Security Command Center","id":"csyyfUYy88hkeqbv23Mc"}],"uri":"incidents/27uG86K8f5E2L5Zz8WSt","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"bMAYzVmupVWVNUjPhchQ","number":"4350269359116945818","begin":"2024-01-23T17:30:00+00:00","created":"2024-01-23T19:12:22+00:00","end":"2024-01-25T06:00:00+00:00","modified":"2024-01-31T14:28:30+00:00","external_desc":"Multiple regions: Cloud Logging was experiencing issues with displaying log data in Google Cloud Console","updates":[{"created":"2024-01-31T01:58:47+00:00","modified":"2024-01-31T14:28:30+00:00","when":"2024-01-31T01:58:47+00:00","text":"# Incident Report\n## Summary\nOn Tuesday, 23 January 2024 at 09:30 PT, Cloud Logging, and Google Cloud products and services that rely on Cloud Logging, experienced delays ingesting logs originating from us-central1, us-east1, europe-west1, europe-north1, and asia-east1. 
This resulted in customers not being able to view these logs in Google Cloud Console or other places that make use of the logging query APIs during the impact windows for each region:\n* us-central1:\t9:30-13:30 PT (4h)\n* us-east1:\t9:30-10:20 PT (50m)\n* europe-west1:\t9:30-14:15 PT (4h45m)\n* europe-north1:\t9:30-14:15 PT (4h45m)\n* asia-east1:\t9:30-10:10 PT (40m)\nExports of logs as well as writing to log-based metrics were also delayed in these regions during the impact windows. The issue also affected alerting through Cloud Console for Personalized Service Health (PSH).\nTo our Google Cloud customers who were affected, we sincerely apologize. This is not the level of quality and reliability we strive to offer you and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThe first time data is received for a log bucket that has Log Analytics enabled, Cloud Logging dynamically provisions resources necessary to ingest and store logs in BigQuery for that bucket. This requires updating state in a configuration database, which is accessed during log ingestion and routing. This configuration is required to ensure data is stored in compliance with each customer's organization settings.\nAs part of ongoing feature development, Cloud Logging Engineers increased traffic from a new set of projects to Log Analytics. These projects were ingesting logs in multiple regions and the ramp-up resulted in a large number of concurrent dynamic provisioning requests in the five regions listed above. This caused contention and slowdowns accessing the configuration database. While the Log Router has a load-shedding mechanism to protect against loss of throughput in such situations, there was a previously unknown latent issue that caused the problematic traffic to not be isolated quickly enough in a separate buffer. As a result, Log Router throughput was reduced by about 40% in the impacted regions, causing a log processing backlog to form.\nThe primary impact was that recently written logs were not visible to queries and log exports were delayed for log data originating from any of the impacted regions. This delay also affected Log Analytics, log-based metrics, and other Google products and services that rely on log data, including Personalized Service Health. No log entries were permanently lost, and all log entries were eventually successfully ingested, indexed for queries, and exported to configured destinations.\nTo quickly process the large backlog of data, the Log Analytics ingesters and log-based metrics pipeline were scaled up. This scale-up led to two additional unintended secondary impacts.\n* **Log-based Metrics:** Queries for some log-based metrics with high cardinality were degraded for some users for about 25 hours following the event start time of 09:30 PT on 23 January 2024. Queries for such log-based metrics would time out if the query time interval overlapped with the interval containing high cardinality values, as it is less efficient to store and query high cardinality data points. The internal cardinality of the metrics was increased because the writer tasks had scaled up to ingest the backlog and there was an increased number of late-arriving logs. 
The issue was resolved when the high cardinality data aged out of the 25 hour in-memory retention window.\n* **Log Analytics:** Queries in Log Analytics would not return recently ingested data during the outage period until 24 January 2024 02:00 PT because the ingester scale-up led to exceeding an internal connection quota limit.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage via a support case and internal alerts on Tuesday, 23 January 2024 09:55 PT and immediately started an investigation. Once they determined that the feature ramp-up was the cause of the outage, they rolled back the feature ramp-up at 11:15 PT. This reduced contention but the recovery process was slow because the backlog already contained many logs that triggered provisioning. A change was then rolled out to accelerate the recovery of the impacted regions by disabling provisioning for the affected logs. The rollout completed by 13:40 PT, and the backlog was fully processed in all impacted regions by 14:15 PT.\nTo remediate the log-based metrics issue, Google engineers scaled down the number of writer tasks to reduce the cardinality of the generated metrics. The degraded query performance was mitigated by 24 January 2024 22:00 PT as the high cardinality data aged out of the in-memory retention window.\nTo remediate the issue in Log Analytics, Google engineers raised the internal connection quota and changed the connection type used by the buffer processor from exclusive connections to shared multiplexed connections. These changes mitigated the issue for Log Analytics and the buffered logs were fully processed by 24 January 2024 02:00 PT. The mitigation will also reduce the likelihood of a future occurrence of connection quota issues in Log Analytics.\nGoogle is committed to preventing recurrence of this incident. The following actions are in progress:\n* Change the rollout process used to implement these types of traffic ramp-ups, to reduce the blast radius of any issues that result.\n* Reduce the timeout for database operations in the Log Router, to ensure that problematic data is isolated more quickly.\n* Implement fault injection testing under load to verify that Log Router throughput can be maintained during both failures and slow operation of the configuration database.\n* Change the dynamic provisioning process to eliminate this dependency from the log routing critical path.\n* Improve monitoring, alerting, and playbooks so engineers are notified of and able to respond to log entry backlogs more quickly.\n* Improve the log-based metrics pipeline processing to reduce the cardinality issues that were caused by scaling up writers to consume the backlog.\nGoogle is committed to quickly and continually improving our technology and operations to prevent service disruptions. We appreciate your patience and apologize again for the impact to your organization. We thank you for your business.\n## Detailed Description of Impact\n### Cloud Logging\n* Log ingestion in Cloud Logging was delayed for logs ingested from us-central1, us-east1, europe-west1, europe-north1, and asia-east1 regions. Ingestion of logs from other regions was not impacted. 
The following are the impact windows for each affected region on 23 January 2024:\n* us-central1:\t9:30-13:30 PT (4h)\t~50% of messages\n* us-east1:\t9:30-10:20 PT (50m)\t~40% of messages\n* europe-west1:\t9:30-14:15 PT (4h45m)\t~75% of messages\n* europe-north1:\t9:30-14:15 PT (4h45m)\t~1% of messages\n* asia-east1:\t9:30-10:10 PT (40m)\t~20% of messages\n* Queries of logs from the impacted regions would not return recently written data during the outage period.\n* Exports of logs from the impacted regions to BigQuery, GCS, and Cloud PubSub destinations were delayed.\n* Graphs and queries for log-based metrics may have appeared to have missing or incomplete data during the outage period. Alerts that depended on this data may have been missed.\n* Log-based metrics queries for some high cardinality metrics experienced degraded performance for some users for about 25 hours following the event due to in-memory retention of high cardinality data.\n* No logs were lost during the incident. However, some log-based metrics, derived from the logs, may have gaps in the corresponding data points during the outage period.\n* Ingestion to Log Analytics was degraded for a duration of 12 hours.\n### Personalized Service Health\n* PSH Alerting through Cloud Console would have been unavailable during the impact windows for each affected region.\n* Additionally, messages written by PSH to Cloud Logging were also delayed for the affected regions.\n* The ability to view incident status on the PSH Dashboard and integrations to the PSH API were not impacted.","status":"AVAILABLE","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"created":"2024-01-24T20:45:12+00:00","modified":"2024-01-24T20:45:12+00:00","when":"2024-01-24T20:45:12+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 23 Jan 2024 09:30\n**Incident End for us-east1 logs delay:** 23 Jan 2024 10:20\n**Incident End for us-central1 logs delay:** 23 Jan 2024 13:45\n**Incident End for europe-west1 logs delay:** 23 Jan 2024 14:15\n**Incident End for Cloud Metrics backfill:** 23 Jan 2024 18:05\n**Cumulative Duration:** 8 hours, 35 minutes\n**Affected Services and Features:**\n- Google Cloud Logging\n- Cloud Dataflow\n- Personalized Service Health\n- Google Cloud products and services that rely on Cloud Logging.\n**Regions/Zones:** us-central1, europe-west1, us-east1\n**Description:**\nCloud Logging experienced delays ingesting logs that originated from us-central1, us-east1, and europe-west1 resulting in customers not being able to view these logs during that time in Google Cloud Console or other places that make use of the Logging query APIs. 
Exports of logs as well as writing to [log-based metrics](https://cloud.google.com/logging/docs/logs-based-metrics) were also delayed during this period.\nIngestion delays in Cloud Logging had a downstream impact on Google Cloud products and services that rely on Cloud Logging.\nFrom our preliminary investigations, the root cause of the issue is a rollout of an internal feature for Cloud Trace that uses Cloud Logging. The rollout of the new feature caused an unexpected contention in accessing the configuration database used for Log Routing, which caused a backlog in the ingestion pipeline.\nThe issue was mitigated by rolling back the internal feature of Cloud Trace, which concluded at 13:45 US/Pacific, and the logs in the pending queue were gradually processed, mitigating the delayed logs issue by 14:15 US/Pacific. The issue with log-based metrics where the logs were not written to corresponding data points was mitigated at 18:05 US/Pacific.\nGoogle will complete a full Incident Report in the following days to provide a full root cause.\n**Customer Impact:**\n**Impact to Google Cloud products and services:**\n* Log ingestion in Cloud Logging was delayed for logs ingested from us-central1, europe-west1, and us-east1. Ingestion of logs from other regions was not impacted.\n* Log queries, exports of logs (to BigQuery, GCS, or Cloud PubSub destinations), and writing corresponding data points to [log-based metrics](https://cloud.google.com/logging/docs/logs-based-metrics) were delayed for the three impacted regions.\n* No logs were lost during the incident. However, some [log-based metrics](https://cloud.google.com/logging/docs/logs-based-metrics), derived from the logs, may have gaps in the corresponding data points during the outage period.\n**Impact to Personalized Service Health (PSH):**\n* The PSH dashboard displayed outage status; however, the notification system to customers was down during the incident.\n* In addition to this, the log messages written by PSH were also impacted in the locations above.\n* Integrations to the PSH API were not impacted.\n**Additional Information:**\n* A subset of customers are experiencing delays with querying historical log-based metrics data and our engineers are continuing to work on resolving the issue for those customers via a separate incident.\n------------------------------","status":"AVAILABLE","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"created":"2024-01-24T02:05:57+00:00","modified":"2024-01-24T02:05:59+00:00","when":"2024-01-24T02:05:57+00:00","text":"The issue with Cloud Logging, Personalized Service Health has been resolved for all affected users as of Tuesday, 2024-01-23 17:50 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"created":"2024-01-23T23:17:41+00:00","modified":"2024-01-23T23:17:48+00:00","when":"2024-01-23T23:17:41+00:00","text":"Summary: Multiple regions: Cloud Logging is experiencing issues with displaying log data in Google Cloud Console\nDescription: Our engineering team has narrowed down the 
root cause to a feature rollout that started at 09:30 US/Pacific. The new feature usage caused contention in the backend database.\nAfter further investigation, the impact is narrowed down to logs from GCP products or services in us-central1, us-east1, and europe-west1 destined to any Cloud Logging buckets. We apologize for any confusion caused by the previous communications.\nus-east1 recovered as of 10:20 US/Pacific and us-central1 recovered as of 13:42 US/Pacific.\nOur engineers have completed the rollback of the new feature and are currently working on processing the logs in the pending queue in us-central1, europe-west1, Global. The rollout to clear the logs backlog has completed in all the affected regions. The logs backlog for us-east1 completed as of 10:20 US/Pacific, for us-central1 completed as of 13:42 US/Pacific, and for europe-west1 completed as of 14:15 US/Pacific.\n**Remaining Impact:** Customers using [log-based metrics](https://cloud.google.com/logging/docs/logs-based-metrics) may observe metrics with no backing logs. Our engineering teams are working on mitigating the metrics backfill. We do not have an ETA for mitigation of the log-based metrics issue at this point.\nThe notification error rate for PSH has reduced significantly and we believe that the majority of the backed-up notifications have been delivered. As we gradually process the remaining logs in the pending queue, customers may see an increase of historical outage notifications from PSH for the affected time period. We apologize for the inconvenience.\nWe will provide more information by Tuesday, 2024-01-23 20:00 US/Pacific.\nDiagnosis: - Customers using [log-based metrics](https://cloud.google.com/logging/docs/logs-based-metrics) may observe metrics with no backing logs.\n- Other impacts that were previously reported for Cloud Logging and PSH notifications should now be resolved.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"created":"2024-01-23T22:24:16+00:00","modified":"2024-01-23T22:31:15+00:00","when":"2024-01-23T22:24:16+00:00","text":"Summary: Multiple regions: Cloud Logging is experiencing issues with displaying log data in Google Cloud Console.\nDescription: Our engineering team has narrowed down the root cause to a feature rollout that started at 09:30 US/Pacific. The new feature usage caused contention in the backend database.\nAfter further investigation, the impact is narrowed down to logs from GCP products or services in us-central1, us-east1, and europe-west1 destined to any Cloud Logging buckets. We apologize for any confusion caused by the previous communications.\nus-east1 recovered as of 10:20 US/Pacific and us-central1 recovered as of 13:42 US/Pacific.\nThe rollout to clear the logs backlog has completed in all the affected regions. The logs backlog for us-east1 completed as of 10:20 US/Pacific, for us-central1 completed as of 13:42 US/Pacific, and for europe-west1 completed as of 14:15 US/Pacific.\n**Remaining Impact:** Customers using [log-based metrics](https://cloud.google.com/logging/docs/logs-based-metrics) may observe metrics with no backing logs. Our engineering teams are working on mitigating the metrics backfill. 
We do not have an ETA for mitigation of the log-based metrics issue at this point.\nThe notification error rate for PSH has reduced significantly and we believe that the majority of the backed-up notifications have been delivered. As we gradually process the remaining logs in the pending queue, customers may see an increase of historical outage notifications from PSH for the affected time period. We apologize for the inconvenience.\nWe will provide more information by Tuesday, 2024-01-23 15:30 US/Pacific.\nDiagnosis:\n- Customers using [log-based metrics](https://cloud.google.com/logging/docs/logs-based-metrics) may observe metrics with no backing logs.\n- Other impacts that were previously reported for Cloud Logging and PSH notifications should now be resolved.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"created":"2024-01-23T21:17:18+00:00","modified":"2024-01-23T21:18:41+00:00","when":"2024-01-23T21:17:18+00:00","text":"Summary: Multiple regions: Cloud Logging is experiencing issues with displaying log data in Google Cloud Console\nDescription: We are experiencing an issue with Cloud Logging, Cloud Dataflow, and Personalized Service Health (PSH).\nAfter further investigation, the impact is narrowed down to logs from GCP products or services in us-central1, us-east1, and europe-west1 destined to any Cloud Logging buckets. We apologize for any confusion caused by the previous communications.\nus-east1 recovered as of 10:20 US/Pacific.\nOur engineering team has narrowed down the root cause to a feature rollout that started at 09:30 US/Pacific. The new feature usage caused contention in the backend database.\nOur engineers have completed the rollback of the new feature and are currently working on processing the logs in the pending queue in us-central1, europe-west1, and Global. Currently, the ETA for the full mitigation is within the hour.\nAs we gradually process the logs in the pending queue, customers may see an increase of historical outage notifications from PSH for the affected time period. We apologize for the inconvenience.\nWe will provide more information by Tuesday, 2024-01-23 14:00 US/Pacific.\nDiagnosis:\n* Customers are unable to receive logs at this time in the Google Cloud Console.\n* Cloud Dataflow is impacted and we are working to identify other downstream impacts.\n* GCP products and services using Cloud Logging are potentially impacted.\n* Customers using PSH notifications are also impacted. While the PSH dashboard is available and outage communications are displayed on it, customers will not get notifications on the outages until issue resolution. Additionally, log messages written by PSH are also impacted. 
Integrations to the PSH API are not impacted.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"created":"2024-01-23T20:56:06+00:00","modified":"2024-01-23T20:57:21+00:00","when":"2024-01-23T20:56:06+00:00","text":"Summary: Global: Cloud Logging is experiencing issues with displaying log data in Google Cloud Console\nDescription: We are experiencing an issue with Cloud Logging, Cloud Dataflow, and Personalized Service Health (PSH).\nAfter further investigation, the impact is narrowed down to Cloud Logging customers using buckets configured in us-central1, us-east1, europe-west1, and Global rather than all the regions as previously mentioned. We apologize for any confusion this may have caused.\nOur engineering team has narrowed down the root cause to a feature rollout that started at 09:30 US/Pacific. The new feature usage caused contention in the backend database.\nOur engineers have completed the rollback of the new feature and are currently working on processing the logs in the pending queue in us-central1, europe-west1, and Global buckets. us-east1 recovered as of 10:20 US/Pacific. Currently, the ETA for the full mitigation is 1 hour.\nWe will provide more information by Tuesday, 2024-01-23 14:00 US/Pacific.\nDiagnosis:\n* Customers are unable to receive logs at this time in the Google Cloud Console.\n* Cloud Dataflow is impacted and we are working to identify other downstream impacts.\n* GCP products and services using Cloud Logging are potentially impacted.\n* Customers using PSH notifications are also impacted. While the PSH dashboard is available and outage communications are displayed on it, customers will not get notifications on the outages until issue resolution. Additionally, log messages written by PSH are also impacted. Integrations to the PSH API are not impacted.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"created":"2024-01-23T20:24:43+00:00","modified":"2024-01-23T20:25:58+00:00","when":"2024-01-23T20:24:43+00:00","text":"Summary: Global: Cloud Logging is experiencing issues with displaying log data in Google Cloud Console\nDescription: We are experiencing an issue with Cloud Logging, Cloud Dataflow, and Personalized Service Health (PSH).\nOur engineering team has narrowed down the root cause to a feature rollout that started at 09:30 US/Pacific. The new feature usage caused contention in the backend database.\nOur engineers have completed the rollback of the new feature and are currently working on processing the logs in the pending queue. Currently, the ETA for the full mitigation is 3 hours.\nWe will provide more information by Tuesday, 2024-01-23 13:00 US/Pacific.\nDiagnosis:\n* Customers are unable to receive logs at this time in the Google Cloud Console.\n* Cloud Dataflow is impacted and we are working to identify other downstream impacts.\n* GCP products and services using Cloud Logging are potentially impacted.\n* Customers using PSH notifications are also impacted. 
While the PSH dashboard is available and outage communications are displayed on it, customers will not receive notifications about the outages until the issue is resolved. Additionally, log messages written by PSH are also impacted. Integrations to the PSH API are not impacted.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Multi-region: eu","id":"eu"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Multi-region: us","id":"us"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-01-23T19:31:27+00:00","modified":"2024-01-23T19:31:28+00:00","when":"2024-01-23T19:31:27+00:00","text":"Summary: Global: Cloud Logging Data Unavailable in Google Cloud Console\nDescription: We are experiencing an issue with Cloud Logging.\nOur engineering team continues to investigate the issue.\nWe have identified the potential root cause and are working on a mitigation strategy.\nWe will provide more information by Tuesday, 2024-01-23 12:00 US/Pacific.\nDiagnosis: Customers are unable to view fresh logs at this time in the Google Cloud Console.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka
(asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-01-23T19:12:22+00:00","modified":"2024-01-23T19:12:23+00:00","when":"2024-01-23T19:12:22+00:00","text":"Summary: Global: Cloud Logging Data Unavailable in Google Cloud Console\nDescription: We've received a report of an issue with Cloud Logging as of Tuesday, 2024-01-23 10:36 US/Pacific.\nWe will provide more information by Tuesday, 2024-01-23 11:35 US/Pacific.\nDiagnosis: Cloud Logging data is unavailable in the Google Cloud Console.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Multi-region: eu","id":"eu"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium 
(europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Multi-region: us","id":"us"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-01-31T01:58:47+00:00","modified":"2024-01-31T14:28:30+00:00","when":"2024-01-31T01:58:47+00:00","text":"# Incident Report\n## Summary\nOn Tuesday, 23 January 2024 at 09:30 PT, Cloud Logging, and Google Cloud products and services that rely on Cloud Logging, experienced delays ingesting logs originating from us-central1, us-east1, europe-west1, europe-north1, and asia-east1. This resulted in customers not being able to view these logs in Google Cloud Console or other places that make use of the logging query APIs during the impact windows for each region:\n* us-central1:\t9:30-13:30 PT (4h)\n* us-east1:\t9:30-10:20 PT (50m)\n* europe-west1:\t9:30-14:15 PT (4h45m)\n* europe-north1:\t9:30-14:15 PT (4h45m)\n* asia-east1:\t9:30-10:10 PT (40m)\nExports of logs as well as writing to log-based metrics were also delayed in these regions during the impact windows. The issue also affected alerting through Cloud Console for Personalized Service Health (PSH).\nTo our Google Cloud customers who were affected, we sincerely apologize. This is not the level of quality and reliability we strive to offer you and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nThe first time data is received for a log bucket that has Log Analytics enabled, Cloud Logging dynamically provisions resources necessary to ingest and store logs in BigQuery for that bucket. This requires updating state in a configuration database, which is accessed during log ingestion and routing. This configuration is required to ensure data is stored in compliance with each customer's organization settings.\nAs part of ongoing feature development, Cloud Logging Engineers increased traffic from a new set of projects to Log Analytics. These projects were ingesting logs in multiple regions and the ramp-up resulted in a large number of concurrent dynamic provisioning requests in the five regions listed above. This caused contention and slowdowns accessing the configuration database. 
While the Log Router has a load-shedding mechanism to protect against loss of throughput in such situations, there was a previously unknown latent issue that prevented the problematic traffic from being isolated quickly enough in a separate buffer. As a result, Log Router throughput was reduced by about 40% in the impacted regions, causing a log processing backlog to form.\nThe primary impact was that recently written logs were not visible to queries and log exports were delayed for log data originating from any of the impacted regions. This delay also affected Log Analytics, log-based metrics, and other Google products and services that rely on log data, including Personalized Service Health. No log entries were permanently lost, and all log entries were eventually successfully ingested, indexed for queries, and exported to configured destinations.\nTo quickly process the large backlog of data, the Log Analytics ingesters and log-based metrics pipeline were scaled up. This scale-up led to two additional unintended secondary impacts.\n* **Log-based Metrics:** Queries for some log-based metrics with high cardinality were degraded for some users for about 25 hours following the event start time of 09:30 PT on 23 January 2024. Queries for such log-based metrics would time out if the query time interval overlapped with the interval containing high-cardinality values, as it is less efficient to store and query high-cardinality data points. The internal cardinality of the metrics was increased because the writer tasks had scaled up to ingest the backlog and there was an increased number of late-arriving logs. The issue was resolved when the high-cardinality data aged out of the 25-hour in-memory retention window.\n* **Log Analytics:** Queries in Log Analytics would not return recently ingested data during the outage period until 24 January 2024 02:00 PT because the ingester scale-up led to exceeding an internal connection quota limit.\n## Remediation and Prevention\nGoogle engineers were alerted to the outage via a support case and internal alerts on Tuesday, 23 January 2024 09:55 PT and immediately started an investigation. Once they determined that the feature ramp-up was the cause of the outage, they rolled back the feature ramp-up at 11:15 PT. This reduced contention but the recovery process was slow because the backlog already contained many logs that triggered provisioning. A change was then rolled out to accelerate the recovery of the impacted regions by disabling provisioning for the affected logs. The rollout completed by 13:40 PT, and the backlog was fully processed in all impacted regions by 14:15 PT.\nTo remediate the log-based metrics issue, Google engineers scaled down the number of writer tasks to reduce the cardinality of the generated metrics. The degraded query performance was mitigated by 24 January 2024 22:00 PT as the high-cardinality data aged out of the in-memory retention window.\nTo remediate the issue in Log Analytics, Google engineers raised the internal connection quota and changed the connection type used by the buffer processor from exclusive connections to shared multiplexed connections. These changes mitigated the issue for Log Analytics and the buffered logs were fully processed by 24 January 2024 02:00 PT. The mitigation will also reduce the likelihood of a future occurrence of connection quota issues in Log Analytics.\nGoogle is committed to preventing recurrence of this incident.
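The load-shedding idea described above (divert entries whose configuration lookups are slow into a separate buffer so the main pipeline keeps its throughput) can be sketched as follows; the list of preventive actions continues immediately after. This is purely illustrative: the timeout budget, queue names, and lookup_config callable are assumptions, not the actual Log Router implementation.

```python
# Illustrative load-shedding sketch, assuming a hypothetical
# lookup_config(entry, timeout=...) that raises TimeoutError when the
# configuration database is too slow to answer within budget.
import queue

FAST_PATH_TIMEOUT_S = 0.05  # assumed per-entry budget for the fast path

def route_entry(entry, lookup_config,
                main_out: queue.Queue, side_buffer: queue.Queue) -> None:
    try:
        cfg = lookup_config(entry, timeout=FAST_PATH_TIMEOUT_S)
    except TimeoutError:
        # Isolate problematic traffic instead of stalling the pipeline;
        # a background worker drains side_buffer once the DB recovers.
        side_buffer.put(entry)
        return
    main_out.put((cfg, entry))  # fast path: config resolved in time
```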
The following actions are in progress:\n* Change the rollout process used to implement these types of traffic ramp-ups, to reduce the blast radius of any issues that result.\n* Reduce the timeout for database operations in the Log Router, to ensure that problematic data is isolated more quickly.\n* Implement fault injection testing under load to verify that Log Router throughput can be maintained during both failures and slow operation of the configuration database.\n* Change the dynamic provisioning process to eliminate this dependency from the log routing critical path.\n* Improve monitoring, alerting, and playbooks so engineers are notified of and able to respond to log entry backlogs more quickly.\n* Improve the log-based metrics pipeline processing to reduce the cardinality issues that were caused by scaling up writers to consume the backlog.\nGoogle is committed to quickly and continually improving our technology and operations to prevent service disruptions. We appreciate your patience and apologize again for the impact to your organization. We thank you for your business.\n## Detailed Description of Impact\n### Cloud Logging\n* Log ingestion in Cloud Logging was delayed for logs ingested from us-central1, us-east1, europe-west1, europe-north1, and asia-east1 regions. Ingestion of logs from other regions was not impacted. The following are the impact windows for each affected region on 23 January 2024:\n* us-central1:\t9:30-13:30 PT (4h)\t~50% of messages\n* us-east1:\t9:30-10:20 PT (50m)\t~40% of messages\n* europe-west1:\t9:30-14:15 PT (4h45m)\t~75% of messages\n* europe-north1:\t9:30-14:15 PT (4h45m)\t~1% of messages\n* asia-east1:\t9:30-10:10 PT (40m)\t~20% of messages\n* Queries of logs from the impacted regions would not return recently written data during the outage period.\n* Exports of logs from the impacted regions to BigQuery, GCS, and Cloud Pub/Sub destinations were delayed.\n* Graphs and queries for log-based metrics may have appeared to have missing or incomplete data during the outage period. Alerts that depended on this data may have been missed.\n* Log-based metrics queries for some high-cardinality metrics experienced degraded performance for some users for about 25 hours following the event due to in-memory retention of high-cardinality data.\n* No logs were lost during the incident.
However, some log-based metrics derived from the logs may have gaps in the corresponding data points during the outage period.\n* Ingestion to Log Analytics was degraded for a duration of 12 hours.\n### Personalized Service Health\n* PSH Alerting through Cloud Console would have been unavailable during the impact windows for each affected region.\n* Additionally, messages written by PSH to Cloud Logging were also delayed for the affected regions.\n* The ability to view incident status on the PSH Dashboard and integrations to the PSH API were not impacted.","status":"AVAILABLE","affected_locations":[{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"},{"title":"Cloud Logging","id":"PuCJ6W2ovoDhLcyvZ1xa"}],"uri":"incidents/bMAYzVmupVWVNUjPhchQ","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Multi-region: eu","id":"eu"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Multi-region: us","id":"us"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas
(us-west4)","id":"us-west4"}]},{"id":"oSkQCweQ7xWmgu1g1Jps","number":"17163514131967453186","begin":"2024-01-23T15:45:00+00:00","created":"2024-01-23T17:08:03+00:00","end":"2024-01-23T17:20:00+00:00","modified":"2024-01-23T20:51:33+00:00","external_desc":"Multi-Region: Google Cloud VMWare Engine Portal Unavailable in Google Cloud Console","updates":[{"created":"2024-01-23T20:51:33+00:00","modified":"2024-01-23T20:51:33+00:00","when":"2024-01-23T20:51:33+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 23 January, 2024 07:45\n**Incident End:** 23 January, 2024 09:20\n**Duration:** 1 hour, 35 minutes\n**Affected Services and Features:**\nGoogle Cloud VMware Engine\n**Regions/Zones:** Global\n**Description:**\nGoogle Cloud VMware Engine (GCVE) customers were unable to navigate to the GCVE Portal in Cloud Console, or perform management actions for VMware resources using the GCVE API or gcloud CLI, for a duration of 1 hour 35 minutes. From preliminary analysis, the root cause of the issue was expired certificates.\nThe issue was mitigated on 23 January, 2024 09:20 US/Pacific by installing new certificates.\n**Customer Impact:**\nGCVE customers were unable to access the GCVE Portal through Cloud Console and would have experienced errors when using the GCVE API and gcloud CLI to perform management actions on VMware resources. 
VMware workloads were not impacted and remained accessible through vCenter.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-01-23T17:21:10+00:00","modified":"2024-01-23T17:21:12+00:00","when":"2024-01-23T17:21:10+00:00","text":"The issue with Google Cloud VMWare Engine Portal in the Google Cloud Console has been resolved for all affected users as of Tuesday, 2024-01-23 09:20 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},{"created":"2024-01-23T17:08:02+00:00","modified":"2024-01-23T17:08:05+00:00","when":"2024-01-23T17:08:02+00:00","text":"Summary: Multi-Region: Google Cloud VMWare Engine Portal Unavailable in Google Cloud Console\nDescription: We are experiencing an issue with Google Cloud VMWare Engine Portal.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2024-01-23 09:40 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers are unable to navigate to the Google Cloud VMWare Engine Portal in the Google Cloud Console. 
The UI, API, and CLI are not available and there is no impact to VM workloads.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-01-23T20:51:33+00:00","modified":"2024-01-23T20:51:33+00:00","when":"2024-01-23T20:51:33+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 23 January, 2024 07:45\n**Incident End:** 23 January, 2024 09:20\n**Duration:** 1 hour, 35 minutes\n**Affected Services and Features:**\nGoogle Cloud VMware Engine\n**Regions/Zones:** Global\n**Description:**\nGoogle Cloud VMware Engine (GCVE) customers were unable to navigate to the GCVE Portal in Cloud Console, or perform management actions for VMware resources using the GCVE API or gcloud CLI, for a duration of 1 hour 35 minutes. 
From preliminary analysis, the root cause of the issue was expired certificates.\nThe issue was mitigated on 23 January, 2024 09:20 US/Pacific by installing new certificates.\n**Customer Impact:**\nGCVE customers were unable to access the GCVE Portal through Cloud Console and would have experienced errors when using the GCVE API and gcloud CLI to perform management actions on VMware resources. VMware workloads were not impacted and remained accessible through vCenter.","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Los Angeles (us-west2)","id":"us-west2"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"VMWare engine","id":"9H6gWUHvb2ZubeoxzQ1Y"},{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"}],"uri":"incidents/oSkQCweQ7xWmgu1g1Jps","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal 
(northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"Mqgi3ZvBPdfMeQ5CduK2","number":"5835883944386932019","begin":"2024-01-17T15:44:00+00:00","created":"2024-01-17T16:22:43+00:00","end":"2024-01-17T17:58:13+00:00","modified":"2024-01-17T22:26:01+00:00","external_desc":"Document AI training fails on UI","updates":[{"created":"2024-01-17T22:26:01+00:00","modified":"2024-01-17T22:26:01+00:00","when":"2024-01-17T22:26:01+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 17 January 2024 07:43\n**Incident End:** 17 Januar, 2024 09:52\n**Duration:** 2 hours, 9 minutes\n**Affected Services and Features:**\nDocument AI\n**Regions/Zones:**\nMumbai (asia-south1)\nSingapore (asia-southeast1)\nSydney (australia-southeast1)\nMulti-region: euLondon (europe-west2)\nFrankfurt (europe-west3)\nMontréal (northamerica-northeast1)\nMulti-region: us\n**Description:**\nDocument AI custom extractor experienced failing with \"Internal error encountered\" in multiple regions for a duration of 2 hours and 9 minutes. During the incident there was a workaround provided; customers were able to use “call the API” to do training instead of doing it on UI.\nFrom preliminary analysis the root cause of the issue was a rollout misalignment. 
This was fixed by speeding up the rollout, which was then completed successfully, resolving impact.\n**Customer Impact:**\nDuring the incident customers may have experienced:\n- Failed training requests on Document AI custom extractor with an error \"Internal error encountered\" on the UI.\n- Direct API calls were not affected.","status":"AVAILABLE","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"}]},{"created":"2024-01-17T17:58:12+00:00","modified":"2024-01-17T17:58:14+00:00","when":"2024-01-17T17:58:12+00:00","text":"The issue with Document AI has been resolved for all affected users as of Wednesday, 2024-01-17 09:57 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"}]},{"created":"2024-01-17T17:21:07+00:00","modified":"2024-01-17T17:21:10+00:00","when":"2024-01-17T17:21:07+00:00","text":"Summary: Document AI training fails on UI\nDescription: Mitigation work is currently underway by our engineering team.\nWe will provide more information by Wednesday, 2024-01-17 11:30 US/Pacific.\nDiagnosis: All training requests on Document AI custom extractor are currently failing with \"Internal error encountered\" on the UI. Direct API calls are not affected.\nWorkaround: Customers can call the API directly to run training instead of using the UI","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"}]},{"created":"2024-01-17T16:28:16+00:00","modified":"2024-01-17T16:28:23+00:00","when":"2024-01-17T16:28:16+00:00","text":"Summary: Document AI training fails on UI\nDescription: We are experiencing an issue with Document AI beginning on Wednesday, 2024-01-17 04:25 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-01-17 09:30 US/Pacific with current details.\nDiagnosis: All training requests on Document AI custom extractor are currently failing with \"Internal error encountered\" on the UI.
Direct API calls are not affected.\nWorkaround: Customers can call the API directly to run training instead of using the UI","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"}]},{"created":"2024-01-17T16:22:34+00:00","modified":"2024-01-17T16:22:46+00:00","when":"2024-01-17T16:22:34+00:00","text":"Summary: Document AI training fails on UI\nDescription: We are experiencing an issue with Document AI beginning on Wednesday, 2024-01-17 04:25 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2024-01-17 09:30 US/Pacific with current details.\nDiagnosis: All training requests on Document AI custom extractor are currently failing with \"Internal error encountered\" on the UI. Direct API calls are not affected.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: eu","id":"eu"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2024-01-17T22:26:01+00:00","modified":"2024-01-17T22:26:01+00:00","when":"2024-01-17T22:26:01+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 17 January 2024 07:43\n**Incident End:** 17 January 2024 09:52\n**Duration:** 2 hours, 9 minutes\n**Affected Services and Features:**\nDocument AI\n**Regions/Zones:**\nMumbai (asia-south1)\nSingapore (asia-southeast1)\nSydney (australia-southeast1)\nMulti-region: eu\nLondon (europe-west2)\nFrankfurt (europe-west3)\nMontréal (northamerica-northeast1)\nMulti-region: us\n**Description:**\nDocument AI custom extractor training requests were failing with \"Internal error encountered\" in multiple regions for a duration of 2 hours and 9 minutes. During the incident a workaround was provided: customers could call the API directly to run training instead of using the UI.\nFrom preliminary analysis, the root cause of the issue was a rollout misalignment.
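The workaround above, calling the API directly rather than training through the UI, can be sketched as follows using google-auth and the train method on processor versions from the public Document AI REST reference. All IDs are placeholders and the request body is schematic; the incident narrative resumes right after this sketch.

```python
# Hedged sketch: start Document AI custom extractor training via the
# REST API instead of the UI. Endpoint shape follows the public
# Document AI v1 reference as understood here; IDs are placeholders and
# a real request also needs training input data.
import google.auth
from google.auth.transport.requests import AuthorizedSession

project, location, processor = "my-project", "us", "my-processor-id"  # placeholders
parent = f"projects/{project}/locations/{location}/processors/{processor}"
url = (f"https://{location}-documentai.googleapis.com/v1/"
       f"{parent}/processorVersions:train")

creds, _ = google.auth.default(
    scopes=["https://www.googleapis.com/auth/cloud-platform"])
session = AuthorizedSession(creds)

response = session.post(url, json={
    "processorVersion": {"displayName": "trained-via-api"},  # schematic body
})
response.raise_for_status()
print(response.json())  # a long-running operation to poll for completion
```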
This was fixed by speeding up the rollout, which was then completed successfully, resolving impact.\n**Customer Impact:**\nDuring the incident customers may have experienced:\n- Failed training requests on Document AI custom extractor with an error \"Internal error encountered\" on UI.\n- Direct API calls were not affected.","status":"AVAILABLE","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Document AI","id":"GWuqLi6DKb1DkzyRtRuD"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"}],"uri":"incidents/Mqgi3ZvBPdfMeQ5CduK2","currently_affected_locations":[],"previously_affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: eu","id":"eu"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Multi-region: us","id":"us"}]},{"id":"i5we7WjHzCpiXZty3hAr","number":"8823472421243147085","begin":"2024-01-11T23:35:47+00:00","created":"2024-01-12T00:00:40+00:00","end":"2024-01-12T02:32:34+00:00","modified":"2024-01-12T02:32:35+00:00","external_desc":"Google Cloud AppSheet experienced issues with loading images that are hosted in Google Drive.","updates":[{"created":"2024-01-12T02:32:34+00:00","modified":"2024-01-12T02:32:36+00:00","when":"2024-01-12T02:32:34+00:00","text":"The issue with AppSheet has been resolved for all affected users as of Thursday, 2024-01-11 18:00 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-01-12T01:42:25+00:00","modified":"2024-01-12T01:42:27+00:00","when":"2024-01-12T01:42:25+00:00","text":"Summary: Google Cloud AppSheet is experiencing issues with loading images that are hosted in Google Drive.\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Thursday, 2024-01-11 18:30 US/Pacific.\nWe will provide more information by Thursday, 2024-01-11 19:15 US/Pacific.\nDiagnosis: The customers who use Google Drive and links of the form **https://drive.google.com/uc?export=view\u0026id=","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-01-12T00:46:34+00:00","modified":"2024-01-12T00:46:37+00:00","when":"2024-01-12T00:46:34+00:00","text":"Summary: Google Cloud AppSheet is experiencing issues with loading images that are hosted in Google Drive.\nDescription: We are 
experiencing an issue with AppSheet beginning on Thursday, 2024-01-08 13:00 US/Pacific.\nOur engineering team is actively investigating the issue.\nWe will provide an update by Thursday, 2024-01-11 18:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers who use Google Drive to host their application images are unable to load those images from Google Drive.\nWorkaround: Impacted customers can relocate the images to their designated Drive folder and modify all relevant data sources to reflect the updated image file paths.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2024-01-12T00:00:39+00:00","modified":"2024-01-12T00:00:41+00:00","when":"2024-01-12T00:00:39+00:00","text":"Summary: Google Cloud AppSheet is experiencing issues with viewing images that are hosted in Google Drive.\nDescription: We are experiencing an issue with AppSheet beginning on Thursday, 2024-01-08 13:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-01-11 17:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers who use Google Drive to host their application images are unable to load those images from Google Drive.\nWorkaround: Customers would have to move the images into their Drive folder and then update all their data sources to point to the new image locations.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2024-01-12T02:32:34+00:00","modified":"2024-01-12T02:32:36+00:00","when":"2024-01-12T02:32:34+00:00","text":"The issue with AppSheet has been resolved for all affected users as of Thursday, 2024-01-11 18:00 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FWjKi5U7KX4FUUPThHAJ","service_name":"AppSheet","affected_products":[{"title":"AppSheet","id":"FWjKi5U7KX4FUUPThHAJ"}],"uri":"incidents/i5we7WjHzCpiXZty3hAr","currently_affected_locations":[],"previously_affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"aeWwtkfc5kcE2BhrbqhJ","number":"10490855890240805502","begin":"2024-01-11T18:26:40+00:00","created":"2024-01-11T18:48:44+00:00","end":"2024-01-11T23:19:01+00:00","modified":"2024-01-11T23:19:01+00:00","external_desc":"Vertex AI Search: Delays in Retail Search model training","updates":[{"created":"2024-01-11T23:19:00+00:00","modified":"2024-01-11T23:19:02+00:00","when":"2024-01-11T23:19:00+00:00","text":"The issue with Recommendation AI, Vertex AI Search has been resolved as of Thursday, 2024-01-11
14:50 US/Pacific.\nWe understand that this issue impacted your ability to access and use our services; we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-01-11T22:06:11+00:00","modified":"2024-01-11T22:06:14+00:00","when":"2024-01-11T22:06:11+00:00","text":"Summary: Vertex AI Search: Retail Search model training delayed\nDescription: Engineers have partially mitigated the issue.\nAt this time, Retail Search model training has resumed for new events. Existing, unprocessed events are also being processed; however, this is likely to take some time to complete.\nWe do not have an ETA for full resolution at this point.\nWe will provide an update by Friday, 2024-01-12 13:00 US/Pacific with current details.\nDiagnosis: Retail Search model training may be delayed or stopped.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-01-11T21:50:49+00:00","modified":"2024-01-11T21:50:51+00:00","when":"2024-01-11T21:50:49+00:00","text":"Summary: Vertex AI Search: Retail Search model training delayed\nDescription: Mitigation work remains underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-01-11 17:00 US/Pacific.\nDiagnosis: Retail Search model training may be delayed or stopped.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-01-11T20:06:23+00:00","modified":"2024-01-11T20:06:27+00:00","when":"2024-01-11T20:06:23+00:00","text":"Summary: Vertex AI Search: Retail Search model training delayed\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-01-11 14:00 US/Pacific.\nDiagnosis: Retail Search model training may be delayed or stopped.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-01-11T19:07:30+00:00","modified":"2024-01-11T19:07:33+00:00","when":"2024-01-11T19:07:30+00:00","text":"Summary: Vertex AI Search: Retail Search model training delayed\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-01-11 12:15 US/Pacific.\nDiagnosis: Retail Search model training may be delayed or stopped.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-01-11T18:48:42+00:00","modified":"2024-01-11T18:48:46+00:00","when":"2024-01-11T18:48:42+00:00","text":"Summary: Vertex AI Search: Retail Search model training delayed\nDescription: We are experiencing an issue with Recommendation AI, Vertex AI Search.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-01-11 12:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Retail Search model training may be delayed or stopped.\nWorkaround: None at this
time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2024-01-11T23:19:00+00:00","modified":"2024-01-11T23:19:02+00:00","when":"2024-01-11T23:19:00+00:00","text":"The issue with Recommendation AI, Vertex AI Search has been resolved as of Thursday, 2024-01-11 14:50 US/Pacific.\nWe understand that this issue had impacted your ability to access and use our services, we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nThank you for choosing us.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Recommendation AI","id":"jWSoZzR1kkyiDi9C5GMM"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"}],"uri":"incidents/aeWwtkfc5kcE2BhrbqhJ","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"mnuAqMHUJc8siEuv1TBq","number":"15000840369579848738","begin":"2024-01-11T09:13:33+00:00","created":"2024-01-11T10:22:00+00:00","end":"2024-01-11T13:23:28+00:00","modified":"2024-01-11T13:23:28+00:00","external_desc":"Users may observe issues while using Chronicle Security with Microsoft Azure Storage","updates":[{"created":"2024-01-11T13:23:26+00:00","modified":"2024-01-11T13:23:34+00:00","when":"2024-01-11T13:23:26+00:00","text":"The issue with Chronicle Security which impacts Azure storage feeds is believed to be affecting a very small number of projects and our Engineering Team is working on it.\nWhile new files are now being ingested without delay, previously impacted files will have delayed ingestion till Thursday, 2024-01-11 14:00 US/Pacific when full resolution is expected to complete.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},{"created":"2024-01-11T12:56:48+00:00","modified":"2024-01-11T12:56:56+00:00","when":"2024-01-11T12:56:48+00:00","text":"Summary: Users may observe issues while using Chronicle Security with Microsoft Azure Storage\nDescription: We believe the issue with Chronicle Security which impacts Azure storage feeds is partially resolved.\nWhile new files are being ingested without delay, previously impacted files will have delayed ingestion.\nOur engineering team continues to work towards full resolution.\nFull resolution is expected to be completed by Thursday, 2024-01-11 14:00 US/Pacific.\nWe will provide an update by Thursday, 2024-01-11 14:30 US/Pacific with current details.\nDiagnosis: Users may experience delayed ingestion for feeds with azure storage while using Chronicle Security\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore 
(asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},{"created":"2024-01-11T11:45:46+00:00","modified":"2024-01-11T11:45:57+00:00","when":"2024-01-11T11:45:46+00:00","text":"Summary: Users may observe issues while using Chronicle Security with Microsoft Azure Storage\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2024-01-11 06:00 US/Pacific.\nDiagnosis: Users may experience delayed ingestion for feeds with azure storage while using Chronicle Security\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},{"created":"2024-01-11T10:21:48+00:00","modified":"2024-01-11T10:22:02+00:00","when":"2024-01-11T10:21:48+00:00","text":"Summary: Users may observe issues while using Chronicle Security with Microsoft Azure Storage\nDescription: We are experiencing an issue with Chronicle Security which impacts Azure storage feeds .\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2024-01-11 04:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Users may experience delayed ingestion for feeds with azure storage while using Chronicle Security\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]}],"most_recent_update":{"created":"2024-01-11T13:23:26+00:00","modified":"2024-01-11T13:23:34+00:00","when":"2024-01-11T13:23:26+00:00","text":"The issue with Chronicle Security which impacts Azure storage feeds is believed to be affecting a very small number of projects and our Engineering Team is working on it.\nWhile new files are now being ingested without delay, previously impacted files will have delayed ingestion till Thursday, 2024-01-11 14:00 US/Pacific when full resolution is expected to complete.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney 
(australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/mnuAqMHUJc8siEuv1TBq","currently_affected_locations":[],"previously_affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},{"id":"ZvBMWa5Z8yhfCwbp5xTp","number":"13443163250055784425","begin":"2024-01-10T15:23:00+00:00","created":"2024-01-11T17:26:45+00:00","end":"2024-01-12T11:08:00+00:00","modified":"2024-01-22T18:33:53+00:00","external_desc":"Multiple Google Cloud products are impacted by a Cloud Monitoring issue","updates":[{"created":"2024-01-22T18:33:53+00:00","modified":"2024-01-22T18:33:53+00:00","when":"2024-01-22T18:33:53+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 10 January 2024, Google Cloud Monitoring and all Google Cloud Products that expose Google Cloud Monitoring experienced dashboard delays and metric query failures (Initial degradation started on 09 January 2024 8:30 am PST, due to data staleness) for a duration of 1 day, 19 hours, 45 minutes and service metric data unavailability (which started on 3 January 2024 11:23 PST with a low impact window until 10 January 2024 09:30 PST) with significant impact window starting 10 January 2024 9:30 PST for a duration of 7 hours, 15 minutes. To our Google Cloud Monitoring and Google Cloud Products customers who were affected, we sincerely apologize. This is not the level of quality and reliability we strive to offer you and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nGoogle Cloud Monitoring experienced two distinct issues that impacted system metric data for most Google Cloud Products.\nMetric Data Queries:\nMetric data is stored in-memory prior to being stored on-disk. Initial degradation started on 09 January 2024 8:30 am PST, due to data staleness. A configuration change in data replication for us-central1 triggered a bottleneck in the pipeline responsible for moving data to disk for querying. Initially, this bottleneck induced backlog did not cause user-visible impact, given data continued to be served from the in-memory tier for the most recent 24h.\nWhen the pipeline blockage was mitigated 10 January 2024 8:00 am PST, the entire 20 hours backlog of files was rapidly ingested into the system that serves queries from disk. But, the resulting huge number of files triggered yet-another bottleneck in the on-disk system, causing high latency or failure for most queries.\nMetric Data Unavailability:\nA combination of two changes - one permissions-related and another scheduling-related - caused certain Cloud Metrics to be unavailable.\nThese changes were rolled out on 03 January 2024 11:23 PST. 
The impact from these changes was limited initially, but when mitigation was attempted on 09 January 2024 2:07 PST, it induced a bigger issue. The new problem surfaced due to a higher rate of server restarts.\n## Remediation and Prevention\nMetric Data Queries:\nGoogle engineers were alerted to the (not yet user-visible) data staleness issue by internal SLIs (Service Level Indicators) on 09 January 2024 16:58 PST and immediately started an investigation. Staleness began 09 January 2024 08:30:00 PST. When the high latency replica was removed at 10 January 2024 07:23 PST, the processing pipeline returned to normalcy.\nWhile response to this first issue was still ongoing, engineers were alerted by user-facing SLIs 10 January 2024 07:29 PST of user-visible query unavailability that had begun at 07:23 PST. They reconfigured the system to remove bottlenecks and increased the overall amount of compute resources available. This eventually reduced the backlog and returned to normalcy. All query availability/latency SLI also recovered fully at 12 January 2024 03:15 PST.\nMetric Data Unavailability:\nGoogle engineers were alerted by SLIs (Service Level Indicators) 09 January 2024 23:51 PST and immediately started an investigation.\nThe initial attempt at mitigating the problem caused wider issues. We stopped this mitigation and re-applied a newer patch that fixed the root-cause correctly. All remaining errors were resolved on 10 January 2024 16:40 PST.\nRemediation:\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n* Right-size the ingestion backlog processing that keeps up with load demand without affecting query availability.\n* Reassess and address the ingestion pipeline for bottlenecks that might induce this or related backlog issues.\n* Revisit the process for applying mitigations with better health indicators for side-effects.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-01-12T21:02:04+00:00","modified":"2024-01-12T21:02:04+00:00","when":"2024-01-12T21:02:04+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Metric data unavailability:** * **Low impact window start:** 03 January, 2024 11:23 * **Low impact window end:** 10 January, 2024 09:30 * **Duration:** 6 days, 22 hours, 7 minutes. 
* **Significant impact window start:** 10 January, 2024 09:30 * **Significant impact window end:** 10 January, 2024 16:45 * **Duration:** 7 hours, 15 minutes\n**Metric data queries issue start:** 10 January, 2024 07:23\n**Metric data queries issue end:** 12 January, 2024 03:08\n**Duration:** 1 day, 19 hours, 45 minutes\n**Affected Services and Features:**\nGoogle Cloud Monitoring and All Google Cloud Products that expose Google Cloud Monitoring.\n**Regions/Zones:** Global\n**Description:**\nGoogle Cloud Monitoring experienced two distinct issues (metric data unavailability, metric data query failures) that impacted system metric data for most Google Cloud Products.\nBetween 03 January 2024, 11:23 and 10 January 2024, 16:45 US/Pacific a small number of Google Cloud Monitoring users experienced sporadic issues where metric data was unavailable creating gaps in the metric data. From preliminary analysis, the root cause was a rollout to increase the monitoring platform’s reliability, which inadvertently introduced an issue that is triggered upon monitoring server restart.\nBetween 03 January 11:23 and 10 January 09:30 US/Pacific, the rate of monitoring server restarts (and thus chances to trigger the issue) was very low. On 10 January starting 09:30 US/Pacific, restarts for a subsequent rollout triggered the issue more frequently. By 10 January 16:45 US/Pacific, engineers had mitigated the issue by rolling back the change that triggered the issue on monitoring server restarts.\nBetween 10 January 2024, 07:23 US/Pacific and 12 January 2024, 03:08 US/Pacific querying of metric data that is older than 24 hours in us-central1 experienced significant delays leading to query failures. This also led to issues loading some Cloud Console dashboards. From preliminary analysis, the root cause of the issue is a failure in the pipeline responsible for data transmission from our memory component to storage component, hence creating a backlog. This issue was mitigated by resolving the failure and clearing the backlog.\nGoogle will complete a full IR in the following days that will provide a full root cause.\n**Customer Impact:**\n**Impact of Metric data unavailability:** * Customers impacted by this issue may have experienced delays and in some cases may not have seen the metric data in their monitoring dashboards. Some dashboards may have failed to load entirely. * GCP and third party products and services that consume these metrics via Cloud monitoring API would have experienced the stated issues. This includes products that rely on metrics for autoscaling. 
* The issue did not affect any custom metrics.\n**Impact on Metric data queries:** * Customers querying metric data that is older than 24 hours in us-central1 experienced significant delays leading to query failures.\n* This also led to issues loading some Cloud Console dashboards.\n--------------------------------------------------","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-01-12T15:19:14+00:00","modified":"2024-01-12T15:19:23+00:00","when":"2024-01-12T15:19:14+00:00","text":"The issue with Cloud Monitoring, Google Cloud Console has been resolved for all affected users as of Friday, 2024-01-12 06:29 US/Pacific.\nWe will publish an analysis of this incident once we have completed our internal investigation.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-01-12T03:26:22+00:00","modified":"2024-01-12T03:44:14+00:00","when":"2024-01-12T03:26:22+00:00","text":"Summary: Multiple Google Cloud products are impacted by a Cloud Monitoring issue\nDescription: We are experiencing an issue with Cloud monitoring which is impacting metrics and dashboards related to multiple products.\nOur Engineering team identified two distinct issues causing the impact and are continuing to work on applying mitigations.\nThe first problem causing gaps in monitoring data has been completely mitigated. There should be no further impact on alerting, or gaps in monitoring data.\nThe mitigation for the second problem causing latency in querying metric data in us-central1 is currently running; we expect the latency to return to normal levels by 2024-01-12 18:00 US/Pacific.\nWe will provide a progress update by Friday, 2024-01-12 08:00 US/Pacific with current details.\nDiagnosis:\n* Customers impacted by this issue may experience delays and in some cases may not see the metric data in their monitoring dashboard. Some dashboards may fail to load entirely.\n* GCP and third party products and services that consume these metrics via Cloud monitoring API may experience these above stated issues. This includes products that rely on metrics for autoscaling.\nWorkaround: Customers can load the metric data in their monitoring dashboards by excluding us-central1 using a location filter.\nQueries against data from after 2024-01-10 16:00 should be faster.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2024-01-11T23:14:26+00:00","modified":"2024-01-11T23:14:30+00:00","when":"2024-01-11T23:14:26+00:00","text":"Summary: Multiple Google Cloud products are impacted by a Cloud Monitoring issue\nDescription: We are experiencing an issue with Cloud monitoring which is impacting metrics and dashboards related to multiple products.\nOur Engineering team identified two distinct issues causing the impact and are continuing to work on applying mitigations.\nWe are rolling out the mitigation for the gaps in monitoring data and estimate we are 90% mitigated. 
Engineers are evaluating the timeline to complete mitigation.\nWe have tested a mitigation for the delays in querying metric data in us-central1; the mitigation appears to reduce the backlog, and we anticipate that the delays will gradually return to base levels over the next 24 hours.\nWe will provide a progress update by Friday, 2024-01-12 08:00 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may experience delays and in some cases may not see the metric data in their monitoring dashboard. Some dashboards may fail to load entirely.\nGCP and third party products and services that consume these metrics via Cloud monitoring API may experience these above stated issues. This includes products that rely on metrics for autoscaling.\nAlerting mechanisms reliant on this metric data may experience the issue too.\nThe issue affects alerts for both Google Cloud service defined and custom metrics. The metric data for custom metrics is unaffected by the issue.\nWorkaround: Customers that are querying metric data older than 24 hours can work around timeout failures in loading the data by querying for data that is not older than 24 hours.\nAlternatively, customers can load the metric data in their monitoring dashboards by excluding us-central1 using a location filter.\nCustomers can continue to write and query custom metric data newer than 24 hours without any issues.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon 
(us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-01-11T20:39:17+00:00","modified":"2024-01-11T20:39:19+00:00","when":"2024-01-11T20:39:17+00:00","text":"Summary: Multiple Google Cloud products are impacted by a Cloud Monitoring issue\nDescription: We are experiencing an issue with Cloud monitoring which is impacting metrics and dashboards related to multiple products.\nOur Engineering team identified two distinct issues causing the impact and are continuing to work on applying mitigations.\nWe identified a potential fix for the issue where metrics are not available and are testing it in some affected regions. The testing is taking longer than anticipated and it is expected to take another 90 minutes.\nThe issue with delays in querying metric data in us-central1 is being actively investigated for a mitigation. Our Engineering team currently understands the cause of this issue and has taken measures to limit the volume of backlog in the region.\nThe delays are more significant for metric data between 2024-01-09 16:00 US/Pacific and 2024-01-10 16:00 US/Pacific, leading to query failures. Some customers may encounter query delays and failures with data outside this window.\nWe will provide an update by Thursday, 2024-01-11 15:00 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may experience delays and in some cases may not see the metric data in their monitoring dashboard. Some dashboards may fail to load entirely.\nGCP and third party products and services that consume these metrics via Cloud monitoring API may experience these above stated issues. This includes products that rely on metrics for autoscaling.\nAlerting mechanisms reliant on this metric data may experience the issue too.\nThe issue affects alerts for both Google Cloud service defined and custom metrics. 
The metric data for custom metrics is unaffected by the issue.\nWorkaround: Customers that are querying metric data older than 24 hours can work around timeout failures in loading the data by querying for data that is not older than 24 hours.\nAlternatively, customers can load the metric data in their monitoring dashboards by excluding us-central1 using a location filter.\nCustomers can continue to write and query custom metric data newer than 24 hours without any issues.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-01-11T19:29:06+00:00","modified":"2024-01-11T19:29:08+00:00","when":"2024-01-11T19:29:06+00:00","text":"Summary: Multiple Google Cloud products are impacted by a Cloud Monitoring issue\nDescription: We are experiencing an issue with Cloud monitoring which is impacting metrics and dashboards related to multiple products.\nOur Engineering team identified two distinct issues causing the impact and are continuing to work on applying mitigations.\nWe identified a potential fix for the issue where metrics are not available and are testing it in some affected regions. 
We currently do not have an ETA for the fix rollout.\nThe issue with delays in querying metric data in us-central1 that is older than 24 hours is being actively investigated for a mitigation. Our Engineering team currently understands the cause of this issue and has taken measures to limit the volume of backlog in the region.\nWe will provide an update by Thursday, 2024-01-11 12:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may experience delays and in some cases may not see the metric data in their monitoring dashboard. Some dashboards may fail to load entirely.\nGCP and third party products and services that consume these metrics via Cloud monitoring API may experience these above stated issues. This includes products that rely on metrics for autoscaling.\nAlerting mechanisms reliant on this metric data may experience the issue too.\nThe issue affects alerts for both Google Cloud service defined and custom metrics. The metric data for custom metrics is unaffected by the issue.\nWorkaround: Customers that are querying metric data older than 24 hours can work around timeout failures in loading the data by querying for data that is not older than 24 hours.\nAlternatively, customers can load the metric data in their monitoring dashboards by excluding us-central1 using a location filter.\nCustomers can continue to write and query custom metric data newer than 24 hours without any issues.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas 
(us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-01-11T18:23:55+00:00","modified":"2024-01-11T18:23:57+00:00","when":"2024-01-11T18:23:55+00:00","text":"Summary: Multiple Google Cloud products are impacted by a Cloud Monitoring issue\nDescription: We are experiencing an issue with Cloud monitoring which is impacting metrics and dashboards related to multiple products.\nWe are currently working on applying mitigations. We do not have an ETA.\nWe will provide an update by Thursday, 2024-01-11 11:30 US/Pacific with current details.\nDiagnosis: - Customers impacted by this issue may experience delays and in some cases may not see the metric data in their monitoring dashboard. Some dashboards may fail to load entirely.\n- GCP and third party products and services that consume these metrics via Cloud monitoring API may experience these above stated issues. This includes products that rely on metrics for autoscaling.\n- Alerting mechanisms reliant on this metric data may experience the issue too.\nThe issue affects alerts for both Google Cloud service defined and custom metrics. The metric data for custom metrics is unaffected by the issue.\nWorkaround: - Customers that are querying metric data older than 24 hours can work around timeout failures in loading the data by querying for data that is not older than 24 hours.\n- Alternatively, customers can load the metric data in their monitoring dashboards by excluding us-central1 using a location filter.\n- Customers can continue to write and query custom metric data newer than 24 hours without any issues.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo 
(southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-01-11T17:40:01+00:00","modified":"2024-01-11T17:40:03+00:00","when":"2024-01-11T17:40:01+00:00","text":"Summary: Multiple Google Cloud products are impacted by a Cloud Monitoring issue\nDescription: We are experiencing an issue with Cloud monitoring which is impacting metrics and dashboards related to multiple products.\nWe are currently working on applying mitigations. We do not have an ETA.\nWe will provide an update by Thursday, 2024-01-11 10:30 US/Pacific with current details.\nDiagnosis: - Customers impacted by this issue may experience delays and in some cases may not see the metric data in their monitoring dashboard. Some dashboards may fail to load entirely.\n- GCP and third party products and services that consume these metrics via Cloud monitoring API may experience these above stated issues. This includes products that rely on metrics for autoscaling.\n- Alerting mechanisms reliant on this metric data may experience the issue too.\nThe issue only affects Google Cloud service defined metrics and does not impact any custom metrics.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa 
(us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2024-01-11T17:26:40+00:00","modified":"2024-01-11T17:26:53+00:00","when":"2024-01-11T17:26:40+00:00","text":"Summary: Multiple Google Cloud products are impacted by a Cloud Monitoring issue\nDescription: We are experiencing an issue with Cloud monitoring which is impacting metrics and dashboards related to multiple products.\nWe are currently working on applying mitigations. We do not have an ETA.\nWe will provide an update by Thursday, 2024-01-11 10:00 US/Pacific with current details.\nDiagnosis: Customers may experience intermittent issues when querying metrics. Impact is more visible when querying metrics older than 24 hours.\nWorkaround: None at this time.","status":"SERVICE_OUTAGE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas 
(us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2024-01-22T18:33:53+00:00","modified":"2024-01-22T18:33:53+00:00","when":"2024-01-22T18:33:53+00:00","text":"# Incident Report\n## Summary\nOn Wednesday, 10 January 2024, Google Cloud Monitoring and all Google Cloud Products that expose Google Cloud Monitoring experienced dashboard delays and metric query failures (Initial degradation started on 09 January 2024 8:30 am PST, due to data staleness) for a duration of 1 day, 19 hours, 45 minutes and service metric data unavailability (which started on 3 January 2024 11:23 PST with a low impact window until 10 January 2024 09:30 PST) with significant impact window starting 10 January 2024 9:30 PST for a duration of 7 hours, 15 minutes. To our Google Cloud Monitoring and Google Cloud Products customers who were affected, we sincerely apologize. This is not the level of quality and reliability we strive to offer you and we are taking immediate steps to improve the platform’s performance and availability.\n## Root Cause\nGoogle Cloud Monitoring experienced two distinct issues that impacted system metric data for most Google Cloud Products.\nMetric Data Queries:\nMetric data is stored in-memory prior to being stored on-disk. Initial degradation started on 09 January 2024 8:30 am PST, due to data staleness. A configuration change in data replication for us-central1 triggered a bottleneck in the pipeline responsible for moving data to disk for querying. Initially, this bottleneck induced backlog did not cause user-visible impact, given data continued to be served from the in-memory tier for the most recent 24h.\nWhen the pipeline blockage was mitigated 10 January 2024 8:00 am PST, the entire 20 hours backlog of files was rapidly ingested into the system that serves queries from disk. But, the resulting huge number of files triggered yet-another bottleneck in the on-disk system, causing high latency or failure for most queries.\nMetric Data Unavailability:\nA combination of two changes - one permissions-related and another scheduling-related - caused certain Cloud Metrics to be unavailable.\nThese changes were rolled out on 03 January 2024 11:23 PST. The impact from these changes was limited initially, but when mitigation was attempted on 09 January 2024 2:07 PST, it induced a bigger issue. The new problem surfaced due to a higher rate of server restarts.\n## Remediation and Prevention\nMetric Data Queries:\nGoogle engineers were alerted to the (not yet user-visible) data staleness issue by internal SLIs (Service Level Indicators) on 09 January 2024 16:58 PST and immediately started an investigation. Staleness began 09 January 2024 08:30:00 PST. When the high latency replica was removed at 10 January 2024 07:23 PST, the processing pipeline returned to normalcy.\nWhile response to this first issue was still ongoing, engineers were alerted by user-facing SLIs 10 January 2024 07:29 PST of user-visible query unavailability that had begun at 07:23 PST. They reconfigured the system to remove bottlenecks and increased the overall amount of compute resources available. This eventually reduced the backlog and returned to normalcy. All query availability/latency SLI also recovered fully at 12 January 2024 03:15 PST.\nMetric Data Unavailability:\nGoogle engineers were alerted by SLIs (Service Level Indicators) 09 January 2024 23:51 PST and immediately started an investigation.\nThe initial attempt at mitigating the problem caused wider issues. 
We stopped this mitigation and re-applied a newer patch that fixed the root-cause correctly. All remaining errors were resolved on 10 January 2024 16:40 PST.\nRemediation:\nGoogle is committed to preventing a repeat of this issue in the future and is completing the following actions:\n* Right-size the ingestion backlog processing that keeps up with load demand without affecting query availability.\n* Reassess and address the ingestion pipeline for bottlenecks that might induce this or related backlog issues.\n* Revisit the process for applying mitigations with better health indicators for side-effects.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},"status_impact":"SERVICE_OUTAGE","severity":"high","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Monitoring","id":"3zaaDb7antc73BM1UAVT"},{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"},{"title":"Google Cloud Console","id":"Wdsr1n5vyDvCt78qEifm"}],"uri":"incidents/ZvBMWa5Z8yhfCwbp5xTp","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas 
(us-west4)","id":"us-west4"}]},{"id":"Xe5q2HhtpUjk3U3xHoRo","number":"13360981359277222505","begin":"2024-01-09T19:00:00+00:00","created":"2024-01-10T09:51:40+00:00","end":"2024-01-10T09:30:00+00:00","modified":"2024-01-10T09:53:13+00:00","external_desc":"We experienced an issue with AppSheet","updates":[{"created":"2024-01-10T09:52:13+00:00","modified":"2024-01-10T09:52:13+00:00","when":"2024-01-10T09:52:13+00:00","text":"We experienced an issue with AppSheet beginning at Wednesday, 2024-01-09 11:00 US/Pacific. Self-diagnosis: Clicking on PDF links or external links in apps would take the users to a blocking \"App Upgrade Required\" screen. The issue has been resolved for all affected users as of Wednesday, 2024-01-10 01:30 US/Pacific. We thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2024-01-10T09:51:41+00:00","modified":"2024-01-10T09:53:13+00:00","when":"2024-01-10T09:51:41+00:00","text":"We've received a report of an issue with AppSheet as of Wednesday, 2024-01-10 01:38 US/Pacific. We will provide more information by Wednesday, 2024-01-10 02:10 US/Pacific.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2024-01-10T09:52:13+00:00","modified":"2024-01-10T09:52:13+00:00","when":"2024-01-10T09:52:13+00:00","text":"We experienced an issue with AppSheet beginning at Wednesday, 2024-01-09 11:00 US/Pacific. Self-diagnosis: Clicking on PDF links or external links in apps would take the users to a blocking \"App Upgrade Required\" screen. The issue has been resolved for all affected users as of Wednesday, 2024-01-10 01:30 US/Pacific. We thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FWjKi5U7KX4FUUPThHAJ","service_name":"AppSheet","affected_products":[{"title":"AppSheet","id":"FWjKi5U7KX4FUUPThHAJ"}],"uri":"incidents/Xe5q2HhtpUjk3U3xHoRo","currently_affected_locations":[],"previously_affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Global","id":"global"},{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"FEqaKJVHxpsX1r3JewdC","number":"7204858372249872871","begin":"2023-12-19T06:30:00+00:00","created":"2023-12-19T07:44:35+00:00","end":"2023-12-19T07:40:00+00:00","modified":"2023-12-19T18:49:18+00:00","external_desc":"Users are experiencing elevated latency while using AppSheet","updates":[{"created":"2023-12-19T18:49:14+00:00","modified":"2023-12-19T18:49:14+00:00","when":"2023-12-19T18:49:14+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 18 December 2023, 22:30\n**Incident End:** 18 December 2023, 23:40\n**Duration:** 1 hour, 10 minutes\n**Affected Services and Features:**\nGoogle AppSheet\n**Regions/Zones:** Global\n**Description:**\nGoogle AppSheet experienced elevated latency for a duration of 1 hour, 10 minutes. From preliminary analysis, the root cause of the issue was increased load on AppSheet's primary backend database, resulting in resource contention, which was mitigated by freeing up resources on the primary database.\n**Customer Impact:**\nCustomers may have seen an increase in error rate (spiked to around 15 percent of call volume) and an increase in latency. Both symptoms would have been present across most use cases, but especially during any interaction that required authentication or loading/updating customer applications.","status":"AVAILABLE","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2023-12-19T07:59:16+00:00","modified":"2023-12-19T07:59:19+00:00","when":"2023-12-19T07:59:16+00:00","text":"The issue with AppSheet has been resolved for all affected users as of Monday, 2023-12-18 23:40 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2023-12-19T07:44:30+00:00","modified":"2023-12-19T07:44:37+00:00","when":"2023-12-19T07:44:30+00:00","text":"Summary: Users are experiencing elevated latency while using AppSheet\nDescription: We are experiencing an issue with AppSheet beginning at Monday, 2023-12-18 22:30 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-19 00:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Users may experience elevated latency or error messages while using AppSheet.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2023-12-19T18:49:14+00:00","modified":"2023-12-19T18:49:14+00:00","when":"2023-12-19T18:49:14+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 18 December 2023, 22:30\n**Incident End:** 18 December 2023, 23:40\n**Duration:** 1 hour, 10 minutes\n**Affected Services and Features:**\nGoogle AppSheet\n**Regions/Zones:** Global\n**Description:**\nGoogle AppSheet experienced elevated latency for a duration of 1 hour, 10 minutes. From preliminary analysis, the root cause of the issue was increased load on AppSheet's primary backend database, resulting in resource contention, which was mitigated by freeing up resources on the primary database.\n**Customer Impact:**\nCustomers may have seen an increase in error rate (spiked to around 15 percent of call volume) and an increase in latency. Both symptoms would have been present across most use cases, but especially during any interaction that required authentication or loading/updating customer applications.","status":"AVAILABLE","affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"FWjKi5U7KX4FUUPThHAJ","service_name":"AppSheet","affected_products":[{"title":"AppSheet","id":"FWjKi5U7KX4FUUPThHAJ"}],"uri":"incidents/FEqaKJVHxpsX1r3JewdC","currently_affected_locations":[],"previously_affected_locations":[{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"VQZHbjM2iZW9RuSsbcLS","number":"8126021022765349008","begin":"2023-12-12T16:28:29+00:00","created":"2023-12-12T16:35:14+00:00","end":"2023-12-12T22:12:07+00:00","modified":"2023-12-12T22:12:07+00:00","external_desc":"Customers might be unable to activate automatic offer approval","updates":[{"created":"2023-12-12T22:12:06+00:00","modified":"2023-12-12T22:12:09+00:00","when":"2023-12-12T22:12:06+00:00","text":"The issue with Google Cloud Marketplace is believed to be affecting a very small number of customers and our Engineering Team is working on it.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nNo further updates will be provided here.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-12T16:44:06+00:00","modified":"2023-12-12T16:44:18+00:00","when":"2023-12-12T16:44:06+00:00","text":"Summary: Customers might be unable to activate automatic offer approval\nDescription: Mitigation work is currently underway by our engineering team.\nThe mitigation is expected to complete by Tuesday, 2023-12-12 17:00 US/Pacific.\nWe will provide more information by Tuesday, 2023-12-12 17:00 US/Pacific.\nDiagnosis: Customers are experiencing an error message \"There was an issue turning on automatic approval, try again\" when trying to set automatic approval on for private offers.\nWorkaround: None at the moment. 
If the issue is urgent, the customers may raise a case for the issue, asking us to manually set up the auto approval.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-12T16:37:31+00:00","modified":"2023-12-12T16:37:36+00:00","when":"2023-12-12T16:37:31+00:00","text":"Summary: Customers might be unable to activate automatic offer approval\nDescription: We are experiencing an issue with Google Cloud Marketplace.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-12 17:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers are experiencing an error message \"There was an issue turning on automatic approval, try again\" when trying to set automatic approval on for private offers.\nWorkaround: None at the moment. If the issue is urgent, the customers may raise a case for the issue, asking us to manually set up the auto approval.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-12T16:35:11+00:00","modified":"2023-12-12T16:35:18+00:00","when":"2023-12-12T16:35:11+00:00","text":"Summary: Customers might be unable to activate automatic offer approval\nDescription: We are experiencing an issue with Google Cloud Marketplace.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-12 17:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers are experiencing an error message \"There was an issue turning on automatic approval, try again\" when trying to set automatic approval on for private offers.\nWorkaround: None at the moment.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2023-12-12T22:12:06+00:00","modified":"2023-12-12T22:12:09+00:00","when":"2023-12-12T22:12:06+00:00","text":"The issue with Google Cloud Marketplace is believed to be affecting a very small number of customers and our Engineering Team is working on it.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nNo further updates will be provided here.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Cloud Marketplace","id":"M34rUHuRgyHXMfbUCSq9"},{"title":"Google Cloud Console","id":"Wdsr1n5vyDvCt78qEifm"}],"uri":"incidents/VQZHbjM2iZW9RuSsbcLS","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"YAYwqvoSrqRZZjznUJmC","number":"2581158717408485527","begin":"2023-12-12T10:28:00+00:00","created":"2023-12-13T16:36:27+00:00","end":"2023-12-13T17:22:00+00:00","modified":"2023-12-13T17:22:46+00:00","external_desc":"Workspace creation and release configs failing in certain regions","updates":[{"created":"2023-12-13T17:22:41+00:00","modified":"2023-12-13T17:22:41+00:00","when":"2023-12-13T17:22:41+00:00","text":"The issue is still ongoing and mitigation is in progress. 
Further updates on this mitigation efforts will be posted to the following dashboard link :\nhttps://status.cloud.google.com/incidents/KbrmWX6NnL88h4LcMxqr\nWe apologize for any confusions caused by this.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2023-12-13T16:49:02+00:00","modified":"2023-12-13T16:49:02+00:00","when":"2023-12-13T16:49:02+00:00","text":"Description:\nMitigation work is currently underway by our engineering team.\n**The fix is rolled out to asia-east1, and us-east1 regions and users in these regions should no longer experience the issue.**\n**Full mitigation is expected to complete by Wednesday, 2023-12-13 11:15 US/Pacific.** We will provide more information by Wednesday, 2023-12-13 11:20 US/Pacific.\nDiagnosis: Some customers can't create workspaces and have release configs which fail to compile in the affected regions.\nWorkflow config schedules continue to run against the latest compiled result from the underlying release config.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2023-12-13T16:36:27+00:00","modified":"2023-12-13T16:36:27+00:00","when":"2023-12-13T16:36:27+00:00","text":"Description: We are experiencing an issue with Dataform beginning on Tuesday, 2023-12-12 02:28 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2023-12-13 10:00 US/Pacific with current details.\nDiagnosis: Some customers can't create workspaces and have release configs which fail to compile in the affected regions.\nWorkflow config schedules continue to run against the latest compiled result from the underlying release config.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2023-12-13T17:22:41+00:00","modified":"2023-12-13T17:22:41+00:00","when":"2023-12-13T17:22:41+00:00","text":"The issue is still ongoing and mitigation is in progress. 
Further updates on these mitigation efforts will be posted to the following dashboard link:\nhttps://status.cloud.google.com/incidents/KbrmWX6NnL88h4LcMxqr\nWe apologize for any confusion caused by this.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"JSShQKADMU3uXYNbCRCh","service_name":"Dataform","affected_products":[{"title":"Dataform","id":"JSShQKADMU3uXYNbCRCh"}],"uri":"incidents/YAYwqvoSrqRZZjznUJmC","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"KbrmWX6NnL88h4LcMxqr","number":"2265934424465553145","begin":"2023-12-11T09:00:00+00:00","created":"2023-12-13T17:20:02+00:00","end":"2023-12-13T17:56:00+00:00","modified":"2023-12-13T18:07:59+00:00","external_desc":"Workspace creation and release configs failing in certain regions","updates":[{"created":"2023-12-13T17:56:53+00:00","modified":"2023-12-13T17:56:55+00:00","when":"2023-12-13T17:56:53+00:00","text":"The fix rollout completed to asia-east1 and us-east1 regions as of 2023-12-13 08:45 US/Pacific.\nThe fix rollout completed to us-west1 and europe-west1 regions as of 2023-12-13 09:04 US/Pacific.\nThe fix rollout completed to asia-southeast1 and europe-west3 regions as of 2023-12-13 09:23 US/Pacific.\nThe fix rollout to the rest of the regions completed as of Wednesday, 2023-12-13 09:50 US/Pacific, resolving the issue with Dataform for all affected users.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2023-12-13T17:20:01+00:00","modified":"2023-12-13T18:03:47+00:00","when":"2023-12-13T17:20:01+00:00","text":"Summary: Workspace creation and release configs failing in certain regions\nDescription: Mitigation work is currently underway by our engineering team.\n**The fix rollout completed to asia-east1 and us-east1 regions as of 2023-12-13 08:45 US/Pacific and to us-west1 and europe-west1 regions as of 2023-12-13 09:04 US/Pacific. 
Users in these four regions should no longer encounter the issue.**\nFull mitigation is expected to complete by Wednesday, 2023-12-13 11:15 US/Pacific.\nWe will provide more information by Wednesday, 2023-12-13 11:20 US/Pacific.\nDiagnosis: Some customers can't create workspaces and have release configs which fail to compile in the affected regions.\nWorkflow config schedules continue to run against the latest compiled result from the underlying release config.\nWorkaround: None at this time.\n**For the updates on this issue prior to this communication, please refer to our prior post at https://status.cloud.google.com/incidents/YAYwqvoSrqRZZjznUJmC. We apologize for any confusion caused by the two separate links.**","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2023-12-13T17:56:53+00:00","modified":"2023-12-13T17:56:55+00:00","when":"2023-12-13T17:56:53+00:00","text":"The fix rollout completed to asia-east1 and us-east1 regions as of 2023-12-13 08:45 US/Pacific.\nThe fix rollout completed to us-west1 and europe-west1 regions as of 2023-12-13 09:04 US/Pacific.\nThe fix rollout completed to asia-southeast1 and europe-west3 regions as of 2023-12-13 09:23 US/Pacific.\nThe fix rollout to the rest of the regions completed as of Wednesday, 2023-12-13 09:50 US/Pacific, resolving the issue with Dataform for all affected users.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"JSShQKADMU3uXYNbCRCh","service_name":"Dataform","affected_products":[{"title":"Dataform","id":"JSShQKADMU3uXYNbCRCh"}],"uri":"incidents/KbrmWX6NnL88h4LcMxqr","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"2VWzzrZoqnm7BuLiTGxf","number":"12029529627950782046","begin":"2023-12-09T13:26:33+00:00","created":"2023-12-09T13:55:30+00:00","end":"2023-12-09T14:33:46+00:00","modified":"2023-12-09T14:33:47+00:00","external_desc":"Some Chronicle Security users in Europe multi-region are experiencing elevated latencies of Normalized events for Ingested logs","updates":[{"created":"2023-12-09T14:33:39+00:00","modified":"2023-12-09T14:33:52+00:00","when":"2023-12-09T14:33:39+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Saturday, 2023-12-09 06:30 US/Pacific.\nWe thank you 
for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: europe","id":"europe"}]},{"created":"2023-12-09T13:55:27+00:00","modified":"2023-12-09T13:55:34+00:00","when":"2023-12-09T13:55:27+00:00","text":"Summary: Some Chronicle Security users in Europe multi-region are experiencing elevated latencies of Normalized events for Ingested logs\nDescription: We are experiencing an issue with Chronicle Security.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Saturday, 2023-12-09 07:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Affected customers may notice delays in getting Normalized events for Ingested logs through Forwarder and Direct ingestion.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: europe","id":"europe"}]}],"most_recent_update":{"created":"2023-12-09T14:33:39+00:00","modified":"2023-12-09T14:33:52+00:00","when":"2023-12-09T14:33:39+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Saturday, 2023-12-09 06:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: europe","id":"europe"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/2VWzzrZoqnm7BuLiTGxf","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: europe","id":"europe"}]},{"id":"eReZJt87sr6aF4WS6k1S","number":"7848051590514422663","begin":"2023-12-07T19:00:00+00:00","created":"2023-12-07T20:50:17+00:00","end":"2023-12-07T20:32:00+00:00","modified":"2023-12-14T00:45:53+00:00","external_desc":"Elevated errors in Google Cloud Console","updates":[{"created":"2023-12-14T00:45:53+00:00","modified":"2023-12-14T00:45:53+00:00","when":"2023-12-14T00:45:53+00:00","text":"A full incident report has been posted on the Google Workspace Status Dashboard [1].\n[1]. https://www.google.com/appsstatus/dashboard/incidents/jPWZ4p7bTSq65P1b9Gfn","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-08T15:37:30+00:00","modified":"2023-12-08T15:37:30+00:00","when":"2023-12-08T15:37:30+00:00","text":"A mini incident report has been posted on the Google Workspace Status Dashboard [1].\n[1]. 
https://www.google.com/appsstatus/dashboard/incidents/jPWZ4p7bTSq65P1b9Gfn","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-07T21:03:07+00:00","modified":"2023-12-07T21:03:10+00:00","when":"2023-12-07T21:03:07+00:00","text":"The issue with Google Cloud Console has been resolved for all affected users as of Thursday, 2023-12-07 13:00 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-07T20:50:16+00:00","modified":"2023-12-07T20:50:19+00:00","when":"2023-12-07T20:50:16+00:00","text":"Summary: Elevated errors in Google Cloud Console\nDescription: We are experiencing an issue with Google Cloud Console.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2023-12-07 13:47 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Affected customers may experience 502 errors when trying to access the Cloud Console.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2023-12-14T00:45:53+00:00","modified":"2023-12-14T00:45:53+00:00","when":"2023-12-14T00:45:53+00:00","text":"A full incident report has been posted on the Google Workspace Status Dashboard [1].\n[1]. https://www.google.com/appsstatus/dashboard/incidents/jPWZ4p7bTSq65P1b9Gfn","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"Wdsr1n5vyDvCt78qEifm","service_name":"Google Cloud Console","affected_products":[{"title":"Google Cloud Console","id":"Wdsr1n5vyDvCt78qEifm"}],"uri":"incidents/eReZJt87sr6aF4WS6k1S","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"evX7KURnrjFU2BcHWxM7","number":"6895437348921103761","begin":"2023-12-07T12:37:56+00:00","created":"2023-12-07T12:49:32+00:00","end":"2023-12-07T14:49:14+00:00","modified":"2023-12-07T14:49:15+00:00","external_desc":"Cloud Interconnect users may be impacted in us-east4","updates":[{"created":"2023-12-07T14:49:12+00:00","modified":"2023-12-07T14:49:20+00:00","when":"2023-12-07T14:49:12+00:00","text":"The issue with Hybrid Connectivity has been resolved for all affected users as of Thursday, 2023-12-07 06:42 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-12-07T14:11:36+00:00","modified":"2023-12-07T14:11:45+00:00","when":"2023-12-07T14:11:36+00:00","text":"Summary: Cloud Interconnect users may be impacted in us-east4\nDescription: We are experiencing an issue with Hybrid Connectivity beginning on Thursday, 2023-12-07 03:10 US/Pacific.\nOur engineering team continues to investigate the issue. We will provide an update by Thursday, 2023-12-07 07:00 US/Pacific with current details.\nDiagnosis: Customers may experience problems creating, updating or deleting Cloud Interconnect attachments in us-east4 HA0 (zone1 interconnects).\nWorkaround: There is no workaround for the management plane issue at this time. In regard to existing attachments, customers not experiencing connectivity problems are advised to take no action. 
Customers experiencing problems reaching GCP destinations outside us-east4 via HA0 (zone1 interconnects) attachments in us-east4 should consider manually failing over to HA1 (zone2 interconnects) attachments.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-12-07T13:51:02+00:00","modified":"2023-12-07T13:51:10+00:00","when":"2023-12-07T13:51:02+00:00","text":"Summary: Cloud Interconnect users may be impacted in us-east4\nDescription: We are experiencing an issue with Hybrid Connectivity beginning on Thursday, 2023-12-07 03:10 US/Pacific.\nOur engineering team continues to investigate the issue. We will provide an update by Thursday, 2023-12-07 06:30 US/Pacific with current details.\nDiagnosis: Customers may experience problems creating, updating or deleting Cloud Interconnect attachments in us-east4 HA0 (zone1 interconnects).\nWorkaround: There is no workaround for the management plane issue at this time. In regard to existing attachments, customers not experiencing connectivity problems are advised to take no action. Customers experiencing problems reaching GCP destinations outside us-east4 via HA0 (zone1 interconnects) attachments in us-east4 should consider manually failing over to HA1 (zone2 interconnects) attachments.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-12-07T13:08:34+00:00","modified":"2023-12-07T13:08:43+00:00","when":"2023-12-07T13:08:34+00:00","text":"Summary: Cloud Interconnect users may be impacted in us-east4\nDescription: We are experiencing an issue with Hybrid Connectivity beginning on Thursday, 2023-12-07 03:10 US/Pacific.\nOur engineering team continues to investigate the issue. We will provide an update by Thursday, 2023-12-07 06:00 US/Pacific with current details.\nDiagnosis: Customers may experience problems creating, updating or deleting Cloud Interconnect attachments in us-east4 HA0.\nWorkaround: There is no workaround for the management plane issue at this time. In regard to existing attachments, customers not experiencing connectivity problems are advised to take no action. Customers experiencing problems reaching GCP destinations outside us-east4 via HA0 attachments in us-east4 should consider manually failing over to HA1 attachments.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-12-07T12:49:19+00:00","modified":"2023-12-07T12:49:37+00:00","when":"2023-12-07T12:49:19+00:00","text":"Summary: Cloud Interconnect users may be impacted in us-east4\nDescription: We are experiencing an issue with Hybrid Connectivity beginning on Thursday, 2023-12-07 03:10 US/Pacific.\nOur engineering team continues to investigate the issue. We will provide an update by Thursday, 2023-12-07 05:30 US/Pacific with current details.\nDiagnosis: Customers may experience problems creating, updating or deleting Cloud Interconnect attachments in us-east4 HA0.\nWorkaround: There is no workaround for the management plane issue at this time. In regard to existing attachments, customers not experiencing connectivity problems are advised to take no action. 
Customers experiencing problems reaching GCP destinations outside us-east4 via HA0 attachments in us-east4 should consider manually failing over to HA1 attachments.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]}],"most_recent_update":{"created":"2023-12-07T14:49:12+00:00","modified":"2023-12-07T14:49:20+00:00","when":"2023-12-07T14:49:12+00:00","text":"The issue with Hybrid Connectivity has been resolved for all affected users as of Thursday, 2023-12-07 06:42 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Hybrid Connectivity","id":"5x6CGnZvSHQZ26KtxpK1"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"}],"uri":"incidents/evX7KURnrjFU2BcHWxM7","currently_affected_locations":[],"previously_affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"id":"w1AAhtuDdCRPMwwes77R","number":"13021206007992625426","begin":"2023-12-07T01:23:43+00:00","created":"2023-12-07T01:42:33+00:00","end":"2023-12-07T02:08:17+00:00","modified":"2023-12-07T02:08:17+00:00","external_desc":"Dataproc Metastore - Service Issues","updates":[{"created":"2023-12-07T02:08:16+00:00","modified":"2023-12-07T02:08:18+00:00","when":"2023-12-07T02:08:16+00:00","text":"**The issue with Dataproc Metastore has been resolved as of Wednesday, 2023-12-06 18:02 US/Pacific.**\nWe understand that this issue had impacted your ability to access and use our services; we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\n**Please rest assured of the fact that we are committed to doing everything we can to prevent issues like this from happening in the future.**\nWe take pride in providing our customers with a reliable and highly available service.\n**Thank you for choosing us.**","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"created":"2023-12-07T01:42:32+00:00","modified":"2023-12-07T01:42:34+00:00","when":"2023-12-07T01:42:32+00:00","text":"Summary: We've received a report of an issue with Dataproc Metastore.\nDescription: We are experiencing an issue with Dataproc Metastore beginning at Wednesday, 2023-12-06 14:47 US/Pacific.\nOur engineering team continues to investigate the issue and identify a mitigation.\nWe will provide an update by Wednesday, 2023-12-06 19:30 US/Pacific with current details.\nWe sincerely appreciate your patience and understanding as we work to resolve it as quickly as 
possible.\nDiagnosis: Customers would be unable to:\n1. Create new DPMS instances.\n2. Update existing DPMS instances involving scaling config or tier updates.\n3. Perform a full restore of existing backups.\n4. Import metadata into DPMS.\n5. Create Dataproc Metastore Federation APIs.\nWorkaround: There is no known workaround at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]}],"most_recent_update":{"created":"2023-12-07T02:08:16+00:00","modified":"2023-12-07T02:08:18+00:00","when":"2023-12-07T02:08:16+00:00","text":"**The issue with Dataproc Metastore has been resolved as of Wednesday, 2023-12-06 18:02 US/Pacific.**\nWe understand that this issue had impacted your ability to access and use our services; we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\n**Please rest assured of the fact that we are committed to doing everything we can to prevent issues like this from happening in the future.**\nWe take pride in providing our customers with a reliable and highly available service.\n**Thank you for choosing us.**","status":"AVAILABLE","affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Dataproc Metastore","id":"PXZh68NPz9auRyo4tVfy"},{"title":"Google Cloud Dataproc","id":"yjXrEg3Yvy26BauMwr69"}],"uri":"incidents/w1AAhtuDdCRPMwwes77R","currently_affected_locations":[],"previously_affected_locations":[{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt 
(europe-west3)","id":"europe-west3"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"}]},{"id":"e7ezY5K66RuQWMfwZ5Xn","number":"13254784907174133796","begin":"2023-12-07T00:20:47+00:00","created":"2023-12-07T00:42:56+00:00","end":"2023-12-07T01:17:37+00:00","modified":"2023-12-07T01:17:37+00:00","external_desc":"There were reports of an issue with Artifact Registry.","updates":[{"created":"2023-12-07T01:17:36+00:00","modified":"2023-12-07T01:17:38+00:00","when":"2023-12-07T01:17:36+00:00","text":"**The issue with Artifact Registry has been resolved as of Wednesday, 2023-12-06 17:12 US/Pacific.**\nWe understand that this issue had impacted your ability to access and use our services, we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nWe take pride in providing our customers with a reliable and highly available service,.\n**Please rest assured of the fact that we are committed to doing everything we can to prevent issues like this from happening in the future.**\n**Thank you for choosing us.**","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: asia","id":"asia"},{"title":"Taiwan (asia-east1)","id":"asia-east1"}]},{"created":"2023-12-07T00:42:55+00:00","modified":"2023-12-07T00:42:57+00:00","when":"2023-12-07T00:42:55+00:00","text":"Summary: We've received a report of an issue with Artifact Registry.\nDescription: We are experiencing an issue with Artifact Registry beginning at Wednesday, 2023-12-06 15:47 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2023-12-06 18:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: asia","id":"asia"},{"title":"Taiwan (asia-east1)","id":"asia-east1"}]}],"most_recent_update":{"created":"2023-12-07T01:17:36+00:00","modified":"2023-12-07T01:17:38+00:00","when":"2023-12-07T01:17:36+00:00","text":"**The issue with Artifact Registry has been resolved as of Wednesday, 2023-12-06 17:12 US/Pacific.**\nWe understand that this issue had impacted your ability to access and use our services, we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nWe take pride in providing our customers with a reliable and highly available service,.\n**Please rest assured of the fact that we are committed to doing everything we can to prevent issues like this from happening in the future.**\n**Thank you for choosing us.**","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: asia","id":"asia"},{"title":"Taiwan (asia-east1)","id":"asia-east1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Cloud Developer Tools","id":"BGJQ6jbGK4kUuBTQFZ1G"},{"title":"Artifact Registry","id":"QbBuuiRdsLpMr9WmGwm5"}],"uri":"incidents/e7ezY5K66RuQWMfwZ5Xn","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: asia","id":"asia"},{"title":"Taiwan 
(asia-east1)","id":"asia-east1"}]},{"id":"vh5oHC1uUbCZAPgh2bUR","number":"9398447983011722719","begin":"2023-12-06T23:36:06+00:00","created":"2023-12-07T00:40:38+00:00","end":"2023-12-07T01:24:50+00:00","modified":"2023-12-07T01:24:51+00:00","external_desc":"We are experiencing a reduction in the notice window for the preemption VMs.","updates":[{"created":"2023-12-07T01:24:50+00:00","modified":"2023-12-07T01:24:52+00:00","when":"2023-12-07T01:24:50+00:00","text":"The issue with preemption VM is believed to be affecting a very small number of customers and our Engineering Team is working on it.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-07T00:40:36+00:00","modified":"2023-12-07T00:40:40+00:00","when":"2023-12-07T00:40:36+00:00","text":"Summary: We are experiencing a reduction in the notice window for the preemption VMs.\nDescription: We are experiencing an issue with Google Compute Engine.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2023-12-06 17:45 US/Pacific with current details.\nWe apologize to all who are affected by 
the disruption.\nDiagnosis: The impacted customer workloads may experience graceful shutdown periods shorter than expected.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2023-12-07T01:24:50+00:00","modified":"2023-12-07T01:24:52+00:00","when":"2023-12-07T01:24:50+00:00","text":"The issue with preemptible VMs is believed to be affecting a very small number of customers and our Engineering Team is working on it.\nIf you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved.\nWe thank you for your patience while we're working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney 
(australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"L3ggmi3Jy4xJmgodFA9K","service_name":"Google Compute Engine","affected_products":[{"title":"Google Compute Engine","id":"L3ggmi3Jy4xJmgodFA9K"}],"uri":"incidents/vh5oHC1uUbCZAPgh2bUR","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal 
(northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"nbpgpjMvyi3pe1RAi9Ca","number":"12739548872825557042","begin":"2023-12-06T15:15:11+00:00","created":"2023-12-06T16:05:49+00:00","end":"2023-12-06T18:08:14+00:00","modified":"2023-12-06T18:08:14+00:00","external_desc":"Cloud Workstations: Unable to start workstations in europe-west3 and us-east1","updates":[{"created":"2023-12-06T18:08:11+00:00","modified":"2023-12-06T18:08:16+00:00","when":"2023-12-06T18:08:11+00:00","text":"The issue with Cloud Workstations has been resolved for all affected users as of Wednesday, 2023-12-06 10:08 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"created":"2023-12-06T16:50:25+00:00","modified":"2023-12-06T16:50:27+00:00","when":"2023-12-06T16:50:25+00:00","text":"Summary: Cloud Workstations: Unable to start workstations in europe-west3 and us-east1\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2023-12-06 12:30 US/Pacific.\nDiagnosis: Unable to start workstations in europe-west3 and us-east1\nWorkaround: Try using workstations that are not created from configurations located in europe-west3 or us-east1","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"created":"2023-12-06T16:05:46+00:00","modified":"2023-12-06T16:05:52+00:00","when":"2023-12-06T16:05:46+00:00","text":"Summary: Cloud Workstations: Unable to start workstations in europe-west3 and us-east1\nDescription: We are experiencing an issue with Cloud Workstations.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2023-12-06 09:30 US/Pacific with current details.\nDiagnosis: Unable to start workstations in europe-west3 and us-east1\nWorkaround: Try using workstations that are not created from configurations located in europe-west3 or us-east1","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"South Carolina (us-east1)","id":"us-east1"}]}],"most_recent_update":{"created":"2023-12-06T18:08:11+00:00","modified":"2023-12-06T18:08:16+00:00","when":"2023-12-06T18:08:11+00:00","text":"The issue with Cloud Workstations has been resolved for all affected users as of Wednesday, 2023-12-06 10:08 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"South Carolina 
(us-east1)","id":"us-east1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"5UUXCiH1vfFHXmbDixrB","service_name":"Cloud Workstations","affected_products":[{"title":"Cloud Workstations","id":"5UUXCiH1vfFHXmbDixrB"}],"uri":"incidents/nbpgpjMvyi3pe1RAi9Ca","currently_affected_locations":[],"previously_affected_locations":[{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"South Carolina (us-east1)","id":"us-east1"}]},{"id":"fDouU9r72TkBZQ5b4mYN","number":"11549624859285201261","begin":"2023-12-06T13:00:22+00:00","created":"2023-12-06T13:02:45+00:00","end":"2023-12-06T13:26:19+00:00","modified":"2023-12-06T13:26:19+00:00","external_desc":"Dialogflow/Speech-to-text errors when using Audio in Europe","updates":[{"created":"2023-12-06T13:26:08+00:00","modified":"2023-12-06T13:26:24+00:00","when":"2023-12-06T13:26:08+00:00","text":"We experienced an issue with Dialogflow CX, Dialogflow ES, Speech-to-text beginning on Tuesday, 2023-12-05 23:37 US/Pacific.\nImpacted users experienced Dialogflow errors when using audio or when using Cloud Speech-to-text individually.\nThe issue has been resolved for all affected users as of Wednesday, 2023-12-06 02:24 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]},{"created":"2023-12-06T13:02:35+00:00","modified":"2023-12-06T13:02:50+00:00","when":"2023-12-06T13:02:35+00:00","text":"Summary: We've received a report of issue with Dialogflow\nDescription: We are investigating a potential issue with Dialogflow CX, Dialogflow ES.\nWe will provide more information by Wednesday, 2023-12-06 05:35 US/Pacific.\nDiagnosis: Impacted users may experience Failed requests\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2023-12-06T13:26:08+00:00","modified":"2023-12-06T13:26:24+00:00","when":"2023-12-06T13:26:08+00:00","text":"We experienced an issue with Dialogflow CX, Dialogflow ES, Speech-to-text beginning on Tuesday, 2023-12-05 23:37 US/Pacific.\nImpacted users experienced Dialogflow errors when using audio or when using Cloud Speech-to-text individually.\nThe issue has been resolved for all affected users as of Wednesday, 2023-12-06 02:24 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Speech-to-Text","id":"5f5oET9B3whnSFHfwy4d"},{"title":"Dialogflow CX","id":"BnCicQdHSdxaCv8Ya6Vm"},{"title":"Dialogflow ES","id":"sQqrYvhjMT5crPHKWJFY"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"}],"uri":"incidents/fDouU9r72TkBZQ5b4mYN","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt 
(europe-west3)","id":"europe-west3"}]},{"id":"QceUAfBRzT1B6qQmnCmW","number":"18081293374092183415","begin":"2023-12-06T12:20:00+00:00","created":"2023-12-06T20:41:37+00:00","end":"2023-12-07T00:47:00+00:00","modified":"2023-12-07T09:45:20+00:00","external_desc":"Google App Engine (GAE) Flex new deployments were experiencing issues with app availability.","updates":[{"created":"2023-12-07T09:41:19+00:00","modified":"2023-12-07T09:41:19+00:00","when":"2023-12-07T09:41:19+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 6 December 2023 04:20\n**Incident End:** 6 December 2023 16:47\n**Duration:** 12 hours, 27 minutes\n**Affected Services and Features:**\nGoogle App Engine\n**Regions/Zones:** asia-east1, asia-east2, asia-northeast1, asia-northeast2, asia-northeast3, asia-south1, asia-southeast1, asia-southeast2, australia-southeast1, europe-central2, europe-west1, europe-west2, europe-west3, europe-west6, northamerica-northeast1, southamerica-east1, us-central1, us-east1, us-east4, us-west1, us-west2, us-west3, us-west4\n**Description:**\nStarting on 6 December 2023 at 04:20, Google App Engine Flex experienced app unavailability issues for a duration of 12 hours and 27 minutes. This occurred due to an invalid backend service configuration update that led to L7 Load Balancer state inconsistency across regions. 
The issue was mitigated at 16:47 once this configuration change was rolled back.\n**Customer Impact:**\n* Google App Engine Flex deployments may have completed successfully, but the app became unavailable after some time.\n* Affected customers encountered the error message \"httpRequest.status=502, resource.type=\"http_load_balancer\", jsonPayload.statusDetails=\"failed_to_pick_backend\"","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-07T00:49:48+00:00","modified":"2023-12-07T00:49:50+00:00","when":"2023-12-07T00:49:48+00:00","text":"**The issue with Google App Engine has been resolved as of Wednesday, 2023-12-06 16:48 US/Pacific.**\nWe understand that this issue has impacted your ability to access and use our services; we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nWe take pride in providing our customers with a reliable and highly available service.\nPlease rest assured of the fact that we are committed to doing everything we can to prevent issues like this from happening in the future.\n**Thank you for choosing us.**","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia 
(us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-06T22:49:33+00:00","modified":"2023-12-06T22:49:35+00:00","when":"2023-12-06T22:49:33+00:00","text":"Summary: Google App Engine (GAE) Flex new deployments are experiencing issues with app availability.\nDescription: We have been experiencing an issue with Google App Engine Flex deployments beginning on Wednesday, 2023-12-06 04:20 US/Pacific.\nOur engineering team rolled out mitigation to europe-west2 and a few other impacted locations globally.\n**The current telemetry and monitoring is showing healthy progress across all regions where the fix has been rolled out.**\nOur engineers continue to roll out the fix globally to ensure all locations are mitigated as quickly as possible.\n**We will provide an update by Wednesday, 2023-12-06 17:00 US/Pacific with current details.**\nWe sincerely appreciate your patience and understanding as we work to resolve it as quickly as possible.\nDiagnosis:\n- For affected customers, new GAE Flex deployments may complete successfully but the app could become unavailable after some time. Affected customers will see an error message similar to “httpRequest.status=502 , resource.type=\"http_load_balancer\", jsonPayload.statusDetails=\"failed_to_pick_backend\".\n- Applications that are running on existing deployments are unaffected.\nWorkaround: There is no known workaround at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-06T21:49:28+00:00","modified":"2023-12-06T21:49:30+00:00","when":"2023-12-06T21:49:28+00:00","text":"Summary: Google App Engine (GAE) Flex new deployments are experiencing issues with app availability.\nDescription: We are experiencing an issue with Google App Engine Flex deployments beginning at Wednesday, 2023-12-06 04:20 US/Pacific.\nOur engineering team rolled out mitigation to europe-west2 and the current telemetry shows no further issues in that region. 
Our engineers are closely monitoring the region to ensure the mitigation works and are continuing to roll out mitigation in other regions.\nWe will provide an update by Wednesday, 2023-12-06 15:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- For affected customers, new GAE Flex deployments may complete successfully but the app could become unavailable after some time. Affected customers will see an error message similar to “httpRequest.status=502, resource.type=\"http_load_balancer\", jsonPayload.statusDetails=\"failed_to_pick_backend\".\n- Applications that are running on existing deployments are unaffected.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-06T21:26:13+00:00","modified":"2023-12-06T21:26:15+00:00","when":"2023-12-06T21:26:13+00:00","text":"Summary: Google App Engine (GAE) Flex new deployments are experiencing issues with app availability.\nDescription: We are experiencing an issue with Google App Engine Flex deployments beginning at Wednesday, 2023-12-06 04:20 US/Pacific.\nOur engineering team has identified a potential mitigation for the issue and is currently testing the mitigation.\nWe will provide an update by Wednesday, 2023-12-06 14:15 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis:\n- For affected customers, new GAE Flex deployments may complete successfully but the app could become unavailable after some time.\n- Applications that are running on existing deployments are unaffected.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw 
(europe-central2)","id":"europe-central2"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-06T20:41:36+00:00","modified":"2023-12-06T20:41:39+00:00","when":"2023-12-06T20:41:36+00:00","text":"Summary: Google App Engine (GAE) Flex new deployments are experiencing issues with app availability.\nDescription: We are experiencing an issue with Google App Engine Flex deployments beginning at Wednesday, 2023-12-06 04:20 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2023-12-06 13:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: For affected customers, new GAE Flex deployments may complete successfully but the app could become unavailable after some time.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2023-12-07T09:41:19+00:00","modified":"2023-12-07T09:41:19+00:00","when":"2023-12-07T09:41:19+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support\n(All Times US/Pacific)\n**Incident Start:** 6 December 2023 04:20\n**Incident End:** 6 December 2023 16:47\n**Duration:** 12 hours, 27 minutes\n**Affected Services and Features:**\nGoogle App Engine\n**Regions/Zones:** asia-east1, asia-east2, asia-northeast1, asia-northeast2, asia-northeast3, asia-south1, asia-southeast1, asia-southeast2, australia-southeast1, europe-central2, europe-west1, europe-west2, europe-west3, europe-west6, northamerica-northeast1, southamerica-east1, us-central1, us-east1, us-east4, us-west1, us-west2, us-west3, us-west4\n**Description:**\nStarting on 6 December 2023 at 04:20, Google App Engine Flex experienced app unavailability issues for a duration of 12 hours and 27 minutes. This occurred due to an invalid backend service configuration update that led to L7 Load Balancer state inconsistency across regions. The issue was mitigated at 16:47 once this configuration change was rolled back.\n**Customer Impact:**\n* Google App Engine Flex deployments may have completed successfully, but the app became unavailable after some time.\n* Affected customers encountered the error message: httpRequest.status=502, resource.type=\"http_load_balancer\", jsonPayload.statusDetails=\"failed_to_pick_backend\"","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"kchyUtnkMHJWaAva8aYc","service_name":"Google App Engine","affected_products":[{"title":"Google App Engine","id":"kchyUtnkMHJWaAva8aYc"}],"uri":"incidents/QceUAfBRzT1B6qQmnCmW","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney 
(australia-southeast1)","id":"australia-southeast1"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"MBW2kQdaVkRMamE5WekQ","number":"8137735602008945237","begin":"2023-12-06T09:57:18+00:00","created":"2023-12-06T09:57:28+00:00","end":"2023-12-06T09:58:14+00:00","modified":"2023-12-06T09:58:14+00:00","external_desc":"This issue is believed to be affecting a very small number of projects and our Engineering Team is working on it. If you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved. No further updates will be provided here. We thank you for your patience while we are working on resolving the issue.","updates":[{"created":"2023-12-06T09:58:12+00:00","modified":"2023-12-06T09:58:21+00:00","when":"2023-12-06T09:58:12+00:00","text":"This issue is believed to be affecting a very small number of projects and our Engineering Team is working on it. If you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved. No further updates will be provided here. We thank you for your patience while we are working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-06T09:57:20+00:00","modified":"2023-12-06T09:57:34+00:00","when":"2023-12-06T09:57:20+00:00","text":"Summary: We are investigating a potential issue with Google Cloud Networking.\nDescription: We are investigating a potential issue with Google Cloud Networking.\nWe will provide more information by Wednesday, 2023-12-06 02:30 US/Pacific.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2023-12-06T09:58:12+00:00","modified":"2023-12-06T09:58:21+00:00","when":"2023-12-06T09:58:12+00:00","text":"This issue is believed to be affecting a very small number of projects and our Engineering Team is working on it. If you have questions or are impacted, please open a case with the Support Team and we will work with you until this issue is resolved. No further updates will be provided here. 
We thank you for your patience while we are working on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"VNJxzcH58QmTt5H6pnT6","service_name":"Google Cloud Networking","affected_products":[{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"}],"uri":"incidents/MBW2kQdaVkRMamE5WekQ","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"MJhBi7Do88HwrgCKdQbJ","number":"2177513740116346203","begin":"2023-12-05T17:24:53+00:00","created":"2023-12-05T17:38:30+00:00","end":"2023-12-05T18:43:59+00:00","modified":"2023-12-05T18:43:59+00:00","external_desc":"Google Cloud Networking customers may experience high latency and 500 errors","updates":[{"created":"2023-12-05T18:43:58+00:00","modified":"2023-12-05T18:44:00+00:00","when":"2023-12-05T18:43:58+00:00","text":"The issue with Google Cloud Networking has been resolved for all affected projects as of Tuesday, 2023-12-05 10:10 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-05T18:18:59+00:00","modified":"2023-12-05T18:19:01+00:00","when":"2023-12-05T18:18:59+00:00","text":"Summary: 
Google Cloud Networking customers may experience high latency and 500 errors\nDescription: Mitigation is in place and engineers are seeing improvements in latency and error rates. Engineers are continuing to monitor for full recovery.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2023-12-05 11:30 US/Pacific.\nDiagnosis: Customers may experience high latency and 500 errors.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-05T18:01:42+00:00","modified":"2023-12-05T18:01:44+00:00","when":"2023-12-05T18:01:42+00:00","text":"Summary: Google Cloud Networking customers may experience high latency and 500 errors\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2023-12-05 11:20 US/Pacific.\nDiagnosis: Customers may experience high latency and 500 errors.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul 
(asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-05T17:38:29+00:00","modified":"2023-12-05T17:38:30+00:00","when":"2023-12-05T17:38:29+00:00","text":"Summary: Google Cloud Networking customers may experience a high latency and 500 errrors\nDescription: We are experiencing an issue with Google Cloud Networking beginning at Tuesday, 2023-12-05 06:45 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-05 11:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: None at this time.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin 
(europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2023-12-05T18:43:58+00:00","modified":"2023-12-05T18:44:00+00:00","when":"2023-12-05T18:43:58+00:00","text":"The issue with Google Cloud Networking has been resolved for all affected projects as of Tuesday, 2023-12-05 10:10 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina 
(us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"VNJxzcH58QmTt5H6pnT6","service_name":"Google Cloud Networking","affected_products":[{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"}],"uri":"incidents/MJhBi7Do88HwrgCKdQbJ","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"LyHPNqyuz2QtJGM9f33a","number":"3199611930441853032","begin":"2023-12-05T16:42:10+00:00","created":"2023-12-05T17:36:27+00:00","end":"2023-12-05T22:49:52+00:00","modified":"2023-12-05T22:49:52+00:00","external_desc":"Dialogflow CX agent training experienced latency in \"Global Locale\" region","updates":[{"created":"2023-12-05T22:49:51+00:00","modified":"2023-12-05T22:49:53+00:00","when":"2023-12-05T22:49:51+00:00","text":"The issue with Dialogflow CX has been resolved for all affected users as of Tuesday, 2023-12-05 14:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the 
issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-05T21:38:46+00:00","modified":"2023-12-05T21:38:48+00:00","when":"2023-12-05T21:38:46+00:00","text":"Summary: Dialogflow CX agent training is experiencing latency in \"Global Locale\" region\nDescription: Google Engineering has completed a fix rollout, and service is observed as recovering.\nWe are now monitoring for full recovery and will provide another update once service is determined restored.\nWe will provide an update by Tuesday, 2023-12-05 15:15 US/Pacific with current details.\nDiagnosis: Some users may be experiencing latency with Dialogflow CX agent training. In addition some users may also be experiencing pending trainings for Dialogflow ES.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-05T20:22:01+00:00","modified":"2023-12-05T20:22:04+00:00","when":"2023-12-05T20:22:01+00:00","text":"Summary: Dialogflow CX agent training is experiencing latency in \"Global Locale\" region\nDescription: Mitigation work is currently underway by our engineering team.\nGoogle Engineering is rolling out a fix, currently the ETA for completion is by Tuesday, 2023-12-05 13:30 US/Pacific.\nWe will provide more information by Tuesday, 2023-12-05 13:45 US/Pacific.\nDiagnosis: Some users may be experiencing latency with Dialogflow CX agent training. In addition some users may also be experiencing pending trainings for Dialogflow ES.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-05T19:07:02+00:00","modified":"2023-12-05T19:07:04+00:00","when":"2023-12-05T19:07:02+00:00","text":"Summary: Dialogflow CX agent training is experiencing latency in \"Global Locale\" region\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2023-12-05 13:15 US/Pacific.\nDiagnosis: The impacted customers were experiencing latency with CX agent training. However, no impact on other Dialogflow services.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-05T19:06:31+00:00","modified":"2023-12-05T19:06:33+00:00","when":"2023-12-05T19:06:31+00:00","text":"Summary: Dialogflow CX agent training is experiencing latency in \"Global Locale\" region\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2023-12-05 12:00 US/Pacific.\nDiagnosis: The impacted customers were experiencing latency with CX agent training. However, no impact on other Dialogflow services.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-05T18:00:39+00:00","modified":"2023-12-05T18:00:41+00:00","when":"2023-12-05T18:00:39+00:00","text":"Summary: Dialogflow CX agent training is experiencing latency in \"Global Locale\" region\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2023-12-05 11:00 US/Pacific.\nDiagnosis: The impacted customers were experiencing latency with CX agent training. 
However, no impact on other Dialogflow services.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-05T17:36:25+00:00","modified":"2023-12-05T17:36:28+00:00","when":"2023-12-05T17:36:25+00:00","text":"Summary: Dialogflow CX agent training is experiencing latency in \"Global Locale\" region\nDescription: We are experiencing an issue with Dialogflow CX.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-05 10:30 US/Pacific with current details.\nDiagnosis: The impacted customers were experiencing latency with CX agent training. However, no impact on other Dialogflow services.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2023-12-05T22:49:51+00:00","modified":"2023-12-05T22:49:53+00:00","when":"2023-12-05T22:49:51+00:00","text":"The issue with Dialogflow CX has been resolved for all affected users as of Tuesday, 2023-12-05 14:30 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Dialogflow CX","id":"BnCicQdHSdxaCv8Ya6Vm"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"}],"uri":"incidents/LyHPNqyuz2QtJGM9f33a","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"RSPjBhzxSrQwEnnDQGn7","number":"17001697624612530391","begin":"2023-12-05T14:40:03+00:00","created":"2023-12-05T14:54:32+00:00","end":"2023-12-07T01:32:13+00:00","modified":"2023-12-07T01:32:13+00:00","external_desc":"Cloud Logging exports to Cloud Storage were delayed up to 10h in us-central1","updates":[{"created":"2023-12-07T01:32:11+00:00","modified":"2023-12-07T01:32:14+00:00","when":"2023-12-07T01:32:11+00:00","text":"The issue with Cloud Logging has been resolved for all affected users as of Wednesday, 2023-12-06 16:22 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-07T01:03:08+00:00","modified":"2023-12-07T01:03:11+00:00","when":"2023-12-07T01:03:08+00:00","text":"Summary: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nDescription: Mitigation work has been completed and backlogs are currently processing.\nWe expect backlogs to be cleared by Wednesday, 2023-12-06 17:30 US/Pacific.\nWe will provide more information by Wednesday, 2023-12-06 18:00 US/Pacific.\nDiagnosis: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-06T23:49:58+00:00","modified":"2023-12-06T23:50:00+00:00","when":"2023-12-06T23:49:58+00:00","text":"Summary: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nDescription: Mitigation work has been completed and backlogs are currently processing.\nWe expect backlogs to be cleared by Wednesday, 2023-12-06 16:30 US/Pacific.\nWe will provide more information by Wednesday, 2023-12-06 17:00 US/Pacific.\nDiagnosis: Cloud Logging exports to Cloud Storage are delayed up 
to 10h in us-central1\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-06T17:39:33+00:00","modified":"2023-12-06T17:39:37+00:00","when":"2023-12-06T17:39:33+00:00","text":"Summary: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nDescription: Mitigation work is completed and clearing the backlog.\nWe do not have an ETA for the backlog to clear.\nWe will provide more information by Wednesday, 2023-12-06 16:00 US/Pacific.\nDiagnosis: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T21:58:55+00:00","modified":"2023-12-05T21:58:57+00:00","when":"2023-12-05T21:58:55+00:00","text":"Summary: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Wednesday, 2023-12-06 10:00 US/Pacific.\nDiagnosis: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T18:45:08+00:00","modified":"2023-12-05T18:45:10+00:00","when":"2023-12-05T18:45:08+00:00","text":"Summary: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nDescription: Our engineering team has determined that further investigation is required to mitigate the issue.\nWe will provide an update by Tuesday, 2023-12-05 14:00 US/Pacific with current details.\nDiagnosis: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T16:49:54+00:00","modified":"2023-12-05T16:49:58+00:00","when":"2023-12-05T16:49:54+00:00","text":"Summary: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2023-12-05 11:00 US/Pacific.\nDiagnosis: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T16:03:19+00:00","modified":"2023-12-05T16:03:28+00:00","when":"2023-12-05T16:03:19+00:00","text":"Summary: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nDescription: We are experiencing an issue with Cloud Logging beginning on Monday, 2023-12-04 18:20 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-05 09:00 US/Pacific with current details.\nDiagnosis: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T15:49:52+00:00","modified":"2023-12-05T15:50:00+00:00","when":"2023-12-05T15:49:52+00:00","text":"Summary: Cloud Logging exports to Cloud Storage are 
delayed up to 10h in us-central1\nDescription: We are experiencing an issue with Cloud Logging beginning on Monday, 2023-12-04 18:20 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-05 08:30 US/Pacific with current details.\nDiagnosis: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T14:54:29+00:00","modified":"2023-12-05T14:54:38+00:00","when":"2023-12-05T14:54:29+00:00","text":"Summary: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nDescription: We are experiencing an issue with Cloud Logging beginning on Monday, 2023-12-04 18:20 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-05 08:00 US/Pacific with current details.\nDiagnosis: Cloud Logging exports to Cloud Storage are delayed up to 10h in us-central1\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2023-12-07T01:32:11+00:00","modified":"2023-12-07T01:32:14+00:00","when":"2023-12-07T01:32:11+00:00","text":"The issue with Cloud Logging has been resolved for all affected users as of Wednesday, 2023-12-06 16:22 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"},{"title":"Cloud Logging","id":"PuCJ6W2ovoDhLcyvZ1xa"}],"uri":"incidents/RSPjBhzxSrQwEnnDQGn7","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"agVmSRSdsPaNEw1mB81r","number":"9132238716454288383","begin":"2023-12-05T14:17:37+00:00","created":"2023-12-05T14:29:55+00:00","end":"2023-12-05T15:26:51+00:00","modified":"2023-12-05T15:26:51+00:00","external_desc":"This incident is being merged with an existing incident. All future updates will be provided there: https://status.cloud.google.com/incidents/TzTbUF4sxVhj1mcK9j25","updates":[{"created":"2023-12-05T15:26:41+00:00","modified":"2023-12-05T15:26:54+00:00","when":"2023-12-05T15:26:41+00:00","text":"This incident is being merged with an existing incident. 
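The Cloud Logging incident above concerns delayed exports (log sinks) to Cloud Storage in us-central1. The updates offer no programmatic workaround, but enumerating a project's sinks is one way to see which exports could be affected; a small sketch with the google-cloud-logging Python client, using a placeholder project ID:

```python
from google.cloud import logging

client = logging.Client(project="my-project")  # placeholder project ID

# List log sinks and flag those exporting to Cloud Storage buckets,
# since Storage-destined exports were the ones delayed in this incident.
for sink in client.list_sinks():
    if sink.destination.startswith("storage.googleapis.com/"):
        print(f"{sink.name} -> {sink.destination} (filter: {sink.filter_ or 'ALL'})")
```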
All future updates will be provided there: https://status.cloud.google.com/incidents/TzTbUF4sxVhj1mcK9j25","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T14:51:06+00:00","modified":"2023-12-05T14:51:13+00:00","when":"2023-12-05T14:51:06+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: We are experiencing an issue with Google Cloud Bigtable due to an underlying networking issue beginning on Monday, 2023-12-04 20:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-05 09:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience elevated errors and increased latency when reading and writing data to their instances in us-central1\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T14:29:47+00:00","modified":"2023-12-05T14:30:00+00:00","when":"2023-12-05T14:29:47+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: We are experiencing an issue with Google Cloud Bigtable due to an underlying networking issue beginning on Monday, 2023-12-04 20:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-05 07:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience elevated errors and increased latency when reading and writing data to their instances in us-central1\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2023-12-05T15:26:41+00:00","modified":"2023-12-05T15:26:54+00:00","when":"2023-12-05T15:26:41+00:00","text":"This incident is being merged with an existing incident. All future updates will be provided there: https://status.cloud.google.com/incidents/TzTbUF4sxVhj1mcK9j25","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"LfZSuE3xdQU46YMFV5fy","service_name":"Google Cloud Bigtable","affected_products":[{"title":"Google Cloud Bigtable","id":"LfZSuE3xdQU46YMFV5fy"}],"uri":"incidents/agVmSRSdsPaNEw1mB81r","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"TzTbUF4sxVhj1mcK9j25","number":"10196743695129868541","begin":"2023-12-05T03:50:00+00:00","created":"2023-12-05T15:19:35+00:00","end":"2023-12-06T11:55:00+00:00","modified":"2023-12-07T20:59:01+00:00","external_desc":"Cloud Bigtable customers experiencing errors and increased latency in us-central1","updates":[{"created":"2023-12-07T20:59:01+00:00","modified":"2023-12-07T20:59:01+00:00","when":"2023-12-07T20:59:01+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. 
If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 4 December 2023 19:50\n**Incident End:** 6 December 2023 03:55\n**Duration:**\n* Partial mitigation: 15 hours\n* Full mitigation: 1 day, 8 hours, 5 minutes\n**Affected Services and Features:**\nGoogle Cloud Bigtable\n**Regions/Zones:** us-central1\n**Description:**\nApproximately 0.06% of Google Cloud Bigtable dataplane requests experienced increased latency and errors in the us-central1 region. A partial mitigation was applied 15 hours from the start of the incident and the incident was fully mitigated after 1 day, 8 hours and 5 minutes. The root cause of the issue was an isolation failure in the machines that were running Cloud Bigtable servers; this manifested primarily as a low rate of network packet loss. The change that triggered the isolation issue has been identified and rolled back.\n**Customer Impact:**\nImpacted Bigtable customers experienced increased latency and a low level of connection errors in the us-central1 region.\n**Additional details:**\nThe issue was first detected by automated monitoring on 4 December 2023 at 23:35 and engineers immediately started an investigation.\nAs part of the investigation, on 5 December 2023 at 14:40, engineers redirected some traffic away from a subset of the affected machines which partially mitigated impact.\nEngineers subsequently identified the trigger and mitigated the issue on 6 December 2023 at 03:55, which fully resolved the latency. Additionally, engineers have put in place controls to prevent recurrence.","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-06T16:23:59+00:00","modified":"2023-12-06T16:24:09+00:00","when":"2023-12-06T16:23:59+00:00","text":"The issue with Google Cloud Bigtable, Google Cloud Tasks has been resolved for all affected users as of Wednesday, 2023-12-06 08:23 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-06T11:00:24+00:00","modified":"2023-12-06T11:00:32+00:00","when":"2023-12-06T11:00:24+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: We are experiencing an issue with Google Cloud Bigtable, Google Cloud Tasks.\nThe issue with Google Cloud Bigtable is resolved for the majority of our customers.\nHowever, some customers that are running non-replicated clusters in us-central1 may continue to experience elevated errors and latency. Our engineers continue to work on mitigating the impact. 
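The updates above note that residual impact was concentrated on non-replicated Bigtable clusters in us-central1, since a single-cluster instance cannot route around a degraded location. A minimal sketch, using the google-cloud-bigtable admin client with placeholder project and instance IDs, of checking how many clusters an instance has and where they run:

```python
from google.cloud import bigtable

# admin=True is required for instance/cluster administration calls.
client = bigtable.Client(project="my-project", admin=True)  # placeholder project ID
instance = client.instance("my-instance")                   # placeholder instance ID

# list_clusters() returns (clusters, failed_locations).
clusters, failed_locations = instance.list_clusters()
for cluster in clusters:
    print(cluster.cluster_id, cluster.location_id, cluster.serve_nodes)

# A single cluster in one zone means the instance is not replicated and
# cannot fail over away from a degraded location such as us-central1.
if len(clusters) < 2:
    print("Instance is not replicated.")
```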
We do not have an ETA for full resolution at this point.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2023-12-06 11:00 US/Pacific with current details.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-06T10:02:29+00:00","modified":"2023-12-06T11:02:44+00:00","when":"2023-12-06T10:02:29+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: We are experiencing an issue with Google Cloud Bigtable, Google Cloud Networking, Google Cloud Tasks.\nThe issue with Google Cloud Bigtable is resolved for the majority of our customers. However, some customers that are running non-replicated clusters in us-central1 may continue to experience elevated errors and latency. Our engineers continue to work on mitigating the impact. We do not have an ETA for full resolution at this point.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2023-12-06 03:00 US/Pacific with current details.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-06T05:20:22+00:00","modified":"2023-12-06T11:02:33+00:00","when":"2023-12-06T05:20:22+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: The issue with Google Cloud Bigtable is resolved for the majority of our customers. However, some customers that are running non-replicated clusters in us-central1 may continue to experience elevated errors and latency.\nOur engineers continue to work on mitigating the impact. We do not have an ETA for full resolution at this point.\nWe will provide an update by Wednesday, 2023-12-06 02:30 US/Pacific with current details.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-06T01:27:50+00:00","modified":"2023-12-06T11:03:36+00:00","when":"2023-12-06T01:27:50+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: The issue with Google Cloud Bigtable is resolved for the majority of our customers. However, some customers that are running non-replicated clusters in us-central1 may continue to experience elevated errors and latency.\nOur engineers continue to work on mitigating the impact. 
We do not have an ETA for full resolution at this point.\nWe will provide an update by Tuesday, 2023-12-05 21:30 US/Pacific with current details.\nWe sincerely appreciate your patience and understanding as we work to resolve it as quickly as possible.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-06T00:20:16+00:00","modified":"2023-12-06T11:03:27+00:00","when":"2023-12-06T00:20:16+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: The issue with Google Cloud Bigtable is resolved for the majority of our customers. However, some customers that are running non-replicated clusters in us-central1 may continue to experience elevated errors and latency.\nOur engineers continue to work on mitigating the impact. We do not have an ETA for full resolution at this point.\nWe will provide an update by Tuesday, 2023-12-05 17:30 US/Pacific with current details.\nWe sincerely appreciate your patience and understanding as we work to resolve it as quickly as possible.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T22:44:46+00:00","modified":"2023-12-06T11:03:16+00:00","when":"2023-12-05T22:44:46+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: The issue with Google Cloud Bigtable is resolved for the majority of our customers. However, some customers that are running non-replicated clusters in us-central1 may continue to experience elevated errors and latency.\nOur engineers continue to work on mitigating the impact. We do not have an ETA for full resolution at this point.\nWe will provide an update by Tuesday, 2023-12-05 16:30 US/Pacific with current details.\nWe sincerely appreciate your patience and understanding as we work to resolve it as quickly as possible.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T21:52:27+00:00","modified":"2023-12-06T11:03:07+00:00","when":"2023-12-05T21:52:27+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: The issue with Google Cloud Bigtable is resolved for the majority of our customers. 
However, some customers that are running non-replicated clusters in us-central1 may continue to experience elevated errors and latency.\nWe do not have an ETA for full resolution at this point.\nWe will provide an update by Tuesday, 2023-12-05 15:00 US/Pacific with current details.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T20:52:55+00:00","modified":"2023-12-05T20:52:57+00:00","when":"2023-12-05T20:52:55+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: We believe the issue with Google Cloud Bigtable is partially resolved for the majority of our customers.\nWe do not have an ETA for full resolution at this point.\nWe will provide an update by Tuesday, 2023-12-05 14:00 US/Pacific with current details.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T19:31:24+00:00","modified":"2023-12-05T19:31:26+00:00","when":"2023-12-05T19:31:24+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: We are experiencing an issue with Google Cloud Bigtable beginning on Monday, 2023-12-04 20:00 US/Pacific.\nThe investigation from our Google engineers has confirmed that there is no impact specific to Google Kubernetes Engine.\nWe are actively pursuing several mitigation steps currently.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2023-12-05 12:45 US/Pacific.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T18:57:28+00:00","modified":"2023-12-05T18:57:30+00:00","when":"2023-12-05T18:57:28+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: We are experiencing an issue with Google Cloud Bigtable beginning on Monday, 2023-12-04 20:00 US/Pacific.\nThe investigation from our Google engineers has confirmed that there is no impact specific to Google Kubernetes Engine.\nPlease be assured that our engineering team continues to investigate the issue for Google Cloud Bigtable and Networking components, with highest priority.\nWe will provide an update by Tuesday, 2023-12-05 12:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T17:53:14+00:00","modified":"2023-12-05T17:53:16+00:00","when":"2023-12-05T17:53:14+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: We are experiencing an issue with Google Cloud Bigtable beginning on Monday, 2023-12-04 20:00 US/Pacific.\nThe investigation 
from our Google engineers has confirmed that there is no impact specific to Google Kubernetes Engine.\nOur engineering team continues to investigate the issue for Google Cloud Bigtable and Networking components.\nWe will provide an update by Tuesday, 2023-12-05 11:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T17:08:52+00:00","modified":"2023-12-05T17:08:55+00:00","when":"2023-12-05T17:08:52+00:00","text":"Summary: Cloud Bigtable customers experiencing errors and increased latency in us-central1\nDescription: We are experiencing an issue with Google Cloud Bigtable and Google Kubernetes Engine beginning on Monday, 2023-12-04 20:00 US/Pacific.\nOur investigation has identified that this was not caused due to Google Cloud Networking.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-05 10:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T15:51:16+00:00","modified":"2023-12-05T15:51:25+00:00","when":"2023-12-05T15:51:16+00:00","text":"Summary: Network issue causing customers to experience elevated errors and increased latency across multiple services in us-central1\nDescription: We are experiencing a Google Cloud Networking issue impacting Google Cloud Bigtable and Google Kubernetes Engine beginning on Monday, 2023-12-04 20:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-05 09:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted customers may experience elevated errors and increased latency on the impacted services in us-central1.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T15:19:33+00:00","modified":"2023-12-05T15:55:18+00:00","when":"2023-12-05T15:19:33+00:00","text":"Summary: Network issue causing customers to experience elevated errors and increased latency across multiple services in us-central1\nDescription: We are experiencing an issue with Google Cloud Networking beginning at Monday, 2023-12-04 20:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Tuesday, 2023-12-05 07:50 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers may experience elevated errors and increased latency in us-central1.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2023-12-07T20:59:01+00:00","modified":"2023-12-07T20:59:01+00:00","when":"2023-12-07T20:59:01+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. 
We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 4 December 2023 19:50\n**Incident End:** 6 December 2023 03:55\n**Duration:**\n* Partial mitigation: 15 hours\n* Full mitigation: 1 day, 8 hours, 5 minutes\n**Affected Services and Features:**\nGoogle Cloud Bigtable\n**Regions/Zones:** us-central1\n**Description:**\nApproximately 0.06% of Google Cloud Bigtable dataplane requests experienced increased latency and errors in the us-central1 region. A partial mitigation was applied 15 hours from the start of the incident and the incident was fully mitigated after 1 day, 8 hours and 5 minutes. The root cause of the issue was an isolation failure in the machines that were running Cloud Bigtable servers; this manifested primarily as a low rate of network packet loss. The change that triggered the isolation issue has been identified and rolled back.\n**Customer Impact:**\nImpacted Bigtable customers experienced increased latency and a low level of connection errors in the us-central1 region.\n**Additional details:**\nThe issue was first detected by automated monitoring on 4 December 2023 at 23:35 and engineers immediately started an investigation.\nAs part of the investigation, on 5 December 2023 at 14:40, engineers redirected some traffic away from a subset of the affected machines which partially mitigated impact.\nEngineers subsequently identified the trigger and mitigated the issue on 6 December 2023 at 03:55, which fully resolved the latency. Additionally, engineers have put in place controls to prevent recurrence.","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Cloud Bigtable","id":"LfZSuE3xdQU46YMFV5fy"},{"title":"Google Cloud Tasks","id":"tMWyzhyKK4rAzAf7x62h"}],"uri":"incidents/TzTbUF4sxVhj1mcK9j25","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"FpKXnbLCRjWzZ8CaTNhR","number":"8606622783909453128","begin":"2023-12-04T23:31:56+00:00","created":"2023-12-04T23:31:59+00:00","end":"2023-12-05T17:57:38+00:00","modified":"2023-12-05T17:57:38+00:00","external_desc":"Document AI availability has stabilized in Multi-region: us.","updates":[{"created":"2023-12-05T17:57:37+00:00","modified":"2023-12-05T17:57:39+00:00","when":"2023-12-05T17:57:37+00:00","text":"Document AI availability has stabilized for all affected users as of Tuesday, 2023-12-05 09:03 US/Pacific.\nWe apologize to all who are affected by the disruption.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2023-12-05T17:29:56+00:00","modified":"2023-12-05T17:29:58+00:00","when":"2023-12-05T17:29:56+00:00","text":"Summary: Document AI is experiencing a degradation in availability in Multi-region: us. 
We have identified the root cause and are working urgently to resolve it.\nDescription: Document AI is experiencing a degradation in availability in Multi-region: us.\nWe have identified the root cause of the issue and are taking next steps to resolve it.\n**We estimate the service will be restored by Dec 5, 2023.**\n**We will provide an update by Tuesday, 2023-12-05 10:30 US/Pacific.**\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers using Document AI to process documents with DocumentProcessorService-ProcessDocument API using v1, v1beta3 or uiv1beta3 versions may observe the following errors:\nError1: HTTP Mapping: 499 Client Closed Request (CANCELLED);\nError2: HTTP Mapping: 504 Gateway Timeout (DEADLINE_EXCEEDED)\nWorkaround: Process the volume in a different region by creating a processor in a new region and importing the desired processor version as explained in the below documentation.\nhttps://cloud.google.com/document-ai/docs/create-processor\nhttps://cloud.google.com/document-ai/docs/manage-processor-versions#import","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2023-12-05T05:17:43+00:00","modified":"2023-12-05T05:17:46+00:00","when":"2023-12-05T05:17:43+00:00","text":"Summary: Document AI is experiencing a degradation in availability in Multi-region: us. We have identified the root cause and are working urgently to resolve it.\nDescription: Document AI is experiencing a degradation in availability in Multi-region: us.\nWe have identified the root cause of the issue and are taking next steps to resolve it.\n**We estimate the service will be restored by Dec 5, 2023.**\n**We will provide an update by Tuesday, 2023-12-05 09:30 US/Pacific.**\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers using Document AI to process documents with DocumentProcessorService-ProcessDocument API using v1, v1beta3 or uiv1beta3 versions may observe the following errors:\nError1: HTTP Mapping: 499 Client Closed Request (CANCELLED);\nError2: HTTP Mapping: 504 Gateway Timeout (DEADLINE_EXCEEDED)\nWorkaround: Process the volume in a different region by creating a processor in a new region and importing the desired processor version as explained in the below documentation.\nhttps://cloud.google.com/document-ai/docs/create-processor\nhttps://cloud.google.com/document-ai/docs/manage-processor-versions#import","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2023-12-05T02:14:30+00:00","modified":"2023-12-05T10:09:02+00:00","when":"2023-12-05T02:14:30+00:00","text":"Summary: Document AI is experiencing a degradation in availability in Multi-region: us. 
We have identified the root cause and are urgently working to resolve it.\nDescription: Document AI is experiencing a degradation in availability in the Multi-region: us.\nWe have identified the root cause of the issue and are taking next steps to resolve it.\n**We estimate the service will be restored by Dec 5, 2023.**\n**We will provide an update by Tuesday, 2023-12-05 09:30 US/Pacific.**\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers using Document AI to process documents with DocumentProcessorService-ProcessDocument API using v1, v1beta3 or uiv1beta3 versions may observe the following errors:\nError1: HTTP Mapping: 499 Client Closed Request (CANCELLED);\nError2: HTTP Mapping: 504 Gateway Timeout (DEADLINE_EXCEEDED)\nWorkaround: Process the volume in a different region by creating a processor in a new region and importing the desired processor version as explained in the below documentation.\nhttps://cloud.google.com/document-ai/docs/create-processor\nhttps://cloud.google.com/document-ai/docs/manage-processor-versions#import","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2023-12-05T00:25:26+00:00","modified":"2023-12-05T00:25:29+00:00","when":"2023-12-05T00:25:26+00:00","text":"Summary: We've received a report of an issue with Document AI in us multiregion\nDescription: **We are experiencing an issue with Document AI beginning on Monday, 2023-12-04 13:00 US/Pacific.**\nOur engineering team continues to investigate and work towards mitigating the issue. At this time, we do not have a specific ETA for the mitigation work to be completed.\nPlease rest assured that our engineering teams are working diligently to mitigate all known impacts.\nWe understand that this issue has impacted your ability to access and use our services; we deeply appreciate your patience and cooperation while we work to mitigate the issue.\n**We will provide an update by Monday, 2023-12-04 18:30 US/Pacific with current details.**\nDiagnosis: Customers using Document AI to process documents with 'DocumentProcessorService-ProcessDocument' API using v1, v1beta3 or uiv1beta3 versions may observe the following errors.\nError1: HTTP Mapping: 499 Client Closed Request (CANCELLED);\nError2: HTTP Mapping: 504 Gateway Timeout (DEADLINE_EXCEEDED)\nWorkaround: There is no known workaround at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2023-12-04T23:31:57+00:00","modified":"2023-12-04T23:32:00+00:00","when":"2023-12-04T23:31:57+00:00","text":"Summary: We've received a report of an issue with Document AI in us multiregion\nDescription: We are experiencing an issue with Document AI beginning at Monday, 2023-12-04 13:00 US/Pacific. 
Mitigation work is currently underway by our engineering team.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2023-12-04 16:30 US/Pacific with current details.\nDiagnosis: Customers using Document AI to process documents with 'DocumentProcessorService-ProcessDocument' API using v1, v1beta3 or uiv1beta3 versions may observe the following errors.\nError1: HTTP Mapping: 499 Client Closed Request (CANCELLED);\nError2: HTTP Mapping: 504 Gateway Timeout (DEADLINE_EXCEEDED)\nWorkaround: There is no known workaround at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2023-12-05T17:57:37+00:00","modified":"2023-12-05T17:57:39+00:00","when":"2023-12-05T17:57:37+00:00","text":"Document AI availability has stabilized for all affected users as of Tuesday, 2023-12-05 09:03 US/Pacific.\nWe apologize to all who are affected by the disruption.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Document AI","id":"GWuqLi6DKb1DkzyRtRuD"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"}],"uri":"incidents/FpKXnbLCRjWzZ8CaTNhR","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"3CMcRiqNus4M1xFysuvw","number":"2989687476778546891","begin":"2023-12-04T22:15:05+00:00","created":"2023-12-04T22:15:22+00:00","end":"2023-12-05T10:30:55+00:00","modified":"2023-12-05T10:30:56+00:00","external_desc":"We are experiencing an issue with Cloud Interconnect in Chicago, USA","updates":[{"created":"2023-12-05T10:30:53+00:00","modified":"2023-12-05T10:31:01+00:00","when":"2023-12-05T10:30:53+00:00","text":"The issue with Google Cloud Networking has been resolved for all affected users as of Tuesday, 2023-12-05 02:14 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T08:32:07+00:00","modified":"2023-12-05T08:32:09+00:00","when":"2023-12-05T08:32:07+00:00","text":"Summary: We are experiencing an issue with Cloud Interconnect in Chicago, USA\nDescription: We are experiencing an issue with Cloud Interconnect in Google Cloud Interconnect locations [“ord-zone1-7”] (https://cloud.google.com/network-connectivity/docs/interconnect/concepts/choosing-colocation-facilities#locations-table) in Chicago, USA. 
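[Editor's note: the Document AI workaround above is to create a processor in an unaffected region and import the desired processor version, per the two linked docs. Once such a processor exists, requests can target that region's endpoint. The sketch below illustrates this under stated assumptions: the project ID, processor ID, and input file are hypothetical placeholders, and an EU processor is assumed to have already been created via the linked documentation.]

```python
# Illustrative sketch of the documented workaround: point requests at a
# processor created in another region (here "eu") instead of the affected
# "us" multi-region. Project, processor ID, and file are placeholders.
from google.api_core.client_options import ClientOptions
from google.cloud import documentai

# Use the EU regional endpoint rather than the affected US one.
client = documentai.DocumentProcessorServiceClient(
    client_options=ClientOptions(api_endpoint="eu-documentai.googleapis.com")
)

name = client.processor_path("my-project", "eu", "my-processor-id")

with open("invoice.pdf", "rb") as f:
    request = documentai.ProcessRequest(
        name=name,
        raw_document=documentai.RawDocument(
            content=f.read(), mime_type="application/pdf"
        ),
    )

result = client.process_document(request=request)
print(result.document.text[:200])
```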
Mitigation work is currently underway by our engineering team.\n**Our engineers have identified a mitigation to fix the issue at hand.**\nAt this time, we do not have an ETA for the completion of all the steps and processes involved.\n**We will provide more information by Tuesday, 2023-12-05 03:00 US/Pacific.**\nPlease rest assured that our engineering teams are working diligently to mitigate all known impacts.\nWe sincerely appreciate your patience and understanding as we work to resolve it as quickly as possible.\nDiagnosis: Affected customers will have non-operating interconnects or interconnect attachments due to ongoing device repairs in the metro.\nWorkaround: Affected customers can reroute traffic using their redundant interconnect if available.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T04:42:07+00:00","modified":"2023-12-05T04:42:18+00:00","when":"2023-12-05T04:42:07+00:00","text":"Summary: We are experiencing an issue with Cloud Interconnect in Chicago, USA\nDescription: We are experiencing an issue with Cloud Interconnect in Google Cloud Interconnect locations [“ord-zone1-7”] (https://cloud.google.com/network-connectivity/docs/interconnect/concepts/choosing-colocation-facilities#locations-table) in Chicago, USA. Mitigation work is currently underway by our engineering team.\n**Our engineers have identified a mitigation to fix the issue at hand.**\nAt this time, we do not have an ETA for the completion of all the steps and processes involved.\n**We will provide more information by Tuesday, 2023-12-05 01:00 US/Pacific.**\nPlease rest assured that our engineering teams are working diligently to mitigate all known impacts.\nWe sincerely appreciate your patience and understanding as we work to resolve it as quickly as possible.\nDiagnosis: Affected customers will have non-operating interconnects or interconnect attachments due to ongoing device repairs in the metro.\nWorkaround: Affected customers can reroute traffic using their redundant interconnect if available.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-05T01:20:35+00:00","modified":"2023-12-05T01:20:37+00:00","when":"2023-12-05T01:20:35+00:00","text":"Summary: We are experiencing an issue with Cloud Interconnect in Chicago, USA\nDescription: We are experiencing an issue with Cloud Interconnect in Google Cloud Interconnect locations [“ord-zone1-7”] (https://cloud.google.com/network-connectivity/docs/interconnect/concepts/choosing-colocation-facilities#locations-table) in Chicago, USA. 
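[Editor's note: the workaround above is to reroute traffic over a redundant interconnect if one is available. Before failing over, it helps to confirm which attachments are actually down. A minimal sketch follows, assuming the google-cloud-compute client library; the project ID is a hypothetical placeholder, and us-central1 is used because it is the region listed for this incident.]

```python
# Illustrative sketch: list Cloud Interconnect attachments in a region and
# report their status, to decide whether the redundant path is healthy
# before rerouting. The project ID is a hypothetical placeholder.
from google.cloud import compute_v1

client = compute_v1.InterconnectAttachmentsClient()

for attachment in client.list(project="my-project", region="us-central1"):
    # operational_status is e.g. OS_ACTIVE vs OS_UNPROVISIONED.
    print(attachment.name, attachment.operational_status, attachment.state)
```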
Mitigation work is currently underway by our engineering team.\n**Our engineers have identified a mitigation to fix the issue at hand.**\nAt this time, we do not have an ETA for the completion of all the steps and processes involved.\n**We will provide more information by Monday, 2023-12-04 21:00 US/Pacific.**\nPlease rest assured that our engineering teams are working diligently to mitigate all known impacts.\nWe sincerely appreciate your patience and understanding as we work to resolve it as quickly as possible.\nDiagnosis: Affected customers will have non-operating interconnects or interconnect attachments due to ongoing device repairs in the metro.\nWorkaround: Affected customers can reroute traffic using their redundant interconnect if available.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"created":"2023-12-04T22:15:06+00:00","modified":"2023-12-04T22:15:23+00:00","when":"2023-12-04T22:15:06+00:00","text":"Summary: We are experiencing an issue with Cloud Interconnect in Chicago, USA\nDescription: We are experiencing an issue with Cloud Interconnect in Chicago, USA. Mitigation work is currently underway by our engineering team.\nWe will provide more information by Monday, 2023-12-04 18:00 US/Pacific.\nDiagnosis: Affected customers will have non-operating interconnects or interconnect attachments.\nWorkaround: If available, customers can reroute traffic using a different interconnect.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]}],"most_recent_update":{"created":"2023-12-05T10:30:53+00:00","modified":"2023-12-05T10:31:01+00:00","when":"2023-12-05T10:30:53+00:00","text":"The issue with Google Cloud Networking has been resolved for all affected users as of Tuesday, 2023-12-05 02:14 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"VNJxzcH58QmTt5H6pnT6","service_name":"Google Cloud Networking","affected_products":[{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"}],"uri":"incidents/3CMcRiqNus4M1xFysuvw","currently_affected_locations":[],"previously_affected_locations":[{"title":"Iowa (us-central1)","id":"us-central1"}]},{"id":"yRiHPE2GDhujJx5kMtXU","number":"6721443783123155093","begin":"2023-12-04T13:52:32+00:00","created":"2023-12-04T14:22:18+00:00","end":"2023-12-04T16:10:49+00:00","modified":"2023-12-04T16:10:49+00:00","external_desc":"Google Cloud Load Balancer may drop traffic for the new forwarding rules created after 29 Nov","updates":[{"created":"2023-12-04T16:10:46+00:00","modified":"2023-12-04T16:10:54+00:00","when":"2023-12-04T16:10:46+00:00","text":"The issue with Cloud Load Balancing has been resolved for all newly created forwarding rules as of Monday, 2023-12-04 07:58 US/Pacific.\nIf a Global IP for a load balancer falls into the 34.49.0.0/16 VIP range, the customer will need to re-create the corresponding forwarding rule with a Global IP attached to it.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul 
(asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-04T15:04:06+00:00","modified":"2023-12-04T15:04:13+00:00","when":"2023-12-04T15:04:06+00:00","text":"Summary: Google Cloud Load Balancer may drop traffic for the new forwarding rules created after 14 Nov\nDescription: We are experiencing an issue with Google Cloud Networking beginning on Tuesday, 2023-11-14 03:51 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2023-12-04 10:00 US/Pacific with current details.\nDiagnosis: All requests that are sent to certain Global IPs for the new forwarding rules will time out.\nWorkaround: A customer may recreate an affected forwarding rule and a Global IP attached to it","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin 
(europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-04T14:55:13+00:00","modified":"2023-12-04T14:55:22+00:00","when":"2023-12-04T14:55:13+00:00","text":"Summary: Google Cloud Load Balancer may drop traffic for the new forwarding rules created after 14 Nov\nDescription: We are experiencing an issue with Google Cloud Networking beginning on Tuesday, 2023-11-14 03:51 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2023-12-04 07:30 US/Pacific with current details.\nDiagnosis: All requests that are sent to certain Global IPs for the new forwarding rules will time out.\nWorkaround: A customer may recreate an affected forwarding rule and a Global IP attached to it","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal 
(northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-04T14:22:15+00:00","modified":"2023-12-04T14:22:23+00:00","when":"2023-12-04T14:22:15+00:00","text":"Summary: Google Cloud Load Balancer may drop traffic for the new forwarding rules created after 14 Nov\nDescription: We are experiencing an issue with Google Cloud Networking beginning on Tuesday, 2023-11-14 03:51 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2023-12-04 07:00 US/Pacific with current details.\nDiagnosis: All requests that are sent to certain Global IPs for the new forwarding rules will time out.\nWorkaround: A customer may recreate an affected forwarding rule and a Global IP attached to it","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas 
(us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2023-12-04T16:10:46+00:00","modified":"2023-12-04T16:10:54+00:00","when":"2023-12-04T16:10:46+00:00","text":"The issue with Cloud Load Balancing has been resolved for all newly created forwarding rules as of Monday, 2023-12-04 07:58 US/Pacific.\nIf a Global IP for a load balancer falls into the 34.49.0.0/16 VIP range, the customer will need to re-create the corresponding forwarding rule with a Global IP attached to it.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"},{"title":"Cloud Load Balancing","id":"ix7u9beT8ivBdjApTif3"}],"uri":"incidents/yRiHPE2GDhujJx5kMtXU","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore 
(asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Global","id":"global"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"7MPqhQxAAMjAqC7v5ntx","number":"4629398123513391708","begin":"2023-12-04T13:27:01+00:00","created":"2023-12-04T13:29:44+00:00","end":"2023-12-05T04:14:28+00:00","modified":"2023-12-05T04:14:28+00:00","external_desc":"Inbound Forwarding zones may be unreachable","updates":[{"created":"2023-12-05T04:14:26+00:00","modified":"2023-12-05T04:14:29+00:00","when":"2023-12-05T04:14:26+00:00","text":"The issue with Google Cloud DNS, Google Cloud Networking has been resolved for all affected users as of Monday, 2023-12-04 19:37 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-05T02:07:55+00:00","modified":"2023-12-05T02:07:57+00:00","when":"2023-12-05T02:07:55+00:00","text":"Summary: Inbound Forwarding zones may be unreachable\nDescription: Our engineering team continues to work on the identified mitigation and is in the midst of completing all the steps and processes necessary for it to be rolled out completely.\nAt this time, we do not have an ETA for the completion of this roll out.\nWe understand that this issue has impacted your ability to access and use our services; we deeply appreciate your patience and cooperation while we work to mitigate the issue.\nWe will provide more information by Monday, 2023-12-04 21:30 US/Pacific.\nWe sincerely appreciate your patience and understanding as we work to resolve it as quickly as possible.\nDiagnosis: DNS resolution from on-prem clients to GCP seems to be interrupted when Cloud DNS inbound server policies are applied.\nWorkaround: * Customers can try moving \"Cloud DNS Inbound forwarding\" to the non-peered project. 
However, some customers can't edit their projects.\n* Customers may also be able to drain one of their VPN/Interconnect HA shards.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-05T01:45:06+00:00","modified":"2023-12-05T01:45:08+00:00","when":"2023-12-05T01:45:06+00:00","text":"Summary: Inbound Forwarding zones may be unreachable\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2023-12-04 19:15 US/Pacific.\nDiagnosis: DNS resolution from on-prem clients to GCP seems to be interrupted when Cloud DNS inbound server policies are applied.\nWorkaround: * Customer can try moving \"Cloud DNS Inbound forwarding\" to the non-peered project. However, some customers can't edit their projects.\n* Customers may also be able to drain one of their VPN/Interconnect HA shards.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-04T19:19:08+00:00","modified":"2023-12-04T19:19:14+00:00","when":"2023-12-04T19:19:08+00:00","text":"Summary: Inbound Forwarding zones may be unreachable\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2023-12-04 19:00 US/Pacific.\nDiagnosis: DNS resolution from on-prem clients to GCP seems to be interrupted when Cloud DNS inbound server policies are applied.\nWorkaround: * Customer can try moving \"Cloud DNS Inbound forwarding\" to the non-peered project. However, some customers can't edit their projects.\n* Customers may also be able to drain one of their VPN/Interconnect HA shards.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-04T18:56:01+00:00","modified":"2023-12-04T18:56:04+00:00","when":"2023-12-04T18:56:01+00:00","text":"Summary: Inbound Forwarding zones may be unreachable\nDescription: We are experiencing an issue with Google Cloud DNS.\nOur engineering teams are still actively investigating the issue. We do not have an ETA of mitigation at this point.\nWe will provide an update by Monday, 2023-12-04 14:00 US/Pacific with current details.\nDiagnosis: DNS resolution from on-prem clients to GCP seems to be interrupted when Cloud DNS inbound server policies are applied.\nWorkaround: * Customer can try moving \"Cloud DNS Inbound forwarding\" to the non-peered project. However, some customers can't edit their projects.\n* Customers may also be able to drain one of their VPN/Interconnect HA shards.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-04T18:43:56+00:00","modified":"2023-12-04T18:43:58+00:00","when":"2023-12-04T18:43:56+00:00","text":"Summary: Inbound Forwarding zones may be unreachable\nDescription: We are experiencing an issue with Google Cloud DNS.\nOur engineering teams are still actively investigating the issue. 
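[Editor's note: the diagnosis above is that resolution from on-prem clients through Cloud DNS inbound server policies was interrupted. A quick way to confirm the symptom from the on-prem side is to query the inbound forwarder IP directly and compare against a working resolver. A minimal sketch follows, assuming dnspython 2.x; the forwarder IP and record name are hypothetical placeholders, not values from the incident.]

```python
# Illustrative on-prem diagnostic for the inbound-forwarding symptom above:
# query the Cloud DNS inbound forwarder IP directly with a short timeout.
# The forwarder IP and record name are hypothetical placeholders.
import dns.exception
import dns.resolver  # pip install dnspython

INBOUND_FORWARDER_IP = "10.10.0.2"     # hypothetical inbound policy IP
TEST_NAME = "db.internal.example.com"  # hypothetical private-zone record

resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = [INBOUND_FORWARDER_IP]
resolver.lifetime = 5.0  # fail fast instead of hanging

try:
    answer = resolver.resolve(TEST_NAME, "A")
    print("inbound forwarding OK:", [r.address for r in answer])
except (dns.exception.Timeout, dns.resolver.NoNameservers) as exc:
    print("inbound forwarding appears interrupted:", exc)
```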
We do not have an ETA of mitigation at this point.\nWe will provide an update by Monday, 2023-12-04 14:00 US/Pacific with current details.\nDiagnosis: DNS resolution from on-prem clients to GCP seems to be interrupted when Cloud DNS inbound server policies are applied.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2023-12-04T16:59:42+00:00","modified":"2023-12-04T16:59:44+00:00","when":"2023-12-04T16:59:42+00:00","text":"Summary: Inbound Forwarding zones may be unreachable\nDescription: We are experiencing an issue with Google Cloud DNS.\nOur engineering teams are actively investigating the issue. We do not have an ETA of mitigation at this point.\nWe will provide an update by Monday, 2023-12-04 11:00 US/Pacific with current details.\nDiagnosis: DNS resolution from on-prem clients to GCP seems to be interrupted when Cloud DNS inbound server policies are applied.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]},{"created":"2023-12-04T15:05:20+00:00","modified":"2023-12-04T15:05:28+00:00","when":"2023-12-04T15:05:20+00:00","text":"Summary: Inbound Forwarding zones may be unreachable\nDescription: We are experiencing an issue with Google Cloud DNS.\nOur engineering teams are actively investigating the issue. We do not have an ETA of mitigation at this point.\nWe will provide an update by Monday, 2023-12-04 09:00 US/Pacific with current details.\nDiagnosis: DNS resolution from on-prem clients to GCP seems to be interrupted when Cloud DNS inbound server policies are applied.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-04T14:01:06+00:00","modified":"2023-12-04T14:01:13+00:00","when":"2023-12-04T14:01:06+00:00","text":"Summary: Inbound Forwarding zones may be unreachable\nDescription: We are experiencing an issue with Google Cloud DNS.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2023-12-04 07:15 US/Pacific with current details.\nDiagnosis: DNS resolution from on-prem clients to GCP seems to be interrupted when Cloud DNS inbound server policies are applied.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-04T13:55:22+00:00","modified":"2023-12-04T13:55:29+00:00","when":"2023-12-04T13:55:22+00:00","text":"Summary: Inbound Forwarding zones may be unreachable\nDescription: We are experiencing an issue with Google Cloud DNS.\nOur engineering team continues to investigate the issue and the regions currently impacted.\nWe will provide an update by Monday, 2023-12-04 07:00 US/Pacific with current details.\nDiagnosis: DNS resolution from on-prem clients to GCP seems to be interrupted when Cloud DNS inbound server policies are applied\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-04T13:29:41+00:00","modified":"2023-12-04T13:29:49+00:00","when":"2023-12-04T13:29:41+00:00","text":"Summary: Inbound Forwarding zones may be unreachable\nDescription: We are experiencing an issue with Google Cloud DNS.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2023-12-04 06:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Impacted users may experience failure in Cloud DNS Forwarding 
Zones.\nWorkaround: None at this time","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Global","id":"global"}]}],"most_recent_update":{"created":"2023-12-05T04:14:26+00:00","modified":"2023-12-05T04:14:29+00:00","when":"2023-12-05T04:14:26+00:00","text":"The issue with Google Cloud DNS, Google Cloud Networking has been resolved for all affected users as of Monday, 2023-12-04 19:37 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Google Cloud DNS","id":"TUZUsWSJUVJGW97Jq2sH"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"}],"uri":"incidents/7MPqhQxAAMjAqC7v5ntx","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"}]},{"id":"gj9Kee7osacEQTCaHZbW","number":"17478373101296507525","begin":"2023-12-04T11:14:05+00:00","created":"2023-12-04T11:33:32+00:00","end":"2023-12-04T18:02:25+00:00","modified":"2023-12-04T18:02:26+00:00","external_desc":"High Normalisation latency in Europe","updates":[{"created":"2023-12-04T18:02:19+00:00","modified":"2023-12-04T18:02:29+00:00","when":"2023-12-04T18:02:19+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Monday, 2023-12-04 09:18 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: europe","id":"europe"}]},{"created":"2023-12-04T16:20:06+00:00","modified":"2023-12-04T16:20:14+00:00","when":"2023-12-04T16:20:06+00:00","text":"Summary: High Normalisation latency in Europe\nDescription: Mitigation work is still underway by our engineering team.\nCurrent data indicates that our engineers' mitigation efforts were successful, however residual backlogs will require several hours for processing to finalize.\nWe will provide more information by Monday, 2023-12-04 11:40 US/Pacific.\nDiagnosis: Customers in Europe will observe a delay in when their log gets normalized.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: europe","id":"europe"}]},{"created":"2023-12-04T12:48:45+00:00","modified":"2023-12-04T12:48:50+00:00","when":"2023-12-04T12:48:45+00:00","text":"Summary: High Normalisation latency in Europe\nDescription: We are experiencing an issue with Chronicle Security beginning at Monday, 2023-12-04 00:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2023-12-04 09:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers in Europe will observe a delay in when their log gets normalized.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: europe","id":"europe"}]},{"created":"2023-12-04T11:33:24+00:00","modified":"2023-12-04T11:33:37+00:00","when":"2023-12-04T11:33:24+00:00","text":"Summary: High Normalisation latency in Europe\nDescription: We are experiencing an issue with Chronicle Security beginning at Monday, 2023-12-04 00:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2023-12-04 05:00 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers in 
Europe will observe a delay in when their log gets normalized.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2023-12-04T18:02:19+00:00","modified":"2023-12-04T18:02:29+00:00","when":"2023-12-04T18:02:19+00:00","text":"The issue with Chronicle Security has been resolved for all affected users as of Monday, 2023-12-04 09:18 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: europe","id":"europe"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/gj9Kee7osacEQTCaHZbW","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: europe","id":"europe"}]},{"id":"YbV2DEHWkBk3WVRekrUc","number":"1380528713060438965","begin":"2023-12-04T10:14:02+00:00","created":"2023-12-04T10:14:55+00:00","end":"2023-12-04T10:17:51+00:00","modified":"2023-12-04T17:59:11+00:00","external_desc":"Cloud Logging customers experiencing issues while querying logs","updates":[{"created":"2023-12-04T17:59:11+00:00","modified":"2023-12-04T17:59:11+00:00","when":"2023-12-04T17:59:11+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 3 Dec 2023 16:00\n**Incident End:** 3 Dec 2023 20:22\n**Duration:** 4 hours, 22 minutes\n**Recurrence Start:** 3 Dec 2023 22:25\n**Recurrence End:** 4 Dec 2023 01:18\n**Recurrence Duration:** 2 hours, 53 minutes\n**Affected Services and Features:** Cloud Logging\n**Regions/Zones:** Global\n**Description:**\nImpacted users were unable to load the most recent logs in Google Cloud Console UI in Log Explorer or while querying logs using API for the log entries that were ingested during the outage (historical queries still worked). 
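[Editor's note: the report above says recent entries failed to load while historical queries kept working. The sketch below illustrates the kind of "recent logs" query that was affected, using the Cloud Logging Python client; the project ID is a hypothetical placeholder.]

```python
# Illustrative sketch of the kind of "recent logs" query that failed during
# the incident above: list entries ingested in the last 10 minutes, newest
# first. The project ID is a hypothetical placeholder.
from datetime import datetime, timedelta, timezone

from google.cloud import logging

client = logging.Client(project="my-project")

cutoff = datetime.now(timezone.utc) - timedelta(minutes=10)
recent_filter = f'timestamp >= "{cutoff.isoformat()}"'

# Historical queries kept working during the incident; only entries
# ingested inside the outage windows failed to load.
for entry in client.list_entries(filter_=recent_filter, order_by=logging.DESCENDING):
    print(entry.timestamp, entry.log_name)
```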
From preliminary investigation, the root cause has been identified as a log processing component being in a non-functional state, which was triggered by a cluster migration stuck in a bad state.\n**Customer Impact:**\nAround 12 percent of user queries were failing globally for log entries ingested in the interval from 16:00 to 20:22 and from 22:25 to 01:18 the next day.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-04T10:17:49+00:00","modified":"2023-12-04T10:17:56+00:00","when":"2023-12-04T10:17:49+00:00","text":"We experienced an issue with Cloud Logging beginning at Sunday, 2023-12-03 22:25 US/Pacific.\nThe issue has been resolved for all affected users as of Monday, 2023-12-04 01:18 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta 
(asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-12-04T10:14:46+00:00","modified":"2023-12-04T10:15:00+00:00","when":"2023-12-04T10:14:46+00:00","text":"Summary: Cloud Logging customers experiencing issues while querying logs\nDescription: We are investigating a potential issue with Cloud Logging.\nWe will provide more information by Monday, 2023-12-04 02:45 US/Pacific.\nDiagnosis: Impacted users may not be able to load most recent logs in Google Cloud Console UI in Log Explorer or while querying logs using API.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich 
(europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2023-12-04T17:59:11+00:00","modified":"2023-12-04T17:59:11+00:00","when":"2023-12-04T17:59:11+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 3 Dec 2023 16:00\n**Incident End:** 3 Dec 2023 20:22\n**Duration:** 4 hours, 22 minutes\n**Recurrence Start:** 3 Dec 2023 22:25\n**Recurrence End:** 4 Dec 2023 01:18\n**Recurrence Duration:** 2 hours, 53 minutes\n**Affected Services and Features:** Cloud Logging\n**Regions/Zones:** Global\n**Description:**\nImpacted users were unable to load the most recent logs in Google Cloud Console UI in Log Explorer or while querying logs using API for the log entries that were ingested during the outage (historical queries still worked). 
From preliminary investigation, the root cause has been identified as a log processing component being in a non-functional state, which was triggered by a cluster migration stuck in a bad state.\n**Customer Impact:**\nAround 12 percent of user queries were failing globally for log entries ingested in the interval from 16:00 to 20:22 and from 22:25 to 01:18 the next day.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"},{"title":"Cloud Logging","id":"PuCJ6W2ovoDhLcyvZ1xa"}],"uri":"incidents/YbV2DEHWkBk3WVRekrUc","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney 
(australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"cJsK3GwcQs7jeEYivFkP","number":"15036911643151044626","begin":"2023-12-04T07:21:28+00:00","created":"2023-12-04T08:23:18+00:00","end":"2023-12-04T11:03:23+00:00","modified":"2023-12-04T11:03:24+00:00","external_desc":"We've received a report of an issue with Document AI in us multiregion","updates":[{"created":"2023-12-04T11:03:15+00:00","modified":"2023-12-04T11:03:29+00:00","when":"2023-12-04T11:03:15+00:00","text":"The issue with Document AI has been resolved for all affected projects as of Monday, 2023-12-04 03:03 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2023-12-04T10:19:11+00:00","modified":"2023-12-04T10:19:17+00:00","when":"2023-12-04T10:19:11+00:00","text":"Summary: We've received a report of an issue with Document AI in us multiregion\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Monday, 2023-12-04 03:30 US/Pacific.\nDiagnosis: Customers using Document AI to process documents with 'DocumentProcessorService-ProcessDocument' API using v1, v1beta3 or uiv1beta3 versions may observe the following errors\nError1: HTTP Mapping: 499 Client Closed Request (CANCELLED);\nError2: HTTP Mapping: 504 Gateway Timeout (DEADLINE_EXCEEDED)\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2023-12-04T09:34:25+00:00","modified":"2023-12-04T09:34:39+00:00","when":"2023-12-04T09:34:25+00:00","text":"Summary: We've received a report of an issue with Document AI in us multiregion\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for 
mitigation at this point.\nWe will provide more information by Monday, 2023-12-04 02:30 US/Pacific.\nDiagnosis: Customers using Document AI to process documents with 'DocumentProcessorService-ProcessDocument' API using v1, v1beta3 or uiv1beta3 versions may observe the following errors\nError1: HTTP Mapping: 499 Client Closed Request (CANCELLED);\nError2: HTTP Mapping: 504 Gateway Timeout (DEADLINE_EXCEEDED)\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2023-12-04T09:06:49+00:00","modified":"2023-12-04T09:24:11+00:00","when":"2023-12-04T09:06:49+00:00","text":"Summary: We've received a report of an issue with Document AI in us multiregion.\nDescription: We are experiencing an issue with Document AI.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2023-12-04 02:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers using Document AI to process documents with 'DocumentProcessorService-ProcessDocument' API using v1, v1beta3 or uiv1beta3 versions may observe the following errors\nError1: HTTP Mapping: 499 Client Closed Request (CANCELLED);\nError2: HTTP Mapping: 504 Gateway Timeout (DEADLINE_EXCEEDED)\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"created":"2023-12-04T08:23:14+00:00","modified":"2023-12-04T09:23:47+00:00","when":"2023-12-04T08:23:14+00:00","text":"Summary: We've received a report of an issue with Document AI in us multiregion\nDescription: We are experiencing an issue with Document AI.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Monday, 2023-12-04 01:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Customers using Document AI to process documents with 'DocumentProcessorService-ProcessDocument' API using v1, v1beta3 or uiv1beta3 versions may observe the following errors\nError1: HTTP Mapping: 499 Client Closed Request (CANCELLED);\nError2: HTTP Mapping: 504 Gateway Timeout (DEADLINE_EXCEEDED)\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2023-12-04T11:03:15+00:00","modified":"2023-12-04T11:03:29+00:00","when":"2023-12-04T11:03:15+00:00","text":"The issue with Document AI has been resolved for all affected projects as of Monday, 2023-12-04 03:03 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: us","id":"us"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Document AI","id":"GWuqLi6DKb1DkzyRtRuD"},{"title":"Cloud Machine Learning","id":"z9PfKanGZYvYNUbnKzRJ"}],"uri":"incidents/cJsK3GwcQs7jeEYivFkP","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: us","id":"us"}]},{"id":"RjNTHWDbP1x8zQk8U7nL","number":"15395828564550950904","begin":"2023-12-04T02:26:58+00:00","created":"2023-12-04T03:36:32+00:00","end":"2023-12-04T04:28:10+00:00","modified":"2023-12-04T18:01:49+00:00","external_desc":"Cloud Logging customers experiencing issues while querying 
logs","updates":[{"created":"2023-12-04T18:01:49+00:00","modified":"2023-12-04T18:01:49+00:00","when":"2023-12-04T18:01:49+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 3 Dec 2023 16:00\n**Incident End:** 3 Dec 2023 20:22\n**Duration:** 4 hours, 22 minutes\n**Reoccurrence Start:** 3 Dec 2023 22:25\n**Recurrence End:** 4 Dec 2023 01:18\n**Recurrence Duration:** 2 hours, 53 minutes\n**Affected Services and Features:** Cloud Logging\n**Regions/Zones:** Global\n**Description:**\nImpacted users were unable to load the most recent logs in Google Cloud Console UI in Log Explorer or while querying logs using API for the log entries that were ingested during the outage (historical queries still worked). From preliminary investigation, the root cause has been identified as a log processing component being in a non-functional state, which was triggered by a cluster migration stuck in a bad state.\n**Customer Impact:**\nAround 12 percent of user queries were failing globally for log entries ingested in the interval from 16:00 to 20:22 and from 22:25 to 01:18 the next day.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-04T04:28:07+00:00","modified":"2023-12-04T04:28:11+00:00","when":"2023-12-04T04:28:07+00:00","text":"The issue with Cloud Logging has been resolved for all affected users as of Sunday, 2023-12-03 20:22 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-04T04:23:28+00:00","modified":"2023-12-04T04:23:30+00:00","when":"2023-12-04T04:23:28+00:00","text":"Summary: Cloud Logging customers experiencing issues while querying logs\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Sunday, 2023-12-03 21:00 US/Pacific.\nDiagnosis: Impacted users may not be able to load most recent logs in Google Cloud Console UI in Log Explorer or while querying logs using API.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-04T03:56:29+00:00","modified":"2023-12-04T03:56:34+00:00","when":"2023-12-04T03:56:29+00:00","text":"Summary: Cloud Logging customers experiencing issues while querying logs\nDescription: We are experiencing an issue with Cloud Logging beginning at Sunday, 2023-12-03 16:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Sunday, 2023-12-03 20:30 US/Pacific with current details.\nDiagnosis: Impacted users may not be able to load most recent logs in Google Cloud Console UI in Log Explorer or while querying logs using API.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Global","id":"global"}]},{"created":"2023-12-04T03:36:30+00:00","modified":"2023-12-04T03:36:33+00:00","when":"2023-12-04T03:36:30+00:00","text":"Summary: Cloud Logging customers 
with log buckets in us-east4 and global regions experiencing issues while querying logs\nDescription: We are experiencing an issue with Cloud Logging beginning at Sunday, 2023-12-03 16:00 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Sunday, 2023-12-03 20:30 US/Pacific with current details.\nDiagnosis: Impacted users may not be able to load the most recent logs in the Google Cloud Console UI in Log Explorer or while querying logs using the API. All customers who have log buckets in us-east4 and a few customers with log buckets in the global location are experiencing this issue.\nWorkaround: None at this time.","status":"SERVICE_DISRUPTION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]}],"most_recent_update":{"created":"2023-12-04T18:01:49+00:00","modified":"2023-12-04T18:01:49+00:00","when":"2023-12-04T18:01:49+00:00","text":"# Mini Incident Report\nWe apologize for the inconvenience this service disruption/outage may have caused. We would like to provide some information about this incident below. Please note, this information is based on our best knowledge at the time of posting and is subject to change as our investigation continues. If you have experienced impact outside of what is listed below, please reach out to Google Cloud Support using https://cloud.google.com/support.\n(All Times US/Pacific)\n**Incident Start:** 3 Dec 2023 16:00\n**Incident End:** 3 Dec 2023 20:22\n**Duration:** 4 hours, 22 minutes\n**Recurrence Start:** 3 Dec 2023 22:25\n**Recurrence End:** 4 Dec 2023 01:18\n**Recurrence Duration:** 2 hours, 53 minutes\n**Affected Services and Features:** Cloud Logging\n**Regions/Zones:** Global\n**Description:**\nImpacted users were unable to load the most recent logs in the Google Cloud Console UI in Log Explorer or while querying logs using the API for the log entries that were ingested during the outage (historical queries still worked). 
From preliminary investigation, the root cause has been identified as a log processing component being in a non-functional state, which was triggered by a cluster migration stuck in a bad state.\n**Customer Impact:**\nAround 12 percent of user queries were failing globally for log entries ingested in the interval from 16:00 to 20:22 and from 22:25 to 01:18 the next day.","status":"AVAILABLE","affected_locations":[{"title":"Global","id":"global"}]},"status_impact":"SERVICE_DISRUPTION","severity":"medium","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Operations","id":"DixAowEQm45KgqXKP5tR"},{"title":"Cloud Logging","id":"PuCJ6W2ovoDhLcyvZ1xa"}],"uri":"incidents/RjNTHWDbP1x8zQk8U7nL","currently_affected_locations":[],"previously_affected_locations":[{"title":"Global","id":"global"},{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"id":"xxgdp8Ztoqx7REhSZp3q","number":"13540893492172184303","begin":"2023-12-01T11:13:58+00:00","created":"2023-12-01T11:55:01+00:00","end":"2023-12-01T18:45:29+00:00","modified":"2023-12-01T18:45:29+00:00","external_desc":"JSON_SET function has stopped working","updates":[{"created":"2023-12-01T18:45:29+00:00","modified":"2023-12-01T18:45:31+00:00","when":"2023-12-01T18:45:29+00:00","text":"The issue with Google BigQuery has been resolved for all affected users as of Friday, 2023-12-01 10:32 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Multi-region: us","id":"us"},{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-12-01T17:13:10+00:00","modified":"2023-12-01T17:13:13+00:00","when":"2023-12-01T17:13:10+00:00","text":"Summary: JSON_SET function has stopped working\nDescription: We believe the issue with the JSON_SET function is now resolved in the US and EU multi-regions.\nThe issue is partially mitigated in us-east4.\nOur engineers are continuing to work on fully mitigating the issue in the us-east4 region.\nFull resolution is expected to be complete by Friday, 2023-12-01 11:00 US/Pacific.\nWe will provide an update by Friday, 2023-12-01 11:15 US/Pacific with current details.\nDiagnosis: Customers may receive an error message when running queries with JSON_SET: 'Invalid number of parameters passed to function: expected at least 4, got 3'\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Multi-region: us","id":"us"},{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-12-01T14:53:39+00:00","modified":"2023-12-01T14:54:02+00:00","when":"2023-12-01T14:53:39+00:00","text":"Summary: JSON_SET function has stopped working\nDescription: We believe the issue with the JSON_SET function is now resolved in the US and EU multi-regions.\nWe are still working on mitigating the issue in the us-east4 region. 
We do not have an ETA for full resolution at this point.\nWe will provide an update by Friday, 2023-12-01 09:30 US/Pacific with current details.\nDiagnosis: Customers may receive an error message when running queries with JSON_SET: 'Invalid number of parameters passed to function: expected at least 4, got 3'\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Multi-region: us","id":"us"},{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-12-01T12:48:14+00:00","modified":"2023-12-01T12:48:22+00:00","when":"2023-12-01T12:48:14+00:00","text":"Summary: JSON_SET function has stopped working\nDescription: Mitigation work is still underway by our engineering team.\nWe will provide more information by Friday, 2023-12-01 08:00 US/Pacific.\nDiagnosis: Customers may receive an error message when running queries with JSON_SET: 'Invalid number of parameters passed to function: expected at least 4, got 3'\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Multi-region: us","id":"us"},{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-12-01T11:54:53+00:00","modified":"2023-12-01T11:55:07+00:00","when":"2023-12-01T11:54:53+00:00","text":"Summary: JSON_SET function has stopped working\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Friday, 2023-12-01 05:00 US/Pacific.\nDiagnosis: Customers may receive an error message when running queries with JSON_SET: 'Invalid number of parameters passed to function: expected at least 4, got 3'\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Multi-region: us","id":"us"},{"title":"Northern Virginia (us-east4)","id":"us-east4"}]}],"most_recent_update":{"created":"2023-12-01T18:45:29+00:00","modified":"2023-12-01T18:45:31+00:00","when":"2023-12-01T18:45:29+00:00","text":"The issue with Google BigQuery has been resolved for all affected users as of Friday, 2023-12-01 10:32 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Multi-region: us","id":"us"},{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"9CcrhHUcFevXPSVaSxkf","service_name":"Google BigQuery","affected_products":[{"title":"Google BigQuery","id":"9CcrhHUcFevXPSVaSxkf"}],"uri":"incidents/xxgdp8Ztoqx7REhSZp3q","currently_affected_locations":[],"previously_affected_locations":[{"title":"Multi-region: eu","id":"eu"},{"title":"Multi-region: us","id":"us"},{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"id":"2Eb3RruxF6KBCY53CMXF","number":"16349217310401048061","begin":"2023-11-30T18:47:49+00:00","created":"2023-11-30T19:15:17+00:00","end":"2023-11-30T20:22:58+00:00","modified":"2023-11-30T20:22:58+00:00","external_desc":"Google Cloud Interconnect in Santiago, Chile was experiencing availability issues.","updates":[{"created":"2023-11-30T20:22:57+00:00","modified":"2023-11-30T20:22:59+00:00","when":"2023-11-30T20:22:57+00:00","text":"**The issue with Google Cloud Interconnect has been resolved as of Thursday, 2023-11-30 12:20 US/Pacific.**\nWe understand that this issue had impacted your 
ability to access and use our services, and we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nWe take pride in providing our customers with a reliable and highly available service. Please rest assured that we are committed to doing everything we can to prevent issues like this from happening in the future.\n**Thank you for choosing us.**","status":"AVAILABLE","affected_locations":[{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"}]},{"created":"2023-11-30T19:51:11+00:00","modified":"2023-11-30T19:51:13+00:00","when":"2023-11-30T19:51:11+00:00","text":"Summary: Google Cloud Interconnect in Santiago, Chile is experiencing availability issues.\nDescription: Our engineering team has identified a mitigation and has commenced work on it.\nAt this time, we do not have a specific ETA. Please rest assured that our engineering teams are working diligently to mitigate all known impacts.\nWe will provide more information by Thursday, 2023-11-30 13:00 US/Pacific.\nWe sincerely appreciate your patience and understanding as we work to resolve the issue as quickly as possible.\nDiagnosis: Customers using Cloud Interconnect in Santiago, Chile may experience availability issues, being unable to access GCP services via scl-zone1-7594 and scl-zone2-7594.\nOther GCP services in southamerica-west1 are not affected. The region should be accessible from interconnects outside of Santiago, Chile.\nWorkaround: 1. Customers who have opted for a 99.9% uptime SLA do not have a workaround.\n2. Customers who have opted for a 99.99% uptime SLA could fail over to other regions.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"}]},{"created":"2023-11-30T19:15:15+00:00","modified":"2023-11-30T19:15:18+00:00","when":"2023-11-30T19:15:15+00:00","text":"Summary: Google Cloud Interconnect in Santiago, Chile is experiencing availability issues.\nDescription: We are experiencing an issue with physical connections in Google Cloud Interconnect in Santiago, Chile.\nMultiple [Edge Availability Domains](https://cloud.google.com/network-connectivity/docs/interconnect/concepts/choosing-colocation-facilities#south-america) scl-zone1-7594 and scl-zone2-7594 are affected.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2023-11-30 12:00 US/Pacific with current details.\nDiagnosis: Customers using Cloud Interconnect in Santiago, Chile may experience availability issues, being unable to access GCP services via scl-zone1-7594 and scl-zone2-7594.\nOther GCP services in southamerica-west1 are not affected. 
The region should be accessible from interconnects outside of Santiago, Chile.\nWorkaround: 1. Customers who have opted for a 99.9% uptime SLA do not have a workaround.\n2. Customers who have opted for a 99.99% uptime SLA could fail over to other regions.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"}]}],"most_recent_update":{"created":"2023-11-30T20:22:57+00:00","modified":"2023-11-30T20:22:59+00:00","when":"2023-11-30T20:22:57+00:00","text":"**The issue with Google Cloud Interconnect has been resolved as of Thursday, 2023-11-30 12:20 US/Pacific.**\nWe understand that this issue had impacted your ability to access and use our services, and we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nWe take pride in providing our customers with a reliable and highly available service. Please rest assured that we are committed to doing everything we can to prevent issues like this from happening in the future.\n**Thank you for choosing us.**","status":"AVAILABLE","affected_locations":[{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Hybrid Connectivity","id":"5x6CGnZvSHQZ26KtxpK1"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"}],"uri":"incidents/2Eb3RruxF6KBCY53CMXF","currently_affected_locations":[],"previously_affected_locations":[{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"}]},{"id":"uFEvPBbjq8JeDcJZe71h","number":"13466028752710606762","begin":"2023-11-30T17:41:17+00:00","created":"2023-11-30T18:01:19+00:00","end":"2023-11-30T20:54:42+00:00","modified":"2023-11-30T20:54:42+00:00","external_desc":"Chronicle IoC Matches page failing to load data","updates":[{"created":"2023-11-30T20:54:41+00:00","modified":"2023-11-30T20:54:43+00:00","when":"2023-11-30T20:54:41+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Thursday, 2023-11-30 12:54 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},{"created":"2023-11-30T20:52:31+00:00","modified":"2023-11-30T20:52:33+00:00","when":"2023-11-30T20:52:31+00:00","text":"Summary: Chronicle IoC Matches page failing to load data\nDescription: Mitigation work is still underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2023-11-30 14:30 US/Pacific.\nDiagnosis: The Chronicle IoC Matches page may fail to load data with \"Error Fetching IoCs.\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt 
(europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},{"created":"2023-11-30T18:53:33+00:00","modified":"2023-11-30T18:53:36+00:00","when":"2023-11-30T18:53:33+00:00","text":"Summary: Chronicle IoC Matches page failing to load data\nDescription: Mitigation work is still underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2023-11-30 13:00 US/Pacific.\nDiagnosis: The Chronicle IoC Matches page may fail to load data with \"Error Fetching IoCs.\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},{"created":"2023-11-30T18:01:19+00:00","modified":"2023-11-30T18:01:20+00:00","when":"2023-11-30T18:01:19+00:00","text":"Summary: Chronicle IoC Matches page failing to load data\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Thursday, 2023-11-30 11:00 US/Pacific.\nDiagnosis: The Chronicle IoC Matches page may fail to load data with \"Error Fetching IoCs.\"\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Multi-region: us","id":"us"}]}],"most_recent_update":{"created":"2023-11-30T20:54:41+00:00","modified":"2023-11-30T20:54:43+00:00","when":"2023-11-30T20:54:41+00:00","text":"The issue with Chronicle Security has been resolved for all affected projects as of Thursday, 2023-11-30 12:54 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"FHwvkSZ6RzzDYAvDZXMM","service_name":"Chronicle Security","affected_products":[{"title":"Chronicle Security","id":"FHwvkSZ6RzzDYAvDZXMM"}],"uri":"incidents/uFEvPBbjq8JeDcJZe71h","currently_affected_locations":[],"previously_affected_locations":[{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Singapore 
(asia-southeast1)","id":"asia-southeast1"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Multi-region: europe","id":"europe"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Multi-region: us","id":"us"}]},{"id":"4opGnk7MBUeKuRaZVR5v","number":"15460696263549240538","begin":"2023-11-30T13:42:26+00:00","created":"2023-11-30T14:05:57+00:00","end":"2023-11-30T20:44:26+00:00","modified":"2023-11-30T20:44:26+00:00","external_desc":"Cloud Interconnect users may have been impacted in us-east4","updates":[{"created":"2023-11-30T20:44:24+00:00","modified":"2023-11-30T20:44:27+00:00","when":"2023-11-30T20:44:24+00:00","text":"**The issue with Cloud Interconnect has been resolved as of Thursday, 2023-11-30 12:41 US/Pacific.**\nWe understand that this issue had impacted your ability to access and use our services, we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nWe take pride in providing our customers with a reliable and highly available service, Please rest assured of the fact that we are committed to doing everything we can to prevent issues like this from happening in the future.\n**Thank you for choosing us.**","status":"AVAILABLE","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-11-30T19:54:42+00:00","modified":"2023-11-30T19:54:45+00:00","when":"2023-11-30T19:54:42+00:00","text":"Summary: Cloud Interconnect users may be impacted in us-east4\nDescription: Our engineering team has identified a mitigation for the issue and are currently working on implementing it.\nAt this time, we do not have a specific ETA for the mitigation to be completed.\nWe will provide more information by Thursday, 2023-11-30 13:30 US/Pacific.\nWe sincerely appreciate your patience and understanding as we work to resolve it as quickly as possible.\nDiagnosis: Customers may experience problems creating, updating or deleting Cloud Interconnect attachments in us-east4 HA1. Existing attachments are believed to be unaffected.\nBetween approximately 05:09 and 07:03 PST, customers with Cloud Interconnect attachments in us-east4 trying to communicate with endpoints in other GCP regions may have experienced traffic imbalances in the GCP-onprem direction, as traffic may have concentrated on HA0 attachments.\nWorkaround: There is no workaround for the management plane issue at this time.\n**In regard to existing attachments, customers not experiencing connectivity problems are advised to take no action.**\nCustomers experiencing problems reaching GCP destinations outside us-east4 via HA1 attachments in us-east4 should consider manually failing over to HA0 attachments.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-11-30T16:50:00+00:00","modified":"2023-11-30T16:50:06+00:00","when":"2023-11-30T16:50:00+00:00","text":"Summary: Cloud Interconnect users may be impacted in us-east4\nDescription: We are experiencing an issue with Hybrid Connectivity beginning on Thursday, 2023-11-30 03:25 US/Pacific.\nOur engineering team continues to investigate the issue. 
Please rest assured that our engineering teams are working diligently to mitigate all known impacts.\n**We will provide an update by Thursday, 2023-11-30 12:00 US/Pacific with current details.**\nWe sincerely appreciate your patience and understanding as we work to resolve this issue as quickly as possible.\nDiagnosis: Customers may experience problems creating, updating or deleting Cloud Interconnect attachments in us-east4 HA1. Existing attachments are believed to be unaffected.\nBetween approximately 05:09 and 07:03 PST, customers with Cloud Interconnect attachments in us-east4 trying to communicate with endpoints in other GCP regions may have experienced traffic imbalances in the GCP-onprem direction, as traffic may have concentrated on HA0 attachments.\nWorkaround: There is no workaround for the management plane issue at this time.\n**In regard to existing attachments, customers not experiencing connectivity problems are advised to take no action.**\nCustomers experiencing problems reaching GCP destinations outside us-east4 via HA1 attachments in us-east4 should consider manually failing over to HA0 attachments.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-11-30T15:53:48+00:00","modified":"2023-11-30T15:53:57+00:00","when":"2023-11-30T15:53:48+00:00","text":"Summary: Cloud Interconnect users may be impacted in us-east4\nDescription: We are experiencing an issue with Hybrid Connectivity beginning on Thursday, 2023-11-30 03:25 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2023-11-30 09:00 US/Pacific with current details.\nDiagnosis: Customers may experience problems creating, updating or deleting Cloud Interconnect attachments in us-east4 HA1. Existing attachments are believed to be unaffected.\nBetween approximately 05:09 and 07:03 PST, customers with Cloud Interconnect attachments in us-east4 trying to communicate with endpoints in other GCP regions may have experienced traffic imbalances in the GCP-onprem direction, as traffic may have concentrated on HA0 attachments.\nWorkaround: There is no workaround for the management plane issue at this time.\nIn regard to existing attachments, customers not experiencing connectivity problems are advised to take no action. Customers experiencing problems reaching GCP destinations outside us-east4 via HA1 attachments in us-east4 should consider manually failing over to HA0 attachments.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-11-30T15:26:00+00:00","modified":"2023-11-30T15:26:08+00:00","when":"2023-11-30T15:26:00+00:00","text":"Summary: Cloud Interconnect users may be impacted in us-east4\nDescription: We are experiencing an issue with Hybrid Connectivity beginning on Thursday, 2023-11-30 03:25 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2023-11-30 08:00 US/Pacific with current details.\nDiagnosis: Customers may experience problems creating, updating or deleting Cloud Interconnect attachments in us-east4 HA1. 
Existing attachments are believed to be unaffected.\nBetween approximately 05:09 and 07:03 PST, customers with Cloud Interconnect attachments in us-east4 trying to communicate with endpoints in other GCP regions may have experienced traffic imbalances in the GCP-onprem direction, as traffic may have concentrated on HA0 attachments.\nWorkaround: There is no workaround for the management plane issue at this time.\nIn regard to existing attachments, customers not experiencing connectivity problems are advised to take no action. Customers experiencing problems reaching GCP destinations outside us-east4 via HA1 attachments in us-east4 should consider manually failing over to HA0 attachments.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-11-30T14:59:10+00:00","modified":"2023-11-30T14:59:19+00:00","when":"2023-11-30T14:59:10+00:00","text":"Summary: Cloud Interconnect users may be impacted in us-east4\nDescription: We are experiencing an issue with Hybrid Connectivity beginning on Thursday, 2023-11-30 03:25 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2023-11-30 07:30 US/Pacific with current details.\nDiagnosis: Customers with Cloud Interconnect attachments in us-east4 trying to communicate with endpoints in other GCP regions may experience traffic imbalances in the GCP-onprem direction, as traffic may concentrate on HA0 attachments.\nWorkaround: Customers not experiencing connectivity problems are advised to take no action.\nCustomers experiencing problems reaching GCP destinations outside us-east4 via HA1 attachments in us-east4 should consider manually failing over to HA0 attachments.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-11-30T14:54:39+00:00","modified":"2023-11-30T14:54:48+00:00","when":"2023-11-30T14:54:39+00:00","text":"Summary: Cloud Interconnect users may be impacted in us-east4\nDescription: We are experiencing an issue with Hybrid Connectivity beginning on Thursday, 2023-11-30 03:25 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2023-11-30 07:30 US/Pacific with current details.\nDiagnosis: Customers may observe significant data plane loss on Cloud Interconnect and/or Cloud VPN.\nWorkaround: The problem only affects HA1, so customers should be able to fail over to HA0.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-11-30T14:21:04+00:00","modified":"2023-11-30T14:21:13+00:00","when":"2023-11-30T14:21:04+00:00","text":"Summary: Cloud Interconnect users may be impacted in us-east4\nDescription: We are experiencing an issue with Hybrid Connectivity beginning on Thursday, 2023-11-30 03:25 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2023-11-30 07:00 US/Pacific with current details.\nDiagnosis: Customers may observe significant data plane loss on Cloud Interconnect and/or Cloud VPN.\nWorkaround: The problem only affects HA1, so customers should be able to fail over to HA0.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"created":"2023-11-30T14:05:47+00:00","modified":"2023-11-30T14:06:02+00:00","when":"2023-11-30T14:05:47+00:00","text":"Summary: Cloud Interconnect users may be 
impacted in us-east4\nDescription: We are experiencing an issue with Hybrid Connectivity beginning on Thursday, 2023-11-30 03:25 US/Pacific.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Thursday, 2023-11-30 06:30 US/Pacific with current details.\nDiagnosis: Customers may observe significant data plane loss on Cloud Interconnect and/or Cloud VPN.\nWorkaround: The problem only affects HA1, so customers should be able to fail over to HA0.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]}],"most_recent_update":{"created":"2023-11-30T20:44:24+00:00","modified":"2023-11-30T20:44:27+00:00","when":"2023-11-30T20:44:24+00:00","text":"**The issue with Cloud Interconnect has been resolved as of Thursday, 2023-11-30 12:41 US/Pacific.**\nWe understand that this issue had impacted your ability to access and use our services, and we deeply appreciate your patience and cooperation while we worked to mitigate the issue.\nWe take pride in providing our customers with a reliable and highly available service. Please rest assured that we are committed to doing everything we can to prevent issues like this from happening in the future.\n**Thank you for choosing us.**","status":"AVAILABLE","affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"zall","service_name":"Multiple Products","affected_products":[{"title":"Hybrid Connectivity","id":"5x6CGnZvSHQZ26KtxpK1"},{"title":"Google Cloud Networking","id":"VNJxzcH58QmTt5H6pnT6"}],"uri":"incidents/4opGnk7MBUeKuRaZVR5v","currently_affected_locations":[],"previously_affected_locations":[{"title":"Northern Virginia (us-east4)","id":"us-east4"}]},{"id":"ioWXqWq3141HMTVnpLyV","number":"594944414108139090","begin":"2023-11-29T16:21:22+00:00","created":"2023-11-29T16:40:39+00:00","end":"2023-11-29T17:36:44+00:00","modified":"2023-11-29T17:36:45+00:00","external_desc":"Global: Cloud Monitoring Metrics unavailable or underreported for Cloud Pub/Sub","updates":[{"created":"2023-11-29T17:36:44+00:00","modified":"2023-11-29T17:36:46+00:00","when":"2023-11-29T17:36:44+00:00","text":"The issue with Cloud Monitoring and Google Cloud Pub/Sub has been resolved for all affected projects as of Wednesday, 2023-11-29 09:34 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt 
(europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-11-29T17:26:50+00:00","modified":"2023-11-29T17:26:52+00:00","when":"2023-11-29T17:26:50+00:00","text":"Summary: Global: Cloud Monitoring Metrics unavailable or underreported for Cloud Pub/Sub\nDescription: We are experiencing an issue with Cloud Monitoring and Cloud Pub/Sub. The issue started on Wednesday, 2023-11-29 04:52 US/Pacific with more significant impact beginning around 08:00 US/Pacific. There is no known impact on Cloud Pub/Sub administrative, publish, or subscribe operations at this time.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2023-11-29 10:30 US/Pacific with current details.\nDiagnosis: Customers impacted by this issue may see Cloud Monitoring metrics for Cloud Pub/Sub that show no or underreported values. 
Any alerting based on these metrics may fire erroneously.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-11-29T16:40:38+00:00","modified":"2023-11-29T16:40:40+00:00","when":"2023-11-29T16:40:38+00:00","text":"Summary: Global: Cloud Monitoring Metrics unavailable or underreported for Cloud Pub/Sub\nDescription: We are experiencing an issue with Google Cloud Pub/Sub metrics in Cloud Monitoring.\nOur engineering team continues to investigate the issue.\nWe will provide an update by Wednesday, 2023-11-29 09:30 US/Pacific with current details.\nWe apologize to all who are affected by the disruption.\nDiagnosis: Cloud Monitoring metrics for Cloud Pub/Sub may be unavailable or underreported.\nWorkaround: None at this time.","status":"SERVICE_INFORMATION","affected_locations":[]}],"most_recent_update":{"created":"2023-11-29T17:36:44+00:00","modified":"2023-11-29T17:36:46+00:00","when":"2023-11-29T17:36:44+00:00","text":"The issue with Cloud Monitoring and Google Cloud Pub/Sub has been resolved for all affected projects as of Wednesday, 2023-11-29 09:34 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo 
(asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"dFjdLh2v6zuES6t9ADCB","service_name":"Google Cloud Pub/Sub","affected_products":[{"title":"Google Cloud Pub/Sub","id":"dFjdLh2v6zuES6t9ADCB"}],"uri":"incidents/ioWXqWq3141HMTVnpLyV","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan 
(europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"id":"7rJNpj5UKVxHFhdAHUzy","number":"1043845825104180878","begin":"2023-11-27T13:01:07+00:00","created":"2023-11-27T13:54:40+00:00","end":"2023-11-29T21:56:34+00:00","modified":"2023-11-29T21:56:34+00:00","external_desc":"Some multiple-table DMLs intermittently are not working on MySQL versions 8.0.30+ with query insights enabled.","updates":[{"created":"2023-11-29T21:56:33+00:00","modified":"2023-11-29T21:56:35+00:00","when":"2023-11-29T21:56:33+00:00","text":"The issue with Google Cloud SQL has been resolved for all affected users as of Wednesday, 2023-11-29 13:55 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina 
(us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-11-28T17:46:01+00:00","modified":"2023-11-28T17:46:03+00:00","when":"2023-11-28T17:46:01+00:00","text":"Summary: Some multiple-table DMLs intermittently are not working on MySQL versions 8.0.30+ with query insights enabled.\nDescription: Our engineers and support teams continue to work on the mitigation identified.\nWe understand that this issue has impacted your ability to access and use our services, we deeply appreciate your patience and cooperation while we work towards resolving the issue in hand.\nAt the current rate of progress, we expect to complete the mitigation work by Wednesday, 2023-11-29 14:00 US/Pacific.\nWe will provide more information by Wednesday, 2023-11-29 14:00 US/Pacific.\nWe sincerely apologize for any inconvenience and troubles being caused at this time.\nDiagnosis: Some multiple-table DMLs intermittently don't work on Cloud SQL MySQL versions 8.0.30+ with query insights enabled. A multiple-table DML is a DML affecting more than one table, for example “UPDATE tableA join tableB ON","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon 
(us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-11-27T17:35:38+00:00","modified":"2023-11-27T17:35:42+00:00","when":"2023-11-27T17:35:38+00:00","text":"Summary: Some multiple-table DMLs intermittently don't work on MySQL versions 8.0.30+ with query insights enabled.\nDescription: Mitigation work is currently underway by our engineering team.\nWe do not have an ETA for mitigation at this point.\nWe will provide more information by Tuesday, 2023-11-28 10:00 US/Pacific.\nDiagnosis: Some multiple-table DMLs intermittently don't work on Cloud SQL MySQL versions 8.0.30+ with query insights enabled. A multiple-table DML is a DML affecting more than one table, for example “UPDATE tableA join tableB ON","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-11-27T16:47:30+00:00","modified":"2023-11-27T16:47:36+00:00","when":"2023-11-27T16:47:30+00:00","text":"Summary: Some multiple-table DMLs intermittently don't work on MySQL versions 8.0.30+ with query insights enabled.\nDescription: We are investigating a potential issue with Google Cloud SQL.\nWe will provide more information by Monday, 2023-11-27 10:00 US/Pacific.\nDiagnosis: Some 
multiple-table DMLs intermittently don't work on Cloud SQL MySQL versions 8.0.30+ with query insights enabled. A multiple-table DML is a DML affecting more than one table, for example “UPDATE tableA join tableB ON","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-11-27T14:51:50+00:00","modified":"2023-11-27T14:51:58+00:00","when":"2023-11-27T14:51:50+00:00","text":"Summary: Some multiple-table DMLs intermittently don't work on MySQL versions 8.0.30+ with query insights enabled.\nDescription: We are investigating a potential issue with Google Cloud SQL.\nWe will provide more information by Monday, 2023-11-27 09:00 US/Pacific.\nDiagnosis: Some multiple-table DMLs intermittently don't work on Cloud SQL MySQL versions 8.0.30+ with query insights enabled. 
A multiple-table DML is a DML affecting more than one table, for example “UPDATE tableA join tableB ON","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},{"created":"2023-11-27T13:54:37+00:00","modified":"2023-11-27T13:54:45+00:00","when":"2023-11-27T13:54:37+00:00","text":"Summary: Some multiple-table DMLs intermittently don't work on MySQL versions 8.0.30+ with query insights enabled.\nDescription: We are investigating a potential issue with Google Cloud SQL.\nWe will provide more information by Monday, 2023-11-27 07:00 US/Pacific.\nDiagnosis: Some multiple-table DMLs intermittently don't work on Cloud SQL MySQL versions 8.0.30+ with query insights enabled. 
A multiple-table DML is a DML affecting more than one table, for example “UPDATE tableA join tableB ON","status":"SERVICE_INFORMATION","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}],"most_recent_update":{"created":"2023-11-29T21:56:33+00:00","modified":"2023-11-29T21:56:35+00:00","when":"2023-11-29T21:56:33+00:00","text":"The issue with Google Cloud SQL has been resolved for all affected users as of Wednesday, 2023-11-29 13:55 US/Pacific.\nWe thank you for your patience while we worked on resolving the issue.","status":"AVAILABLE","affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid 
(europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa (us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]},"status_impact":"SERVICE_INFORMATION","severity":"low","service_key":"hV87iK5DcEXKgWU2kDri","service_name":"Google Cloud SQL","affected_products":[{"title":"Google Cloud SQL","id":"hV87iK5DcEXKgWU2kDri"}],"uri":"incidents/7rJNpj5UKVxHFhdAHUzy","currently_affected_locations":[],"previously_affected_locations":[{"title":"Taiwan (asia-east1)","id":"asia-east1"},{"title":"Hong Kong (asia-east2)","id":"asia-east2"},{"title":"Tokyo (asia-northeast1)","id":"asia-northeast1"},{"title":"Osaka (asia-northeast2)","id":"asia-northeast2"},{"title":"Seoul (asia-northeast3)","id":"asia-northeast3"},{"title":"Mumbai (asia-south1)","id":"asia-south1"},{"title":"Delhi (asia-south2)","id":"asia-south2"},{"title":"Singapore (asia-southeast1)","id":"asia-southeast1"},{"title":"Jakarta (asia-southeast2)","id":"asia-southeast2"},{"title":"Sydney (australia-southeast1)","id":"australia-southeast1"},{"title":"Melbourne (australia-southeast2)","id":"australia-southeast2"},{"title":"Warsaw (europe-central2)","id":"europe-central2"},{"title":"Finland (europe-north1)","id":"europe-north1"},{"title":"Madrid (europe-southwest1)","id":"europe-southwest1"},{"title":"Belgium (europe-west1)","id":"europe-west1"},{"title":"Berlin (europe-west10)","id":"europe-west10"},{"title":"Turin (europe-west12)","id":"europe-west12"},{"title":"London (europe-west2)","id":"europe-west2"},{"title":"Frankfurt (europe-west3)","id":"europe-west3"},{"title":"Netherlands (europe-west4)","id":"europe-west4"},{"title":"Zurich (europe-west6)","id":"europe-west6"},{"title":"Milan (europe-west8)","id":"europe-west8"},{"title":"Paris (europe-west9)","id":"europe-west9"},{"title":"Doha (me-central1)","id":"me-central1"},{"title":"Dammam (me-central2)","id":"me-central2"},{"title":"Tel Aviv (me-west1)","id":"me-west1"},{"title":"Montréal (northamerica-northeast1)","id":"northamerica-northeast1"},{"title":"Toronto (northamerica-northeast2)","id":"northamerica-northeast2"},{"title":"São Paulo (southamerica-east1)","id":"southamerica-east1"},{"title":"Santiago (southamerica-west1)","id":"southamerica-west1"},{"title":"Iowa 
(us-central1)","id":"us-central1"},{"title":"South Carolina (us-east1)","id":"us-east1"},{"title":"Northern Virginia (us-east4)","id":"us-east4"},{"title":"Columbus (us-east5)","id":"us-east5"},{"title":"Dallas (us-south1)","id":"us-south1"},{"title":"Oregon (us-west1)","id":"us-west1"},{"title":"Los Angeles (us-west2)","id":"us-west2"},{"title":"Salt Lake City (us-west3)","id":"us-west3"},{"title":"Las Vegas (us-west4)","id":"us-west4"}]}]