From 6a7b88e78891167a2d77af8fae69b90aada6de59 Mon Sep 17 00:00:00 2001
From: Alexander Narsudinov
Date: Fri, 27 Oct 2023 20:47:25 +0200
Subject: [PATCH] Increase the buffer size of objects channel in
 `removeObjectsOneByOne`

It seems reasonable to make the work queue the same size as the maximum
number of workers.
---
 pkg/s3/client.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/s3/client.go b/pkg/s3/client.go
index 6e63e6d..7a1b1ec 100644
--- a/pkg/s3/client.go
+++ b/pkg/s3/client.go
@@ -171,7 +171,7 @@ func (client *s3Client) removeObjects(bucketName, prefix string) error {
 // will delete files one by one without file lock
 func (client *s3Client) removeObjectsOneByOne(bucketName, prefix string) error {
 	parallelism := 16
-	objectsCh := make(chan minio.ObjectInfo, 1)
+	objectsCh := make(chan minio.ObjectInfo, parallelism)
 	guardCh := make(chan int, parallelism)
 	var listErr error
 	var totalObjects int64 = 0
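
Note: for context, below is a minimal, self-contained Go sketch of the pattern this
one-line change affects. It is an illustration assembled only from the hunk's context
lines, not the driver's actual removeObjectsOneByOne: the real function lists objects
through the minio client and tracks listErr/totalObjects, all of which is omitted here,
and object names are plain strings.

package main

import (
	"fmt"
	"sync"
)

// removeOneByOne sketches the producer/worker pattern visible in the hunk above:
// a buffered work queue (objectsCh) feeding a bounded number of concurrent
// deleters, with guardCh acting as a semaphore.
func removeOneByOne(objects []string) {
	parallelism := 16

	// The change in this patch: buffer the work queue to `parallelism`
	// instead of 1, so the producer can stay ahead of the workers without
	// blocking on every send.
	objectsCh := make(chan string, parallelism)

	// Semaphore capping the number of in-flight deletions.
	guardCh := make(chan int, parallelism)

	// Producer: enqueue every object, then close the channel.
	go func() {
		for _, obj := range objects {
			objectsCh <- obj
		}
		close(objectsCh)
	}()

	var wg sync.WaitGroup
	for obj := range objectsCh {
		guardCh <- 1 // acquire a worker slot
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			fmt.Println("removing", name) // placeholder for the per-object delete
			<-guardCh                     // release the slot
		}(obj)
	}
	wg.Wait()
}

func main() {
	objs := make([]string, 100)
	for i := range objs {
		objs[i] = fmt.Sprintf("object-%03d", i)
	}
	removeOneByOne(objs)
}

With a buffer of 1 the listing goroutine blocks after nearly every send; sizing the
buffer to parallelism lets it keep all workers fed, which is the rationale stated in
the commit message.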