Golang - Error while gzipping MongoDB find query's cursor data, writing to a file and decompressing it
I am iterating over a MongoDB cursor, gzipping the data, and sending it to an S3 object. When I try to decompress the uploaded file with gzip -d, I get the following errors:
gzip: 9.log.gz: invalid compressed data--crc error
gzip: 9.log.gz: invalid compressed data--length error
The code I use for iterating, compressing, and uploading is given below:
// CursorReader struct acts as reader wrapper on top of mongodb cursor
type CursorReader struct {
    Csr *mongo.Cursor
}

// Read func reads the data from cursor and puts it into byte array
func (cr *CursorReader) Read(p []byte) (n int, err error) {
    dataAvail := cr.Csr.Next(context.TODO())
    if !dataAvail {
        n = 0
        err = io.EOF
        if cr.Csr.Close(context.TODO()) != nil {
            fmt.Fprintf(os.Stderr, "Error: MongoDB: getting logs: close cursor: %s", err)
        }
        return
    }
    var b bytes.Buffer
    w := gzip.NewWriter(&b)
    w.Write([]byte(cr.Csr.Current.String() + "\n"))
    w.Close()
    n = copy(p, []byte(b.String()))
    err = nil
    return
}
cursor, err := coll.Find(ctx, filter) // runs the find query and returns the cursor
csrRdr := new(CursorReader)           // creates a new CursorReader instance
csrRdr.Csr = cursor                   // assigns the find cursor to the CursorReader instance
_, err = s3Uploader.Upload(&s3manager.UploadInput{ // uploads the data to S3 in parts
    Bucket: aws.String("bucket"),
    Key:    aws.String("key"),
    Body:   csrRdr,
})
If the data is small, I don't face the issue, but with large data I get the error. What I have debugged so far: I tried compressing 1500 files, each about 15 MB in size, and got the error. Even when I tried writing the gzipped bytes directly to a local file, I got the same error.
The problem appears to be the repeated call to gzip.NewWriter() in func (*CursorReader) Read([]byte) (int, error). You are allocating a new gzip.Writer for every call to Read. gzip compression is stateful, so you must use a single Writer instance for all the writes.
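In other words, every chunk should go through the same writer, and the stream should be closed exactly once at the end. A minimal sketch (not your exact code; dst and chunks are placeholders):

// minimal sketch, not your exact code: one gzip.Writer for the whole stream.
// "dst" (an io.Writer) and "chunks" ([][]byte) are placeholders.
gz := gzip.NewWriter(dst)
for _, chunk := range chunks {
    if _, err := gz.Write(chunk); err != nil {
        // handle error somehow ¯\_(ツ)_/¯
    }
}
// Close is called exactly once, at the end; it flushes the remaining data
// and writes the gzip trailer (the CRC and length that gzip -d verifies)
if err := gz.Close(); err != nil {
    // handle error somehow ¯\_(ツ)_/¯
}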
Solution #1
A fairly straightforward solution to your problem is to read all rows from the cursor, pass them through a single gzip.Writer, and store the gzipped content in an in-memory buffer.
var cursor, _ = collection.Find(context.TODO(), filter)
defer cursor.Close(context.TODO())

// prepare a buffer to hold gzipped data
var buffer bytes.Buffer
var gz = gzip.NewWriter(&buffer)

for cursor.Next(context.TODO()) {
    if _, err := io.WriteString(gz, cursor.Current.String()); err != nil {
        // handle error somehow ¯\_(ツ)_/¯
    }
}

// close the gzip stream before reading the buffer,
// so the remaining data and the gzip footer are flushed
if err := gz.Close(); err != nil {
    // handle error somehow ¯\_(ツ)_/¯
}

// you can now use buffer as io.Reader
// and it'll contain gzipped data for your serialized rows
_, err = s3.Upload(&s3.UploadInput{
    Bucket: aws.String("..."),
    Key:    aws.String("..."),
    Body:   &buffer,
})
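If you want to sanity-check the gzipped buffer locally before uploading (similar to what you tried when writing the gzip bytes to a file), a minimal sketch, assuming a placeholder file name out.log.gz, could look like this:

// write the gzipped buffer to a local file so it can be checked with gzip -d;
// "out.log.gz" is just a placeholder name
if err := os.WriteFile("out.log.gz", buffer.Bytes(), 0644); err != nil {
    // handle error somehow ¯\_(ツ)_/¯
}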
Solution #2
Another solution is to use io.Pipe() and goroutines to create a stream that reads and compresses the data on demand, rather than in an in-memory buffer. This is useful if the data you are reading is very large and you cannot hold all of it in memory.
var cursor, _ = collection.Find(context.TODO(), filter)
defer cursor.Close(context.TODO())

// create pipe endpoints
reader, writer := io.Pipe()

// note: io.Pipe() returns a synchronous in-memory pipe;
// reads and writes block on one another,
// so make sure to go through the docs once.
// since reads and writes on a pipe block, we must move
// the writing side to a background goroutine, else
// all our writes would block forever
go func() {
    // order of defer here is important:
    // make sure the gzip stream is closed before the pipe
    // to ensure the data is flushed properly
    defer writer.Close()

    var gz = gzip.NewWriter(writer)
    defer gz.Close()

    for cursor.Next(context.Background()) {
        if _, err := io.WriteString(gz, cursor.Current.String()); err != nil {
            // handle error somehow ¯\_(ツ)_/¯
        }
    }
}()

// you can now use reader as io.Reader
// and it'll contain gzipped data for your serialized rows
_, err = s3.Upload(&s3.UploadInput{
    Bucket: aws.String("..."),
    Key:    aws.String("..."),
    Body:   reader,
})
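As a side note on the "handle error somehow" placeholders: with the pipe-based approach you can propagate a failure to the reading side via writer.CloseWithError, so that s3.Upload returns the error instead of uploading a truncated object. A sketch of the goroutine body with that wired in (same cursor and writer as above; one possible way, not the only one):

go func() {
    gz := gzip.NewWriter(writer)
    for cursor.Next(context.Background()) {
        if _, err := io.WriteString(gz, cursor.Current.String()); err != nil {
            writer.CloseWithError(err) // the reader (and s3.Upload) will see this error
            return
        }
    }
    if err := gz.Close(); err != nil {
        writer.CloseWithError(err)
        return
    }
    writer.Close() // normal completion: the reader sees io.EOF
}()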