From b89f608ed071e5d58ea5e84b333aea79e501f265 Mon Sep 17 00:00:00 2001 From: sina Date: Wed, 27 Jul 2022 20:27:00 +0430 Subject: change chunk size --- Readme.md | 4 ++-- chunk.go | 24 ++++++++++++------------ chunkio.png | Bin 23961 -> 15803 bytes 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Readme.md b/Readme.md index ecadf1f..889ef55 100644 --- a/Readme.md +++ b/Readme.md @@ -13,9 +13,9 @@ with associated io chunk and io stream interfaces. ### io interfaces: -- **chunkReader**: read and open() data in chunks, there is 8byte + 16byte overhead per chunk. read data can be used safely. this reader has a chunk size in-memory buffer, large chunk size can make application to runs out of memory, thus this is most suitable for sliced data, like network data transmit and so.. +- **chunkReader**: read and open() data in chunks, there is 4byte + 16byte overhead per chunk. read data can be used safely. this reader has a chunk size in-memory buffer, large chunk size can make application to runs out of memory, thus this is most suitable for sliced data, like network data transmit and so.. -- **chunkReader**: seal() and write data in chunks, there is 8byte + 16byte overhead per chunk. this writer has a chunk size in-memory buffer, large chunk size can make application to runs out of memory, thus this is most suitable for sliced data, like network data transmit and so.. +- **chunkWriter**: seal() and write data in chunks, there is 4byte + 16byte overhead per chunk. this writer has a chunk size in-memory buffer, large chunk size can make application to runs out of memory, thus this is most suitable for sliced data, like network data transmit and so..

chunkio

diff --git a/chunk.go b/chunk.go index 3b56d62..45ea84c 100644 --- a/chunk.go +++ b/chunk.go @@ -9,7 +9,7 @@ import ( "snix.ir/rabbitio" ) -const cmrk = 0x08 // chunk size indicator, +const cmrk = 0x04 // chunk size indicator, // without this reader cannot calculate actual size of plaintext // additional data func, return value is used as AD in Seal and Open @@ -38,7 +38,7 @@ type chunkWriter struct { // ciphertext, each chunk has its own tag and cmrk value. // this reader has a chunk size in-memory buffer, large chunk size can make application to runs // out of memory, thus is most suitable for sliced data, like network data transmit and so.. -func NewChunkReader(r io.Reader, chnk int, a cipher.AEAD, nonce []byte, f AdditionalFunc) (*chunkReader, error) { +func NewChunkReader(r io.Reader, chnk uint32, a cipher.AEAD, nonce []byte, f AdditionalFunc) (*chunkReader, error) { if len(nonce) != rabbitio.IVXLen && len(nonce) != 0 { return nil, rabbitio.ErrInvalidIVX @@ -48,7 +48,7 @@ func NewChunkReader(r io.Reader, chnk int, a cipher.AEAD, nonce []byte, f Additi aead: a, buff: []byte{}, nonce: make([]byte, len(nonce)), - csize: chnk, + csize: int(chnk), rader: r, adexe: f, } @@ -64,7 +64,7 @@ func NewChunkReader(r io.Reader, chnk int, a cipher.AEAD, nonce []byte, f Additi // plaintext, each chunk has its own tag and cmrk value. // this writer has a chunk size in-memory buffer, large chunk size can make application to // runs out of memory, thus is most suitable for sliced data, like network data transmit and so.. 
-func NewChunkWriter(w io.Writer, chnk int, a cipher.AEAD, nonce []byte, f AdditionalFunc) (*chunkWriter, error) { +func NewChunkWriter(w io.Writer, chnk uint32, a cipher.AEAD, nonce []byte, f AdditionalFunc) (*chunkWriter, error) { if len(nonce) != rabbitio.IVXLen && len(nonce) != 0 { return nil, rabbitio.ErrInvalidIVX @@ -73,7 +73,7 @@ func NewChunkWriter(w io.Writer, chnk int, a cipher.AEAD, nonce []byte, f Additi aead: a, buff: []byte{}, nonce: make([]byte, len(nonce)), - csize: chnk, + csize: int(chnk), writer: w, adexe: f, } @@ -96,7 +96,7 @@ func (w *chunkWriter) Close() error { // Write writes plaintext chunk into the sale() and underlying writer // write would not report overhead data (chunk size marker and poly1305 tag) in -// written return value. for each chunk there is 8+16 byte overhead data. +// written return value. for each chunk there is 4+16 byte overhead data. // AdFunc will be triggered for each chunk of data func (w *chunkWriter) Write(b []byte) (n int, err error) { w.buff = b @@ -119,7 +119,7 @@ func (w *chunkWriter) write() (int, error) { if len(w.buff) > 0 { s := copy(chnk[cmrk:len(chnk)-w.aead.Overhead()], w.buff) w.buff = w.buff[s:] - copy(chnk[0:cmrk], uint64Little(uint64(s))) + copy(chnk[0:cmrk], uint32Little(uint32(s))) w.aead.Seal(chnk[:0], w.nonce, chnk[:cmrk+w.csize], w.adexe()) _, err = w.writer.Write(chnk) @@ -135,7 +135,7 @@ func (w *chunkWriter) write() (int, error) { // Read reads and open() ciphertext chunk from underlying reader // read would not report overhead data (chunk size marker and poly1305 tag) in its // return value. if the read data from underlying reader is corrupted, ErrAuthMsg -// error will be returned. for each chunk there is 8+16 byte overhead data. +// error will be returned. for each chunk there is 4+16 byte overhead data. 
// AdFunc will be triggered for each chunk of data func (r *chunkReader) Read(b []byte) (int, error) { @@ -181,7 +181,7 @@ func (r *chunkReader) read() (int, error) { var n int size := cmrk + r.csize + r.aead.Overhead() chnk := make([]byte, size) - chLE := uint64Little(uint64(r.csize)) + chLE := uint32Little(uint32(r.csize)) si, err := io.ReadFull(r.rader, chnk) if err != nil { @@ -198,7 +198,7 @@ func (r *chunkReader) read() (int, error) { n += r.csize r.buff = append(r.buff, chnk[cmrk:cmrk+r.csize]...) } else { - f := binary.LittleEndian.Uint64(chnk[0:cmrk]) + f := binary.LittleEndian.Uint32(chnk[0:cmrk]) n += int(f) r.buff = append(r.buff, chnk[cmrk:cmrk+f]...) } @@ -207,8 +207,8 @@ func (r *chunkReader) read() (int, error) { return n, err } -func uint64Little(n uint64) []byte { +func uint32Little(n uint32) []byte { b := make([]byte, cmrk) - binary.LittleEndian.PutUint64(b, n) + binary.LittleEndian.PutUint32(b, n) return b } diff --git a/chunkio.png b/chunkio.png index c79cd0e..699f256 100644 Binary files a/chunkio.png and b/chunkio.png differ -- cgit v1.2.3