Unwind all the zero-copy BIO pair machinery.

This was only used by Chromium and has since been replaced with a custom BIO.
Though it meant a new ring buffer implementation, custom BIOs seem a
better solution for folks who wish to do particularly complicated
things, until the new SSL API is available. External-buffer BIO pairs
were effectively a really confusing and leaky abstraction over a ring
buffer anyway.

Change-Id: I0e201317ff87cdccb17b2f8c260ee5bb06c74771
Reviewed-on: https://boringssl-review.googlesource.com/12626
Commit-Queue: Adam Langley <agl@google.com>
Reviewed-by: Adam Langley <agl@google.com>
Authored by David Benjamin on 2016-12-06 22:24:52 -05:00; committed by Adam Langley
parent 33b1d4f575
commit ed1d288a91
3 changed files with 16 additions and 530 deletions
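For context on the replacement the message points to: a custom BIO lets the caller keep ownership of its buffers by supplying its own |BIO_METHOD|. The sketch below only illustrates that pattern (the |APP_BUFFERS| state and |kAppBufferBIO| table are hypothetical, not Chromium's actual code) and is written against the public |struct bio_st| and |struct bio_method_st| of this era:

#include <openssl/bio.h>
#include <string.h>

/* Hypothetical caller-owned state; the caller, not the BIO, owns this memory. */
typedef struct app_buffers_st {
  uint8_t *recv_buf;   /* bytes received from the transport, not yet consumed */
  size_t recv_len;
  uint8_t *send_buf;   /* bytes to hand to the transport later */
  size_t send_len, send_cap;
} APP_BUFFERS;

static int app_bio_read(BIO *bio, char *out, int len) {
  APP_BUFFERS *a = bio->ptr;
  size_t n;
  BIO_clear_retry_flags(bio);
  if (len <= 0) {
    return 0;
  }
  if (a->recv_len == 0) {
    BIO_set_retry_read(bio);  /* ask the caller to retry once more data arrives */
    return -1;
  }
  n = a->recv_len < (size_t)len ? a->recv_len : (size_t)len;
  memcpy(out, a->recv_buf, n);
  memmove(a->recv_buf, a->recv_buf + n, a->recv_len - n);
  a->recv_len -= n;
  return (int)n;
}

static int app_bio_write(BIO *bio, const char *in, int len) {
  APP_BUFFERS *a = bio->ptr;
  size_t space, n;
  BIO_clear_retry_flags(bio);
  if (len <= 0) {
    return 0;
  }
  space = a->send_cap - a->send_len;
  if (space == 0) {
    BIO_set_retry_write(bio);  /* caller should flush send_buf and retry */
    return -1;
  }
  n = space < (size_t)len ? space : (size_t)len;
  memcpy(a->send_buf + a->send_len, in, n);
  a->send_len += n;
  return (int)n;
}

static long app_bio_ctrl(BIO *bio, int cmd, long larg, void *parg) {
  /* SSL requires BIO_CTRL_FLUSH to report success; everything else is unused here. */
  return cmd == BIO_CTRL_FLUSH ? 1 : 0;
}

static const BIO_METHOD kAppBufferBIO = {
    BIO_TYPE_MEM, "app buffers",
    app_bio_write, app_bio_read,
    NULL /* puts */, NULL /* gets */,
    app_bio_ctrl, NULL /* create */, NULL /* destroy */, NULL /* callback_ctrl */,
};

/* Usage: BIO *bio = BIO_new(&kAppBufferBIO); bio->ptr = &my_buffers; bio->init = 1;
 * then hand it to the connection with SSL_set_bio(ssl, bio, bio). */

This is roughly the shape of thing the zero-copy pair API was competing with: the application, not the library, decides how its buffers are laid out.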

crypto/bio/bio_test.cc

@@ -135,152 +135,6 @@ static bool TestSocketConnect() {
return true;
}
// BioReadZeroCopyWrapper is a wrapper around the zero-copy APIs to make
// testing easier.
static size_t BioReadZeroCopyWrapper(BIO *bio, uint8_t *data, size_t len) {
uint8_t *read_buf;
size_t read_buf_offset;
size_t available_bytes;
size_t len_read = 0;
do {
if (!BIO_zero_copy_get_read_buf(bio, &read_buf, &read_buf_offset,
&available_bytes)) {
return 0;
}
available_bytes = std::min(available_bytes, len - len_read);
memmove(data + len_read, read_buf + read_buf_offset, available_bytes);
BIO_zero_copy_get_read_buf_done(bio, available_bytes);
len_read += available_bytes;
} while (len - len_read > 0 && available_bytes > 0);
return len_read;
}
// BioWriteZeroCopyWrapper is a wrapper around the zero-copy APIs to make
// testing easier.
static size_t BioWriteZeroCopyWrapper(BIO *bio, const uint8_t *data,
size_t len) {
uint8_t *write_buf;
size_t write_buf_offset;
size_t available_bytes;
size_t len_written = 0;
do {
if (!BIO_zero_copy_get_write_buf(bio, &write_buf, &write_buf_offset,
&available_bytes)) {
return 0;
}
available_bytes = std::min(available_bytes, len - len_written);
memmove(write_buf + write_buf_offset, data + len_written, available_bytes);
BIO_zero_copy_get_write_buf_done(bio, available_bytes);
len_written += available_bytes;
} while (len - len_written > 0 && available_bytes > 0);
return len_written;
}
static bool TestZeroCopyBioPairs() {
// Test read and write, especially triggering the ring buffer wrap-around.
uint8_t bio1_application_send_buffer[1024];
uint8_t bio2_application_recv_buffer[1024];
const size_t kLengths[] = {254, 255, 256, 257, 510, 511, 512, 513};
// These trigger ring buffer wrap around.
const size_t kPartialLengths[] = {0, 1, 2, 3, 128, 255, 256, 257, 511, 512};
static const size_t kBufferSize = 512;
srand(1);
for (size_t i = 0; i < sizeof(bio1_application_send_buffer); i++) {
bio1_application_send_buffer[i] = rand() & 255;
}
// Transfer bytes from bio1_application_send_buffer to
// bio2_application_recv_buffer in various ways.
for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kLengths); i++) {
for (size_t j = 0; j < OPENSSL_ARRAY_SIZE(kPartialLengths); j++) {
size_t total_write = 0;
size_t total_read = 0;
BIO *bio1, *bio2;
if (!BIO_new_bio_pair(&bio1, kBufferSize, &bio2, kBufferSize)) {
return false;
}
bssl::UniquePtr<BIO> bio1_scoper(bio1);
bssl::UniquePtr<BIO> bio2_scoper(bio2);
total_write += BioWriteZeroCopyWrapper(
bio1, bio1_application_send_buffer, kLengths[i]);
// This tests interleaved read/write calls. Do a read between zero copy
// write calls.
uint8_t *write_buf;
size_t write_buf_offset;
size_t available_bytes;
if (!BIO_zero_copy_get_write_buf(bio1, &write_buf, &write_buf_offset,
&available_bytes)) {
return false;
}
// Free kPartialLengths[j] bytes in the beginning of bio1 write buffer.
// This enables ring buffer wrap around for the next write.
total_read += BIO_read(bio2, bio2_application_recv_buffer + total_read,
kPartialLengths[j]);
size_t interleaved_write_len = std::min(kPartialLengths[j],
available_bytes);
// Write the data for the interleaved write call. If the buffer becomes
// empty after a read, the write offset is normally set to 0. Check that
// this does not happen for interleaved read/write and that
// |write_buf_offset| is still valid.
memcpy(write_buf + write_buf_offset,
bio1_application_send_buffer + total_write, interleaved_write_len);
if (BIO_zero_copy_get_write_buf_done(bio1, interleaved_write_len)) {
total_write += interleaved_write_len;
}
// Do another write in case |write_buf_offset| was wrapped.
total_write += BioWriteZeroCopyWrapper(
bio1, bio1_application_send_buffer + total_write,
kPartialLengths[j] - interleaved_write_len);
// Drain the rest.
size_t bytes_left = BIO_pending(bio2);
total_read += BioReadZeroCopyWrapper(
bio2, bio2_application_recv_buffer + total_read, bytes_left);
if (total_read != total_write) {
fprintf(stderr, "Lengths not equal in round (%u, %u)\n", (unsigned)i,
(unsigned)j);
return false;
}
if (total_read > kLengths[i] + kPartialLengths[j]) {
fprintf(stderr, "Bad lengths in round (%u, %u)\n", (unsigned)i,
(unsigned)j);
return false;
}
if (memcmp(bio1_application_send_buffer, bio2_application_recv_buffer,
total_read) != 0) {
fprintf(stderr, "Buffers not equal in round (%u, %u)\n", (unsigned)i,
(unsigned)j);
return false;
}
}
}
return true;
}
static bool TestPrintf() {
// Test a short output, a very long one, and various sizes around
// 256 (the size of the buffer) to ensure edge cases are correct.
@@ -429,7 +283,6 @@ int main(void) {
if (!TestSocketConnect() ||
!TestPrintf() ||
!TestZeroCopyBioPairs() ||
!TestASN1()) {
return 1;
}
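With the zero-copy wrappers gone, the remaining pair API is exercised through the ordinary copying entry points; a minimal round trip looks roughly like this (an illustrative snippet, not the test code that remains):

#include <openssl/bio.h>
#include <string.h>

static int example_pair_round_trip(void) {
  BIO *bio1, *bio2;
  uint8_t msg[100], out[100];
  int written, read_bytes, ok;
  if (!BIO_new_bio_pair(&bio1, 512, &bio2, 512)) {
    return 0;
  }
  memset(msg, 0x42, sizeof(msg));
  /* Data written into one end is held in the pair's internal ring buffer... */
  written = BIO_write(bio1, msg, (int)sizeof(msg));
  /* ...and drained, with a copy, from the other end. */
  read_bytes = BIO_read(bio2, out, (int)sizeof(out));
  ok = written == (int)sizeof(msg) && read_bytes == written &&
       memcmp(msg, out, sizeof(msg)) == 0;
  BIO_free(bio1);
  BIO_free(bio2);
  return ok;
}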

crypto/bio/pair.c

@@ -72,12 +72,6 @@ struct bio_bio_st {
size_t offset; /* valid iff buf != NULL; 0 if len == 0 */
size_t size;
uint8_t *buf; /* "size" elements (if != NULL) */
char buf_externally_allocated; /* true iff buf was externally allocated. */
char zero_copy_read_lock; /* true iff a zero copy read operation
* is in progress. */
char zero_copy_write_lock; /* true iff a zero copy write operation
* is in progress. */
size_t request; /* valid iff peer != NULL; 0 if len != 0,
* otherwise set by peer to number of bytes
@@ -145,263 +139,12 @@ static int bio_free(BIO *bio) {
bio_destroy_pair(bio);
}
if (!b->buf_externally_allocated) {
OPENSSL_free(b->buf);
}
OPENSSL_free(b->buf);
OPENSSL_free(b);
return 1;
}
static size_t bio_zero_copy_get_read_buf(struct bio_bio_st* peer_b,
uint8_t** out_read_buf,
size_t* out_buf_offset) {
size_t max_available;
if (peer_b->len > peer_b->size - peer_b->offset) {
/* Only the first half of the ring buffer can be read. */
max_available = peer_b->size - peer_b->offset;
} else {
max_available = peer_b->len;
}
*out_read_buf = peer_b->buf;
*out_buf_offset = peer_b->offset;
return max_available;
}
int BIO_zero_copy_get_read_buf(BIO* bio, uint8_t** out_read_buf,
size_t* out_buf_offset,
size_t* out_available_bytes) {
struct bio_bio_st* b;
struct bio_bio_st* peer_b;
size_t max_available;
*out_available_bytes = 0;
BIO_clear_retry_flags(bio);
if (!bio->init) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNINITIALIZED);
return 0;
}
b = bio->ptr;
if (!b || !b->peer) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD);
return 0;
}
peer_b = b->peer->ptr;
if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD);
return 0;
}
if (peer_b->zero_copy_read_lock) {
OPENSSL_PUT_ERROR(BIO, BIO_R_INVALID_ARGUMENT);
return 0;
}
peer_b->request = 0; /* Is not used by zero-copy API. */
max_available =
bio_zero_copy_get_read_buf(peer_b, out_read_buf, out_buf_offset);
assert(peer_b->buf != NULL);
if (max_available > 0) {
peer_b->zero_copy_read_lock = 1;
}
*out_available_bytes = max_available;
return 1;
}
int BIO_zero_copy_get_read_buf_done(BIO* bio, size_t bytes_read) {
struct bio_bio_st* b;
struct bio_bio_st* peer_b;
size_t max_available;
size_t dummy_read_offset;
uint8_t* dummy_read_buf;
assert(BIO_get_retry_flags(bio) == 0);
if (!bio->init) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNINITIALIZED);
return 0;
}
b = bio->ptr;
if (!b || !b->peer) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD);
return 0;
}
peer_b = b->peer->ptr;
if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD);
return 0;
}
if (!peer_b->zero_copy_read_lock) {
OPENSSL_PUT_ERROR(BIO, BIO_R_INVALID_ARGUMENT);
return 0;
}
max_available =
bio_zero_copy_get_read_buf(peer_b, &dummy_read_buf, &dummy_read_offset);
if (bytes_read > max_available) {
OPENSSL_PUT_ERROR(BIO, BIO_R_INVALID_ARGUMENT);
return 0;
}
assert(peer_b->len >= bytes_read);
peer_b->len -= bytes_read;
assert(peer_b->offset + bytes_read <= peer_b->size);
/* Move read offset. If zero_copy_write_lock == 1 we must advance the
* offset even if buffer becomes empty, to make sure
* write_offset = (offset + len) mod size does not change. */
if (peer_b->offset + bytes_read == peer_b->size ||
(!peer_b->zero_copy_write_lock && peer_b->len == 0)) {
peer_b->offset = 0;
} else {
peer_b->offset += bytes_read;
}
bio->num_read += bytes_read;
peer_b->zero_copy_read_lock = 0;
return 1;
}
static size_t bio_zero_copy_get_write_buf(struct bio_bio_st* b,
uint8_t** out_write_buf,
size_t* out_buf_offset) {
size_t write_offset;
size_t max_available;
assert(b->len <= b->size);
write_offset = b->offset + b->len;
if (write_offset >= b->size) {
/* Only the first half of the ring buffer can be written to. */
write_offset -= b->size;
/* write up to the start of the ring buffer. */
max_available = b->offset - write_offset;
} else {
/* write up to the end of the buffer. */
max_available = b->size - write_offset;
}
*out_write_buf = b->buf;
*out_buf_offset = write_offset;
return max_available;
}
int BIO_zero_copy_get_write_buf(BIO* bio, uint8_t** out_write_buf,
size_t* out_buf_offset,
size_t* out_available_bytes) {
struct bio_bio_st* b;
struct bio_bio_st* peer_b;
size_t max_available;
*out_available_bytes = 0;
BIO_clear_retry_flags(bio);
if (!bio->init) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNINITIALIZED);
return 0;
}
b = bio->ptr;
if (!b || !b->buf || !b->peer) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD);
return 0;
}
peer_b = b->peer->ptr;
if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD);
return 0;
}
assert(b->buf != NULL);
if (b->zero_copy_write_lock) {
OPENSSL_PUT_ERROR(BIO, BIO_R_INVALID_ARGUMENT);
return 0;
}
b->request = 0;
if (b->closed) {
/* Bio is already closed. */
OPENSSL_PUT_ERROR(BIO, BIO_R_BROKEN_PIPE);
return 0;
}
max_available = bio_zero_copy_get_write_buf(b, out_write_buf, out_buf_offset);
if (max_available > 0) {
b->zero_copy_write_lock = 1;
}
*out_available_bytes = max_available;
return 1;
}
int BIO_zero_copy_get_write_buf_done(BIO* bio, size_t bytes_written) {
struct bio_bio_st* b;
struct bio_bio_st* peer_b;
size_t rest;
size_t dummy_write_offset;
uint8_t* dummy_write_buf;
if (!bio->init) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNINITIALIZED);
return 0;
}
b = bio->ptr;
if (!b || !b->buf || !b->peer) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD);
return 0;
}
peer_b = b->peer->ptr;
if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD);
return 0;
}
b->request = 0;
if (b->closed) {
/* BIO is already closed. */
OPENSSL_PUT_ERROR(BIO, BIO_R_BROKEN_PIPE);
return 0;
}
if (!b->zero_copy_write_lock) {
OPENSSL_PUT_ERROR(BIO, BIO_R_INVALID_ARGUMENT);
return 0;
}
rest = bio_zero_copy_get_write_buf(b, &dummy_write_buf, &dummy_write_offset);
if (bytes_written > rest) {
OPENSSL_PUT_ERROR(BIO, BIO_R_INVALID_ARGUMENT);
return 0;
}
bio->num_write += bytes_written;
/* Move write offset. */
b->len += bytes_written;
b->zero_copy_write_lock = 0;
return 1;
}
static int bio_read(BIO *bio, char *buf, int size_) {
size_t size = size_;
size_t rest;
@@ -422,7 +165,7 @@ static int bio_read(BIO *bio, char *buf, int size_) {
peer_b->request = 0; /* will be set in "retry_read" situation */
if (buf == NULL || size == 0 || peer_b->zero_copy_read_lock) {
if (buf == NULL || size == 0) {
return 0;
}
@@ -467,10 +210,7 @@ static int bio_read(BIO *bio, char *buf, int size_) {
memcpy(buf, peer_b->buf + peer_b->offset, chunk);
peer_b->len -= chunk;
/* If zero_copy_write_lock == 1 we must advance the offset even if buffer
* becomes empty, to make sure write_offset = (offset + len) % size
* does not change. */
if (peer_b->len || peer_b->zero_copy_write_lock) {
if (peer_b->len) {
peer_b->offset += chunk;
assert(peer_b->offset <= peer_b->size);
if (peer_b->offset == peer_b->size) {
@@ -504,10 +244,6 @@ static int bio_write(BIO *bio, const char *buf, int num_) {
assert(b->peer != NULL);
assert(b->buf != NULL);
if (b->zero_copy_write_lock) {
return 0;
}
b->request = 0;
if (b->closed) {
/* we already closed */
@@ -564,9 +300,8 @@ static int bio_write(BIO *bio, const char *buf, int num_) {
return num;
}
static int bio_make_pair(BIO* bio1, BIO* bio2,
size_t writebuf1_len, uint8_t* ext_writebuf1,
size_t writebuf2_len, uint8_t* ext_writebuf2) {
static int bio_make_pair(BIO *bio1, BIO *bio2, size_t writebuf1_len,
size_t writebuf2_len) {
struct bio_bio_st *b1, *b2;
assert(bio1 != NULL);
@@ -580,23 +315,14 @@ static int bio_make_pair(BIO* bio1, BIO* bio2,
return 0;
}
assert(b1->buf_externally_allocated == 0);
assert(b2->buf_externally_allocated == 0);
if (b1->buf == NULL) {
if (writebuf1_len) {
b1->size = writebuf1_len;
}
if (!ext_writebuf1) {
b1->buf_externally_allocated = 0;
b1->buf = OPENSSL_malloc(b1->size);
if (b1->buf == NULL) {
OPENSSL_PUT_ERROR(BIO, ERR_R_MALLOC_FAILURE);
return 0;
}
} else {
b1->buf = ext_writebuf1;
b1->buf_externally_allocated = 1;
b1->buf = OPENSSL_malloc(b1->size);
if (b1->buf == NULL) {
OPENSSL_PUT_ERROR(BIO, ERR_R_MALLOC_FAILURE);
return 0;
}
b1->len = 0;
b1->offset = 0;
@@ -606,16 +332,10 @@ static int bio_make_pair(BIO* bio1, BIO* bio2,
if (writebuf2_len) {
b2->size = writebuf2_len;
}
if (!ext_writebuf2) {
b2->buf_externally_allocated = 0;
b2->buf = OPENSSL_malloc(b2->size);
if (b2->buf == NULL) {
OPENSSL_PUT_ERROR(BIO, ERR_R_MALLOC_FAILURE);
return 0;
}
} else {
b2->buf = ext_writebuf2;
b2->buf_externally_allocated = 1;
b2->buf = OPENSSL_malloc(b2->size);
if (b2->buf == NULL) {
OPENSSL_PUT_ERROR(BIO, ERR_R_MALLOC_FAILURE);
return 0;
}
b2->len = 0;
b2->offset = 0;
@@ -624,13 +344,9 @@ static int bio_make_pair(BIO* bio1, BIO* bio2,
b1->peer = bio2;
b1->closed = 0;
b1->request = 0;
b1->zero_copy_read_lock = 0;
b1->zero_copy_write_lock = 0;
b2->peer = bio1;
b2->closed = 0;
b2->request = 0;
b2->zero_copy_read_lock = 0;
b2->zero_copy_write_lock = 0;
bio1->init = 1;
bio2->init = 1;
@@ -744,24 +460,11 @@ static const BIO_METHOD methods_biop = {
static const BIO_METHOD *bio_s_bio(void) { return &methods_biop; }
int BIO_new_bio_pair(BIO** bio1_p, size_t writebuf1,
BIO** bio2_p, size_t writebuf2) {
return BIO_new_bio_pair_external_buf(bio1_p, writebuf1, NULL, bio2_p,
writebuf2, NULL);
}
int BIO_new_bio_pair_external_buf(BIO** bio1_p, size_t writebuf1_len,
uint8_t* ext_writebuf1,
BIO** bio2_p, size_t writebuf2_len,
uint8_t* ext_writebuf2) {
int BIO_new_bio_pair(BIO** bio1_p, size_t writebuf1_len,
BIO** bio2_p, size_t writebuf2_len) {
BIO *bio1 = NULL, *bio2 = NULL;
int ret = 0;
/* External buffers must have sizes greater than 0. */
if ((ext_writebuf1 && !writebuf1_len) || (ext_writebuf2 && !writebuf2_len)) {
goto err;
}
bio1 = BIO_new(bio_s_bio());
if (bio1 == NULL) {
goto err;
@@ -771,8 +474,7 @@ int BIO_new_bio_pair_external_buf(BIO** bio1_p, size_t writebuf1_len,
goto err;
}
if (!bio_make_pair(bio1, bio2, writebuf1_len, ext_writebuf1, writebuf2_len,
ext_writebuf2)) {
if (!bio_make_pair(bio1, bio2, writebuf1_len, writebuf2_len)) {
goto err;
}
ret = 1;
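The ring-buffer bookkeeping that the deleted zero-copy helpers exposed reduces to a few lines. As a standalone illustration (not library code; the names only mirror the |offset|/|len|/|size| fields of |struct bio_bio_st| above):

#include <assert.h>
#include <stddef.h>

/* For a ring buffer of |size| bytes holding |len| bytes starting at |offset|,
 * only the span up to the physical end of the buffer is contiguous. This is
 * why the zero-copy getters could report fewer bytes than BIO_pending or
 * BIO_ctrl_get_write_guarantee. */
static size_t contiguous_readable(size_t offset, size_t len, size_t size) {
  return len > size - offset ? size - offset : len;
}

static size_t contiguous_writable(size_t offset, size_t len, size_t size,
                                  size_t *out_write_offset) {
  size_t write_offset;
  assert(len <= size);
  write_offset = offset + len;    /* conceptually (offset + len) mod size */
  if (write_offset >= size) {
    write_offset -= size;         /* wrapped: write up to the start of the unread data */
    *out_write_offset = write_offset;
    return offset - write_offset;
  }
  *out_write_offset = write_offset;
  return size - write_offset;     /* not wrapped: write up to the end of the buffer */
}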

include/openssl/bio.h

@@ -616,18 +616,6 @@ OPENSSL_EXPORT int BIO_do_connect(BIO *bio);
OPENSSL_EXPORT int BIO_new_bio_pair(BIO **out1, size_t writebuf1, BIO **out2,
size_t writebuf2);
/* BIO_new_bio_pair_external_buf is the same as |BIO_new_bio_pair| with the
* difference that the caller keeps ownership of the write buffers
* |ext_writebuf1_len| and |ext_writebuf2_len|. This is useful when using zero
* copy API for read and write operations, in cases where the buffers need to
* outlive the BIO pairs. It returns one on success and zero on error. */
OPENSSL_EXPORT int BIO_new_bio_pair_external_buf(BIO** bio1_p,
size_t writebuf1_len,
uint8_t* ext_writebuf1,
BIO** bio2_p,
size_t writebuf2_len,
uint8_t* ext_writebuf2);
/* BIO_ctrl_get_read_request returns the number of bytes that the other side of
* |bio| tried (unsuccessfully) to read. */
OPENSSL_EXPORT size_t BIO_ctrl_get_read_request(BIO *bio);
@@ -643,63 +631,6 @@ OPENSSL_EXPORT size_t BIO_ctrl_get_write_guarantee(BIO *bio);
OPENSSL_EXPORT int BIO_shutdown_wr(BIO *bio);
/* Zero copy versions of BIO_read and BIO_write for BIO pairs. */
/* BIO_zero_copy_get_read_buf initiates a zero copy read operation.
* |out_read_buf| is set to the internal read buffer, and |out_buf_offset| is
* set to the current read position of |out_read_buf|. The number of bytes
* available for read from |out_read_buf| + |out_buf_offset| is returned in
* |out_available_bytes|. Note that this function might report fewer bytes
* available than |BIO_pending|, if the internal ring buffer is wrapped. It
* returns one on success. In case of error it returns zero and pushes to the
* error stack.
*
* The zero copy read operation is completed by calling
* |BIO_zero_copy_get_read_buf_done|. Neither |BIO_zero_copy_get_read_buf| nor
* any other I/O read operation may be called while a zero copy read operation
* is active. */
OPENSSL_EXPORT int BIO_zero_copy_get_read_buf(BIO* bio,
uint8_t** out_read_buf,
size_t* out_buf_offset,
size_t* out_available_bytes);
/* BIO_zero_copy_get_read_buf_done must be called after reading from a BIO using
* |BIO_zero_copy_get_read_buf| to finish the read operation. The |bytes_read|
* argument is the number of bytes read.
*
* It returns one on success. In case of error it returns zero and pushes to the
* error stack. */
OPENSSL_EXPORT int BIO_zero_copy_get_read_buf_done(BIO* bio, size_t bytes_read);
/* BIO_zero_copy_get_write_buf initiates a zero copy write operation.
* |out_write_buf| is set to the internal write buffer, and |out_buf_offset|
* is set to the current write position of |out_write_buf|.
* The number of bytes available for write from |out_write_buf| +
* |out_buf_offset| is returned in |out_available_bytes|. Note that this
* function might report fewer bytes available than
* |BIO_ctrl_get_write_guarantee|, if the internal buffer is wrapped. It returns
* one on success. In case of error it returns zero and pushes to the error
* stack.
*
* The zero copy write operation is completed by calling
* |BIO_zero_copy_get_write_buf_done|. Neither |BIO_zero_copy_get_write_buf|
* nor any other I/O write operation may be called while a zero copy write
* operation is active. */
OPENSSL_EXPORT int BIO_zero_copy_get_write_buf(BIO* bio,
uint8_t** out_write_buf,
size_t* out_buf_offset,
size_t* out_available_bytes);
/* BIO_zero_copy_get_write_buf_done must be called after writing to a BIO using
* |BIO_zero_copy_get_write_buf| to finish the write operation. The
* |bytes_written| argument gives the number of bytes written.
*
* It returns one on success. In case of error it returns zero and pushes to the
* error stack. */
OPENSSL_EXPORT int BIO_zero_copy_get_write_buf_done(BIO* bio,
size_t bytes_written);
/* BIO_NOCLOSE and |BIO_CLOSE| can be used as symbolic arguments when a "close
* flag" is passed to a BIO function. */
#define BIO_NOCLOSE 0