@@ -418,31 +418,41 @@ bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
   }
 }
 
-namespace internal {
-uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
-  // Use smaller hash table when input.size() is smaller, since we
-  // fill the table, incurring O(hash table size) overhead for
-  // compression, and if the input is short, we won't need that
-  // many hash table entries anyway.
+namespace {
+uint32 CalculateTableSize(uint32 input_size) {
   assert(kMaxHashTableSize >= 256);
-  size_t htsize = 256;
-  while (htsize < kMaxHashTableSize && htsize < input_size) {
-    htsize <<= 1;
+  if (input_size > kMaxHashTableSize) {
+    return kMaxHashTableSize;
   }
-
-  uint16* table;
-  if (htsize <= ARRAYSIZE(small_table_)) {
-    table = small_table_;
-  } else {
-    if (large_table_ == NULL) {
-      large_table_ = new uint16[kMaxHashTableSize];
-    }
-    table = large_table_;
+  if (input_size < 256) {
+    return 256;
   }
+  return 1u << (32 - __builtin_clz(input_size - 1));
+}
+}  // namespace
 
+namespace internal {
+WorkingMemory::WorkingMemory(size_t input_size) {
+  const size_t max_fragment_size = std::min(input_size, kBlockSize);
+  const size_t table_size = CalculateTableSize(max_fragment_size);
+  size_ = table_size * sizeof(*table_) + max_fragment_size +
+          MaxCompressedLength(max_fragment_size);
+  mem_ = std::allocator<char>().allocate(size_);
+  table_ = reinterpret_cast<uint16*>(mem_);
+  input_ = mem_ + table_size * sizeof(*table_);
+  output_ = input_ + max_fragment_size;
+}
+
+WorkingMemory::~WorkingMemory() {
+  std::allocator<char>().deallocate(mem_, size_);
+}
+
+uint16* WorkingMemory::GetHashTable(size_t fragment_size,
+                                    int* table_size) const {
+  const size_t htsize = CalculateTableSize(fragment_size);
+  memset(table_, 0, htsize * sizeof(*table_));
   *table_size = htsize;
-  memset(table, 0, htsize * sizeof(*table));
-  return table;
+  return table_;
 }
 }  // end namespace internal
 
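For context (not part of the diff): a minimal, self-contained sketch of the
table-size rounding that the new CalculateTableSize performs, assuming
kMaxHashTableSize is 1 << 14 as in upstream snappy and using uint32_t in place
of snappy's uint32 typedef.

#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {
constexpr uint32_t kMaxHashTableSize = 1 << 14;  // assumed upstream value

uint32_t CalculateTableSize(uint32_t input_size) {
  assert(kMaxHashTableSize >= 256);
  if (input_size > kMaxHashTableSize) return kMaxHashTableSize;
  if (input_size < 256) return 256;
  // Round up to the next power of two: 32 - __builtin_clz(x - 1) is the bit
  // width of x - 1, so shifting 1 by it yields the smallest power of two
  // that is >= x. (__builtin_clz is a GCC/Clang builtin.)
  return 1u << (32 - __builtin_clz(input_size - 1));
}
}  // namespace

int main() {
  // 100 clamps up to the 256-entry minimum, 1000 rounds up to 1024, and a
  // 1 MiB input clamps down to kMaxHashTableSize (16384).
  std::printf("%u %u %u\n", CalculateTableSize(100), CalculateTableSize(1000),
              CalculateTableSize(1u << 20));
  return 0;
}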
@@ -942,17 +952,6 @@ bool GetUncompressedLength(Source* source, uint32* result) {
   return decompressor.ReadUncompressedLength(result);
 }
 
-struct Deleter {
-  Deleter() : size_(0) {}
-  explicit Deleter(size_t size) : size_(size) {}
-
-  void operator()(char* ptr) const {
-    std::allocator<char>().deallocate(ptr, size_);
-  }
-
-  size_t size_;
-};
-
 size_t Compress(Source* reader, Sink* writer) {
   size_t written = 0;
   size_t N = reader->Available();
@@ -962,9 +961,7 @@ size_t Compress(Source* reader, Sink* writer) {
   writer->Append(ulength, p-ulength);
   written += (p - ulength);
 
-  internal::WorkingMemory wmem;
-  std::unique_ptr<char, Deleter> scratch;
-  std::unique_ptr<char, Deleter> scratch_output;
+  internal::WorkingMemory wmem(N);
 
   while (N > 0) {
     // Get next block to compress (without copying if possible)
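The two hunks below call wmem.GetScratchInput() and wmem.GetScratchOutput(),
which are declared in snappy-internal.h and therefore not visible in this
diff. A hypothetical sketch of how the single-allocation WorkingMemory could
expose them, mirroring the layout set up in the constructor above (hash table,
then input scratch, then output scratch); the names and comments here are
illustrative, not the actual header.

#include <cstddef>
#include <cstdint>

using uint16 = uint16_t;

class WorkingMemory {
 public:
  explicit WorkingMemory(size_t input_size);  // carves mem_ into 3 regions
  ~WorkingMemory();                           // frees the single allocation

  // Zeroes and returns the hash table sized for this fragment.
  uint16* GetHashTable(size_t fragment_size, int* table_size) const;
  // Scratch region used when the Source cannot expose a whole block
  // contiguously via Peek().
  char* GetScratchInput() const { return input_; }
  // Scratch region offered to Sink::GetAppendBuffer() as a fallback
  // output buffer.
  char* GetScratchOutput() const { return output_; }

 private:
  char* mem_;      // start of the single allocation
  size_t size_;    // total bytes allocated
  uint16* table_;  // hash table region (at the front of mem_)
  char* input_;    // input scratch region (up to kBlockSize bytes)
  char* output_;   // output scratch region (MaxCompressedLength bytes)
};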
@@ -980,26 +977,19 @@ size_t Compress(Source* reader, Sink* writer) {
       pending_advance = num_to_read;
       fragment_size = num_to_read;
     } else {
-      // Read into scratch buffer
-      if (scratch == NULL) {
-        // If this is the last iteration, we want to allocate N bytes
-        // of space, otherwise the max possible kBlockSize space.
-        // num_to_read contains exactly the correct value
-        scratch = {
-            std::allocator<char>().allocate(num_to_read), Deleter(num_to_read)};
-      }
-      memcpy(scratch.get(), fragment, bytes_read);
+      char* scratch = wmem.GetScratchInput();
+      memcpy(scratch, fragment, bytes_read);
       reader->Skip(bytes_read);
 
       while (bytes_read < num_to_read) {
         fragment = reader->Peek(&fragment_size);
         size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
-        memcpy(scratch.get() + bytes_read, fragment, n);
+        memcpy(scratch + bytes_read, fragment, n);
         bytes_read += n;
         reader->Skip(n);
       }
       assert(bytes_read == num_to_read);
-      fragment = scratch.get();
+      fragment = scratch;
       fragment_size = num_to_read;
     }
     assert(fragment_size == num_to_read);
@@ -1013,17 +1003,13 @@ size_t Compress(Source* reader, Sink* writer) {
 
     // Need a scratch buffer for the output, in case the byte sink doesn't
     // have room for us directly.
-    if (scratch_output == NULL) {
-      scratch_output =
-          {std::allocator<char>().allocate(max_output), Deleter(max_output)};
-    } else {
-      // Since we encode kBlockSize regions followed by a region
-      // which is <= kBlockSize in length, a previously allocated
-      // scratch_output[] region is big enough for this iteration.
-    }
-    char* dest = writer->GetAppendBuffer(max_output, scratch_output.get());
-    char* end = internal::CompressFragment(fragment, fragment_size,
-                                           dest, table, table_size);
+
+    // Since we encode kBlockSize regions followed by a region
+    // which is <= kBlockSize in length, a previously allocated
+    // scratch_output[] region is big enough for this iteration.
+    char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput());
+    char* end = internal::CompressFragment(fragment, fragment_size, dest, table,
+                                           table_size);
     writer->Append(dest, end - dest);
     written += (end - dest);
 