Skip to content

Commit

Permalink
Bug 1393659 - Fix inconsistent handling of max_post_bytes and max_request_bytes r=markh
Browse files Browse the repository at this point in the history

MozReview-Commit-ID: 4jwpAYNuoQj
  • Loading branch information
Thom Chiovoloni committed Sep 14, 2017
1 parent f0f0f1c commit 5b2757b
Show file tree
Hide file tree
Showing 10 changed files with 140 additions and 154 deletions.
12 changes: 4 additions & 8 deletions services/sync/modules/constants.js
Original file line number Diff line number Diff line change
Expand Up @@ -64,10 +64,6 @@ PASSWORDS_STORE_BATCH_SIZE: 50,
// (how many records are fetched at a time from the server when batching is used).
DEFAULT_DOWNLOAD_BATCH_SIZE: 1000,


// Default maximum size for a record payload
DEFAULT_MAX_RECORD_PAYLOAD_BYTES: 262144, // 256KB

// score thresholds for early syncs
SINGLE_USER_THRESHOLD: 1000,
MULTI_DEVICE_THRESHOLD: 300,
Expand All @@ -86,10 +82,10 @@ SCORE_UPDATE_DELAY: 100,
// observed spurious idle/back events and short enough to pre-empt user activity.
IDLE_OBSERVER_BACK_DELAY: 100,

// Max number of records or bytes to upload in a single POST - we'll do multiple POSTS if either
// MAX_UPLOAD_RECORDS or MAX_UPLOAD_BYTES is hit)
MAX_UPLOAD_RECORDS: 100,
MAX_UPLOAD_BYTES: 1024 * 1023, // just under 1MB
// Duplicate URI_LENGTH_MAX from Places (from nsNavHistory.h), used to discard
// tabs with huge uris during tab sync.
URI_LENGTH_MAX: 65536,

MAX_HISTORY_UPLOAD: 5000,
MAX_HISTORY_DOWNLOAD: 5000,

Expand Down
16 changes: 1 addition & 15 deletions services/sync/modules/engines.js
Original file line number Diff line number Diff line change
Expand Up @@ -945,14 +945,6 @@ SyncEngine.prototype = {
Svc.Prefs.set(this.name + ".lastSyncLocal", value.toString());
},

get maxRecordPayloadBytes() {
let serverConfiguration = this.service.serverConfiguration;
if (serverConfiguration && serverConfiguration.max_record_payload_bytes) {
return serverConfiguration.max_record_payload_bytes;
}
return DEFAULT_MAX_RECORD_PAYLOAD_BYTES;
},

/*
* Returns a changeset for this sync. Engine implementations can override this
* method to bypass the tracker for certain or all changed items.
Expand Down Expand Up @@ -1679,13 +1671,6 @@ SyncEngine.prototype = {
this._log.trace("Outgoing: " + out);

out.encrypt(this.service.collectionKeys.keyForCollection(this.name));
let payloadLength = JSON.stringify(out.payload).length;
if (payloadLength > this.maxRecordPayloadBytes) {
if (this.allowSkippedRecord) {
this._modified.delete(id); // Do not attempt to sync that record again
}
throw new Error(`Payload too big: ${payloadLength} bytes`);
}
ok = true;
} catch (ex) {
this._log.warn("Error creating record", ex);
Expand All @@ -1704,6 +1689,7 @@ SyncEngine.prototype = {
++counts.failed;
if (!this.allowSkippedRecord) {
Observers.notify("weave:engine:sync:uploaded", counts, this.name);
this._log.warn(`Failed to enqueue record "${id}" (aborting)`, error);
throw error;
}
this._modified.delete(id);
Expand Down
5 changes: 3 additions & 2 deletions services/sync/modules/engines/tabs.js
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ TabStore.prototype = {
continue;
}

if (current.url.length >= (MAX_UPLOAD_BYTES - 1000)) {
if (current.url.length > URI_LENGTH_MAX) {
this._log.trace("Skipping over-long URL.");
continue;
}
Expand Down Expand Up @@ -218,8 +218,9 @@ TabStore.prototype = {
// We use byteLength here because the data is not encrypted in ascii yet.
let size = new TextEncoder("utf-8").encode(JSON.stringify(tabs)).byteLength;
let origLength = tabs.length;
const maxPayloadSize = this.engine.service.getMaxRecordPayloadSize();
// See bug 535326 comment 8 for an explanation of the estimation
const MAX_TAB_SIZE = this.engine.maxRecordPayloadBytes / 4 * 3 - 1500;
const MAX_TAB_SIZE = maxPayloadSize / 4 * 3 - 1500;
if (size > MAX_TAB_SIZE) {
// Estimate a little more than the direct fraction to maximize packing
let cutoff = Math.ceil(tabs.length * MAX_TAB_SIZE / size);
Expand Down
80 changes: 44 additions & 36 deletions services/sync/modules/record.js
Original file line number Diff line number Diff line change
Expand Up @@ -791,35 +791,39 @@ Collection.prototype = {
return Resource.prototype.post.call(this, data);
}
let getConfig = (name, defaultVal) => {
// serverConfiguration is allowed to be missing during tests.
if (this._service.serverConfiguration && this._service.serverConfiguration.hasOwnProperty(name)) {
return this._service.serverConfiguration[name];
}
return defaultVal;
}
};

// On a server that does not support the batch API, we expect the /info/configuration
// endpoint to provide "max_record_payload_bytes" and "max_request_bytes" limits.
// On a server that supports the batching API, we expect "max_record_payload_bytes"
// (as before), as well as "max_post_bytes", "max_post_records", "max_total_bytes" and
// "max_total_records". Much of the complexity here and in enqueue is attempting to
// handle both these cases simultaneously.
let config = {
max_post_bytes: getConfig("max_post_bytes", MAX_UPLOAD_BYTES),
max_post_records: getConfig("max_post_records", MAX_UPLOAD_RECORDS),
// Note that from the server's POV, max_post_bytes is the sum of payload
// lengths, but we treat it equivalently to max_request_bytes (which is
// payload + metadata lengths).
max_post_bytes: getConfig("max_post_bytes",
getConfig("max_request_bytes", 260 * 1024)),

max_post_records: getConfig("max_post_records", Infinity),

max_batch_bytes: getConfig("max_total_bytes", Infinity),
max_batch_records: getConfig("max_total_records", Infinity),
}

// Handle config edge cases
if (config.max_post_records <= 0) { config.max_post_records = MAX_UPLOAD_RECORDS; }
if (config.max_batch_records <= 0) { config.max_batch_records = Infinity; }
if (config.max_post_bytes <= 0) { config.max_post_bytes = MAX_UPLOAD_BYTES; }
if (config.max_batch_bytes <= 0) { config.max_batch_bytes = Infinity; }
max_record_payload_bytes: getConfig("max_record_payload_bytes", 256 * 1024),
};

// Max size of BSO payload is 256k. This assumes at most 4k of overhead,
// which sounds like plenty. If the server says it can't handle this, we
// might have valid records we can't sync, so we give up on syncing.
let requiredMax = 260 * 1024;
if (config.max_post_bytes < requiredMax) {
if (config.max_post_bytes <= config.max_record_payload_bytes) {
this._log.error("Server configuration max_post_bytes is too low", config);
throw new Error("Server configuration max_post_bytes is too low");
}

this._log.trace("new PostQueue created with config", config);
return new PostQueue(poster, timestamp, config, log, postCallback);
},
};
Expand Down Expand Up @@ -889,38 +893,42 @@ PostQueue.prototype = {
if (!jsonRepr) {
throw new Error("You must only call this with objects that explicitly support JSON");
}

let bytes = JSON.stringify(jsonRepr);

// Do a flush if we can't add this record without exceeding our single-request
// limits, or without exceeding the total limit for a single batch.
let newLength = this.queued.length + bytes.length + 2; // extras for leading "[" / "," and trailing "]"
// Tests sometimes return objects without payloads, and we just use the
// byte length for those cases.
let payloadLength = jsonRepr.payload ? jsonRepr.payload.length : bytes.length;
if (payloadLength > this.config.max_record_payload_bytes) {
return { enqueued: false, error: new Error("Single record too large to submit to server") };
}

let maxAllowedBytes = Math.min(256 * 1024, this.config.max_post_bytes);
// The `+ 2` is to account for the 2-byte (maximum) overhead (one byte for
// the leading comma or "[", which all records will have, and the other for
// the final trailing "]", only present for the last record).
let newLength = this.queued.length + bytes.length + 2;
let newRecordCount = this.numQueued + 1;

let postSizeExceeded = this.numQueued >= this.config.max_post_records ||
newLength >= maxAllowedBytes;
// Note that the max_post_records and max_batch_records server limits are
// inclusive (e.g. if the max_post_records == 100, it will allow a post with
// 100 records), but the byte limits are not. (See
// https://github.com/mozilla-services/server-syncstorage/issues/73)

let batchSizeExceeded = (this.numQueued + this.numAlreadyBatched) >= this.config.max_batch_records ||
(newLength + this.bytesAlreadyBatched) >= this.config.max_batch_bytes;
// Have we exceeded the maximum size or record count for a single POST?
let postSizeExceeded = newRecordCount > this.config.max_post_records ||
newLength >= this.config.max_post_bytes;

let singleRecordTooBig = bytes.length + 2 > maxAllowedBytes;
// Have we exceeded the maximum size or record count for the entire batch?
let batchSizeExceeded = (newRecordCount + this.numAlreadyBatched) > this.config.max_batch_records ||
(newLength + this.bytesAlreadyBatched) >= this.config.max_batch_bytes;

if (postSizeExceeded || batchSizeExceeded) {
this.log.trace(`PostQueue flushing due to postSizeExceeded=${postSizeExceeded}, batchSizeExceeded=${batchSizeExceeded}` +
`, max_batch_bytes: ${this.config.max_batch_bytes}, max_post_bytes: ${this.config.max_post_bytes}`);

if (singleRecordTooBig) {
return { enqueued: false, error: new Error("Single record too large to submit to server") };
}

this.log.trace("PostQueue flushing due to ", { postSizeExceeded, batchSizeExceeded });
// We need to write the queue out before handling this one, but we only
// commit the batch (and thus start a new one) if the batch is full.
// Note that if a single record is too big for the batch or post, then
// the batch may be empty, and so we don't flush in that case.
if (this.numQueued) {
await this.flush(batchSizeExceeded || singleRecordTooBig);
}
await this.flush(batchSizeExceeded);
}

// Either a ',' or a '[' depending on whether this is the first record.
this.queued += this.numQueued ? "," : "[";
this.queued += bytes;
Expand Down
10 changes: 10 additions & 0 deletions services/sync/modules/service.js
Original file line number Diff line number Diff line change
Expand Up @@ -618,6 +618,16 @@ Sync11Service.prototype = {
}
},

getMaxRecordPayloadSize() {
let config = this.serverConfiguration;
if (!config || !config.max_record_payload_bytes) {
this._log.warn("No config or incomplete config in getMaxRecordPayloadSize."
+ " Are we running tests?");
return 256 * 1024;
}
return config.max_record_payload_bytes;
},

async verifyLogin(allow40XRecovery = true) {
if (!this.identity.username) {
this._log.warn("No username in verifyLogin.");
Expand Down
Loading

0 comments on commit 5b2757b

Please sign in to comment.