/**
 * Returns a value for a number (or null) that's appropriate to put into a
 * google.protobuf.Int32Value proto.
 * DO NOT USE THIS FOR ANYTHING ELSE.
 * This method cheats. It's typed as returning "number" because that's what
 * our generated proto interfaces say Int32Value must be. But GRPC actually
 * expects a { value: <number> } struct.
 */
function toInt32Proto(serializer, val) {
if (serializer.useProto3Json || isNullOrUndefined(val)) {
return val;
}
else {
return { value: val };
}
}
/**
* Returns a number (or null) from a google.protobuf.Int32Value proto.
*/
function fromInt32Proto(val) {
let result;
if (typeof val === 'object') {
result = val.value;
}
else {
result = val;
}
return isNullOrUndefined(result) ? null : result;
}
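// Example (illustrative, not part of the original source): with proto3 JSON the
// number passes through unchanged; otherwise it is boxed as an Int32Value struct:
//   toInt32Proto(proto3JsonSerializer, 42) -> 42
//   toInt32Proto(protoJsSerializer, 42)    -> { value: 42 }
//   fromInt32Proto({ value: 42 })          -> 42
//   fromInt32Proto(undefined)              -> null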
/**
* Returns a value for a Date that's appropriate to put into a proto.
*/
function toTimestamp(serializer, timestamp) {
if (serializer.useProto3Json) {
// Serialize to ISO-8601 date format, but with full nano resolution.
// Since JS Date has only millis, let's only use it for the seconds and
// then manually add the fractions to the end.
const jsDateStr = new Date(timestamp.seconds * 1000).toISOString();
// Remove the '.xxx' fractional part and the trailing 'Z'.
const strUntilSeconds = jsDateStr.replace(/\.\d*/, '').replace('Z', '');
// Pad the fraction out to 9 digits (nanos).
const nanoStr = ('000000000' + timestamp.nanoseconds).slice(-9);
return `${strUntilSeconds}.${nanoStr}Z`;
}
else {
return {
seconds: '' + timestamp.seconds,
nanos: timestamp.nanoseconds
};
}
}
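// Example (illustrative): for a Timestamp with seconds = 1640995200 and
// nanoseconds = 123, the proto3 JSON branch yields the string
// '2022-01-01T00:00:00.000000123Z', while the proto-JS branch yields
// { seconds: '1640995200', nanos: 123 }.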
function fromTimestamp(date) {
const timestamp = normalizeTimestamp(date);
return new Timestamp(timestamp.seconds, timestamp.nanos);
}
/**
* Returns a value for bytes that's appropriate to put in a proto.
*
* Visible for testing.
*/
function toBytes(serializer, bytes) {
if (serializer.useProto3Json) {
return bytes.toBase64();
}
else {
return bytes.toUint8Array();
}
}
/**
* Returns a ByteString based on the proto string value.
*/
function fromBytes(serializer, value) {
if (serializer.useProto3Json) {
hardAssert(value === undefined || typeof value === 'string');
return ByteString.fromBase64String(value ? value : '');
}
else {
hardAssert(value === undefined || value instanceof Uint8Array);
return ByteString.fromUint8Array(value ? value : new Uint8Array());
}
}
function toVersion(serializer, version) {
return toTimestamp(serializer, version.toTimestamp());
}
function fromVersion(version) {
hardAssert(!!version);
return SnapshotVersion.fromTimestamp(fromTimestamp(version));
}
function toResourceName(databaseId, path) {
return fullyQualifiedPrefixPath(databaseId)
.child('documents')
.child(path)
.canonicalString();
}
function fromResourceName(name) {
const resource = ResourcePath.fromString(name);
hardAssert(isValidResourceName(resource));
return resource;
}
function toName(serializer, key) {
return toResourceName(serializer.databaseId, key.path);
}
function fromName(serializer, name) {
const resource = fromResourceName(name);
if (resource.get(1) !== serializer.databaseId.projectId) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Tried to deserialize key from different project: ' +
resource.get(1) +
' vs ' +
serializer.databaseId.projectId);
}
if (resource.get(3) !== serializer.databaseId.database) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Tried to deserialize key from different database: ' +
resource.get(3) +
' vs ' +
serializer.databaseId.database);
}
return new DocumentKey(extractLocalPathFromResourceName(resource));
}
function toQueryPath(serializer, path) {
return toResourceName(serializer.databaseId, path);
}
function fromQueryPath(name) {
const resourceName = fromResourceName(name);
// In v1beta1 queries for collections at the root did not have a trailing
// "/documents". In v1 all resource paths contain "/documents". Preserve the
// ability to read the v1beta1 form for compatibility with queries persisted
// in the local target cache.
if (resourceName.length === 4) {
return ResourcePath.emptyPath();
}
return extractLocalPathFromResourceName(resourceName);
}
function getEncodedDatabaseId(serializer) {
const path = new ResourcePath([
'projects',
serializer.databaseId.projectId,
'databases',
serializer.databaseId.database
]);
return path.canonicalString();
}
function fullyQualifiedPrefixPath(databaseId) {
return new ResourcePath([
'projects',
databaseId.projectId,
'databases',
databaseId.database
]);
}
function extractLocalPathFromResourceName(resourceName) {
hardAssert(resourceName.length > 4 && resourceName.get(4) === 'documents');
return resourceName.popFirst(5);
}
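// Example (illustrative): resource names have the shape
//   projects/<projectId>/databases/<databaseId>/documents/<document path>
// so for the database ('my-project', '(default)') and the key 'users/alice':
//   toResourceName(...)                   -> 'projects/my-project/databases/(default)/documents/users/alice'
//   extractLocalPathFromResourceName(...) -> ResourcePath(['users', 'alice'])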
/** Creates a Document proto from key and fields (but no create/update time) */
function toMutationDocument(serializer, key, fields) {
return {
name: toName(serializer, key),
fields: fields.value.mapValue.fields
};
}
function toDocument(serializer, document) {
return {
name: toName(serializer, document.key),
fields: document.data.value.mapValue.fields,
updateTime: toTimestamp(serializer, document.version.toTimestamp()),
createTime: toTimestamp(serializer, document.createTime.toTimestamp())
};
}
function fromDocument(serializer, document, hasCommittedMutations) {
const key = fromName(serializer, document.name);
const version = fromVersion(document.updateTime);
// If we read a document from persistence that is missing createTime, it's due
// to older SDK versions not storing this information. In such cases, we'll
// set the createTime to zero. This can be removed in the long term.
const createTime = document.createTime
? fromVersion(document.createTime)
: SnapshotVersion.min();
const data = new ObjectValue({ mapValue: { fields: document.fields } });
const result = MutableDocument.newFoundDocument(key, version, createTime, data);
return hasCommittedMutations ? result.setHasCommittedMutations() : result;
}
function fromFound(serializer, doc) {
hardAssert(!!doc.found);
assertPresent(doc.found.name);
assertPresent(doc.found.updateTime);
const key = fromName(serializer, doc.found.name);
const version = fromVersion(doc.found.updateTime);
const createTime = doc.found.createTime
? fromVersion(doc.found.createTime)
: SnapshotVersion.min();
const data = new ObjectValue({ mapValue: { fields: doc.found.fields } });
return MutableDocument.newFoundDocument(key, version, createTime, data);
}
function fromMissing(serializer, result) {
hardAssert(!!result.missing);
hardAssert(!!result.readTime);
const key = fromName(serializer, result.missing);
const version = fromVersion(result.readTime);
return MutableDocument.newNoDocument(key, version);
}
function fromBatchGetDocumentsResponse(serializer, result) {
if ('found' in result) {
return fromFound(serializer, result);
}
else if ('missing' in result) {
return fromMissing(serializer, result);
}
return fail();
}
function fromWatchChange(serializer, change) {
let watchChange;
if ('targetChange' in change) {
assertPresent(change.targetChange);
// proto3 default value is unset in JSON (undefined), so use 'NO_CHANGE'
// if unset
const state = fromWatchTargetChangeState(change.targetChange.targetChangeType || 'NO_CHANGE');
const targetIds = change.targetChange.targetIds || [];
const resumeToken = fromBytes(serializer, change.targetChange.resumeToken);
const causeProto = change.targetChange.cause;
const cause = causeProto && fromRpcStatus(causeProto);
watchChange = new WatchTargetChange(state, targetIds, resumeToken, cause || null);
}
else if ('documentChange' in change) {
assertPresent(change.documentChange);
const entityChange = change.documentChange;
assertPresent(entityChange.document);
assertPresent(entityChange.document.name);
assertPresent(entityChange.document.updateTime);
const key = fromName(serializer, entityChange.document.name);
const version = fromVersion(entityChange.document.updateTime);
const createTime = entityChange.document.createTime
? fromVersion(entityChange.document.createTime)
: SnapshotVersion.min();
const data = new ObjectValue({
mapValue: { fields: entityChange.document.fields }
});
const doc = MutableDocument.newFoundDocument(key, version, createTime, data);
const updatedTargetIds = entityChange.targetIds || [];
const removedTargetIds = entityChange.removedTargetIds || [];
watchChange = new DocumentWatchChange(updatedTargetIds, removedTargetIds, doc.key, doc);
}
else if ('documentDelete' in change) {
assertPresent(change.documentDelete);
const docDelete = change.documentDelete;
assertPresent(docDelete.document);
const key = fromName(serializer, docDelete.document);
const version = docDelete.readTime
? fromVersion(docDelete.readTime)
: SnapshotVersion.min();
const doc = MutableDocument.newNoDocument(key, version);
const removedTargetIds = docDelete.removedTargetIds || [];
watchChange = new DocumentWatchChange([], removedTargetIds, doc.key, doc);
}
else if ('documentRemove' in change) {
assertPresent(change.documentRemove);
const docRemove = change.documentRemove;
assertPresent(docRemove.document);
const key = fromName(serializer, docRemove.document);
const removedTargetIds = docRemove.removedTargetIds || [];
watchChange = new DocumentWatchChange([], removedTargetIds, key, null);
}
else if ('filter' in change) {
// TODO(dimond): implement existence filter parsing with strategy.
assertPresent(change.filter);
const filter = change.filter;
assertPresent(filter.targetId);
const count = filter.count || 0;
const existenceFilter = new ExistenceFilter(count);
const targetId = filter.targetId;
watchChange = new ExistenceFilterChange(targetId, existenceFilter);
}
else {
return fail();
}
return watchChange;
}
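// Example (illustrative): the property present on the ListenResponse proto
// selects the watch change variant, e.g.
//   { targetChange: { targetChangeType: 'ADD', targetIds: [1] } }
//     -> WatchTargetChange(Added, [1], ...)
//   { documentDelete: { document: 'projects/p/databases/d/documents/users/a' } }
//     -> DocumentWatchChange([], [], key, NoDocument at SnapshotVersion.min())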
function fromWatchTargetChangeState(state) {
if (state === 'NO_CHANGE') {
return 0 /* WatchTargetChangeState.NoChange */;
}
else if (state === 'ADD') {
return 1 /* WatchTargetChangeState.Added */;
}
else if (state === 'REMOVE') {
return 2 /* WatchTargetChangeState.Removed */;
}
else if (state === 'CURRENT') {
return 3 /* WatchTargetChangeState.Current */;
}
else if (state === 'RESET') {
return 4 /* WatchTargetChangeState.Reset */;
}
else {
return fail();
}
}
function versionFromListenResponse(change) {
// We have only reached a consistent snapshot for the entire stream if there
// is a read_time set and it applies to all targets (i.e. the list of
// targets is empty). The backend is guaranteed to send such responses.
if (!('targetChange' in change)) {
return SnapshotVersion.min();
}
const targetChange = change.targetChange;
if (targetChange.targetIds && targetChange.targetIds.length) {
return SnapshotVersion.min();
}
if (!targetChange.readTime) {
return SnapshotVersion.min();
}
return fromVersion(targetChange.readTime);
}
function toMutation(serializer, mutation) {
let result;
if (mutation instanceof SetMutation) {
result = {
update: toMutationDocument(serializer, mutation.key, mutation.value)
};
}
else if (mutation instanceof DeleteMutation) {
result = { delete: toName(serializer, mutation.key) };
}
else if (mutation instanceof PatchMutation) {
result = {
update: toMutationDocument(serializer, mutation.key, mutation.data),
updateMask: toDocumentMask(mutation.fieldMask)
};
}
else if (mutation instanceof VerifyMutation) {
result = {
verify: toName(serializer, mutation.key)
};
}
else {
return fail();
}
if (mutation.fieldTransforms.length > 0) {
result.updateTransforms = mutation.fieldTransforms.map(transform => toFieldTransform(serializer, transform));
}
if (!mutation.precondition.isNone) {
result.currentDocument = toPrecondition(serializer, mutation.precondition);
}
return result;
}
function fromMutation(serializer, proto) {
const precondition = proto.currentDocument
? fromPrecondition(proto.currentDocument)
: Precondition.none();
const fieldTransforms = proto.updateTransforms
? proto.updateTransforms.map(transform => fromFieldTransform(serializer, transform))
: [];
if (proto.update) {
assertPresent(proto.update.name);
const key = fromName(serializer, proto.update.name);
const value = new ObjectValue({
mapValue: { fields: proto.update.fields }
});
if (proto.updateMask) {
const fieldMask = fromDocumentMask(proto.updateMask);
return new PatchMutation(key, value, fieldMask, precondition, fieldTransforms);
}
else {
return new SetMutation(key, value, precondition, fieldTransforms);
}
}
else if (proto.delete) {
const key = fromName(serializer, proto.delete);
return new DeleteMutation(key, precondition);
}
else if (proto.verify) {
const key = fromName(serializer, proto.verify);
return new VerifyMutation(key, precondition);
}
else {
return fail();
}
}
function toPrecondition(serializer, precondition) {
if (precondition.updateTime !== undefined) {
return {
updateTime: toVersion(serializer, precondition.updateTime)
};
}
else if (precondition.exists !== undefined) {
return { exists: precondition.exists };
}
else {
return fail();
}
}
function fromPrecondition(precondition) {
if (precondition.updateTime !== undefined) {
return Precondition.updateTime(fromVersion(precondition.updateTime));
}
else if (precondition.exists !== undefined) {
return Precondition.exists(precondition.exists);
}
else {
return Precondition.none();
}
}
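// Example (illustrative): preconditions map to one of two proto shapes:
//   Precondition.exists(true)   -> { exists: true }
//   Precondition.updateTime(v)  -> { updateTime: <serialized version> }
//   Precondition.none()         -> no currentDocument field on the Write proto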
function fromWriteResult(proto, commitTime) {
// NOTE: Deletes don't have an updateTime.
let version = proto.updateTime
? fromVersion(proto.updateTime)
: fromVersion(commitTime);
if (version.isEqual(SnapshotVersion.min())) {
// The Firestore Emulator currently returns an update time of 0 for
// deletes of non-existing documents (rather than null). This breaks the
// test "get deleted doc while offline with source=cache" as NoDocuments
// with version 0 are filtered by IndexedDb's RemoteDocumentCache.
// TODO(#2149): Remove this when Emulator is fixed
version = fromVersion(commitTime);
}
return new MutationResult(version, proto.transformResults || []);
}
function fromWriteResults(protos, commitTime) {
if (protos && protos.length > 0) {
hardAssert(commitTime !== undefined);
return protos.map(proto => fromWriteResult(proto, commitTime));
}
else {
return [];
}
}
function toFieldTransform(serializer, fieldTransform) {
const transform = fieldTransform.transform;
if (transform instanceof ServerTimestampTransform) {
return {
fieldPath: fieldTransform.field.canonicalString(),
setToServerValue: 'REQUEST_TIME'
};
}
else if (transform instanceof ArrayUnionTransformOperation) {
return {
fieldPath: fieldTransform.field.canonicalString(),
appendMissingElements: {
values: transform.elements
}
};
}
else if (transform instanceof ArrayRemoveTransformOperation) {
return {
fieldPath: fieldTransform.field.canonicalString(),
removeAllFromArray: {
values: transform.elements
}
};
}
else if (transform instanceof NumericIncrementTransformOperation) {
return {
fieldPath: fieldTransform.field.canonicalString(),
increment: transform.operand
};
}
else {
throw fail();
}
}
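// Example (illustrative): a serverTimestamp() transform on field 'updatedAt'
// serializes to
//   { fieldPath: 'updatedAt', setToServerValue: 'REQUEST_TIME' }
// and an increment transform's operand is already a proto Value, e.g.
//   { fieldPath: 'counter', increment: { integerValue: '1' } }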
function fromFieldTransform(serializer, proto) {
let transform = null;
if ('setToServerValue' in proto) {
hardAssert(proto.setToServerValue === 'REQUEST_TIME');
transform = new ServerTimestampTransform();
}
else if ('appendMissingElements' in proto) {
const values = proto.appendMissingElements.values || [];
transform = new ArrayUnionTransformOperation(values);
}
else if ('removeAllFromArray' in proto) {
const values = proto.removeAllFromArray.values || [];
transform = new ArrayRemoveTransformOperation(values);
}
else if ('increment' in proto) {
transform = new NumericIncrementTransformOperation(serializer, proto.increment);
}
else {
fail();
}
const fieldPath = FieldPath$1.fromServerFormat(proto.fieldPath);
return new FieldTransform(fieldPath, transform);
}
function toDocumentsTarget(serializer, target) {
return { documents: [toQueryPath(serializer, target.path)] };
}
function fromDocumentsTarget(documentsTarget) {
const count = documentsTarget.documents.length;
hardAssert(count === 1);
const name = documentsTarget.documents[0];
return queryToTarget(newQueryForPath(fromQueryPath(name)));
}
function toQueryTarget(serializer, target) {
// Dissect the path into parent, collectionId, and optional key filter.
const result = { structuredQuery: {} };
const path = target.path;
if (target.collectionGroup !== null) {
result.parent = toQueryPath(serializer, path);
result.structuredQuery.from = [
{
collectionId: target.collectionGroup,
allDescendants: true
}
];
}
else {
result.parent = toQueryPath(serializer, path.popLast());
result.structuredQuery.from = [{ collectionId: path.lastSegment() }];
}
const where = toFilters(target.filters);
if (where) {
result.structuredQuery.where = where;
}
const orderBy = toOrder(target.orderBy);
if (orderBy) {
result.structuredQuery.orderBy = orderBy;
}
const limit = toInt32Proto(serializer, target.limit);
if (limit !== null) {
result.structuredQuery.limit = limit;
}
if (target.startAt) {
result.structuredQuery.startAt = toStartAtCursor(target.startAt);
}
if (target.endAt) {
result.structuredQuery.endAt = toEndAtCursor(target.endAt);
}
return result;
}
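// Example (illustrative): a query over the collection 'users' in database
// ('p', 'd') produces roughly
//   {
//     parent: 'projects/p/databases/d/documents',
//     structuredQuery: { from: [{ collectionId: 'users' }], ... }
//   }
// whereas a collection-group query keeps the full path as the parent and sets
// allDescendants: true on the from clause.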
function toRunAggregationQueryRequest(serializer, target) {
const queryTarget = toQueryTarget(serializer, target);
return {
structuredAggregationQuery: {
aggregations: [
{
count: {},
alias: 'count_alias'
}
],
structuredQuery: queryTarget.structuredQuery
},
parent: queryTarget.parent
};
}
function convertQueryTargetToQuery(target) {
let path = fromQueryPath(target.parent);
const query = target.structuredQuery;
const fromCount = query.from ? query.from.length : 0;
let collectionGroup = null;
if (fromCount > 0) {
hardAssert(fromCount === 1);
const from = query.from[0];
if (from.allDescendants) {
collectionGroup = from.collectionId;
}
else {
path = path.child(from.collectionId);
}
}
let filterBy = [];
if (query.where) {
filterBy = fromFilters(query.where);
}
let orderBy = [];
if (query.orderBy) {
orderBy = fromOrder(query.orderBy);
}
let limit = null;
if (query.limit) {
limit = fromInt32Proto(query.limit);
}
let startAt = null;
if (query.startAt) {
startAt = fromStartAtCursor(query.startAt);
}
let endAt = null;
if (query.endAt) {
endAt = fromEndAtCursor(query.endAt);
}
return newQuery(path, collectionGroup, orderBy, filterBy, limit, "F" /* LimitType.First */, startAt, endAt);
}
function fromQueryTarget(target) {
return queryToTarget(convertQueryTargetToQuery(target));
}
function toListenRequestLabels(serializer, targetData) {
const value = toLabel(serializer, targetData.purpose);
if (value == null) {
return null;
}
else {
return {
'goog-listen-tags': value
};
}
}
function toLabel(serializer, purpose) {
switch (purpose) {
case 0 /* TargetPurpose.Listen */:
return null;
case 1 /* TargetPurpose.ExistenceFilterMismatch */:
return 'existence-filter-mismatch';
case 2 /* TargetPurpose.LimboResolution */:
return 'limbo-document';
default:
return fail();
}
}
function toTarget(serializer, targetData) {
let result;
const target = targetData.target;
if (targetIsDocumentTarget(target)) {
result = { documents: toDocumentsTarget(serializer, target) };
}
else {
result = { query: toQueryTarget(serializer, target) };
}
result.targetId = targetData.targetId;
if (targetData.resumeToken.approximateByteSize() > 0) {
result.resumeToken = toBytes(serializer, targetData.resumeToken);
}
else if (targetData.snapshotVersion.compareTo(SnapshotVersion.min()) > 0) {
// TODO(wuandy): Consider removing above check because it is most likely true.
// Right now, many tests depend on this behaviour though (leaving min() out
// of serialization).
result.readTime = toTimestamp(serializer, targetData.snapshotVersion.toTimestamp());
}
return result;
}
function toFilters(filters) {
if (filters.length === 0) {
return;
}
return toFilter(CompositeFilter.create(filters, "and" /* CompositeOperator.AND */));
}
function fromFilters(filter) {
const result = fromFilter(filter);
if (result instanceof CompositeFilter &&
compositeFilterIsFlatConjunction(result)) {
return result.getFilters();
}
return [result];
}
function fromFilter(filter) {
if (filter.unaryFilter !== undefined) {
return fromUnaryFilter(filter);
}
else if (filter.fieldFilter !== undefined) {
return fromFieldFilter(filter);
}
else if (filter.compositeFilter !== undefined) {
return fromCompositeFilter(filter);
}
else {
return fail();
}
}
function toOrder(orderBys) {
if (orderBys.length === 0) {
return;
}
return orderBys.map(order => toPropertyOrder(order));
}
function fromOrder(orderBys) {
return orderBys.map(order => fromPropertyOrder(order));
}
function toStartAtCursor(cursor) {
return {
before: cursor.inclusive,
values: cursor.position
};
}
function toEndAtCursor(cursor) {
return {
before: !cursor.inclusive,
values: cursor.position
};
}
function fromStartAtCursor(cursor) {
const inclusive = !!cursor.before;
const position = cursor.values || [];
return new Bound(position, inclusive);
}
function fromEndAtCursor(cursor) {
const inclusive = !cursor.before;
const position = cursor.values || [];
return new Bound(position, inclusive);
}
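// Example (illustrative): the proto 'before' flag matches the model's
// 'inclusive' flag for start cursors but is inverted for end cursors:
//   startAt, inclusive: true -> { before: true,  values: [...] }
//   endAt,   inclusive: true -> { before: false, values: [...] }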
// visible for testing
function toDirection(dir) {
return DIRECTIONS[dir];
}
// visible for testing
function fromDirection(dir) {
switch (dir) {
case 'ASCENDING':
return "asc" /* Direction.ASCENDING */;
case 'DESCENDING':
return "desc" /* Direction.DESCENDING */;
default:
return undefined;
}
}
// visible for testing
function toOperatorName(op) {
return OPERATORS[op];
}
function toCompositeOperatorName(op) {
return COMPOSITE_OPERATORS[op];
}
function fromOperatorName(op) {
switch (op) {
case 'EQUAL':
return "==" /* Operator.EQUAL */;
case 'NOT_EQUAL':
return "!=" /* Operator.NOT_EQUAL */;
case 'GREATER_THAN':
return ">" /* Operator.GREATER_THAN */;
case 'GREATER_THAN_OR_EQUAL':
return ">=" /* Operator.GREATER_THAN_OR_EQUAL */;
case 'LESS_THAN':
return "<" /* Operator.LESS_THAN */;
case 'LESS_THAN_OR_EQUAL':
return "<=" /* Operator.LESS_THAN_OR_EQUAL */;
case 'ARRAY_CONTAINS':
return "array-contains" /* Operator.ARRAY_CONTAINS */;
case 'IN':
return "in" /* Operator.IN */;
case 'NOT_IN':
return "not-in" /* Operator.NOT_IN */;
case 'ARRAY_CONTAINS_ANY':
return "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */;
case 'OPERATOR_UNSPECIFIED':
return fail();
default:
return fail();
}
}
function fromCompositeOperatorName(op) {
switch (op) {
case 'AND':
return "and" /* CompositeOperator.AND */;
case 'OR':
return "or" /* CompositeOperator.OR */;
default:
return fail();
}
}
function toFieldPathReference(path) {
return { fieldPath: path.canonicalString() };
}
function fromFieldPathReference(fieldReference) {
return FieldPath$1.fromServerFormat(fieldReference.fieldPath);
}
// visible for testing
function toPropertyOrder(orderBy) {
return {
field: toFieldPathReference(orderBy.field),
direction: toDirection(orderBy.dir)
};
}
function fromPropertyOrder(orderBy) {
return new OrderBy(fromFieldPathReference(orderBy.field), fromDirection(orderBy.direction));
}
// visible for testing
function toFilter(filter) {
if (filter instanceof FieldFilter) {
return toUnaryOrFieldFilter(filter);
}
else if (filter instanceof CompositeFilter) {
return toCompositeFilter(filter);
}
else {
return fail();
}
}
function toCompositeFilter(filter) {
const protos = filter.getFilters().map(filter => toFilter(filter));
if (protos.length === 1) {
return protos[0];
}
return {
compositeFilter: {
op: toCompositeOperatorName(filter.op),
filters: protos
}
};
}
function toUnaryOrFieldFilter(filter) {
if (filter.op === "==" /* Operator.EQUAL */) {
if (isNanValue(filter.value)) {
return {
unaryFilter: {
field: toFieldPathReference(filter.field),
op: 'IS_NAN'
}
};
}
else if (isNullValue(filter.value)) {
return {
unaryFilter: {
field: toFieldPathReference(filter.field),
op: 'IS_NULL'
}
};
}
}
else if (filter.op === "!=" /* Operator.NOT_EQUAL */) {
if (isNanValue(filter.value)) {
return {
unaryFilter: {
field: toFieldPathReference(filter.field),
op: 'IS_NOT_NAN'
}
};
}
else if (isNullValue(filter.value)) {
return {
unaryFilter: {
field: toFieldPathReference(filter.field),
op: 'IS_NOT_NULL'
}
};
}
}
return {
fieldFilter: {
field: toFieldPathReference(filter.field),
op: toOperatorName(filter.op),
value: filter.value
}
};
}
function fromUnaryFilter(filter) {
switch (filter.unaryFilter.op) {
case 'IS_NAN':
const nanField = fromFieldPathReference(filter.unaryFilter.field);
return FieldFilter.create(nanField, "==" /* Operator.EQUAL */, {
doubleValue: NaN
});
case 'IS_NULL':
const nullField = fromFieldPathReference(filter.unaryFilter.field);
return FieldFilter.create(nullField, "==" /* Operator.EQUAL */, {
nullValue: 'NULL_VALUE'
});
case 'IS_NOT_NAN':
const notNanField = fromFieldPathReference(filter.unaryFilter.field);
return FieldFilter.create(notNanField, "!=" /* Operator.NOT_EQUAL */, {
doubleValue: NaN
});
case 'IS_NOT_NULL':
const notNullField = fromFieldPathReference(filter.unaryFilter.field);
return FieldFilter.create(notNullField, "!=" /* Operator.NOT_EQUAL */, {
nullValue: 'NULL_VALUE'
});
case 'OPERATOR_UNSPECIFIED':
return fail();
default:
return fail();
}
}
function fromFieldFilter(filter) {
return FieldFilter.create(fromFieldPathReference(filter.fieldFilter.field), fromOperatorName(filter.fieldFilter.op), filter.fieldFilter.value);
}
function fromCompositeFilter(filter) {
return CompositeFilter.create(filter.compositeFilter.filters.map(filter => fromFilter(filter)), fromCompositeOperatorName(filter.compositeFilter.op));
}
function toDocumentMask(fieldMask) {
const canonicalFields = [];
fieldMask.fields.forEach(field => canonicalFields.push(field.canonicalString()));
return {
fieldPaths: canonicalFields
};
}
function fromDocumentMask(proto) {
const paths = proto.fieldPaths || [];
return new FieldMask(paths.map(path => FieldPath$1.fromServerFormat(path)));
}
function isValidResourceName(path) {
// Resource names have at least 4 components (project ID, database ID)
return (path.length >= 4 &&
path.get(0) === 'projects' &&
path.get(2) === 'databases');
}
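// Example (illustrative):
//   isValidResourceName(ResourcePath.fromString('projects/p/databases/d'))           -> true
//   isValidResourceName(ResourcePath.fromString('projects/p/databases/d/documents')) -> true
//   isValidResourceName(ResourcePath.fromString('databases/d'))                      -> false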
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An immutable set of metadata that the local store tracks for each target.
*/
class TargetData {
constructor(
/** The target being listened to. */
target,
/**
* The target ID to which the target corresponds; assigned by the
* LocalStore for user listens and by the SyncEngine for limbo watches.
*/
targetId,
/** The purpose of the target. */
purpose,
/**
* The sequence number of the last transaction during which this target data
* was modified.
*/
sequenceNumber,
/** The latest snapshot version seen for this target. */
snapshotVersion = SnapshotVersion.min(),
/**
* The maximum snapshot version at which the associated view
* contained no limbo documents.
*/
lastLimboFreeSnapshotVersion = SnapshotVersion.min(),
/**
* An opaque, server-assigned token that allows watching a target to be
* resumed after disconnecting without retransmitting all the data that
* matches the target. The resume token essentially identifies a point in
* time from which the server should resume sending results.
*/
resumeToken = ByteString.EMPTY_BYTE_STRING) {
this.target = target;
this.targetId = targetId;
this.purpose = purpose;
this.sequenceNumber = sequenceNumber;
this.snapshotVersion = snapshotVersion;
this.lastLimboFreeSnapshotVersion = lastLimboFreeSnapshotVersion;
this.resumeToken = resumeToken;
}
/** Creates a new target data instance with an updated sequence number. */
withSequenceNumber(sequenceNumber) {
return new TargetData(this.target, this.targetId, this.purpose, sequenceNumber, this.snapshotVersion, this.lastLimboFreeSnapshotVersion, this.resumeToken);
}
/**
* Creates a new target data instance with an updated resume token and
* snapshot version.
*/
withResumeToken(resumeToken, snapshotVersion) {
return new TargetData(this.target, this.targetId, this.purpose, this.sequenceNumber, snapshotVersion, this.lastLimboFreeSnapshotVersion, resumeToken);
}
/**
* Creates a new target data instance with an updated last limbo free
* snapshot version number.
*/
withLastLimboFreeSnapshotVersion(lastLimboFreeSnapshotVersion) {
return new TargetData(this.target, this.targetId, this.purpose, this.sequenceNumber, this.snapshotVersion, lastLimboFreeSnapshotVersion, this.resumeToken);
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Serializer for values stored in the LocalStore. */
class LocalSerializer {
constructor(remoteSerializer) {
this.remoteSerializer = remoteSerializer;
}
}
/** Decodes a remote document from local storage into a Document. */
function fromDbRemoteDocument(localSerializer, remoteDoc) {
let doc;
if (remoteDoc.document) {
doc = fromDocument(localSerializer.remoteSerializer, remoteDoc.document, !!remoteDoc.hasCommittedMutations);
}
else if (remoteDoc.noDocument) {
const key = DocumentKey.fromSegments(remoteDoc.noDocument.path);
const version = fromDbTimestamp(remoteDoc.noDocument.readTime);
doc = MutableDocument.newNoDocument(key, version);
if (remoteDoc.hasCommittedMutations) {
doc.setHasCommittedMutations();
}
}
else if (remoteDoc.unknownDocument) {
const key = DocumentKey.fromSegments(remoteDoc.unknownDocument.path);
const version = fromDbTimestamp(remoteDoc.unknownDocument.version);
doc = MutableDocument.newUnknownDocument(key, version);
}
else {
return fail();
}
if (remoteDoc.readTime) {
doc.setReadTime(fromDbTimestampKey(remoteDoc.readTime));
}
return doc;
}
/** Encodes a document for storage locally. */
function toDbRemoteDocument(localSerializer, document) {
const key = document.key;
const remoteDoc = {
prefixPath: key.getCollectionPath().popLast().toArray(),
collectionGroup: key.collectionGroup,
documentId: key.path.lastSegment(),
readTime: toDbTimestampKey(document.readTime),
hasCommittedMutations: document.hasCommittedMutations
};
if (document.isFoundDocument()) {
remoteDoc.document = toDocument(localSerializer.remoteSerializer, document);
}
else if (document.isNoDocument()) {
remoteDoc.noDocument = {
path: key.path.toArray(),
readTime: toDbTimestamp(document.version)
};
}
else if (document.isUnknownDocument()) {
remoteDoc.unknownDocument = {
path: key.path.toArray(),
version: toDbTimestamp(document.version)
};
}
else {
return fail();
}
return remoteDoc;
}
function toDbTimestampKey(snapshotVersion) {
const timestamp = snapshotVersion.toTimestamp();
return [timestamp.seconds, timestamp.nanoseconds];
}
function fromDbTimestampKey(dbTimestampKey) {
const timestamp = new Timestamp(dbTimestampKey[0], dbTimestampKey[1]);
return SnapshotVersion.fromTimestamp(timestamp);
}
function toDbTimestamp(snapshotVersion) {
const timestamp = snapshotVersion.toTimestamp();
return { seconds: timestamp.seconds, nanoseconds: timestamp.nanoseconds };
}
function fromDbTimestamp(dbTimestamp) {
const timestamp = new Timestamp(dbTimestamp.seconds, dbTimestamp.nanoseconds);
return SnapshotVersion.fromTimestamp(timestamp);
}
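// Example (illustrative): the two local encodings of the same version differ in
// shape; for seconds = 10, nanoseconds = 500:
//   toDbTimestampKey(v) -> [10, 500]  (an array, usable as an IndexedDB key)
//   toDbTimestamp(v)    -> { seconds: 10, nanoseconds: 500 }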
/** Encodes a batch of mutations into a DbMutationBatch for local storage. */
function toDbMutationBatch(localSerializer, userId, batch) {
const serializedBaseMutations = batch.baseMutations.map(m => toMutation(localSerializer.remoteSerializer, m));
const serializedMutations = batch.mutations.map(m => toMutation(localSerializer.remoteSerializer, m));
return {
userId,
batchId: batch.batchId,
localWriteTimeMs: batch.localWriteTime.toMillis(),
baseMutations: serializedBaseMutations,
mutations: serializedMutations
};
}
/** Decodes a DbMutationBatch into a MutationBatch */
function fromDbMutationBatch(localSerializer, dbBatch) {
const baseMutations = (dbBatch.baseMutations || []).map(m => fromMutation(localSerializer.remoteSerializer, m));
// Squash old transform mutations into existing patch or set mutations.
// The replacement of representing `transforms` with `update_transforms`
// on the SDK means that old `transform` mutations stored in IndexedDB need
// to be updated to `update_transforms`.
// TODO(b/174608374): Remove this code once we perform a schema migration.
for (let i = 0; i < dbBatch.mutations.length - 1; ++i) {
const currentMutation = dbBatch.mutations[i];
const hasTransform = i + 1 < dbBatch.mutations.length &&
dbBatch.mutations[i + 1].transform !== undefined;
if (hasTransform) {
const transformMutation = dbBatch.mutations[i + 1];
currentMutation.updateTransforms =
transformMutation.transform.fieldTransforms;
dbBatch.mutations.splice(i + 1, 1);
++i;
}
}
const mutations = dbBatch.mutations.map(m => fromMutation(localSerializer.remoteSerializer, m));
const timestamp = Timestamp.fromMillis(dbBatch.localWriteTimeMs);
return new MutationBatch(dbBatch.batchId, timestamp, baseMutations, mutations);
}
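// Example (illustrative): an old-format batch that stored a standalone
// transform mutation, e.g.
//   [{ update: {...} }, { transform: { fieldTransforms: [t] } }]
// is squashed in place by the loop above into the new representation
//   [{ update: {...}, updateTransforms: [t] }]
// before each entry is decoded via fromMutation.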
/** Decodes a DbTarget into TargetData */
function fromDbTarget(dbTarget) {
const version = fromDbTimestamp(dbTarget.readTime);
const lastLimboFreeSnapshotVersion = dbTarget.lastLimboFreeSnapshotVersion !== undefined
? fromDbTimestamp(dbTarget.lastLimboFreeSnapshotVersion)
: SnapshotVersion.min();
let target;
if (isDocumentQuery(dbTarget.query)) {
target = fromDocumentsTarget(dbTarget.query);
}
else {
target = fromQueryTarget(dbTarget.query);
}
return new TargetData(target, dbTarget.targetId, 0 /* TargetPurpose.Listen */, dbTarget.lastListenSequenceNumber, version, lastLimboFreeSnapshotVersion, ByteString.fromBase64String(dbTarget.resumeToken));
}
/** Encodes TargetData into a DbTarget for storage locally. */
function toDbTarget(localSerializer, targetData) {
const dbTimestamp = toDbTimestamp(targetData.snapshotVersion);
const dbLastLimboFreeTimestamp = toDbTimestamp(targetData.lastLimboFreeSnapshotVersion);
let queryProto;
if (targetIsDocumentTarget(targetData.target)) {
queryProto = toDocumentsTarget(localSerializer.remoteSerializer, targetData.target);
}
else {
queryProto = toQueryTarget(localSerializer.remoteSerializer, targetData.target);
}
// We can't store the resumeToken as a ByteString in IndexedDb, so we
// convert it to a base64 string for storage.
const resumeToken = targetData.resumeToken.toBase64();
// lastListenSequenceNumber is always 0 until we do real GC.
return {
targetId: targetData.targetId,
canonicalId: canonifyTarget(targetData.target),
readTime: dbTimestamp,
resumeToken,
lastListenSequenceNumber: targetData.sequenceNumber,
lastLimboFreeSnapshotVersion: dbLastLimboFreeTimestamp,
query: queryProto
};
}
/**
* A helper function for figuring out what kind of query has been stored.
*/
function isDocumentQuery(dbQuery) {
return dbQuery.documents !== undefined;
}
/** Decodes a DbBundle into a BundleMetadata object. */
function fromDbBundle(dbBundle) {
return {
id: dbBundle.bundleId,
createTime: fromDbTimestamp(dbBundle.createTime),
version: dbBundle.version
};
}
/** Encodes a BundleMetadata to a DbBundle. */
function toDbBundle(metadata) {
return {
bundleId: metadata.id,
createTime: toDbTimestamp(fromVersion(metadata.createTime)),
version: metadata.version
};
}
/** Decodes a DbNamedQuery into a NamedQuery. */
function fromDbNamedQuery(dbNamedQuery) {
return {
name: dbNamedQuery.name,
query: fromBundledQuery(dbNamedQuery.bundledQuery),
readTime: fromDbTimestamp(dbNamedQuery.readTime)
};
}
/** Encodes a NamedQuery from a bundle proto to a DbNamedQuery. */
function toDbNamedQuery(query) {
return {
name: query.name,
readTime: toDbTimestamp(fromVersion(query.readTime)),
bundledQuery: query.bundledQuery
};
}
/**
 * Decodes a `BundledQuery` from a bundle proto into a Query object.
 *
 * This reconstructs the original query used to build the bundle being loaded,
 * including features that exist only in SDKs (for example: limit-to-last).
 */
function fromBundledQuery(bundledQuery) {
const query = convertQueryTargetToQuery({
parent: bundledQuery.parent,
structuredQuery: bundledQuery.structuredQuery
});
if (bundledQuery.limitType === 'LAST') {
return queryWithLimit(query, query.limit, "L" /* LimitType.Last */);
}
return query;
}
/** Decodes a NamedQuery proto object into a NamedQuery model object. */
function fromProtoNamedQuery(namedQuery) {
return {
name: namedQuery.name,
query: fromBundledQuery(namedQuery.bundledQuery),
readTime: fromVersion(namedQuery.readTime)
};
}
/** Decodes a BundleMetadata proto into a BundleMetadata object. */
function fromBundleMetadata(metadata) {
return {
id: metadata.id,
version: metadata.version,
createTime: fromVersion(metadata.createTime)
};
}
/** Decodes a DbDocumentOverlay object into an Overlay model object. */
function fromDbDocumentOverlay(localSerializer, dbDocumentOverlay) {
return new Overlay(dbDocumentOverlay.largestBatchId, fromMutation(localSerializer.remoteSerializer, dbDocumentOverlay.overlayMutation));
}
/** Encodes an Overlay model object into a DbDocumentOverlay object. */
function toDbDocumentOverlay(localSerializer, userId, overlay) {
const [_, collectionPath, documentId] = toDbDocumentOverlayKey(userId, overlay.mutation.key);
return {
userId,
collectionPath,
documentId,
collectionGroup: overlay.mutation.key.getCollectionGroup(),
largestBatchId: overlay.largestBatchId,
overlayMutation: toMutation(localSerializer.remoteSerializer, overlay.mutation)
};
}
/**
* Returns the DbDocumentOverlayKey corresponding to the given user and
* document key.
*/
function toDbDocumentOverlayKey(userId, docKey) {
const docId = docKey.path.lastSegment();
const collectionPath = encodeResourcePath(docKey.path.popLast());
return [userId, collectionPath, docId];
}
function toDbIndexConfiguration(index) {
return {
indexId: index.indexId,
collectionGroup: index.collectionGroup,
fields: index.fields.map(s => [s.fieldPath.canonicalString(), s.kind])
};
}
function fromDbIndexConfiguration(index, state) {
const decodedState = state
? new IndexState(state.sequenceNumber, new IndexOffset(fromDbTimestamp(state.readTime), new DocumentKey(decodeResourcePath(state.documentKey)), state.largestBatchId))
: IndexState.empty();
const decodedSegments = index.fields.map(([fieldPath, kind]) => new IndexSegment(FieldPath$1.fromServerFormat(fieldPath), kind));
return new FieldIndex(index.indexId, index.collectionGroup, decodedSegments, decodedState);
}
function toDbIndexState(indexId, user, sequenceNumber, offset) {
return {
indexId,
uid: user.uid || '',
sequenceNumber,
readTime: toDbTimestamp(offset.readTime),
documentKey: encodeResourcePath(offset.documentKey.path),
largestBatchId: offset.largestBatchId
};
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class IndexedDbBundleCache {
getBundleMetadata(transaction, bundleId) {
return bundlesStore(transaction)
.get(bundleId)
.next(bundle => {
if (bundle) {
return fromDbBundle(bundle);
}
return undefined;
});
}
saveBundleMetadata(transaction, bundleMetadata) {
return bundlesStore(transaction).put(toDbBundle(bundleMetadata));
}
getNamedQuery(transaction, queryName) {
return namedQueriesStore(transaction)
.get(queryName)
.next(query => {
if (query) {
return fromDbNamedQuery(query);
}
return undefined;
});
}
saveNamedQuery(transaction, query) {
return namedQueriesStore(transaction).put(toDbNamedQuery(query));
}
}
/**
* Helper to get a typed SimpleDbStore for the bundles object store.
*/
function bundlesStore(txn) {
return getStore(txn, DbBundleStore);
}
/**
* Helper to get a typed SimpleDbStore for the namedQueries object store.
*/
function namedQueriesStore(txn) {
return getStore(txn, DbNamedQueryStore);
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Implementation of DocumentOverlayCache using IndexedDb.
*/
class IndexedDbDocumentOverlayCache {
/**
* @param serializer - The document serializer.
* @param userId - The userId for which we are accessing overlays.
*/
constructor(serializer, userId) {
this.serializer = serializer;
this.userId = userId;
}
static forUser(serializer, user) {
const userId = user.uid || '';
return new IndexedDbDocumentOverlayCache(serializer, userId);
}
getOverlay(transaction, key) {
return documentOverlayStore(transaction)
.get(toDbDocumentOverlayKey(this.userId, key))
.next(dbOverlay => {
if (dbOverlay) {
return fromDbDocumentOverlay(this.serializer, dbOverlay);
}
return null;
});
}
getOverlays(transaction, keys) {
const result = newOverlayMap();
return PersistencePromise.forEach(keys, (key) => {
return this.getOverlay(transaction, key).next(overlay => {
if (overlay !== null) {
result.set(key, overlay);
}
});
}).next(() => result);
}
saveOverlays(transaction, largestBatchId, overlays) {
const promises = [];
overlays.forEach((_, mutation) => {
const overlay = new Overlay(largestBatchId, mutation);
promises.push(this.saveOverlay(transaction, overlay));
});
return PersistencePromise.waitFor(promises);
}
removeOverlaysForBatchId(transaction, documentKeys, batchId) {
const collectionPaths = new Set();
// Get the set of unique collection paths.
documentKeys.forEach(key => collectionPaths.add(encodeResourcePath(key.getCollectionPath())));
const promises = [];
collectionPaths.forEach(collectionPath => {
const range = IDBKeyRange.bound([this.userId, collectionPath, batchId], [this.userId, collectionPath, batchId + 1],
/*lowerOpen=*/ false,
/*upperOpen=*/ true);
promises.push(documentOverlayStore(transaction).deleteAll(DbDocumentOverlayCollectionPathOverlayIndex, range));
});
return PersistencePromise.waitFor(promises);
}
getOverlaysForCollection(transaction, collection, sinceBatchId) {
const result = newOverlayMap();
const collectionPath = encodeResourcePath(collection);
// We want batch IDs larger than `sinceBatchId`, and so the lower bound
// is not inclusive.
const range = IDBKeyRange.bound([this.userId, collectionPath, sinceBatchId], [this.userId, collectionPath, Number.POSITIVE_INFINITY],
/*lowerOpen=*/ true);
return documentOverlayStore(transaction)
.loadAll(DbDocumentOverlayCollectionPathOverlayIndex, range)
.next(dbOverlays => {
for (const dbOverlay of dbOverlays) {
const overlay = fromDbDocumentOverlay(this.serializer, dbOverlay);
result.set(overlay.getKey(), overlay);
}
return result;
});
}
getOverlaysForCollectionGroup(transaction, collectionGroup, sinceBatchId, count) {
const result = newOverlayMap();
let currentBatchId = undefined;
// We want batch IDs larger than `sinceBatchId`, and so the lower bound
// is not inclusive.
const range = IDBKeyRange.bound([this.userId, collectionGroup, sinceBatchId], [this.userId, collectionGroup, Number.POSITIVE_INFINITY],
/*lowerOpen=*/ true);
return documentOverlayStore(transaction)
.iterate({
index: DbDocumentOverlayCollectionGroupOverlayIndex,
range
}, (_, dbOverlay, control) => {
// We do not want to return partial batch overlays, even if the size
// of the result set exceeds the given `count` argument. Therefore, we
// continue to aggregate results even after the result size exceeds
// `count` if there are more overlays from the `currentBatchId`.
const overlay = fromDbDocumentOverlay(this.serializer, dbOverlay);
if (result.size() < count ||
overlay.largestBatchId === currentBatchId) {
result.set(overlay.getKey(), overlay);
currentBatchId = overlay.largestBatchId;
}
else {
control.done();
}
})
.next(() => result);
}
saveOverlay(transaction, overlay) {
return documentOverlayStore(transaction).put(toDbDocumentOverlay(this.serializer, this.userId, overlay));
}
}
/**
* Helper to get a typed SimpleDbStore for the document overlay object store.
*/
function documentOverlayStore(txn) {
return getStore(txn, DbDocumentOverlayStore);
}
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Note: This code is copied from the backend. Code that is not used by
// Firestore was removed.
const INDEX_TYPE_NULL = 5;
const INDEX_TYPE_BOOLEAN = 10;
const INDEX_TYPE_NAN = 13;
const INDEX_TYPE_NUMBER = 15;
const INDEX_TYPE_TIMESTAMP = 20;
const INDEX_TYPE_STRING = 25;
const INDEX_TYPE_BLOB = 30;
const INDEX_TYPE_REFERENCE = 37;
const INDEX_TYPE_GEOPOINT = 45;
const INDEX_TYPE_ARRAY = 50;
const INDEX_TYPE_MAP = 55;
const INDEX_TYPE_REFERENCE_SEGMENT = 60;
// A terminator that indicates that a truncatable value was not truncated.
// This must be smaller than all other type labels.
const NOT_TRUNCATED = 2;
/** Firestore index value writer. */
class FirestoreIndexValueWriter {
constructor() { }
// The write methods below short-circuit writing terminators for values
// containing a (terminating) truncated value.
//
// As an example, consider the resulting encoding for:
//
// ["bar", [2, "foo"]] -> (STRING, "bar", TERM, ARRAY, NUMBER, 2, STRING, "foo", TERM, TERM, TERM)
// ["bar", [2, truncated("foo")]] -> (STRING, "bar", TERM, ARRAY, NUMBER, 2, STRING, "foo", TRUNC)
// ["bar", truncated(["foo"])] -> (STRING, "bar", TERM, ARRAY. STRING, "foo", TERM, TRUNC)
/** Writes an index value. */
writeIndexValue(value, encoder) {
this.writeIndexValueAux(value, encoder);
// Write separator to split index values
// (see go/firestore-storage-format#encodings).
encoder.writeInfinity();
}
writeIndexValueAux(indexValue, encoder) {
if ('nullValue' in indexValue) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_NULL);
}
else if ('booleanValue' in indexValue) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_BOOLEAN);
encoder.writeNumber(indexValue.booleanValue ? 1 : 0);
}
else if ('integerValue' in indexValue) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_NUMBER);
encoder.writeNumber(normalizeNumber(indexValue.integerValue));
}
else if ('doubleValue' in indexValue) {
const n = normalizeNumber(indexValue.doubleValue);
if (isNaN(n)) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_NAN);
}
else {
this.writeValueTypeLabel(encoder, INDEX_TYPE_NUMBER);
if (isNegativeZero(n)) {
// -0.0, 0 and 0.0 are all considered the same
encoder.writeNumber(0.0);
}
else {
encoder.writeNumber(n);
}
}
}
else if ('timestampValue' in indexValue) {
const timestamp = indexValue.timestampValue;
this.writeValueTypeLabel(encoder, INDEX_TYPE_TIMESTAMP);
if (typeof timestamp === 'string') {
encoder.writeString(timestamp);
}
else {
encoder.writeString(`${timestamp.seconds || ''}`);
encoder.writeNumber(timestamp.nanos || 0);
}
}
else if ('stringValue' in indexValue) {
this.writeIndexString(indexValue.stringValue, encoder);
this.writeTruncationMarker(encoder);
}
else if ('bytesValue' in indexValue) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_BLOB);
encoder.writeBytes(normalizeByteString(indexValue.bytesValue));
this.writeTruncationMarker(encoder);
}
else if ('referenceValue' in indexValue) {
this.writeIndexEntityRef(indexValue.referenceValue, encoder);
}
else if ('geoPointValue' in indexValue) {
const geoPoint = indexValue.geoPointValue;
this.writeValueTypeLabel(encoder, INDEX_TYPE_GEOPOINT);
encoder.writeNumber(geoPoint.latitude || 0);
encoder.writeNumber(geoPoint.longitude || 0);
}
else if ('mapValue' in indexValue) {
if (isMaxValue(indexValue)) {
this.writeValueTypeLabel(encoder, Number.MAX_SAFE_INTEGER);
}
else {
this.writeIndexMap(indexValue.mapValue, encoder);
this.writeTruncationMarker(encoder);
}
}
else if ('arrayValue' in indexValue) {
this.writeIndexArray(indexValue.arrayValue, encoder);
this.writeTruncationMarker(encoder);
}
else {
fail();
}
}
writeIndexString(stringIndexValue, encoder) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_STRING);
this.writeUnlabeledIndexString(stringIndexValue, encoder);
}
writeUnlabeledIndexString(stringIndexValue, encoder) {
encoder.writeString(stringIndexValue);
}
writeIndexMap(mapIndexValue, encoder) {
const map = mapIndexValue.fields || {};
this.writeValueTypeLabel(encoder, INDEX_TYPE_MAP);
for (const key of Object.keys(map)) {
this.writeIndexString(key, encoder);
this.writeIndexValueAux(map[key], encoder);
}
}
writeIndexArray(arrayIndexValue, encoder) {
const values = arrayIndexValue.values || [];
this.writeValueTypeLabel(encoder, INDEX_TYPE_ARRAY);
for (const element of values) {
this.writeIndexValueAux(element, encoder);
}
}
writeIndexEntityRef(referenceValue, encoder) {
this.writeValueTypeLabel(encoder, INDEX_TYPE_REFERENCE);
const path = DocumentKey.fromName(referenceValue).path;
path.forEach(segment => {
this.writeValueTypeLabel(encoder, INDEX_TYPE_REFERENCE_SEGMENT);
this.writeUnlabeledIndexString(segment, encoder);
});
}
writeValueTypeLabel(encoder, typeOrder) {
encoder.writeNumber(typeOrder);
}
writeTruncationMarker(encoder) {
// While the SDK does not implement truncation, the truncation marker is
// used to terminate all variable length values (which are strings, bytes,
// references, arrays and maps).
encoder.writeNumber(NOT_TRUNCATED);
}
}
FirestoreIndexValueWriter.INSTANCE = new FirestoreIndexValueWriter();
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** These constants are taken from the backend. */
const MIN_SURROGATE = '\uD800';
const MAX_SURROGATE = '\uDBFF';
const ESCAPE1 = 0x00;
const NULL_BYTE = 0xff; // Combined with ESCAPE1
const SEPARATOR = 0x01; // Combined with ESCAPE1
const ESCAPE2 = 0xff;
const INFINITY = 0xff; // Combined with ESCAPE2
const FF_BYTE = 0x00; // Combined with ESCAPE2
const LONG_SIZE = 64;
const BYTE_SIZE = 8;
/**
* The default size of the buffer. This is arbitrary, but likely larger than
 * most index values so that fewer copies of the underlying buffer will be made.
 * For large values, a single copy will be made to double the buffer length.
*/
const DEFAULT_BUFFER_SIZE = 1024;
/** Converts a JavaScript number to a byte array (using big endian encoding). */
function doubleToLongBits(value) {
const dv = new DataView(new ArrayBuffer(8));
dv.setFloat64(0, value, /* littleEndian= */ false);
return new Uint8Array(dv.buffer);
}
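// Example (illustrative): 1.0 is 0x3FF0000000000000 in IEEE 754, so
// doubleToLongBits(1.0) yields
//   Uint8Array [0x3f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]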
/**
 * Counts the number of leading zeros in a byte.
*
* Visible for testing.
*/
function numberOfLeadingZerosInByte(x) {
if (x === 0) {
return 8;
}
let zeros = 0;
if (x >> 4 === 0) {
// Test if the first four bits are zero.
zeros += 4;
x = x << 4;
}
if (x >> 6 === 0) {
// Test if the first two (or next two) bits are zero.
zeros += 2;
x = x << 2;
}
if (x >> 7 === 0) {
// Test if the remaining bit is zero.
zeros += 1;
}
return zeros;
}
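// Example (illustrative): 0x10 is 0b00010000; the four-bit test fails, and the
// two-bit and one-bit tests add 2 and 1, so numberOfLeadingZerosInByte(0x10) === 3.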
/** Counts the number of leading zeros in the given byte array. */
function numberOfLeadingZeros(bytes) {
let leadingZeros = 0;
for (let i = 0; i < 8; ++i) {
const zeros = numberOfLeadingZerosInByte(bytes[i] & 0xff);
leadingZeros += zeros;
if (zeros !== 8) {
break;
}
}
return leadingZeros;
}
/**
* Returns the number of bytes required to store "value". Leading zero bytes
* are skipped.
*/
function unsignedNumLength(value) {
// This is just the number of bytes for the unsigned representation of the number.
const numBits = LONG_SIZE - numberOfLeadingZeros(value);
return Math.ceil(numBits / BYTE_SIZE);
}
/**
* OrderedCodeWriter is a minimal-allocation implementation of the writing
* behavior defined by the backend.
*
* The code is ported from its Java counterpart.
*/
class OrderedCodeWriter {
constructor() {
this.buffer = new Uint8Array(DEFAULT_BUFFER_SIZE);
this.position = 0;
}
writeBytesAscending(value) {
const it = value[Symbol.iterator]();
let byte = it.next();
while (!byte.done) {
this.writeByteAscending(byte.value);
byte = it.next();
}
this.writeSeparatorAscending();
}
writeBytesDescending(value) {
const it = value[Symbol.iterator]();
let byte = it.next();
while (!byte.done) {
this.writeByteDescending(byte.value);
byte = it.next();
}
this.writeSeparatorDescending();
}
/** Writes utf8 bytes into this byte sequence, ascending. */
writeUtf8Ascending(sequence) {
for (const c of sequence) {
const charCode = c.charCodeAt(0);
if (charCode < 0x80) {
this.writeByteAscending(charCode);
}
else if (charCode < 0x800) {
this.writeByteAscending((0x0f << 6) | (charCode >>> 6));
this.writeByteAscending(0x80 | (0x3f & charCode));
}
else if (c < MIN_SURROGATE || MAX_SURROGATE < c) {
this.writeByteAscending((0x0f << 5) | (charCode >>> 12));
this.writeByteAscending(0x80 | (0x3f & (charCode >>> 6)));
this.writeByteAscending(0x80 | (0x3f & charCode));
}
else {
const codePoint = c.codePointAt(0);
this.writeByteAscending((0x0f << 4) | (codePoint >>> 18));
this.writeByteAscending(0x80 | (0x3f & (codePoint >>> 12)));
this.writeByteAscending(0x80 | (0x3f & (codePoint >>> 6)));
this.writeByteAscending(0x80 | (0x3f & codePoint));
}
}
this.writeSeparatorAscending();
}
/** Writes utf8 bytes into this byte sequence, descending */
writeUtf8Descending(sequence) {
for (const c of sequence) {
const charCode = c.charCodeAt(0);
if (charCode < 0x80) {
this.writeByteDescending(charCode);
}
else if (charCode < 0x800) {
this.writeByteDescending((0x0f << 6) | (charCode >>> 6));
this.writeByteDescending(0x80 | (0x3f & charCode));
}
else if (c < MIN_SURROGATE || MAX_SURROGATE < c) {
this.writeByteDescending((0x0f << 5) | (charCode >>> 12));
this.writeByteDescending(0x80 | (0x3f & (charCode >>> 6)));
this.writeByteDescending(0x80 | (0x3f & charCode));
}
else {
const codePoint = c.codePointAt(0);
this.writeByteDescending((0x0f << 4) | (codePoint >>> 18));
this.writeByteDescending(0x80 | (0x3f & (codePoint >>> 12)));
this.writeByteDescending(0x80 | (0x3f & (codePoint >>> 6)));
this.writeByteDescending(0x80 | (0x3f & codePoint));
}
}
this.writeSeparatorDescending();
}
writeNumberAscending(val) {
// Values are encoded with a single byte length prefix, followed by the
// actual value in big-endian format with leading 0 bytes dropped.
const value = this.toOrderedBits(val);
const len = unsignedNumLength(value);
this.ensureAvailable(1 + len);
this.buffer[this.position++] = len & 0xff; // Write the length
for (let i = value.length - len; i < value.length; ++i) {
this.buffer[this.position++] = value[i] & 0xff;
}
}
writeNumberDescending(val) {
// Values are encoded with a single byte length prefix, followed by the
// inverted value in big-endian format with leading 0 bytes dropped.
const value = this.toOrderedBits(val);
const len = unsignedNumLength(value);
this.ensureAvailable(1 + len);
this.buffer[this.position++] = ~(len & 0xff); // Write the length
for (let i = value.length - len; i < value.length; ++i) {
this.buffer[this.position++] = ~(value[i] & 0xff);
}
}
/**
* Writes the "infinity" byte sequence that sorts after all other byte
* sequences written in ascending order.
*/
writeInfinityAscending() {
this.writeEscapedByteAscending(ESCAPE2);
this.writeEscapedByteAscending(INFINITY);
}
/**
* Writes the "infinity" byte sequence that sorts before all other byte
* sequences written in descending order.
*/
writeInfinityDescending() {
this.writeEscapedByteDescending(ESCAPE2);
this.writeEscapedByteDescending(INFINITY);
}
/**
* Resets the buffer such that it is the same as when it was newly
* constructed.
*/
reset() {
this.position = 0;
}
seed(encodedBytes) {
this.ensureAvailable(encodedBytes.length);
this.buffer.set(encodedBytes, this.position);
this.position += encodedBytes.length;
}
/** Makes a copy of the encoded bytes in this buffer. */
encodedBytes() {
return this.buffer.slice(0, this.position);
}
/**
* Encodes `val` so that the byte-wise lexicographic order of the result
* matches IEEE 754 floating-point comparison, with the following exceptions:
* -0.0 < 0.0
* all non-NaN < NaN
* NaN = NaN
*/
toOrderedBits(val) {
const value = doubleToLongBits(val);
// Check if the first bit is set. We use a bit mask since value[0] is
// encoded as a number from 0 to 255.
const isNegative = (value[0] & 0x80) !== 0;
// Invert the two's complement encoding to get natural ordering.
value[0] ^= isNegative ? 0xff : 0x80;
for (let i = 1; i < value.length; ++i) {
value[i] ^= isNegative ? 0xff : 0x00;
}
return value;
}
/** Writes a single byte ascending to the buffer. */
writeByteAscending(b) {
const masked = b & 0xff;
if (masked === ESCAPE1) {
this.writeEscapedByteAscending(ESCAPE1);
this.writeEscapedByteAscending(NULL_BYTE);
}
else if (masked === ESCAPE2) {
this.writeEscapedByteAscending(ESCAPE2);
this.writeEscapedByteAscending(FF_BYTE);
}
else {
this.writeEscapedByteAscending(masked);
}
}
/** Writes a single byte descending to the buffer. */
writeByteDescending(b) {
const masked = b & 0xff;
if (masked === ESCAPE1) {
this.writeEscapedByteDescending(ESCAPE1);
this.writeEscapedByteDescending(NULL_BYTE);
}
else if (masked === ESCAPE2) {
this.writeEscapedByteDescending(ESCAPE2);
this.writeEscapedByteDescending(FF_BYTE);
}
else {
this.writeEscapedByteDescending(masked);
}
}
writeSeparatorAscending() {
this.writeEscapedByteAscending(ESCAPE1);
this.writeEscapedByteAscending(SEPARATOR);
}
writeSeparatorDescending() {
this.writeEscapedByteDescending(ESCAPE1);
this.writeEscapedByteDescending(SEPARATOR);
}
writeEscapedByteAscending(b) {
this.ensureAvailable(1);
this.buffer[this.position++] = b;
}
writeEscapedByteDescending(b) {
this.ensureAvailable(1);
this.buffer[this.position++] = ~b;
}
ensureAvailable(bytes) {
const minCapacity = bytes + this.position;
if (minCapacity <= this.buffer.length) {
return;
}
// Try doubling.
let newLength = this.buffer.length * 2;
// Still not big enough? Just allocate the right size.
if (newLength < minCapacity) {
newLength = minCapacity;
}
// Create the new buffer.
const newBuffer = new Uint8Array(newLength);
newBuffer.set(this.buffer); // copy old data
this.buffer = newBuffer;
}
}
class AscendingIndexByteEncoder {
constructor(orderedCode) {
this.orderedCode = orderedCode;
}
writeBytes(value) {
this.orderedCode.writeBytesAscending(value);
}
writeString(value) {
this.orderedCode.writeUtf8Ascending(value);
}
writeNumber(value) {
this.orderedCode.writeNumberAscending(value);
}
writeInfinity() {
this.orderedCode.writeInfinityAscending();
}
}
class DescendingIndexByteEncoder {
constructor(orderedCode) {
this.orderedCode = orderedCode;
}
writeBytes(value) {
this.orderedCode.writeBytesDescending(value);
}
writeString(value) {
this.orderedCode.writeUtf8Descending(value);
}
writeNumber(value) {
this.orderedCode.writeNumberDescending(value);
}
writeInfinity() {
this.orderedCode.writeInfinityDescending();
}
}
/**
* Implements `DirectionalIndexByteEncoder` using `OrderedCodeWriter` for the
* actual encoding.
*/
class IndexByteEncoder {
constructor() {
this.orderedCode = new OrderedCodeWriter();
this.ascending = new AscendingIndexByteEncoder(this.orderedCode);
this.descending = new DescendingIndexByteEncoder(this.orderedCode);
}
seed(encodedBytes) {
this.orderedCode.seed(encodedBytes);
}
forKind(kind) {
return kind === 0 /* IndexKind.ASCENDING */ ? this.ascending : this.descending;
}
encodedBytes() {
return this.orderedCode.encodedBytes();
}
reset() {
this.orderedCode.reset();
}
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Represents an index entry saved by the SDK in persisted storage. */
class IndexEntry {
constructor(indexId, documentKey, arrayValue, directionalValue) {
this.indexId = indexId;
this.documentKey = documentKey;
this.arrayValue = arrayValue;
this.directionalValue = directionalValue;
}
/**
* Returns an IndexEntry entry that sorts immediately after the current
* directional value.
*/
successor() {
const currentLength = this.directionalValue.length;
const newLength = currentLength === 0 || this.directionalValue[currentLength - 1] === 255
? currentLength + 1
: currentLength;
const successor = new Uint8Array(newLength);
successor.set(this.directionalValue, 0);
if (newLength !== currentLength) {
successor.set([0], this.directionalValue.length);
}
else {
++successor[successor.length - 1];
}
return new IndexEntry(this.indexId, this.documentKey, this.arrayValue, successor);
}
}
function indexEntryComparator(left, right) {
let cmp = left.indexId - right.indexId;
if (cmp !== 0) {
return cmp;
}
cmp = compareByteArrays(left.arrayValue, right.arrayValue);
if (cmp !== 0) {
return cmp;
}
cmp = compareByteArrays(left.directionalValue, right.directionalValue);
if (cmp !== 0) {
return cmp;
}
return DocumentKey.comparator(left.documentKey, right.documentKey);
}
function compareByteArrays(left, right) {
for (let i = 0; i < left.length && i < right.length; ++i) {
const compare = left[i] - right[i];
if (compare !== 0) {
return compare;
}
}
return left.length - right.length;
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A light query planner for Firestore.
*
* This class matches a `FieldIndex` against a Firestore Query `Target`. It
* determines whether a given index can be used to serve the specified target.
*
* The following table showcases some possible index configurations:
*
* Query | Index
* -----------------------------------------------------------------------------
* where('a', '==', 'a').where('b', '==', 'b') | a ASC, b DESC
* where('a', '==', 'a').where('b', '==', 'b') | a ASC
* where('a', '==', 'a').where('b', '==', 'b') | b DESC
* where('a', '>=', 'a').orderBy('a') | a ASC
* where('a', '>=', 'a').orderBy('a', 'desc') | a DESC
* where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC, b ASC
* where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC
* where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS, b ASC
* where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS
*/
class TargetIndexMatcher {
constructor(target) {
this.collectionId =
target.collectionGroup != null
? target.collectionGroup
: target.path.lastSegment();
this.orderBys = target.orderBy;
this.equalityFilters = [];
for (const filter of target.filters) {
const fieldFilter = filter;
if (fieldFilter.isInequality()) {
this.inequalityFilter = fieldFilter;
}
else {
this.equalityFilters.push(fieldFilter);
}
}
}
/**
* Returns whether the index can be used to serve the TargetIndexMatcher's
* target.
*
* An index is considered capable of serving the target when:
* - The target uses all index segments for its filters and orderBy clauses.
* The target can have additional filter and orderBy clauses, but not
* fewer.
* - If an ArrayContains/ArrayContainsAny filter is used, the index must also
* have a corresponding `CONTAINS` segment.
* - All directional index segments can be mapped to the target as a series of
* equality filters, a single inequality filter and a series of orderBy
* clauses.
* - The segments that represent the equality filters may appear out of order.
* - The optional segment for the inequality filter must appear after all
* equality segments.
* - The segments that represent the orderBy clauses of the target must appear
* in order after all equality and inequality segments. Single orderBy
* clauses cannot be skipped, but a continuous orderBy suffix may be
* omitted.
*/
servedByIndex(index) {
hardAssert(index.collectionGroup === this.collectionId);
// If there is an array element, find a matching filter.
const arraySegment = fieldIndexGetArraySegment(index);
if (arraySegment !== undefined &&
!this.hasMatchingEqualityFilter(arraySegment)) {
return false;
}
const segments = fieldIndexGetDirectionalSegments(index);
let segmentIndex = 0;
let orderBysIndex = 0;
// Process all equalities first. Equalities can appear out of order.
for (; segmentIndex < segments.length; ++segmentIndex) {
// We attempt to greedily match all segments to equality filters. If a
// filter matches an index segment, we can mark the segment as used.
// Since it is not possible to use the same field path in both an equality
// and an inequality/orderBy clause, we do not have to consider the possibility
// that a matching equality segment should instead be used to map to an
// inequality filter or orderBy clause.
if (!this.hasMatchingEqualityFilter(segments[segmentIndex])) {
// If we cannot find a matching filter, we need to verify whether the
// remaining segments map to the target's inequality and its orderBy
// clauses.
break;
}
}
// If we have already processed all segments, all of them serve the
// equality filters and we do not need to map any segments to the
// target's inequality and orderBy clauses.
if (segmentIndex === segments.length) {
return true;
}
// If there is an inequality filter, the next segment must match both the
// filter and the first orderBy clause.
if (this.inequalityFilter !== undefined) {
const segment = segments[segmentIndex];
if (!this.matchesFilter(this.inequalityFilter, segment) ||
!this.matchesOrderBy(this.orderBys[orderBysIndex++], segment)) {
return false;
}
++segmentIndex;
}
// All remaining segments need to represent the prefix of the target's
// orderBy.
for (; segmentIndex < segments.length; ++segmentIndex) {
const segment = segments[segmentIndex];
if (orderBysIndex >= this.orderBys.length ||
!this.matchesOrderBy(this.orderBys[orderBysIndex++], segment)) {
return false;
}
}
return true;
}
hasMatchingEqualityFilter(segment) {
for (const filter of this.equalityFilters) {
if (this.matchesFilter(filter, segment)) {
return true;
}
}
return false;
}
matchesFilter(filter, segment) {
if (filter === undefined || !filter.field.isEqual(segment.fieldPath)) {
return false;
}
const isArrayOperator = filter.op === "array-contains" /* Operator.ARRAY_CONTAINS */ ||
filter.op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */;
return (segment.kind === 2 /* IndexKind.CONTAINS */) === isArrayOperator;
}
matchesOrderBy(orderBy, segment) {
if (!orderBy.field.isEqual(segment.fieldPath)) {
return false;
}
return ((segment.kind === 0 /* IndexKind.ASCENDING */ &&
orderBy.dir === "asc" /* Direction.ASCENDING */) ||
(segment.kind === 1 /* IndexKind.DESCENDING */ &&
orderBy.dir === "desc" /* Direction.DESCENDING */));
}
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides utility functions that help with boolean logic transformations needed for handling
* complex filters used in queries.
*/
/**
* The `in` filter is only a syntactic sugar over a disjunction of equalities. For instance: `a in
* [1,2,3]` is in fact `a==1 || a==2 || a==3`. This method expands any `in` filter in the given
* input into a disjunction of equality filters and returns the expanded filter.
*/
function computeInExpansion(filter) {
hardAssert(filter instanceof FieldFilter || filter instanceof CompositeFilter);
if (filter instanceof FieldFilter) {
if (filter instanceof InFilter) {
const arrayValue = filter.value.arrayValue;
const values = (arrayValue && arrayValue.values) || [];
const expandedFilters = values.map(value => FieldFilter.create(filter.field, "==" /* Operator.EQUAL */, value));
return CompositeFilter.create(expandedFilters, "or" /* CompositeOperator.OR */);
}
else {
// We have reached other kinds of field filters.
return filter;
}
}
// We have a composite filter.
const expandedFilters = filter.filters.map(subfilter => computeInExpansion(subfilter));
return CompositeFilter.create(expandedFilters, filter.op);
}
/**
* Given a composite filter, returns the list of terms in its disjunctive normal form.
*
* Each element in the return value is one term of the resulting DNF. For instance: For the
* input: (A || B) && C, the DNF form is: (A && C) || (B && C), and the return value is a list
* with two elements: a composite filter that performs (A && C), and a composite filter that
* performs (B && C).
*
* @param filter - The composite filter to calculate the DNF transform for.
* @returns The terms in the DNF transform.
*/
function getDnfTerms(filter) {
if (filter.getFilters().length === 0) {
return [];
}
const result = computeDistributedNormalForm(computeInExpansion(filter));
hardAssert(isDisjunctiveNormalForm(result));
if (isSingleFieldFilter(result) || isFlatConjunction(result)) {
return [result];
}
return result.getFilters();
}
/** Returns true if the given filter is a single field filter. e.g. (a == 10). */
function isSingleFieldFilter(filter) {
return filter instanceof FieldFilter;
}
/**
* Returns true if the given filter is the conjunction of one or more field filters. e.g. (a == 10
* && b == 20)
*/
function isFlatConjunction(filter) {
return (filter instanceof CompositeFilter &&
compositeFilterIsFlatConjunction(filter));
}
/**
* Returns whether or not the given filter is in disjunctive normal form (DNF).
*
* In boolean logic, a disjunctive normal form (DNF) is a canonical normal form of a logical
* formula consisting of a disjunction of conjunctions; it can also be described as an OR of ANDs.
*
* For more info, visit: https://en.wikipedia.org/wiki/Disjunctive_normal_form
*/
function isDisjunctiveNormalForm(filter) {
return (isSingleFieldFilter(filter) ||
isFlatConjunction(filter) ||
isDisjunctionOfFieldFiltersAndFlatConjunctions(filter));
}
/**
* Returns true if the given filter is the disjunction of one or more "flat conjunctions" and
* field filters. e.g. (a == 10) || (b==20 && c==30)
*/
function isDisjunctionOfFieldFiltersAndFlatConjunctions(filter) {
if (filter instanceof CompositeFilter) {
if (compositeFilterIsDisjunction(filter)) {
for (const subFilter of filter.getFilters()) {
if (!isSingleFieldFilter(subFilter) && !isFlatConjunction(subFilter)) {
return false;
}
}
return true;
}
}
return false;
}
function computeDistributedNormalForm(filter) {
hardAssert(filter instanceof FieldFilter || filter instanceof CompositeFilter);
if (filter instanceof FieldFilter) {
return filter;
}
if (filter.filters.length === 1) {
return computeDistributedNormalForm(filter.filters[0]);
}
// Compute DNF for each of the subfilters first
const result = filter.filters.map(subfilter => computeDistributedNormalForm(subfilter));
let newFilter = CompositeFilter.create(result, filter.op);
newFilter = applyAssociation(newFilter);
if (isDisjunctiveNormalForm(newFilter)) {
return newFilter;
}
hardAssert(newFilter instanceof CompositeFilter);
hardAssert(compositeFilterIsConjunction(newFilter));
hardAssert(newFilter.filters.length > 1);
return newFilter.filters.reduce((runningResult, filter) => applyDistribution(runningResult, filter));
}
function applyDistribution(lhs, rhs) {
hardAssert(lhs instanceof FieldFilter || lhs instanceof CompositeFilter);
hardAssert(rhs instanceof FieldFilter || rhs instanceof CompositeFilter);
let result;
if (lhs instanceof FieldFilter) {
if (rhs instanceof FieldFilter) {
// FieldFilter FieldFilter
result = applyDistributionFieldFilters(lhs, rhs);
}
else {
// FieldFilter CompositeFilter
result = applyDistributionFieldAndCompositeFilters(lhs, rhs);
}
}
else {
if (rhs instanceof FieldFilter) {
// CompositeFilter FieldFilter
result = applyDistributionFieldAndCompositeFilters(rhs, lhs);
}
else {
// CompositeFilter CompositeFilter
result = applyDistributionCompositeFilters(lhs, rhs);
}
}
return applyAssociation(result);
}
function applyDistributionFieldFilters(lhs, rhs) {
// Conjunction distribution for two field filters is the conjunction of them.
return CompositeFilter.create([lhs, rhs], "and" /* CompositeOperator.AND */);
}
function applyDistributionCompositeFilters(lhs, rhs) {
hardAssert(lhs.filters.length > 0 && rhs.filters.length > 0);
// There are four cases:
// (A & B) & (C & D) --> (A & B & C & D)
// (A & B) & (C | D) --> (A & B & C) | (A & B & D)
// (A | B) & (C & D) --> (C & D & A) | (C & D & B)
// (A | B) & (C | D) --> (A & C) | (A & D) | (B & C) | (B & D)
// Case 1 is a merge.
if (compositeFilterIsConjunction(lhs) && compositeFilterIsConjunction(rhs)) {
return compositeFilterWithAddedFilters(lhs, rhs.getFilters());
}
// Case 2,3,4 all have at least one side (lhs or rhs) that is a disjunction. In all three cases
// we should take each element of the disjunction and distribute it over the other side, and
// return the disjunction of the distribution results.
const disjunctionSide = compositeFilterIsDisjunction(lhs) ? lhs : rhs;
const otherSide = compositeFilterIsDisjunction(lhs) ? rhs : lhs;
const results = disjunctionSide.filters.map(subfilter => applyDistribution(subfilter, otherSide));
return CompositeFilter.create(results, "or" /* CompositeOperator.OR */);
}
function applyDistributionFieldAndCompositeFilters(fieldFilter, compositeFilter) {
// There are two cases:
// A & (B & C) --> (A & B & C)
// A & (B | C) --> (A & B) | (A & C)
if (compositeFilterIsConjunction(compositeFilter)) {
// Case 1
return compositeFilterWithAddedFilters(compositeFilter, fieldFilter.getFilters());
}
else {
// Case 2
const newFilters = compositeFilter.filters.map(subfilter => applyDistribution(fieldFilter, subfilter));
return CompositeFilter.create(newFilters, "or" /* CompositeOperator.OR */);
}
}
/**
* Applies the associativity property to the given filter and returns the resulting filter.
*
* - A | (B | C) == (A | B) | C == (A | B | C)
* - A & (B & C) == (A & B) & C == (A & B & C)
*
* For more info, visit: https://en.wikipedia.org/wiki/Associative_property#Propositional_logic
*/
function applyAssociation(filter) {
hardAssert(filter instanceof FieldFilter || filter instanceof CompositeFilter);
if (filter instanceof FieldFilter) {
return filter;
}
const filters = filter.getFilters();
// If the composite filter only contains 1 filter, apply associativity to it.
if (filters.length === 1) {
return applyAssociation(filters[0]);
}
// Associativity applied to a flat composite filter results in the filter itself.
if (compositeFilterIsFlat(filter)) {
return filter;
}
// First apply associativity to all subfilters. This will in turn recursively apply
// associativity to all nested composite filters and field filters.
const updatedFilters = filters.map(subfilter => applyAssociation(subfilter));
// For composite subfilters that perform the same kind of logical operation as
// `compositeFilter`, take out their filters and add them to `compositeFilter`.
// For example:
// compositeFilter = (A | (B | C | D))
// compositeSubfilter = (B | C | D)
// Result: (A | B | C | D)
// Note that the `compositeSubfilter` has been eliminated, and its filters (B, C, D) have been
// added to the top-level "compositeFilter".
const newSubfilters = [];
updatedFilters.forEach(subfilter => {
if (subfilter instanceof FieldFilter) {
newSubfilters.push(subfilter);
}
else if (subfilter instanceof CompositeFilter) {
if (subfilter.op === filter.op) {
// compositeFilter: (A | (B | C))
// compositeSubfilter: (B | C)
// Result: (A | B | C)
newSubfilters.push(...subfilter.filters);
}
else {
// compositeFilter: (A | (B & C))
// compositeSubfilter: (B & C)
// Result: (A | (B & C))
newSubfilters.push(subfilter);
}
}
});
if (newSubfilters.length === 1) {
return newSubfilters[0];
}
return CompositeFilter.create(newSubfilters, filter.op);
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An in-memory implementation of IndexManager.
*/
class MemoryIndexManager {
constructor() {
this.collectionParentIndex = new MemoryCollectionParentIndex();
}
addToCollectionParentIndex(transaction, collectionPath) {
this.collectionParentIndex.add(collectionPath);
return PersistencePromise.resolve();
}
getCollectionParents(transaction, collectionId) {
return PersistencePromise.resolve(this.collectionParentIndex.getEntries(collectionId));
}
addFieldIndex(transaction, index) {
// Field indices are not supported with memory persistence.
return PersistencePromise.resolve();
}
deleteFieldIndex(transaction, index) {
// Field indices are not supported with memory persistence.
return PersistencePromise.resolve();
}
getDocumentsMatchingTarget(transaction, target) {
// Field indices are not supported with memory persistence.
return PersistencePromise.resolve(null);
}
getIndexType(transaction, target) {
// Field indices are not supported with memory persistence.
return PersistencePromise.resolve(0 /* IndexType.NONE */);
}
getFieldIndexes(transaction, collectionGroup) {
// Field indices are not supported with memory persistence.
return PersistencePromise.resolve([]);
}
getNextCollectionGroupToUpdate(transaction) {
// Field indices are not supported with memory persistence.
return PersistencePromise.resolve(null);
}
getMinOffset(transaction, target) {
return PersistencePromise.resolve(IndexOffset.min());
}
getMinOffsetFromCollectionGroup(transaction, collectionGroup) {
return PersistencePromise.resolve(IndexOffset.min());
}
updateCollectionGroup(transaction, collectionGroup, offset) {
// Field indices are not supported with memory persistence.
return PersistencePromise.resolve();
}
updateIndexEntries(transaction, documents) {
// Field indices are not supported with memory persistence.
return PersistencePromise.resolve();
}
}
/**
* Internal implementation of the collection-parent index exposed by MemoryIndexManager.
* Also used for in-memory caching by IndexedDbIndexManager and initial index population
* in indexeddb_schema.ts
*/
class MemoryCollectionParentIndex {
constructor() {
this.index = {};
}
// Returns false if the entry already existed.
add(collectionPath) {
const collectionId = collectionPath.lastSegment();
const parentPath = collectionPath.popLast();
const existingParents = this.index[collectionId] ||
new SortedSet(ResourcePath.comparator);
const added = !existingParents.has(parentPath);
this.index[collectionId] = existingParents.add(parentPath);
return added;
}
has(collectionPath) {
const collectionId = collectionPath.lastSegment();
const parentPath = collectionPath.popLast();
const existingParents = this.index[collectionId];
return existingParents && existingParents.has(parentPath);
}
getEntries(collectionId) {
const parentPaths = this.index[collectionId] ||
new SortedSet(ResourcePath.comparator);
return parentPaths.toArray();
}
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$f = 'IndexedDbIndexManager';
const EMPTY_VALUE = new Uint8Array(0);
/**
* A persisted implementation of IndexManager.
*
* PORTING NOTE: Unlike iOS and Android, the Web SDK does not memoize index
* data as it supports multi-tab access.
*/
class IndexedDbIndexManager {
constructor(user, databaseId) {
this.user = user;
this.databaseId = databaseId;
/**
* An in-memory copy of the collection-parent index entries we've already
* written since the SDK launched. Used to avoid re-writing the same entry
* repeatedly.
*
* This is *NOT* a complete cache of what's in persistence and so can never be
* used to satisfy reads.
*/
this.collectionParentsCache = new MemoryCollectionParentIndex();
/**
* Maps from a target to its equivalent list of sub-targets. Each sub-target
* contains only one term from the target's disjunctive normal form (DNF).
*/
this.targetToDnfSubTargets = new ObjectMap(t => canonifyTarget(t), (l, r) => targetEquals(l, r));
this.uid = user.uid || '';
}
/**
* Adds a new entry to the collection parent index.
*
* Repeated calls for the same collectionPath should be avoided within a
* transaction as IndexedDbIndexManager only caches writes once a transaction
* has been committed.
*/
addToCollectionParentIndex(transaction, collectionPath) {
if (!this.collectionParentsCache.has(collectionPath)) {
const collectionId = collectionPath.lastSegment();
const parentPath = collectionPath.popLast();
transaction.addOnCommittedListener(() => {
// Add the collection to the in memory cache only if the transaction was
// successfully committed.
this.collectionParentsCache.add(collectionPath);
});
const collectionParent = {
collectionId,
parent: encodeResourcePath(parentPath)
};
return collectionParentsStore(transaction).put(collectionParent);
}
return PersistencePromise.resolve();
}
getCollectionParents(transaction, collectionId) {
const parentPaths = [];
const range = IDBKeyRange.bound([collectionId, ''], [immediateSuccessor(collectionId), ''],
/*lowerOpen=*/ false,
/*upperOpen=*/ true);
return collectionParentsStore(transaction)
.loadAll(range)
.next(entries => {
for (const entry of entries) {
// This collectionId guard shouldn't be necessary (and isn't as long
// as we're running in a real browser), but there's a bug in
// indexeddbshim that breaks our range in our tests running in node:
// https://github.com/axemclion/IndexedDBShim/issues/334
if (entry.collectionId !== collectionId) {
break;
}
parentPaths.push(decodeResourcePath(entry.parent));
}
return parentPaths;
});
}
addFieldIndex(transaction, index) {
// TODO(indexing): Verify that the auto-incrementing index ID works in
// Safari & Firefox.
const indexes = indexConfigurationStore(transaction);
const dbIndex = toDbIndexConfiguration(index);
delete dbIndex.indexId; // `indexId` is auto-populated by IndexedDb
const result = indexes.add(dbIndex);
if (index.indexState) {
const states = indexStateStore(transaction);
return result.next(indexId => {
states.put(toDbIndexState(indexId, this.user, index.indexState.sequenceNumber, index.indexState.offset));
});
}
else {
return result.next();
}
}
deleteFieldIndex(transaction, index) {
const indexes = indexConfigurationStore(transaction);
const states = indexStateStore(transaction);
const entries = indexEntriesStore(transaction);
return indexes
.delete(index.indexId)
.next(() => states.delete(IDBKeyRange.bound([index.indexId], [index.indexId + 1],
/*lowerOpen=*/ false,
/*upperOpen=*/ true)))
.next(() => entries.delete(IDBKeyRange.bound([index.indexId], [index.indexId + 1],
/*lowerOpen=*/ false,
/*upperOpen=*/ true)));
}
getDocumentsMatchingTarget(transaction, target) {
const indexEntries = indexEntriesStore(transaction);
let canServeTarget = true;
const indexes = new Map();
return PersistencePromise.forEach(this.getSubTargets(target), (subTarget) => {
return this.getFieldIndex(transaction, subTarget).next(index => {
canServeTarget = canServeTarget && !!index;
indexes.set(subTarget, index);
});
}).next(() => {
if (!canServeTarget) {
return PersistencePromise.resolve(null);
}
else {
let existingKeys = documentKeySet();
const result = [];
return PersistencePromise.forEach(indexes, (index, subTarget) => {
logDebug(LOG_TAG$f, `Using index ${fieldIndexToString(index)} to execute ${canonifyTarget(target)}`);
const arrayValues = targetGetArrayValues(subTarget, index);
const notInValues = targetGetNotInValues(subTarget, index);
const lowerBound = targetGetLowerBound(subTarget, index);
const upperBound = targetGetUpperBound(subTarget, index);
const lowerBoundEncoded = this.encodeBound(index, subTarget, lowerBound);
const upperBoundEncoded = this.encodeBound(index, subTarget, upperBound);
const notInEncoded = this.encodeValues(index, subTarget, notInValues);
const indexRanges = this.generateIndexRanges(index.indexId, arrayValues, lowerBoundEncoded, lowerBound.inclusive, upperBoundEncoded, upperBound.inclusive, notInEncoded);
return PersistencePromise.forEach(indexRanges, (indexRange) => {
return indexEntries
.loadFirst(indexRange, target.limit)
.next(entries => {
entries.forEach(entry => {
const documentKey = DocumentKey.fromSegments(entry.documentKey);
if (!existingKeys.has(documentKey)) {
existingKeys = existingKeys.add(documentKey);
result.push(documentKey);
}
});
});
});
}).next(() => result);
}
});
}
getSubTargets(target) {
let subTargets = this.targetToDnfSubTargets.get(target);
if (subTargets) {
return subTargets;
}
if (target.filters.length === 0) {
subTargets = [target];
}
else {
// There is an implicit AND operation between all the filters stored in the target
const dnf = getDnfTerms(CompositeFilter.create(target.filters, "and" /* CompositeOperator.AND */));
subTargets = dnf.map(term => newTarget(target.path, target.collectionGroup, target.orderBy, term.getFilters(), target.limit, target.startAt, target.endAt));
}
this.targetToDnfSubTargets.set(target, subTargets);
return subTargets;
}
/**
* Constructs a key range query on `DbIndexEntryStore` that unions all
* bounds.
*/
generateIndexRanges(indexId, arrayValues, lowerBounds, lowerBoundInclusive, upperBounds, upperBoundInclusive, notInValues) {
// The number of total index scans we union together. This is similar to a
// disjunctive normal form, but adapted for array values. We create a single
// index range per value in an ARRAY_CONTAINS or ARRAY_CONTAINS_ANY filter
// combined with the values from the query bounds.
const totalScans = (arrayValues != null ? arrayValues.length : 1) *
Math.max(lowerBounds.length, upperBounds.length);
const scansPerArrayElement = totalScans / (arrayValues != null ? arrayValues.length : 1);
const indexRanges = [];
for (let i = 0; i < totalScans; ++i) {
// Each array value spans `scansPerArrayElement` consecutive scans, so the
// scan index must be floored to an integer before indexing into the array.
const arrayValue = arrayValues
? this.encodeSingleElement(arrayValues[Math.floor(i / scansPerArrayElement)])
: EMPTY_VALUE;
const lowerBound = this.generateLowerBound(indexId, arrayValue, lowerBounds[i % scansPerArrayElement], lowerBoundInclusive);
const upperBound = this.generateUpperBound(indexId, arrayValue, upperBounds[i % scansPerArrayElement], upperBoundInclusive);
const notInBound = notInValues.map(notIn => this.generateLowerBound(indexId, arrayValue, notIn,
/* inclusive= */ true));
indexRanges.push(...this.createRange(lowerBound, upperBound, notInBound));
}
return indexRanges;
}
/** Generates the lower bound for `arrayValue` and `directionalValue`. */
generateLowerBound(indexId, arrayValue, directionalValue, inclusive) {
const entry = new IndexEntry(indexId, DocumentKey.empty(), arrayValue, directionalValue);
return inclusive ? entry : entry.successor();
}
/** Generates the upper bound for `arrayValue` and `directionalValue`. */
generateUpperBound(indexId, arrayValue, directionalValue, inclusive) {
const entry = new IndexEntry(indexId, DocumentKey.empty(), arrayValue, directionalValue);
return inclusive ? entry.successor() : entry;
}
getFieldIndex(transaction, target) {
const targetIndexMatcher = new TargetIndexMatcher(target);
const collectionGroup = target.collectionGroup != null
? target.collectionGroup
: target.path.lastSegment();
return this.getFieldIndexes(transaction, collectionGroup).next(indexes => {
// Return the index with the greatest number of segments.
let index = null;
for (const candidate of indexes) {
const matches = targetIndexMatcher.servedByIndex(candidate);
if (matches &&
(!index || candidate.fields.length > index.fields.length)) {
index = candidate;
}
}
return index;
});
}
getIndexType(transaction, target) {
let indexType = 2 /* IndexType.FULL */;
const subTargets = this.getSubTargets(target);
return PersistencePromise.forEach(subTargets, (target) => {
return this.getFieldIndex(transaction, target).next(index => {
if (!index) {
indexType = 0 /* IndexType.NONE */;
}
else if (indexType !== 0 /* IndexType.NONE */ &&
index.fields.length < targetGetSegmentCount(target)) {
indexType = 1 /* IndexType.PARTIAL */;
}
});
}).next(() => {
// OR queries have more than one sub-target (one sub-target per DNF term). We currently consider
// OR queries that have a `limit` to have a partial index. For such queries we perform sorting
// and apply the limit in memory as a post-processing step.
if (targetHasLimit(target) &&
subTargets.length > 1 &&
indexType === 2 /* IndexType.FULL */) {
return 1 /* IndexType.PARTIAL */;
}
return indexType;
});
}
/**
* Returns the byte encoded form of the directional values in the field index.
* Returns `null` if the document does not have all fields specified in the
* index.
*/
encodeDirectionalElements(fieldIndex, document) {
const encoder = new IndexByteEncoder();
for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
const field = document.data.field(segment.fieldPath);
if (field == null) {
return null;
}
const directionalEncoder = encoder.forKind(segment.kind);
FirestoreIndexValueWriter.INSTANCE.writeIndexValue(field, directionalEncoder);
}
return encoder.encodedBytes();
}
/** Encodes a single value to the ascending index format. */
encodeSingleElement(value) {
const encoder = new IndexByteEncoder();
FirestoreIndexValueWriter.INSTANCE.writeIndexValue(value, encoder.forKind(0 /* IndexKind.ASCENDING */));
return encoder.encodedBytes();
}
/**
* Returns an encoded form of the document key that sorts based on the key
* ordering of the field index.
*/
encodeDirectionalKey(fieldIndex, documentKey) {
const encoder = new IndexByteEncoder();
FirestoreIndexValueWriter.INSTANCE.writeIndexValue(refValue(this.databaseId, documentKey), encoder.forKind(fieldIndexGetKeyOrder(fieldIndex)));
return encoder.encodedBytes();
}
/**
* Encodes the given field values according to the specification in `target`.
* For IN queries, a list of possible values is returned.
*/
encodeValues(fieldIndex, target, values) {
if (values === null) {
return [];
}
let encoders = [];
encoders.push(new IndexByteEncoder());
let valueIdx = 0;
for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
const value = values[valueIdx++];
for (const encoder of encoders) {
if (this.isInFilter(target, segment.fieldPath) && isArray(value)) {
encoders = this.expandIndexValues(encoders, segment, value);
}
else {
const directionalEncoder = encoder.forKind(segment.kind);
FirestoreIndexValueWriter.INSTANCE.writeIndexValue(value, directionalEncoder);
}
}
}
return this.getEncodedBytes(encoders);
}
/**
* Encodes the given bounds according to the specification in `target`. For IN
* queries, a list of possible values is returned.
*/
encodeBound(fieldIndex, target, bound) {
return this.encodeValues(fieldIndex, target, bound.position);
}
/** Returns the byte representation for the provided encoders. */
getEncodedBytes(encoders) {
const result = [];
for (let i = 0; i < encoders.length; ++i) {
result[i] = encoders[i].encodedBytes();
}
return result;
}
/**
* Creates a separate encoder for each element of an array.
*
* The method appends each value to all existing encoders (e.g. filter("a",
* "==", "a1").filter("b", "in", ["b1", "b2"]) becomes ["a1,b1", "a1,b2"]). A
* list of new encoders is returned.
*/
expandIndexValues(encoders, segment, value) {
const prefixes = [...encoders];
const results = [];
for (const arrayElement of value.arrayValue.values || []) {
for (const prefix of prefixes) {
const clonedEncoder = new IndexByteEncoder();
clonedEncoder.seed(prefix.encodedBytes());
FirestoreIndexValueWriter.INSTANCE.writeIndexValue(arrayElement, clonedEncoder.forKind(segment.kind));
results.push(clonedEncoder);
}
}
return results;
}
isInFilter(target, fieldPath) {
return !!target.filters.find(f => f instanceof FieldFilter &&
f.field.isEqual(fieldPath) &&
(f.op === "in" /* Operator.IN */ || f.op === "not-in" /* Operator.NOT_IN */));
}
getFieldIndexes(transaction, collectionGroup) {
const indexes = indexConfigurationStore(transaction);
const states = indexStateStore(transaction);
return (collectionGroup
? indexes.loadAll(DbIndexConfigurationCollectionGroupIndex, IDBKeyRange.bound(collectionGroup, collectionGroup))
: indexes.loadAll()).next(indexConfigs => {
const result = [];
return PersistencePromise.forEach(indexConfigs, (indexConfig) => {
return states
.get([indexConfig.indexId, this.uid])
.next(indexState => {
result.push(fromDbIndexConfiguration(indexConfig, indexState));
});
}).next(() => result);
});
}
getNextCollectionGroupToUpdate(transaction) {
return this.getFieldIndexes(transaction).next(indexes => {
if (indexes.length === 0) {
return null;
}
indexes.sort((l, r) => {
const cmp = l.indexState.sequenceNumber - r.indexState.sequenceNumber;
return cmp !== 0
? cmp
: primitiveComparator(l.collectionGroup, r.collectionGroup);
});
return indexes[0].collectionGroup;
});
}
updateCollectionGroup(transaction, collectionGroup, offset) {
const indexes = indexConfigurationStore(transaction);
const states = indexStateStore(transaction);
return this.getNextSequenceNumber(transaction).next(nextSequenceNumber => indexes
.loadAll(DbIndexConfigurationCollectionGroupIndex, IDBKeyRange.bound(collectionGroup, collectionGroup))
.next(configs => PersistencePromise.forEach(configs, (config) => states.put(toDbIndexState(config.indexId, this.user, nextSequenceNumber, offset)))));
}
updateIndexEntries(transaction, documents) {
// Porting Note: `getFieldIndexes()` on Web does not cache index lookups as
// it could be used across different IndexedDB transactions. As any cached
// data might be invalidated by other multi-tab clients, we can only trust
// data within a single IndexedDB transaction. We therefore add a cache
// here.
const memoizedIndexes = new Map();
return PersistencePromise.forEach(documents, (key, doc) => {
const memoizedCollectionIndexes = memoizedIndexes.get(key.collectionGroup);
const fieldIndexes = memoizedCollectionIndexes
? PersistencePromise.resolve(memoizedCollectionIndexes)
: this.getFieldIndexes(transaction, key.collectionGroup);
return fieldIndexes.next(fieldIndexes => {
memoizedIndexes.set(key.collectionGroup, fieldIndexes);
return PersistencePromise.forEach(fieldIndexes, (fieldIndex) => {
return this.getExistingIndexEntries(transaction, key, fieldIndex).next(existingEntries => {
const newEntries = this.computeIndexEntries(doc, fieldIndex);
if (!existingEntries.isEqual(newEntries)) {
return this.updateEntries(transaction, doc, fieldIndex, existingEntries, newEntries);
}
return PersistencePromise.resolve();
});
});
});
});
}
addIndexEntry(transaction, document, fieldIndex, indexEntry) {
const indexEntries = indexEntriesStore(transaction);
return indexEntries.put({
indexId: indexEntry.indexId,
uid: this.uid,
arrayValue: indexEntry.arrayValue,
directionalValue: indexEntry.directionalValue,
orderedDocumentKey: this.encodeDirectionalKey(fieldIndex, document.key),
documentKey: document.key.path.toArray()
});
}
deleteIndexEntry(transaction, document, fieldIndex, indexEntry) {
const indexEntries = indexEntriesStore(transaction);
return indexEntries.delete([
indexEntry.indexId,
this.uid,
indexEntry.arrayValue,
indexEntry.directionalValue,
this.encodeDirectionalKey(fieldIndex, document.key),
document.key.path.toArray()
]);
}
getExistingIndexEntries(transaction, documentKey, fieldIndex) {
const indexEntries = indexEntriesStore(transaction);
let results = new SortedSet(indexEntryComparator);
return indexEntries
.iterate({
index: DbIndexEntryDocumentKeyIndex,
range: IDBKeyRange.only([
fieldIndex.indexId,
this.uid,
this.encodeDirectionalKey(fieldIndex, documentKey)
])
}, (_, entry) => {
results = results.add(new IndexEntry(fieldIndex.indexId, documentKey, entry.arrayValue, entry.directionalValue));
})
.next(() => results);
}
/** Creates the index entries for the given document. */
computeIndexEntries(document, fieldIndex) {
let results = new SortedSet(indexEntryComparator);
const directionalValue = this.encodeDirectionalElements(fieldIndex, document);
if (directionalValue == null) {
return results;
}
const arraySegment = fieldIndexGetArraySegment(fieldIndex);
if (arraySegment != null) {
const value = document.data.field(arraySegment.fieldPath);
if (isArray(value)) {
for (const arrayValue of value.arrayValue.values || []) {
results = results.add(new IndexEntry(fieldIndex.indexId, document.key, this.encodeSingleElement(arrayValue), directionalValue));
}
}
}
else {
results = results.add(new IndexEntry(fieldIndex.indexId, document.key, EMPTY_VALUE, directionalValue));
}
return results;
}
/**
* Updates the index entries for the provided document by deleting entries
* that are no longer referenced in `newEntries` and adding all newly added
* entries.
*/
updateEntries(transaction, document, fieldIndex, existingEntries, newEntries) {
logDebug(LOG_TAG$f, "Updating index entries for document '%s'", document.key);
const promises = [];
diffSortedSets(existingEntries, newEntries, indexEntryComparator,
/* onAdd= */ entry => {
promises.push(this.addIndexEntry(transaction, document, fieldIndex, entry));
},
/* onRemove= */ entry => {
promises.push(this.deleteIndexEntry(transaction, document, fieldIndex, entry));
});
return PersistencePromise.waitFor(promises);
}
getNextSequenceNumber(transaction) {
let nextSequenceNumber = 1;
const states = indexStateStore(transaction);
return states
.iterate({
index: DbIndexStateSequenceNumberIndex,
reverse: true,
range: IDBKeyRange.upperBound([this.uid, Number.MAX_SAFE_INTEGER])
}, (_, state, controller) => {
controller.done();
nextSequenceNumber = state.sequenceNumber + 1;
})
.next(() => nextSequenceNumber);
}
/**
* Returns a new set of IDB ranges that splits the existing range and excludes
* any values that match the `notInValues` from these ranges. As an example,
* `[foo > 2 && foo != 3]` becomes `[foo > 2 && foo < 3, foo > 3]`.
*/
createRange(lower, upper, notInValues) {
// The notIn values need to be sorted and unique so that we can return a
// sorted set of non-overlapping ranges.
notInValues = notInValues
.sort((l, r) => indexEntryComparator(l, r))
.filter((el, i, values) => !i || indexEntryComparator(el, values[i - 1]) !== 0);
const bounds = [];
bounds.push(lower);
for (const notInValue of notInValues) {
const cmpToLower = indexEntryComparator(notInValue, lower);
const cmpToUpper = indexEntryComparator(notInValue, upper);
if (cmpToLower === 0) {
// `notInValue` is the lower bound. We therefore need to raise the bound
// to the next value.
bounds[0] = lower.successor();
}
else if (cmpToLower > 0 && cmpToUpper < 0) {
// `notInValue` is in the middle of the range
bounds.push(notInValue);
bounds.push(notInValue.successor());
}
else if (cmpToUpper > 0) {
// `notInValue` (and all following values) are out of the range
break;
}
}
bounds.push(upper);
const ranges = [];
for (let i = 0; i < bounds.length; i += 2) {
// If we encounter two bounds that will create an unmatchable key range,
// then we return an empty set of key ranges.
if (this.isRangeUnmatchable(bounds[i], bounds[i + 1])) {
return [];
}
const lowerBound = [
bounds[i].indexId,
this.uid,
bounds[i].arrayValue,
bounds[i].directionalValue,
EMPTY_VALUE,
[]
];
const upperBound = [
bounds[i + 1].indexId,
this.uid,
bounds[i + 1].arrayValue,
bounds[i + 1].directionalValue,
EMPTY_VALUE,
[]
];
ranges.push(IDBKeyRange.bound(lowerBound, upperBound));
}
return ranges;
}
isRangeUnmatchable(lowerBound, upperBound) {
// If the lower bound is greater than the upper bound, then the key
// range can never be matched.
return indexEntryComparator(lowerBound, upperBound) > 0;
}
getMinOffsetFromCollectionGroup(transaction, collectionGroup) {
return this.getFieldIndexes(transaction, collectionGroup).next(getMinOffsetFromFieldIndexes);
}
getMinOffset(transaction, target) {
return PersistencePromise.mapArray(this.getSubTargets(target), (subTarget) => this.getFieldIndex(transaction, subTarget).next(index => index ? index : fail())).next(getMinOffsetFromFieldIndexes);
}
}
/**
* Helper to get a typed SimpleDbStore for the collectionParents
* document store.
*/
function collectionParentsStore(txn) {
return getStore(txn, DbCollectionParentStore);
}
/**
* Helper to get a typed SimpleDbStore for the index entry object store.
*/
function indexEntriesStore(txn) {
return getStore(txn, DbIndexEntryStore);
}
/**
* Helper to get a typed SimpleDbStore for the index configuration object store.
*/
function indexConfigurationStore(txn) {
return getStore(txn, DbIndexConfigurationStore);
}
/**
* Helper to get a typed SimpleDbStore for the index state object store.
*/
function indexStateStore(txn) {
return getStore(txn, DbIndexStateStore);
}
function getMinOffsetFromFieldIndexes(fieldIndexes) {
hardAssert(fieldIndexes.length !== 0);
let minOffset = fieldIndexes[0].indexState.offset;
let maxBatchId = minOffset.largestBatchId;
for (let i = 1; i < fieldIndexes.length; i++) {
const newOffset = fieldIndexes[i].indexState.offset;
if (indexOffsetComparator(newOffset, minOffset) < 0) {
minOffset = newOffset;
}
if (maxBatchId < newOffset.largestBatchId) {
maxBatchId = newOffset.largestBatchId;
}
}
return new IndexOffset(minOffset.readTime, minOffset.documentKey, maxBatchId);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Delete a mutation batch and the associated document mutations.
* @returns A PersistencePromise of the document mutations that were removed.
*/
function removeMutationBatch(txn, userId, batch) {
const mutationStore = txn.store(DbMutationBatchStore);
const indexTxn = txn.store(DbDocumentMutationStore);
const promises = [];
const range = IDBKeyRange.only(batch.batchId);
let numDeleted = 0;
const removePromise = mutationStore.iterate({ range }, (key, value, control) => {
numDeleted++;
return control.delete();
});
promises.push(removePromise.next(() => {
hardAssert(numDeleted === 1);
}));
const removedDocuments = [];
for (const mutation of batch.mutations) {
const indexKey = newDbDocumentMutationKey(userId, mutation.key.path, batch.batchId);
promises.push(indexTxn.delete(indexKey));
removedDocuments.push(mutation.key);
}
return PersistencePromise.waitFor(promises).next(() => removedDocuments);
}
/**
* Returns an approximate size for the given document.
*/
function dbDocumentSize(doc) {
if (!doc) {
return 0;
}
let value;
if (doc.document) {
value = doc.document;
}
else if (doc.unknownDocument) {
value = doc.unknownDocument;
}
else if (doc.noDocument) {
value = doc.noDocument;
}
else {
throw fail();
}
return JSON.stringify(value).length;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** A mutation queue for a specific user, backed by IndexedDB. */
class IndexedDbMutationQueue {
constructor(
/**
* The normalized userId (e.g. null UID => "" userId) used to store /
* retrieve mutations.
*/
userId, serializer, indexManager, referenceDelegate) {
this.userId = userId;
this.serializer = serializer;
this.indexManager = indexManager;
this.referenceDelegate = referenceDelegate;
/**
* Caches the document keys for pending mutation batches. If the mutation
* has been removed from IndexedDb, the cached value may continue to
* be used to retrieve the batch's document keys. To remove a cached value
* locally, `removeCachedMutationKeys()` should be invoked either directly
* or through `removeMutationBatches()`.
*
* With multi-tab, when the primary client acknowledges or rejects a mutation,
* this cache is used by secondary clients to invalidate the local
* view of the documents that were previously affected by the mutation.
*/
// PORTING NOTE: Multi-tab only.
this.documentKeysByBatchId = {};
}
/**
* Creates a new mutation queue for the given user.
* @param user - The user for which to create a mutation queue.
* @param serializer - The serializer to use when persisting to IndexedDb.
*/
static forUser(user, serializer, indexManager, referenceDelegate) {
// TODO(mcg): Figure out what constraints there are on userIDs
// In particular, are there any reserved characters? Are empty IDs allowed?
// For the moment store these together in the same mutations table assuming
// that empty userIDs aren't allowed.
hardAssert(user.uid !== '');
const userId = user.isAuthenticated() ? user.uid : '';
return new IndexedDbMutationQueue(userId, serializer, indexManager, referenceDelegate);
}
checkEmpty(transaction) {
let empty = true;
const range = IDBKeyRange.bound([this.userId, Number.NEGATIVE_INFINITY], [this.userId, Number.POSITIVE_INFINITY]);
return mutationsStore(transaction)
.iterate({ index: DbMutationBatchUserMutationsIndex, range }, (key, value, control) => {
empty = false;
control.done();
})
.next(() => empty);
}
addMutationBatch(transaction, localWriteTime, baseMutations, mutations) {
const documentStore = documentMutationsStore(transaction);
const mutationStore = mutationsStore(transaction);
// The IndexedDb implementation in Chrome (and Firefox) does not handle
// compound indices that include auto-generated keys correctly. To ensure
// that the index entry is added correctly in all browsers, we perform two
// writes: The first write is used to retrieve the next auto-generated Batch
// ID, and the second write populates the index and stores the actual
// mutation batch.
// See: https://bugs.chromium.org/p/chromium/issues/detail?id=701972
// We write an empty object to obtain an auto-generated batch ID (the key).
// eslint-disable-next-line @typescript-eslint/no-explicit-any
return mutationStore.add({}).next(batchId => {
hardAssert(typeof batchId === 'number');
const batch = new MutationBatch(batchId, localWriteTime, baseMutations, mutations);
const dbBatch = toDbMutationBatch(this.serializer, this.userId, batch);
const promises = [];
let collectionParents = new SortedSet((l, r) => primitiveComparator(l.canonicalString(), r.canonicalString()));
for (const mutation of mutations) {
const indexKey = newDbDocumentMutationKey(this.userId, mutation.key.path, batchId);
collectionParents = collectionParents.add(mutation.key.path.popLast());
promises.push(mutationStore.put(dbBatch));
promises.push(documentStore.put(indexKey, DbDocumentMutationPlaceholder));
}
collectionParents.forEach(parent => {
promises.push(this.indexManager.addToCollectionParentIndex(transaction, parent));
});
transaction.addOnCommittedListener(() => {
this.documentKeysByBatchId[batchId] = batch.keys();
});
return PersistencePromise.waitFor(promises).next(() => batch);
});
}
lookupMutationBatch(transaction, batchId) {
return mutationsStore(transaction)
.get(batchId)
.next(dbBatch => {
if (dbBatch) {
hardAssert(dbBatch.userId === this.userId);
return fromDbMutationBatch(this.serializer, dbBatch);
}
return null;
});
}
/**
* Returns the document keys for the mutation batch with the given batchId.
* For primary clients, this method returns `null` after
* `removeMutationBatches()` has been called. Secondary clients return a
* cached result until `removeCachedMutationKeys()` is invoked.
*/
// PORTING NOTE: Multi-tab only.
lookupMutationKeys(transaction, batchId) {
if (this.documentKeysByBatchId[batchId]) {
return PersistencePromise.resolve(this.documentKeysByBatchId[batchId]);
}
else {
return this.lookupMutationBatch(transaction, batchId).next(batch => {
if (batch) {
const keys = batch.keys();
this.documentKeysByBatchId[batchId] = keys;
return keys;
}
else {
return null;
}
});
}
}
getNextMutationBatchAfterBatchId(transaction, batchId) {
const nextBatchId = batchId + 1;
const range = IDBKeyRange.lowerBound([this.userId, nextBatchId]);
let foundBatch = null;
return mutationsStore(transaction)
.iterate({ index: DbMutationBatchUserMutationsIndex, range }, (key, dbBatch, control) => {
if (dbBatch.userId === this.userId) {
hardAssert(dbBatch.batchId >= nextBatchId);
foundBatch = fromDbMutationBatch(this.serializer, dbBatch);
}
control.done();
})
.next(() => foundBatch);
}
getHighestUnacknowledgedBatchId(transaction) {
const range = IDBKeyRange.upperBound([
this.userId,
Number.POSITIVE_INFINITY
]);
let batchId = BATCHID_UNKNOWN;
return mutationsStore(transaction)
.iterate({ index: DbMutationBatchUserMutationsIndex, range, reverse: true }, (key, dbBatch, control) => {
batchId = dbBatch.batchId;
control.done();
})
.next(() => batchId);
}
getAllMutationBatches(transaction) {
const range = IDBKeyRange.bound([this.userId, BATCHID_UNKNOWN], [this.userId, Number.POSITIVE_INFINITY]);
return mutationsStore(transaction)
.loadAll(DbMutationBatchUserMutationsIndex, range)
.next(dbBatches => dbBatches.map(dbBatch => fromDbMutationBatch(this.serializer, dbBatch)));
}
getAllMutationBatchesAffectingDocumentKey(transaction, documentKey) {
// Scan the document-mutation index using a prefix formed from
// the given documentKey.
const indexPrefix = newDbDocumentMutationPrefixForPath(this.userId, documentKey.path);
const indexStart = IDBKeyRange.lowerBound(indexPrefix);
const results = [];
return documentMutationsStore(transaction)
.iterate({ range: indexStart }, (indexKey, _, control) => {
const [userID, encodedPath, batchId] = indexKey;
// Only consider rows matching exactly the specific key of
// interest. Note that because we order by path first, and we
// order terminators before path separators, we'll encounter all
// the index rows for documentKey contiguously. In particular, all
// the rows for documentKey will occur before any rows for
// documents nested in a subcollection beneath documentKey so we
// can stop as soon as we hit any such row.
const path = decodeResourcePath(encodedPath);
if (userID !== this.userId || !documentKey.path.isEqual(path)) {
control.done();
return;
}
// Look up the mutation batch in the store.
return mutationsStore(transaction)
.get(batchId)
.next(mutation => {
if (!mutation) {
throw fail();
}
hardAssert(mutation.userId === this.userId);
results.push(fromDbMutationBatch(this.serializer, mutation));
});
})
.next(() => results);
}
getAllMutationBatchesAffectingDocumentKeys(transaction, documentKeys) {
let uniqueBatchIDs = new SortedSet(primitiveComparator);
const promises = [];
documentKeys.forEach(documentKey => {
const indexStart = newDbDocumentMutationPrefixForPath(this.userId, documentKey.path);
const range = IDBKeyRange.lowerBound(indexStart);
const promise = documentMutationsStore(transaction).iterate({ range }, (indexKey, _, control) => {
const [userID, encodedPath, batchID] = indexKey;
// Only consider rows matching exactly the specific key of
// interest. Note that because we order by path first, and we
// order terminators before path separators, we'll encounter all
// the index rows for documentKey contiguously. In particular, all
// the rows for documentKey will occur before any rows for
// documents nested in a subcollection beneath documentKey so we
// can stop as soon as we hit any such row.
const path = decodeResourcePath(encodedPath);
if (userID !== this.userId || !documentKey.path.isEqual(path)) {
control.done();
return;
}
uniqueBatchIDs = uniqueBatchIDs.add(batchID);
});
promises.push(promise);
});
return PersistencePromise.waitFor(promises).next(() => this.lookupMutationBatches(transaction, uniqueBatchIDs));
}
getAllMutationBatchesAffectingQuery(transaction, query) {
const queryPath = query.path;
const immediateChildrenLength = queryPath.length + 1;
// TODO(mcg): Actually implement a single-collection query
//
// This is actually executing an ancestor query, traversing the whole
// subtree below the collection which can be horrifically inefficient for
// some structures. The right way to solve this is to implement the full
// value index, but that's not in the cards in the near future so this is
// the best we can do for the moment.
//
// Since we don't yet index the actual properties in the mutations, our
// current approach is to just return all mutation batches that affect
// documents in the collection being queried.
const indexPrefix = newDbDocumentMutationPrefixForPath(this.userId, queryPath);
const indexStart = IDBKeyRange.lowerBound(indexPrefix);
// Collect up unique batchIDs encountered during a scan of the index. Use a
// SortedSet to accumulate batch IDs so they can be traversed in order in a
// scan of the main table.
let uniqueBatchIDs = new SortedSet(primitiveComparator);
return documentMutationsStore(transaction)
.iterate({ range: indexStart }, (indexKey, _, control) => {
const [userID, encodedPath, batchID] = indexKey;
const path = decodeResourcePath(encodedPath);
if (userID !== this.userId || !queryPath.isPrefixOf(path)) {
control.done();
return;
}
// Rows with document keys more than one segment longer than the
// query path can't be matches. For example, a query on 'rooms'
            // can't match the document /rooms/abc/messages/xyz.
// TODO(mcg): we'll need a different scanner when we implement
// ancestor queries.
if (path.length !== immediateChildrenLength) {
return;
}
uniqueBatchIDs = uniqueBatchIDs.add(batchID);
})
.next(() => this.lookupMutationBatches(transaction, uniqueBatchIDs));
}
lookupMutationBatches(transaction, batchIDs) {
const results = [];
const promises = [];
// TODO(rockwood): Implement this using iterate.
batchIDs.forEach(batchId => {
promises.push(mutationsStore(transaction)
.get(batchId)
.next(mutation => {
if (mutation === null) {
throw fail();
}
hardAssert(mutation.userId === this.userId);
results.push(fromDbMutationBatch(this.serializer, mutation));
}));
});
return PersistencePromise.waitFor(promises).next(() => results);
}
removeMutationBatch(transaction, batch) {
return removeMutationBatch(transaction.simpleDbTransaction, this.userId, batch).next(removedDocuments => {
transaction.addOnCommittedListener(() => {
this.removeCachedMutationKeys(batch.batchId);
});
return PersistencePromise.forEach(removedDocuments, (key) => {
return this.referenceDelegate.markPotentiallyOrphaned(transaction, key);
});
});
}
/**
* Clears the cached keys for a mutation batch. This method should be
* called by secondary clients after they process mutation updates.
*
* Note that this method does not have to be called from primary clients as
* the corresponding cache entries are cleared when an acknowledged or
* rejected batch is removed from the mutation queue.
*/
// PORTING NOTE: Multi-tab only
removeCachedMutationKeys(batchId) {
delete this.documentKeysByBatchId[batchId];
}
performConsistencyCheck(txn) {
return this.checkEmpty(txn).next(empty => {
if (!empty) {
return PersistencePromise.resolve();
}
// Verify that there are no entries in the documentMutations index if
// the queue is empty.
const startRange = IDBKeyRange.lowerBound(newDbDocumentMutationPrefixForUser(this.userId));
const danglingMutationReferences = [];
return documentMutationsStore(txn)
.iterate({ range: startRange }, (key, _, control) => {
const userID = key[0];
if (userID !== this.userId) {
control.done();
return;
}
else {
const path = decodeResourcePath(key[1]);
danglingMutationReferences.push(path);
}
})
.next(() => {
hardAssert(danglingMutationReferences.length === 0);
});
});
}
containsKey(txn, key) {
return mutationQueueContainsKey(txn, this.userId, key);
}
// PORTING NOTE: Multi-tab only (state is held in memory in other clients).
/** Returns the mutation queue's metadata from IndexedDb. */
getMutationQueueMetadata(transaction) {
return mutationQueuesStore(transaction)
.get(this.userId)
.next((metadata) => {
return (metadata || {
userId: this.userId,
lastAcknowledgedBatchId: BATCHID_UNKNOWN,
lastStreamToken: ''
});
});
}
}
/**
* @returns true if the mutation queue for the given user contains a pending
* mutation for the given key.
*/
function mutationQueueContainsKey(txn, userId, key) {
const indexKey = newDbDocumentMutationPrefixForPath(userId, key.path);
const encodedPath = indexKey[1];
const startRange = IDBKeyRange.lowerBound(indexKey);
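    // Rows in the documentMutations store are keyed as
    // [userId, encodedPath, batchId], so a lower bound at [userId, encodedPath]
    // lands on the first batch that touches this document, if any.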
let containsKey = false;
return documentMutationsStore(txn)
.iterate({ range: startRange, keysOnly: true }, (key, value, control) => {
const [userID, keyPath, /*batchID*/ _] = key;
if (userID === userId && keyPath === encodedPath) {
containsKey = true;
}
control.done();
})
.next(() => containsKey);
}
/** Returns true if any mutation queue contains the given document. */
function mutationQueuesContainKey(txn, docKey) {
let found = false;
return mutationQueuesStore(txn)
.iterateSerial(userId => {
return mutationQueueContainsKey(txn, userId, docKey).next(containsKey => {
if (containsKey) {
found = true;
}
return PersistencePromise.resolve(!containsKey);
});
})
.next(() => found);
}
/**
* Helper to get a typed SimpleDbStore for the mutations object store.
*/
function mutationsStore(txn) {
return getStore(txn, DbMutationBatchStore);
}
/**
 * Helper to get a typed SimpleDbStore for the documentMutations object store.
*/
function documentMutationsStore(txn) {
return getStore(txn, DbDocumentMutationStore);
}
/**
* Helper to get a typed SimpleDbStore for the mutationQueues object store.
*/
function mutationQueuesStore(txn) {
return getStore(txn, DbMutationQueueStore);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Offset to ensure non-overlapping target ids. */
const OFFSET = 2;
/**
* Generates monotonically increasing target IDs for sending targets to the
* watch stream.
*
* The client constructs two generators, one for the target cache, and one for
* for the sync engine (to generate limbo documents targets). These
* generators produce non-overlapping IDs (by using even and odd IDs
* respectively).
*
* By separating the target ID space, the query cache can generate target IDs
 * that persist across client restarts, while the sync engine can independently
* generate in-memory target IDs that are transient and can be reused after a
* restart.
*/
class TargetIdGenerator {
constructor(lastId) {
this.lastId = lastId;
}
next() {
this.lastId += OFFSET;
return this.lastId;
}
static forTargetCache() {
// The target cache generator must return '2' in its first call to `next()`
// as there is no differentiation in the protocol layer between an unset
 * number and the number '0'. If we were to send a target with target ID
// '0', the backend would consider it unset and replace it with its own ID.
return new TargetIdGenerator(2 - OFFSET);
}
static forSyncEngine() {
// Sync engine assigns target IDs for limbo document detection.
return new TargetIdGenerator(1 - OFFSET);
}
}
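// Illustrative sketch, not invoked by the SDK: the two factory methods yield
// disjoint sequences, so persisted cache targets (even IDs) can never collide
// with transient limbo targets (odd IDs).
function exampleTargetIdGenerators() {
    const targetCacheIds = TargetIdGenerator.forTargetCache();
    const syncEngineIds = TargetIdGenerator.forSyncEngine();
    const even = [targetCacheIds.next(), targetCacheIds.next()]; // [2, 4]
    const odd = [syncEngineIds.next(), syncEngineIds.next()]; // [1, 3]
    return { even, odd };
}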
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class IndexedDbTargetCache {
constructor(referenceDelegate, serializer) {
this.referenceDelegate = referenceDelegate;
this.serializer = serializer;
}
// PORTING NOTE: We don't cache global metadata for the target cache, since
// some of it (in particular `highestTargetId`) can be modified by secondary
// tabs. We could perhaps be more granular (and e.g. still cache
// `lastRemoteSnapshotVersion` in memory) but for simplicity we currently go
// to IndexedDb whenever we need to read metadata. We can revisit if it turns
// out to have a meaningful performance impact.
allocateTargetId(transaction) {
return this.retrieveMetadata(transaction).next(metadata => {
const targetIdGenerator = new TargetIdGenerator(metadata.highestTargetId);
metadata.highestTargetId = targetIdGenerator.next();
return this.saveMetadata(transaction, metadata).next(() => metadata.highestTargetId);
});
}
getLastRemoteSnapshotVersion(transaction) {
return this.retrieveMetadata(transaction).next(metadata => {
return SnapshotVersion.fromTimestamp(new Timestamp(metadata.lastRemoteSnapshotVersion.seconds, metadata.lastRemoteSnapshotVersion.nanoseconds));
});
}
getHighestSequenceNumber(transaction) {
return this.retrieveMetadata(transaction).next(targetGlobal => targetGlobal.highestListenSequenceNumber);
}
setTargetsMetadata(transaction, highestListenSequenceNumber, lastRemoteSnapshotVersion) {
return this.retrieveMetadata(transaction).next(metadata => {
            metadata.highestListenSequenceNumber = highestListenSequenceNumber;
            if (lastRemoteSnapshotVersion) {
                metadata.lastRemoteSnapshotVersion =
                    lastRemoteSnapshotVersion.toTimestamp();
            }
return this.saveMetadata(transaction, metadata);
});
}
addTargetData(transaction, targetData) {
return this.saveTargetData(transaction, targetData).next(() => {
return this.retrieveMetadata(transaction).next(metadata => {
metadata.targetCount += 1;
this.updateMetadataFromTargetData(targetData, metadata);
return this.saveMetadata(transaction, metadata);
});
});
}
updateTargetData(transaction, targetData) {
return this.saveTargetData(transaction, targetData);
}
removeTargetData(transaction, targetData) {
return this.removeMatchingKeysForTargetId(transaction, targetData.targetId)
.next(() => targetsStore(transaction).delete(targetData.targetId))
.next(() => this.retrieveMetadata(transaction))
.next(metadata => {
hardAssert(metadata.targetCount > 0);
metadata.targetCount -= 1;
return this.saveMetadata(transaction, metadata);
});
}
/**
* Drops any targets with sequence number less than or equal to the upper bound, excepting those
* present in `activeTargetIds`. Document associations for the removed targets are also removed.
* Returns the number of targets removed.
*/
removeTargets(txn, upperBound, activeTargetIds) {
let count = 0;
const promises = [];
return targetsStore(txn)
.iterate((key, value) => {
const targetData = fromDbTarget(value);
if (targetData.sequenceNumber <= upperBound &&
activeTargetIds.get(targetData.targetId) === null) {
count++;
promises.push(this.removeTargetData(txn, targetData));
}
})
.next(() => PersistencePromise.waitFor(promises))
.next(() => count);
}
/**
* Call provided function with each `TargetData` that we have cached.
*/
forEachTarget(txn, f) {
return targetsStore(txn).iterate((key, value) => {
const targetData = fromDbTarget(value);
f(targetData);
});
}
retrieveMetadata(transaction) {
return globalTargetStore(transaction)
.get(DbTargetGlobalKey)
.next(metadata => {
hardAssert(metadata !== null);
return metadata;
});
}
saveMetadata(transaction, metadata) {
return globalTargetStore(transaction).put(DbTargetGlobalKey, metadata);
}
saveTargetData(transaction, targetData) {
return targetsStore(transaction).put(toDbTarget(this.serializer, targetData));
}
/**
     * Updates the provided metadata in place to account for values in the given
* TargetData. Saving is done separately. Returns true if there were any
* changes to the metadata.
*/
updateMetadataFromTargetData(targetData, metadata) {
let updated = false;
if (targetData.targetId > metadata.highestTargetId) {
metadata.highestTargetId = targetData.targetId;
updated = true;
}
if (targetData.sequenceNumber > metadata.highestListenSequenceNumber) {
metadata.highestListenSequenceNumber = targetData.sequenceNumber;
updated = true;
}
return updated;
}
getTargetCount(transaction) {
return this.retrieveMetadata(transaction).next(metadata => metadata.targetCount);
}
getTargetData(transaction, target) {
// Iterating by the canonicalId may yield more than one result because
// canonicalId values are not required to be unique per target. This query
// depends on the queryTargets index to be efficient.
const canonicalId = canonifyTarget(target);
const range = IDBKeyRange.bound([canonicalId, Number.NEGATIVE_INFINITY], [canonicalId, Number.POSITIVE_INFINITY]);
let result = null;
return targetsStore(transaction)
.iterate({ range, index: DbTargetQueryTargetsIndexName }, (key, value, control) => {
const found = fromDbTarget(value);
// After finding a potential match, check that the target is
// actually equal to the requested target.
if (targetEquals(target, found.target)) {
result = found;
control.done();
}
})
.next(() => result);
}
addMatchingKeys(txn, keys, targetId) {
// PORTING NOTE: The reverse index (documentsTargets) is maintained by
// IndexedDb.
const promises = [];
const store = documentTargetStore(txn);
keys.forEach(key => {
const path = encodeResourcePath(key.path);
promises.push(store.put({ targetId, path }));
promises.push(this.referenceDelegate.addReference(txn, targetId, key));
});
return PersistencePromise.waitFor(promises);
}
removeMatchingKeys(txn, keys, targetId) {
// PORTING NOTE: The reverse index (documentsTargets) is maintained by
// IndexedDb.
const store = documentTargetStore(txn);
return PersistencePromise.forEach(keys, (key) => {
const path = encodeResourcePath(key.path);
return PersistencePromise.waitFor([
store.delete([targetId, path]),
this.referenceDelegate.removeReference(txn, targetId, key)
]);
});
}
removeMatchingKeysForTargetId(txn, targetId) {
const store = documentTargetStore(txn);
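        // Array keys compare element by element, so the half-open range
        // [[targetId], [targetId + 1]) covers every key for this target.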
const range = IDBKeyRange.bound([targetId], [targetId + 1],
/*lowerOpen=*/ false,
/*upperOpen=*/ true);
return store.delete(range);
}
getMatchingKeysForTargetId(txn, targetId) {
const range = IDBKeyRange.bound([targetId], [targetId + 1],
/*lowerOpen=*/ false,
/*upperOpen=*/ true);
const store = documentTargetStore(txn);
let result = documentKeySet();
return store
.iterate({ range, keysOnly: true }, (key, _, control) => {
const path = decodeResourcePath(key[1]);
const docKey = new DocumentKey(path);
result = result.add(docKey);
})
.next(() => result);
}
containsKey(txn, key) {
const path = encodeResourcePath(key.path);
const range = IDBKeyRange.bound([path], [immediateSuccessor(path)],
/*lowerOpen=*/ false,
/*upperOpen=*/ true);
let count = 0;
return documentTargetStore(txn)
.iterate({
index: DbTargetDocumentDocumentTargetsIndex,
keysOnly: true,
range
}, ([targetId, path], _, control) => {
            // Having a sentinel row for a document does not count as containing that document.
            // For the target cache, containing the document means the document is part of some
// target.
if (targetId !== 0) {
count++;
control.done();
}
})
.next(() => count > 0);
}
/**
* Looks up a TargetData entry by target ID.
*
* @param targetId - The target ID of the TargetData entry to look up.
* @returns The cached TargetData entry, or null if the cache has no entry for
* the target.
*/
// PORTING NOTE: Multi-tab only.
getTargetDataForTarget(transaction, targetId) {
return targetsStore(transaction)
.get(targetId)
.next(found => {
if (found) {
return fromDbTarget(found);
}
else {
return null;
}
});
}
}
/**
* Helper to get a typed SimpleDbStore for the queries object store.
*/
function targetsStore(txn) {
return getStore(txn, DbTargetStore);
}
/**
* Helper to get a typed SimpleDbStore for the target globals object store.
*/
function globalTargetStore(txn) {
return getStore(txn, DbTargetGlobalStore);
}
/**
* Helper to get a typed SimpleDbStore for the document target object store.
*/
function documentTargetStore(txn) {
return getStore(txn, DbTargetDocumentStore);
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const GC_DID_NOT_RUN = {
didRun: false,
sequenceNumbersCollected: 0,
targetsRemoved: 0,
documentsRemoved: 0
};
const LRU_COLLECTION_DISABLED = -1;
const LRU_DEFAULT_CACHE_SIZE_BYTES = 40 * 1024 * 1024;
class LruParams {
constructor(
// When we attempt to collect, we will only do so if the cache size is greater than this
    // threshold. Passing `LRU_COLLECTION_DISABLED` here will cause collection to always be skipped.
cacheSizeCollectionThreshold,
// The percentage of sequence numbers that we will attempt to collect
percentileToCollect,
// A cap on the total number of sequence numbers that will be collected. This prevents
// us from collecting a huge number of sequence numbers if the cache has grown very large.
maximumSequenceNumbersToCollect) {
this.cacheSizeCollectionThreshold = cacheSizeCollectionThreshold;
this.percentileToCollect = percentileToCollect;
this.maximumSequenceNumbersToCollect = maximumSequenceNumbersToCollect;
}
static withCacheSize(cacheSize) {
return new LruParams(cacheSize, LruParams.DEFAULT_COLLECTION_PERCENTILE, LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT);
}
}
LruParams.DEFAULT_COLLECTION_PERCENTILE = 10;
LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT = 1000;
LruParams.DEFAULT = new LruParams(LRU_DEFAULT_CACHE_SIZE_BYTES, LruParams.DEFAULT_COLLECTION_PERCENTILE, LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT);
LruParams.DISABLED = new LruParams(LRU_COLLECTION_DISABLED, 0, 0);
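// Illustrative sketch, not invoked by the SDK: `withCacheSize` overrides only
// the collection threshold while keeping the default percentile and cap.
function exampleLruParams() {
    const params = LruParams.withCacheSize(20 * 1024 * 1024);
    return [
        params.cacheSizeCollectionThreshold, // 20971520 (20 MiB)
        params.percentileToCollect, // 10
        params.maximumSequenceNumbersToCollect // 1000
    ];
}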
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$e = 'LruGarbageCollector';
const LRU_MINIMUM_CACHE_SIZE_BYTES = 1 * 1024 * 1024;
/** How long we wait to try running LRU GC after SDK initialization. */
const INITIAL_GC_DELAY_MS = 1 * 60 * 1000;
/** Minimum amount of time between GC checks, after the first one. */
const REGULAR_GC_DELAY_MS = 5 * 60 * 1000;
function bufferEntryComparator([aSequence, aIndex], [bSequence, bIndex]) {
const seqCmp = primitiveComparator(aSequence, bSequence);
if (seqCmp === 0) {
// This order doesn't matter, but we can bias against churn by sorting
// entries created earlier as less than newer entries.
return primitiveComparator(aIndex, bIndex);
}
else {
return seqCmp;
}
}
/**
* Used to calculate the nth sequence number. Keeps a rolling buffer of the
* lowest n values passed to `addElement`, and finally reports the largest of
* them in `maxValue`.
*/
class RollingSequenceNumberBuffer {
constructor(maxElements) {
this.maxElements = maxElements;
this.buffer = new SortedSet(bufferEntryComparator);
this.previousIndex = 0;
}
nextIndex() {
return ++this.previousIndex;
}
addElement(sequenceNumber) {
const entry = [sequenceNumber, this.nextIndex()];
if (this.buffer.size < this.maxElements) {
this.buffer = this.buffer.add(entry);
}
else {
const highestValue = this.buffer.last();
if (bufferEntryComparator(entry, highestValue) < 0) {
this.buffer = this.buffer.delete(highestValue).add(entry);
}
}
}
get maxValue() {
// Guaranteed to be non-empty. If we decide we are not collecting any
// sequence numbers, nthSequenceNumber below short-circuits. If we have
// decided that we are collecting n sequence numbers, it's because n is some
// percentage of the existing sequence numbers. That means we should never
// be in a situation where we are collecting sequence numbers but don't
// actually have any.
return this.buffer.last()[0];
}
}
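// Illustrative sketch, not invoked by the SDK: with maxElements = 3, the
// buffer keeps the three lowest sequence numbers seen, and `maxValue` reports
// the third-lowest overall, which becomes the GC upper bound.
function exampleRollingBuffer() {
    const buffer = new RollingSequenceNumberBuffer(3);
    for (const seq of [10, 3, 7, 1, 9]) {
        buffer.addElement(seq);
    }
    return buffer.maxValue; // 7 -- the lowest three entries are 1, 3 and 7
}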
/**
* This class is responsible for the scheduling of LRU garbage collection. It handles checking
* whether or not GC is enabled, as well as which delay to use before the next run.
*/
class LruScheduler {
constructor(garbageCollector, asyncQueue, localStore) {
this.garbageCollector = garbageCollector;
this.asyncQueue = asyncQueue;
this.localStore = localStore;
this.gcTask = null;
}
start() {
if (this.garbageCollector.params.cacheSizeCollectionThreshold !==
LRU_COLLECTION_DISABLED) {
this.scheduleGC(INITIAL_GC_DELAY_MS);
}
}
stop() {
if (this.gcTask) {
this.gcTask.cancel();
this.gcTask = null;
}
}
get started() {
return this.gcTask !== null;
}
scheduleGC(delay) {
logDebug(LOG_TAG$e, `Garbage collection scheduled in ${delay}ms`);
this.gcTask = this.asyncQueue.enqueueAfterDelay("lru_garbage_collection" /* TimerId.LruGarbageCollection */, delay, async () => {
this.gcTask = null;
try {
await this.localStore.collectGarbage(this.garbageCollector);
}
catch (e) {
if (isIndexedDbTransactionError(e)) {
logDebug(LOG_TAG$e, 'Ignoring IndexedDB error during garbage collection: ', e);
}
else {
await ignoreIfPrimaryLeaseLoss(e);
}
}
await this.scheduleGC(REGULAR_GC_DELAY_MS);
});
}
}
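// Illustrative sketch, not invoked by the SDK (`collector`, `queue` and
// `localStore` are assumed to be constructed elsewhere): the scheduler runs a
// first GC pass after INITIAL_GC_DELAY_MS, then re-schedules itself every
// REGULAR_GC_DELAY_MS until stopped.
function exampleLruSchedulerLifecycle(collector, queue, localStore) {
    const scheduler = new LruScheduler(collector, queue, localStore);
    scheduler.start(); // no-op when collection is disabled via LruParams
    // ... later, e.g. on shutdown:
    scheduler.stop();
    return scheduler.started; // false once the pending task is cancelled
}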
/** Implements the steps for LRU garbage collection. */
class LruGarbageCollectorImpl {
constructor(delegate, params) {
this.delegate = delegate;
this.params = params;
}
calculateTargetCount(txn, percentile) {
return this.delegate.getSequenceNumberCount(txn).next(targetCount => {
return Math.floor((percentile / 100.0) * targetCount);
});
}
nthSequenceNumber(txn, n) {
if (n === 0) {
return PersistencePromise.resolve(ListenSequence.INVALID);
}
const buffer = new RollingSequenceNumberBuffer(n);
return this.delegate
.forEachTarget(txn, target => buffer.addElement(target.sequenceNumber))
.next(() => {
return this.delegate.forEachOrphanedDocumentSequenceNumber(txn, sequenceNumber => buffer.addElement(sequenceNumber));
})
.next(() => buffer.maxValue);
}
removeTargets(txn, upperBound, activeTargetIds) {
return this.delegate.removeTargets(txn, upperBound, activeTargetIds);
}
removeOrphanedDocuments(txn, upperBound) {
return this.delegate.removeOrphanedDocuments(txn, upperBound);
}
collect(txn, activeTargetIds) {
if (this.params.cacheSizeCollectionThreshold === LRU_COLLECTION_DISABLED) {
            logDebug(LOG_TAG$e, 'Garbage collection skipped; disabled');
return PersistencePromise.resolve(GC_DID_NOT_RUN);
}
return this.getCacheSize(txn).next(cacheSize => {
if (cacheSize < this.params.cacheSizeCollectionThreshold) {
                logDebug(LOG_TAG$e, `Garbage collection skipped; cache size ${cacheSize} ` +
`is lower than threshold ${this.params.cacheSizeCollectionThreshold}`);
return GC_DID_NOT_RUN;
}
else {
return this.runGarbageCollection(txn, activeTargetIds);
}
});
}
getCacheSize(txn) {
return this.delegate.getCacheSize(txn);
}
runGarbageCollection(txn, activeTargetIds) {
let upperBoundSequenceNumber;
let sequenceNumbersToCollect, targetsRemoved;
// Timestamps for various pieces of the process
let countedTargetsTs, foundUpperBoundTs, removedTargetsTs, removedDocumentsTs;
const startTs = Date.now();
return this.calculateTargetCount(txn, this.params.percentileToCollect)
.next(sequenceNumbers => {
// Cap at the configured max
if (sequenceNumbers > this.params.maximumSequenceNumbersToCollect) {
                logDebug(LOG_TAG$e, 'Capping sequence numbers to collect down ' +
`to the maximum of ${this.params.maximumSequenceNumbersToCollect} ` +
`from ${sequenceNumbers}`);
sequenceNumbersToCollect =
this.params.maximumSequenceNumbersToCollect;
}
else {
sequenceNumbersToCollect = sequenceNumbers;
}
countedTargetsTs = Date.now();
return this.nthSequenceNumber(txn, sequenceNumbersToCollect);
})
.next(upperBound => {
upperBoundSequenceNumber = upperBound;
foundUpperBoundTs = Date.now();
return this.removeTargets(txn, upperBoundSequenceNumber, activeTargetIds);
})
.next(numTargetsRemoved => {
targetsRemoved = numTargetsRemoved;
removedTargetsTs = Date.now();
return this.removeOrphanedDocuments(txn, upperBoundSequenceNumber);
})
.next(documentsRemoved => {
removedDocumentsTs = Date.now();
if (getLogLevel() <= logger.LogLevel.DEBUG) {
const desc = 'LRU Garbage Collection\n' +
`\tCounted targets in ${countedTargetsTs - startTs}ms\n` +
`\tDetermined least recently used ${sequenceNumbersToCollect} in ` +
`${foundUpperBoundTs - countedTargetsTs}ms\n` +
`\tRemoved ${targetsRemoved} targets in ` +
`${removedTargetsTs - foundUpperBoundTs}ms\n` +
`\tRemoved ${documentsRemoved} documents in ` +
`${removedDocumentsTs - removedTargetsTs}ms\n` +
`Total Duration: ${removedDocumentsTs - startTs}ms`;
                logDebug(LOG_TAG$e, desc);
}
return PersistencePromise.resolve({
didRun: true,
sequenceNumbersCollected: sequenceNumbersToCollect,
targetsRemoved,
documentsRemoved
});
});
}
}
function newLruGarbageCollector(delegate, params) {
return new LruGarbageCollectorImpl(delegate, params);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Provides LRU functionality for IndexedDB persistence. */
class IndexedDbLruDelegateImpl {
constructor(db, params) {
this.db = db;
this.garbageCollector = newLruGarbageCollector(this, params);
}
getSequenceNumberCount(txn) {
const docCountPromise = this.orphanedDocumentCount(txn);
const targetCountPromise = this.db.getTargetCache().getTargetCount(txn);
return targetCountPromise.next(targetCount => docCountPromise.next(docCount => targetCount + docCount));
}
orphanedDocumentCount(txn) {
let orphanedCount = 0;
return this.forEachOrphanedDocumentSequenceNumber(txn, _ => {
orphanedCount++;
}).next(() => orphanedCount);
}
forEachTarget(txn, f) {
return this.db.getTargetCache().forEachTarget(txn, f);
}
forEachOrphanedDocumentSequenceNumber(txn, f) {
return this.forEachOrphanedDocument(txn, (docKey, sequenceNumber) => f(sequenceNumber));
}
addReference(txn, targetId, key) {
return writeSentinelKey(txn, key);
}
removeReference(txn, targetId, key) {
return writeSentinelKey(txn, key);
}
removeTargets(txn, upperBound, activeTargetIds) {
return this.db.getTargetCache().removeTargets(txn, upperBound, activeTargetIds);
}
markPotentiallyOrphaned(txn, key) {
return writeSentinelKey(txn, key);
}
/**
* Returns true if anything would prevent this document from being garbage
* collected, given that the document in question is not present in any
* targets and has a sequence number less than or equal to the upper bound for
* the collection run.
*/
isPinned(txn, docKey) {
return mutationQueuesContainKey(txn, docKey);
}
removeOrphanedDocuments(txn, upperBound) {
const documentCache = this.db.getRemoteDocumentCache();
const changeBuffer = documentCache.newChangeBuffer();
const promises = [];
let documentCount = 0;
const iteration = this.forEachOrphanedDocument(txn, (docKey, sequenceNumber) => {
if (sequenceNumber <= upperBound) {
const p = this.isPinned(txn, docKey).next(isPinned => {
if (!isPinned) {
documentCount++;
// Our size accounting requires us to read all documents before
// removing them.
return changeBuffer.getEntry(txn, docKey).next(() => {
changeBuffer.removeEntry(docKey, SnapshotVersion.min());
return documentTargetStore(txn).delete(sentinelKey$1(docKey));
});
}
});
promises.push(p);
}
});
return iteration
.next(() => PersistencePromise.waitFor(promises))
.next(() => changeBuffer.apply(txn))
.next(() => documentCount);
}
removeTarget(txn, targetData) {
const updated = targetData.withSequenceNumber(txn.currentSequenceNumber);
return this.db.getTargetCache().updateTargetData(txn, updated);
}
updateLimboDocument(txn, key) {
return writeSentinelKey(txn, key);
}
/**
* Call provided function for each document in the cache that is 'orphaned'. Orphaned
* means not a part of any target, so the only entry in the target-document index for
* that document will be the sentinel row (targetId 0), which will also have the sequence
* number for the last time the document was accessed.
*/
forEachOrphanedDocument(txn, f) {
const store = documentTargetStore(txn);
let nextToReport = ListenSequence.INVALID;
let nextPath;
return store
.iterate({
index: DbTargetDocumentDocumentTargetsIndex
}, ([targetId, docKey], { path, sequenceNumber }) => {
if (targetId === 0) {
                // If nextToReport is valid, report it: this is a new key, so the
                // last one must not be a member of any targets.
if (nextToReport !== ListenSequence.INVALID) {
f(new DocumentKey(decodeResourcePath(nextPath)), nextToReport);
}
// set nextToReport to be this sequence number. It's the next one we
// might report, if we don't find any targets for this document.
// Note that the sequence number must be defined when the targetId
// is 0.
nextToReport = sequenceNumber;
nextPath = path;
}
else {
                // Set nextToReport to be invalid; we know we don't need to report
                // this one since we found a target for it.
nextToReport = ListenSequence.INVALID;
}
})
.next(() => {
// Since we report sequence numbers after getting to the next key, we
// need to check if the last key we iterated over was an orphaned
// document and report it.
if (nextToReport !== ListenSequence.INVALID) {
f(new DocumentKey(decodeResourcePath(nextPath)), nextToReport);
}
});
}
getCacheSize(txn) {
return this.db.getRemoteDocumentCache().getSize(txn);
}
}
function sentinelKey$1(key) {
return [0, encodeResourcePath(key.path)];
}
/**
* @returns A value suitable for writing a sentinel row in the target-document
* store.
*/
function sentinelRow(key, sequenceNumber) {
return { targetId: 0, path: encodeResourcePath(key.path), sequenceNumber };
}
function writeSentinelKey(txn, key) {
return documentTargetStore(txn).put(sentinelRow(key, txn.currentSequenceNumber));
}
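// Illustrative sketch, not invoked by the SDK: the sentinel row for a
// document pins targetId 0 together with the last-accessed sequence number.
function exampleSentinelRow() {
    const key = DocumentKey.fromSegments(['rooms', 'abc']);
    // => { targetId: 0, path: <encoded 'rooms/abc'>, sequenceNumber: 42 }
    return sentinelRow(key, 42);
}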
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An in-memory buffer of entries to be written to a RemoteDocumentCache.
* It can be used to batch up a set of changes to be written to the cache, but
* additionally supports reading entries back with the `getEntry()` method,
* falling back to the underlying RemoteDocumentCache if no entry is
* buffered.
*
* Entries added to the cache *must* be read first. This is to facilitate
* calculating the size delta of the pending changes.
*
* PORTING NOTE: This class was implemented then removed from other platforms.
* If byte-counting ends up being needed on the other platforms, consider
* porting this class as part of that implementation work.
*/
class RemoteDocumentChangeBuffer {
constructor() {
// A mapping of document key to the new cache entry that should be written.
this.changes = new ObjectMap(key => key.toString(), (l, r) => l.isEqual(r));
this.changesApplied = false;
}
/**
* Buffers a `RemoteDocumentCache.addEntry()` call.
*
* You can only modify documents that have already been retrieved via
     * `getEntry()/getEntries()` (enforced via IndexedDb's `apply()`).
*/
addEntry(document) {
this.assertNotApplied();
this.changes.set(document.key, document);
}
/**
* Buffers a `RemoteDocumentCache.removeEntry()` call.
*
* You can only remove documents that have already been retrieved via
     * `getEntry()/getEntries()` (enforced via IndexedDb's `apply()`).
*/
removeEntry(key, readTime) {
this.assertNotApplied();
this.changes.set(key, MutableDocument.newInvalidDocument(key).setReadTime(readTime));
}
/**
* Looks up an entry in the cache. The buffered changes will first be checked,
* and if no buffered change applies, this will forward to
* `RemoteDocumentCache.getEntry()`.
*
* @param transaction - The transaction in which to perform any persistence
* operations.
* @param documentKey - The key of the entry to look up.
* @returns The cached document or an invalid document if we have nothing
* cached.
*/
getEntry(transaction, documentKey) {
this.assertNotApplied();
const bufferedEntry = this.changes.get(documentKey);
if (bufferedEntry !== undefined) {
return PersistencePromise.resolve(bufferedEntry);
}
else {
return this.getFromCache(transaction, documentKey);
}
}
/**
* Looks up several entries in the cache, forwarding to
* `RemoteDocumentCache.getEntry()`.
*
* @param transaction - The transaction in which to perform any persistence
* operations.
* @param documentKeys - The keys of the entries to look up.
* @returns A map of cached documents, indexed by key. If an entry cannot be
* found, the corresponding key will be mapped to an invalid document.
*/
getEntries(transaction, documentKeys) {
return this.getAllFromCache(transaction, documentKeys);
}
/**
* Applies buffered changes to the underlying RemoteDocumentCache, using
* the provided transaction.
*/
apply(transaction) {
this.assertNotApplied();
this.changesApplied = true;
return this.applyChanges(transaction);
}
    /** Helper to assert that the buffered changes have not yet been applied. */
assertNotApplied() {
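        // Intentionally empty in this build; the debug-time assertion appears
        // to be stripped from release bundles.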
}
}
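// Illustrative sketch, not invoked by the SDK (`buffer`, `txn` and `docKey`
// are assumed to come from an active persistence transaction): entries must
// be read through the buffer before removal so that size deltas can be
// computed when the buffer is applied.
function exampleChangeBufferUsage(buffer, txn, docKey) {
    return buffer
        .getEntry(txn, docKey) // read first (required)
        .next(() => {
            buffer.removeEntry(docKey, SnapshotVersion.min());
            return buffer.apply(txn); // flush all buffered changes at once
        });
}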
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The RemoteDocumentCache for IndexedDb. To construct, invoke
* `newIndexedDbRemoteDocumentCache()`.
*/
class IndexedDbRemoteDocumentCacheImpl {
constructor(serializer) {
this.serializer = serializer;
}
setIndexManager(indexManager) {
this.indexManager = indexManager;
}
/**
* Adds the supplied entries to the cache.
*
* All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()` to ensure proper accounting of metadata.
*/
addEntry(transaction, key, doc) {
const documentStore = remoteDocumentsStore(transaction);
return documentStore.put(doc);
}
/**
* Removes a document from the cache.
*
* All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()` to ensure proper accounting of metadata.
*/
removeEntry(transaction, documentKey, readTime) {
const store = remoteDocumentsStore(transaction);
return store.delete(dbReadTimeKey(documentKey, readTime));
}
/**
* Updates the current cache size.
*
* Callers to `addEntry()` and `removeEntry()` *must* call this afterwards to update the
* cache's metadata.
*/
updateMetadata(transaction, sizeDelta) {
return this.getMetadata(transaction).next(metadata => {
metadata.byteSize += sizeDelta;
return this.setMetadata(transaction, metadata);
});
}
getEntry(transaction, documentKey) {
let doc = MutableDocument.newInvalidDocument(documentKey);
return remoteDocumentsStore(transaction)
.iterate({
index: DbRemoteDocumentDocumentKeyIndex,
range: IDBKeyRange.only(dbKey(documentKey))
}, (_, dbRemoteDoc) => {
doc = this.maybeDecodeDocument(documentKey, dbRemoteDoc);
})
.next(() => doc);
}
/**
* Looks up an entry in the cache.
*
* @param documentKey - The key of the entry to look up.
* @returns The cached document entry and its size.
*/
getSizedEntry(transaction, documentKey) {
let result = {
size: 0,
document: MutableDocument.newInvalidDocument(documentKey)
};
return remoteDocumentsStore(transaction)
.iterate({
index: DbRemoteDocumentDocumentKeyIndex,
range: IDBKeyRange.only(dbKey(documentKey))
}, (_, dbRemoteDoc) => {
result = {
document: this.maybeDecodeDocument(documentKey, dbRemoteDoc),
size: dbDocumentSize(dbRemoteDoc)
};
})
.next(() => result);
}
getEntries(transaction, documentKeys) {
let results = mutableDocumentMap();
return this.forEachDbEntry(transaction, documentKeys, (key, dbRemoteDoc) => {
const doc = this.maybeDecodeDocument(key, dbRemoteDoc);
results = results.insert(key, doc);
}).next(() => results);
}
/**
* Looks up several entries in the cache.
*
     * @param documentKeys - The set of keys of the entries to look up.
* @returns A map of documents indexed by key and a map of sizes indexed by
* key (zero if the document does not exist).
*/
getSizedEntries(transaction, documentKeys) {
let results = mutableDocumentMap();
let sizeMap = new SortedMap(DocumentKey.comparator);
return this.forEachDbEntry(transaction, documentKeys, (key, dbRemoteDoc) => {
const doc = this.maybeDecodeDocument(key, dbRemoteDoc);
results = results.insert(key, doc);
sizeMap = sizeMap.insert(key, dbDocumentSize(dbRemoteDoc));
}).next(() => {
return { documents: results, sizeMap };
});
}
forEachDbEntry(transaction, documentKeys, callback) {
if (documentKeys.isEmpty()) {
return PersistencePromise.resolve();
}
let sortedKeys = new SortedSet(dbKeyComparator);
documentKeys.forEach(e => (sortedKeys = sortedKeys.add(e)));
const range = IDBKeyRange.bound(dbKey(sortedKeys.first()), dbKey(sortedKeys.last()));
const keyIter = sortedKeys.getIterator();
let nextKey = keyIter.getNext();
return remoteDocumentsStore(transaction)
.iterate({ index: DbRemoteDocumentDocumentKeyIndex, range }, (_, dbRemoteDoc, control) => {
const potentialKey = DocumentKey.fromSegments([
...dbRemoteDoc.prefixPath,
dbRemoteDoc.collectionGroup,
dbRemoteDoc.documentId
]);
// Go through keys not found in cache.
while (nextKey && dbKeyComparator(nextKey, potentialKey) < 0) {
callback(nextKey, null);
nextKey = keyIter.getNext();
}
if (nextKey && nextKey.isEqual(potentialKey)) {
// Key found in cache.
callback(nextKey, dbRemoteDoc);
nextKey = keyIter.hasNext() ? keyIter.getNext() : null;
}
// Skip to the next key (if there is one).
if (nextKey) {
control.skip(dbKey(nextKey));
}
else {
control.done();
}
})
.next(() => {
// The rest of the keys are not in the cache. One case where `iterate`
// above won't go through them is when the cache is empty.
while (nextKey) {
callback(nextKey, null);
nextKey = keyIter.hasNext() ? keyIter.getNext() : null;
}
});
}
getAllFromCollection(transaction, collection, offset) {
const startKey = [
collection.popLast().toArray(),
collection.lastSegment(),
toDbTimestampKey(offset.readTime),
offset.documentKey.path.isEmpty()
? ''
: offset.documentKey.path.lastSegment()
];
const endKey = [
collection.popLast().toArray(),
collection.lastSegment(),
[Number.MAX_SAFE_INTEGER, Number.MAX_SAFE_INTEGER],
''
];
return remoteDocumentsStore(transaction)
.loadAll(IDBKeyRange.bound(startKey, endKey, true))
.next(dbRemoteDocs => {
let results = mutableDocumentMap();
for (const dbRemoteDoc of dbRemoteDocs) {
const document = this.maybeDecodeDocument(DocumentKey.fromSegments(dbRemoteDoc.prefixPath.concat(dbRemoteDoc.collectionGroup, dbRemoteDoc.documentId)), dbRemoteDoc);
results = results.insert(document.key, document);
}
return results;
});
}
getAllFromCollectionGroup(transaction, collectionGroup, offset, limit) {
let results = mutableDocumentMap();
const startKey = dbCollectionGroupKey(collectionGroup, offset);
const endKey = dbCollectionGroupKey(collectionGroup, IndexOffset.max());
return remoteDocumentsStore(transaction)
.iterate({
index: DbRemoteDocumentCollectionGroupIndex,
range: IDBKeyRange.bound(startKey, endKey, true)
}, (_, dbRemoteDoc, control) => {
const document = this.maybeDecodeDocument(DocumentKey.fromSegments(dbRemoteDoc.prefixPath.concat(dbRemoteDoc.collectionGroup, dbRemoteDoc.documentId)), dbRemoteDoc);
results = results.insert(document.key, document);
if (results.size === limit) {
control.done();
}
})
.next(() => results);
}
newChangeBuffer(options) {
return new IndexedDbRemoteDocumentChangeBuffer(this, !!options && options.trackRemovals);
}
getSize(txn) {
return this.getMetadata(txn).next(metadata => metadata.byteSize);
}
getMetadata(txn) {
return documentGlobalStore(txn)
.get(DbRemoteDocumentGlobalKey)
.next(metadata => {
hardAssert(!!metadata);
return metadata;
});
}
setMetadata(txn, metadata) {
return documentGlobalStore(txn).put(DbRemoteDocumentGlobalKey, metadata);
}
/**
* Decodes `dbRemoteDoc` and returns the document (or an invalid document if
* the document corresponds to the format used for sentinel deletes).
*/
maybeDecodeDocument(documentKey, dbRemoteDoc) {
if (dbRemoteDoc) {
const doc = fromDbRemoteDocument(this.serializer, dbRemoteDoc);
            // Whether the document is a sentinel removal, which should only be
            // surfaced by `getNewDocumentChanges()`.
const isSentinelRemoval = doc.isNoDocument() && doc.version.isEqual(SnapshotVersion.min());
if (!isSentinelRemoval) {
return doc;
}
}
return MutableDocument.newInvalidDocument(documentKey);
}
}
/** Creates a new IndexedDbRemoteDocumentCache. */
function newIndexedDbRemoteDocumentCache(serializer) {
return new IndexedDbRemoteDocumentCacheImpl(serializer);
}
/**
* Handles the details of adding and updating documents in the IndexedDbRemoteDocumentCache.
*
* Unlike the MemoryRemoteDocumentChangeBuffer, the IndexedDb implementation computes the size
* delta for all submitted changes. This avoids having to re-read all documents from IndexedDb
* when we apply the changes.
*/
class IndexedDbRemoteDocumentChangeBuffer extends RemoteDocumentChangeBuffer {
/**
* @param documentCache - The IndexedDbRemoteDocumentCache to apply the changes to.
* @param trackRemovals - Whether to create sentinel deletes that can be tracked by
* `getNewDocumentChanges()`.
*/
constructor(documentCache, trackRemovals) {
super();
this.documentCache = documentCache;
this.trackRemovals = trackRemovals;
// A map of document sizes and read times prior to applying the changes in
// this buffer.
this.documentStates = new ObjectMap(key => key.toString(), (l, r) => l.isEqual(r));
}
applyChanges(transaction) {
const promises = [];
let sizeDelta = 0;
let collectionParents = new SortedSet((l, r) => primitiveComparator(l.canonicalString(), r.canonicalString()));
this.changes.forEach((key, documentChange) => {
const previousDoc = this.documentStates.get(key);
promises.push(this.documentCache.removeEntry(transaction, key, previousDoc.readTime));
if (documentChange.isValidDocument()) {
const doc = toDbRemoteDocument(this.documentCache.serializer, documentChange);
collectionParents = collectionParents.add(key.path.popLast());
const size = dbDocumentSize(doc);
sizeDelta += size - previousDoc.size;
promises.push(this.documentCache.addEntry(transaction, key, doc));
}
else {
sizeDelta -= previousDoc.size;
if (this.trackRemovals) {
// In order to track removals, we store a "sentinel delete" in the
// RemoteDocumentCache. This entry is represented by a NoDocument
// with a version of 0 and ignored by `maybeDecodeDocument()` but
// preserved in `getNewDocumentChanges()`.
const deletedDoc = toDbRemoteDocument(this.documentCache.serializer, documentChange.convertToNoDocument(SnapshotVersion.min()));
promises.push(this.documentCache.addEntry(transaction, key, deletedDoc));
}
}
});
collectionParents.forEach(parent => {
promises.push(this.documentCache.indexManager.addToCollectionParentIndex(transaction, parent));
});
promises.push(this.documentCache.updateMetadata(transaction, sizeDelta));
return PersistencePromise.waitFor(promises);
}
getFromCache(transaction, documentKey) {
// Record the size of everything we load from the cache so we can compute a delta later.
return this.documentCache
.getSizedEntry(transaction, documentKey)
.next(getResult => {
this.documentStates.set(documentKey, {
size: getResult.size,
readTime: getResult.document.readTime
});
return getResult.document;
});
}
getAllFromCache(transaction, documentKeys) {
// Record the size of everything we load from the cache so we can compute
// a delta later.
return this.documentCache
.getSizedEntries(transaction, documentKeys)
.next(({ documents, sizeMap }) => {
// Note: `getAllFromCache` returns two maps instead of a single map from
// keys to `DocumentSizeEntry`s. This is to allow returning the
// `MutableDocumentMap` directly, without a conversion.
sizeMap.forEach((documentKey, size) => {
this.documentStates.set(documentKey, {
size,
readTime: documents.get(documentKey).readTime
});
});
return documents;
});
}
}
function documentGlobalStore(txn) {
return getStore(txn, DbRemoteDocumentGlobalStore);
}
/**
* Helper to get a typed SimpleDbStore for the remoteDocuments object store.
*/
function remoteDocumentsStore(txn) {
return getStore(txn, DbRemoteDocumentStore);
}
/**
* Returns a key that can be used for document lookups on the
* `DbRemoteDocumentDocumentKeyIndex` index.
*/
function dbKey(documentKey) {
const path = documentKey.path.toArray();
return [
/* prefix path */ path.slice(0, path.length - 2),
/* collection id */ path[path.length - 2],
/* document id */ path[path.length - 1]
];
}
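// Illustrative sketch, not invoked by the SDK: the key for 'users/alice'
// splits into an empty prefix path, the collection id and the document id.
function exampleDbKey() {
    return dbKey(DocumentKey.fromSegments(['users', 'alice']));
    // => [[], 'users', 'alice']
}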
/**
* Returns a key that can be used for document lookups via the primary key of
* the DbRemoteDocument object store.
*/
function dbReadTimeKey(documentKey, readTime) {
const path = documentKey.path.toArray();
return [
/* prefix path */ path.slice(0, path.length - 2),
/* collection id */ path[path.length - 2],
toDbTimestampKey(readTime),
/* document id */ path[path.length - 1]
];
}
/**
* Returns a key that can be used for document lookups on the
 * `DbRemoteDocumentCollectionGroupIndex` index.
*/
function dbCollectionGroupKey(collectionGroup, offset) {
const path = offset.documentKey.path.toArray();
return [
/* collection id */ collectionGroup,
toDbTimestampKey(offset.readTime),
/* prefix path */ path.slice(0, path.length - 2),
/* document id */ path.length > 0 ? path[path.length - 1] : ''
];
}
/**
* Comparator that compares document keys according to the primary key sorting
 * used by the `DbRemoteDocument` store (by prefix path, collection id
* and then document ID).
*
* Visible for testing.
*/
function dbKeyComparator(l, r) {
const left = l.path.toArray();
const right = r.path.toArray();
// The ordering is based on https://chromium.googlesource.com/chromium/blink/+/fe5c21fef94dae71c1c3344775b8d8a7f7e6d9ec/Source/modules/indexeddb/IDBKey.cpp#74
let cmp = 0;
for (let i = 0; i < left.length - 2 && i < right.length - 2; ++i) {
cmp = primitiveComparator(left[i], right[i]);
if (cmp) {
return cmp;
}
}
cmp = primitiveComparator(left.length, right.length);
if (cmp) {
return cmp;
}
cmp = primitiveComparator(left[left.length - 2], right[right.length - 2]);
if (cmp) {
return cmp;
}
return primitiveComparator(left[left.length - 1], right[right.length - 1]);
}
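// Illustrative sketch, not invoked by the SDK: keys are compared by their
// shared prefix segments first and then by length, so a collection's
// documents precede documents in any of its subcollections.
function exampleDbKeyOrdering() {
    const parent = DocumentKey.fromSegments(['rooms', 'abc']);
    const nested = DocumentKey.fromSegments(['rooms', 'abc', 'messages', 'xyz']);
    return dbKeyComparator(parent, nested); // negative: parent sorts first
}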
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Schema Version for the Web client:
* 1. Initial version including Mutation Queue, Query Cache, and Remote
* Document Cache
* 2. Used to ensure a targetGlobal object exists and add targetCount to it. No
* longer required because migration 3 unconditionally clears it.
* 3. Dropped and re-created Query Cache to deal with cache corruption related
* to limbo resolution. Addresses
* https://github.com/firebase/firebase-ios-sdk/issues/1548
* 4. Multi-Tab Support.
* 5. Removal of held write acks.
* 6. Create document global for tracking document cache size.
* 7. Ensure every cached document has a sentinel row with a sequence number.
* 8. Add collection-parent index for Collection Group queries.
* 9. Change RemoteDocumentChanges store to be keyed by readTime rather than
* an auto-incrementing ID. This is required for Index-Free queries.
* 10. Rewrite the canonical IDs to the explicit Protobuf-based format.
* 11. Add bundles and named_queries for bundle support.
* 12. Add document overlays.
* 13. Rewrite the keys of the remote document cache to allow for efficient
* document lookup via `getAll()`.
* 14. Add overlays.
* 15. Add indexing support.
*/
const SCHEMA_VERSION = 15;
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents a local view (overlay) of a document, and the fields that are
* locally mutated.
*/
class OverlayedDocument {
constructor(overlayedDocument,
/**
* The fields that are locally mutated by patch mutations.
*
* If the overlayed document is from set or delete mutations, this is `null`.
* If there is no overlay (mutation) for the document, this is an empty `FieldMask`.
*/
mutatedFields) {
this.overlayedDocument = overlayedDocument;
this.mutatedFields = mutatedFields;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A readonly view of the local state of all documents we're tracking (i.e. we
* have a cached version in remoteDocumentCache or local mutations for the
* document). The view is computed by applying the mutations in the
* MutationQueue to the RemoteDocumentCache.
*/
class LocalDocumentsView {
constructor(remoteDocumentCache, mutationQueue, documentOverlayCache, indexManager) {
this.remoteDocumentCache = remoteDocumentCache;
this.mutationQueue = mutationQueue;
this.documentOverlayCache = documentOverlayCache;
this.indexManager = indexManager;
}
/**
* Get the local view of the document identified by `key`.
*
     * @returns Local view of the document, or an invalid document if we don't
     * have any cached state for it.
*/
getDocument(transaction, key) {
let overlay = null;
return this.documentOverlayCache
.getOverlay(transaction, key)
.next(value => {
overlay = value;
return this.remoteDocumentCache.getEntry(transaction, key);
})
.next(document => {
if (overlay !== null) {
mutationApplyToLocalView(overlay.mutation, document, FieldMask.empty(), Timestamp.now());
}
return document;
});
}
/**
* Gets the local view of the documents identified by `keys`.
*
* If we don't have cached state for a document in `keys`, a NoDocument will
* be stored for that key in the resulting set.
*/
getDocuments(transaction, keys) {
return this.remoteDocumentCache
.getEntries(transaction, keys)
.next(docs => this.getLocalViewOfDocuments(transaction, docs, documentKeySet()).next(() => docs));
}
/**
* Similar to `getDocuments`, but creates the local view from the given
* `baseDocs` without retrieving documents from the local store.
*
* @param transaction - The transaction this operation is scoped to.
* @param docs - The documents to apply local mutations to get the local views.
* @param existenceStateChanged - The set of document keys whose existence state
     * has changed. This is useful to determine if a document's overlay needs
* to be recalculated.
*/
getLocalViewOfDocuments(transaction, docs, existenceStateChanged = documentKeySet()) {
const overlays = newOverlayMap();
return this.populateOverlays(transaction, overlays, docs).next(() => {
return this.computeViews(transaction, docs, overlays, existenceStateChanged).next(computeViewsResult => {
let result = documentMap();
computeViewsResult.forEach((documentKey, overlayedDocument) => {
result = result.insert(documentKey, overlayedDocument.overlayedDocument);
});
return result;
});
});
}
/**
 * Gets the overlayed documents for the given document map, which will include
 * the local view of those documents and a `FieldMask` indicating which fields
 * are mutated locally, or `null` if the overlay is a set or delete mutation.
*/
getOverlayedDocuments(transaction, docs) {
const overlays = newOverlayMap();
return this.populateOverlays(transaction, overlays, docs).next(() => this.computeViews(transaction, docs, overlays, documentKeySet()));
}
/**
     * Fetches the overlays for {@code docs} and adds them to the provided overlay map
* if the map does not already contain an entry for the given document key.
*/
populateOverlays(transaction, overlays, docs) {
const missingOverlays = [];
docs.forEach(key => {
if (!overlays.has(key)) {
missingOverlays.push(key);
}
});
return this.documentOverlayCache
.getOverlays(transaction, missingOverlays)
.next(result => {
result.forEach((key, val) => {
overlays.set(key, val);
});
});
}
/**
* Computes the local view for the given documents.
*
     * @param docs - The documents to compute views for; the map also carries the
     * base version of each document.
* @param overlays - The overlays that need to be applied to the given base
* version of the documents.
* @param existenceStateChanged - A set of documents whose existence states
* might have changed. This is used to determine if we need to re-calculate
* overlays from mutation queues.
     * @returns A map representing the local documents view.
*/
computeViews(transaction, docs, overlays, existenceStateChanged) {
let recalculateDocuments = mutableDocumentMap();
const mutatedFields = newDocumentKeyMap();
const results = newOverlayedDocumentMap();
docs.forEach((_, doc) => {
const overlay = overlays.get(doc.key);
// Recalculate an overlay if the document's existence state changed due to
// a remote event *and* the overlay is a PatchMutation. This is because
// document existence state can change if some patch mutation's
// preconditions are met.
// NOTE: we recalculate when `overlay` is undefined as well, because there
// might be a patch mutation whose precondition does not match before the
// change (hence overlay is undefined), but would now match.
if (existenceStateChanged.has(doc.key) &&
(overlay === undefined || overlay.mutation instanceof PatchMutation)) {
recalculateDocuments = recalculateDocuments.insert(doc.key, doc);
}
else if (overlay !== undefined) {
mutatedFields.set(doc.key, overlay.mutation.getFieldMask());
mutationApplyToLocalView(overlay.mutation, doc, overlay.mutation.getFieldMask(), Timestamp.now());
}
else {
// no overlay exists
// Using EMPTY to indicate there is no overlay for the document.
mutatedFields.set(doc.key, FieldMask.empty());
}
});
return this.recalculateAndSaveOverlays(transaction, recalculateDocuments).next(recalculatedFields => {
recalculatedFields.forEach((documentKey, mask) => mutatedFields.set(documentKey, mask));
docs.forEach((documentKey, document) => {
const mutatedFieldsForDoc = mutatedFields.get(documentKey);
return results.set(documentKey, new OverlayedDocument(document, mutatedFieldsForDoc !== undefined ? mutatedFieldsForDoc : null));
});
return results;
});
}
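// Sketch of the recalculation rule above: a PatchMutation whose precondition
// fails produces no overlay while its document is missing. If a remote event
// later creates the document, the key appears in `existenceStateChanged`, the
// `overlay === undefined || overlay.mutation instanceof PatchMutation` branch
// fires, and the overlay is recomputed from the mutation queue via
// recalculateAndSaveOverlays().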
recalculateAndSaveOverlays(transaction, docs) {
const masks = newDocumentKeyMap();
// A reverse lookup map from batch id to the documents within that batch.
let documentsByBatchId = new SortedMap((key1, key2) => key1 - key2);
let processed = documentKeySet();
return this.mutationQueue
.getAllMutationBatchesAffectingDocumentKeys(transaction, docs)
.next(batches => {
for (const batch of batches) {
batch.keys().forEach(key => {
const baseDoc = docs.get(key);
if (baseDoc === null) {
return;
}
let mask = masks.get(key) || FieldMask.empty();
mask = batch.applyToLocalView(baseDoc, mask);
masks.set(key, mask);
const newSet = (documentsByBatchId.get(batch.batchId) || documentKeySet()).add(key);
documentsByBatchId = documentsByBatchId.insert(batch.batchId, newSet);
});
}
})
.next(() => {
const promises = [];
// Iterate in descending order of batch IDs, and skip documents that are
// already saved.
const iter = documentsByBatchId.getReverseIterator();
while (iter.hasNext()) {
const entry = iter.getNext();
const batchId = entry.key;
const keys = entry.value;
const overlays = newMutationMap();
keys.forEach(key => {
if (!processed.has(key)) {
const overlayMutation = calculateOverlayMutation(docs.get(key), masks.get(key));
if (overlayMutation !== null) {
overlays.set(key, overlayMutation);
}
processed = processed.add(key);
}
});
promises.push(this.documentOverlayCache.saveOverlays(transaction, batchId, overlays));
}
return PersistencePromise.waitFor(promises);
})
.next(() => masks);
}
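// Bookkeeping sketch for recalculateAndSaveOverlays(): if batches 3 and 5
// both touch document 'k', `masks` ends up holding the union of both field
// masks, while the overlay for 'k' is saved exactly once, under batch 5,
// because the reverse iterator visits the highest batch ID first and then
// marks 'k' as processed.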
/**
* Recalculates overlays by reading the documents from remote document cache
* first, and saves them after they are calculated.
*/
recalculateAndSaveOverlaysForDocumentKeys(transaction, documentKeys) {
return this.remoteDocumentCache
.getEntries(transaction, documentKeys)
.next(docs => this.recalculateAndSaveOverlays(transaction, docs));
}
/**
* Performs a query against the local view of all documents.
*
* @param transaction - The persistence transaction.
* @param query - The query to match documents against.
* @param offset - Read time and key to start scanning by (exclusive).
*/
getDocumentsMatchingQuery(transaction, query, offset) {
if (isDocumentQuery$1(query)) {
return this.getDocumentsMatchingDocumentQuery(transaction, query.path);
}
else if (isCollectionGroupQuery(query)) {
return this.getDocumentsMatchingCollectionGroupQuery(transaction, query, offset);
}
else {
return this.getDocumentsMatchingCollectionQuery(transaction, query, offset);
}
}
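// Dispatch sketch for getDocumentsMatchingQuery() (paths are illustrative):
//   query at 'rooms/alpha' (a document path) -> getDocumentsMatchingDocumentQuery
//   collection group 'messages'              -> getDocumentsMatchingCollectionGroupQuery
//   query at 'rooms' (a collection path)     -> getDocumentsMatchingCollectionQuery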
/**
* Given a collection group, returns the next documents that follow the provided offset, along
* with an updated batch ID.
*
* The documents returned by this method are ordered by remote version from the provided
* offset. If there are no more remote documents after the provided offset, documents with
* mutations in order of batch id from the offset are returned. Since all documents in a batch are
* returned together, the total number of documents returned can exceed {@code count}.
*
* @param transaction - The transaction this operation is scoped to.
* @param collectionGroup - The collection group for the documents.
* @param offset - The offset to index into.
* @param count - The number of documents to return.
* @returns A LocalWriteResult with the documents that follow the provided offset and the last processed batch id.
*/
getNextDocuments(transaction, collectionGroup, offset, count) {
return this.remoteDocumentCache
.getAllFromCollectionGroup(transaction, collectionGroup, offset, count)
.next((originalDocs) => {
const overlaysPromise = count - originalDocs.size > 0
? this.documentOverlayCache.getOverlaysForCollectionGroup(transaction, collectionGroup, offset.largestBatchId, count - originalDocs.size)
: PersistencePromise.resolve(newOverlayMap());
// The callsite will use the largest batch ID together with the latest read time to create
// a new index offset. Since we only process batch IDs if all remote documents have been read,
// no overlay will increase the overall read time. This is why we only need to special case
// the batch id.
let largestBatchId = INITIAL_LARGEST_BATCH_ID;
let modifiedDocs = originalDocs;
return overlaysPromise.next(overlays => {
return PersistencePromise.forEach(overlays, (key, overlay) => {
if (largestBatchId < overlay.largestBatchId) {
largestBatchId = overlay.largestBatchId;
}
if (originalDocs.get(key)) {
return PersistencePromise.resolve();
}
return this.remoteDocumentCache
.getEntry(transaction, key)
.next(doc => {
modifiedDocs = modifiedDocs.insert(key, doc);
});
})
.next(() => this.populateOverlays(transaction, overlays, originalDocs))
.next(() => this.computeViews(transaction, modifiedDocs, overlays, documentKeySet()))
.next(localDocs => ({
batchId: largestBatchId,
changes: convertOverlayedDocumentMapToDocumentMap(localDocs)
}));
});
});
}
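// Paging sketch for getNextDocuments() (hypothetical caller such as an index
// backfiller): fetch up to `count` documents, then build the next offset from
// the latest read time seen in `changes` together with the returned batch ID.
// Remote documents are drained first; once exhausted, overlay-only documents
// are paged in batch-ID order, as described above.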
getDocumentsMatchingDocumentQuery(transaction, docPath) {
// Just do a simple document lookup.
return this.getDocument(transaction, new DocumentKey(docPath)).next(document => {
let result = documentMap();
if (document.isFoundDocument()) {
result = result.insert(document.key, document);
}
return result;
});
}
getDocumentsMatchingCollectionGroupQuery(transaction, query, offset) {
const collectionId = query.collectionGroup;
let results = documentMap();
return this.indexManager
.getCollectionParents(transaction, collectionId)
.next(parents => {
// Perform a collection query against each parent that contains the
// collectionId and aggregate the results.
return PersistencePromise.forEach(parents, (parent) => {
const collectionQuery = asCollectionQueryAtPath(query, parent.child(collectionId));
return this.getDocumentsMatchingCollectionQuery(transaction, collectionQuery, offset).next(r => {
r.forEach((key, doc) => {
results = results.insert(key, doc);
});
});
}).next(() => results);
});
}
getDocumentsMatchingCollectionQuery(transaction, query, offset) {
// Query the remote documents and overlay mutations.
let remoteDocuments;
return this.remoteDocumentCache
.getAllFromCollection(transaction, query.path, offset)
.next(queryResults => {
remoteDocuments = queryResults;
return this.documentOverlayCache.getOverlaysForCollection(transaction, query.path, offset.largestBatchId);
})
.next(overlays => {
// As documents might match the query because of their overlay, we need to
// include documents for all overlays in the initial document set.
overlays.forEach((_, overlay) => {
const key = overlay.getKey();
if (remoteDocuments.get(key) === null) {
remoteDocuments = remoteDocuments.insert(key, MutableDocument.newInvalidDocument(key));
}
});
// Apply the overlays and match against the query.
let results = documentMap();
remoteDocuments.forEach((key, document) => {
const overlay = overlays.get(key);
if (overlay !== undefined) {
mutationApplyToLocalView(overlay.mutation, document, FieldMask.empty(), Timestamp.now());
}
// Finally, insert the documents that still match the query
if (queryMatches(query, document)) {
results = results.insert(key, document);
}
});
return results;
});
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class MemoryBundleCache {
constructor(serializer) {
this.serializer = serializer;
this.bundles = new Map();
this.namedQueries = new Map();
}
getBundleMetadata(transaction, bundleId) {
return PersistencePromise.resolve(this.bundles.get(bundleId));
}
saveBundleMetadata(transaction, bundleMetadata) {
this.bundles.set(bundleMetadata.id, fromBundleMetadata(bundleMetadata));
return PersistencePromise.resolve();
}
getNamedQuery(transaction, queryName) {
return PersistencePromise.resolve(this.namedQueries.get(queryName));
}
saveNamedQuery(transaction, query) {
this.namedQueries.set(query.name, fromProtoNamedQuery(query));
return PersistencePromise.resolve();
}
}
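// Usage sketch (the metadata argument is the bundle proto; its exact shape is
// assumed here for illustration):
//   bundleCache
//     .saveBundleMetadata(txn, { id: 'bundle-1', version: 1, createTime: t })
//     .next(() => bundleCache.getBundleMetadata(txn, 'bundle-1'));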
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An in-memory implementation of DocumentOverlayCache.
*/
class MemoryDocumentOverlayCache {
constructor() {
// A map sorted by DocumentKey, whose value is a pair of the largest batch id
// for the overlay and the overlay itself.
this.overlays = new SortedMap(DocumentKey.comparator);
this.overlayByBatchId = new Map();
}
getOverlay(transaction, key) {
return PersistencePromise.resolve(this.overlays.get(key));
}
getOverlays(transaction, keys) {
const result = newOverlayMap();
return PersistencePromise.forEach(keys, (key) => {
return this.getOverlay(transaction, key).next(overlay => {
if (overlay !== null) {
result.set(key, overlay);
}
});
}).next(() => result);
}
saveOverlays(transaction, largestBatchId, overlays) {
overlays.forEach((_, mutation) => {
this.saveOverlay(transaction, largestBatchId, mutation);
});
return PersistencePromise.resolve();
}
removeOverlaysForBatchId(transaction, documentKeys, batchId) {
const keys = this.overlayByBatchId.get(batchId);
if (keys !== undefined) {
keys.forEach(key => (this.overlays = this.overlays.remove(key)));
this.overlayByBatchId.delete(batchId);
}
return PersistencePromise.resolve();
}
getOverlaysForCollection(transaction, collection, sinceBatchId) {
const result = newOverlayMap();
const immediateChildrenPathLength = collection.length + 1;
const prefix = new DocumentKey(collection.child(''));
const iter = this.overlays.getIteratorFrom(prefix);
while (iter.hasNext()) {
const entry = iter.getNext();
const overlay = entry.value;
const key = overlay.getKey();
if (!collection.isPrefixOf(key.path)) {
break;
}
// Skip documents from sub-collections.
if (key.path.length !== immediateChildrenPathLength) {
continue;
}
if (overlay.largestBatchId > sinceBatchId) {
result.set(overlay.getKey(), overlay);
}
}
return PersistencePromise.resolve(result);
}
getOverlaysForCollectionGroup(transaction, collectionGroup, sinceBatchId, count) {
let batchIdToOverlays = new SortedMap((key1, key2) => key1 - key2);
const iter = this.overlays.getIterator();
while (iter.hasNext()) {
const entry = iter.getNext();
const overlay = entry.value;
const key = overlay.getKey();
if (key.getCollectionGroup() !== collectionGroup) {
continue;
}
if (overlay.largestBatchId > sinceBatchId) {
let overlaysForBatchId = batchIdToOverlays.get(overlay.largestBatchId);
if (overlaysForBatchId === null) {
overlaysForBatchId = newOverlayMap();
batchIdToOverlays = batchIdToOverlays.insert(overlay.largestBatchId, overlaysForBatchId);
}
overlaysForBatchId.set(overlay.getKey(), overlay);
}
}
const result = newOverlayMap();
const batchIter = batchIdToOverlays.getIterator();
while (batchIter.hasNext()) {
const entry = batchIter.getNext();
const overlays = entry.value;
overlays.forEach((key, overlay) => result.set(key, overlay));
if (result.size() >= count) {
break;
}
}
return PersistencePromise.resolve(result);
}
saveOverlay(transaction, largestBatchId, mutation) {
// Remove any existing association of this overlay's key with its previous batch id.
const existing = this.overlays.get(mutation.key);
if (existing !== null) {
const newSet = this.overlayByBatchId
.get(existing.largestBatchId)
.delete(mutation.key);
this.overlayByBatchId.set(existing.largestBatchId, newSet);
}
this.overlays = this.overlays.insert(mutation.key, new Overlay(largestBatchId, mutation));
// Create the association of this overlay to the given largestBatchId.
let batch = this.overlayByBatchId.get(largestBatchId);
if (batch === undefined) {
batch = documentKeySet();
this.overlayByBatchId.set(largestBatchId, batch);
}
this.overlayByBatchId.set(largestBatchId, batch.add(mutation.key));
}
}
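// Sketch: re-saving an overlay for the same key re-homes it under the new
// batch ID, keeping both indexes consistent. `docKey`, `patchA` and `patchB`
// are assumed mutations keyed on the same document.
//   cache.saveOverlay(txn, /* largestBatchId= */ 7, patchA);
//   cache.saveOverlay(txn, /* largestBatchId= */ 9, patchB);
//   // overlays.get(docKey).largestBatchId === 9
//   // overlayByBatchId: 7 -> (empty set), 9 -> { docKey }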
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A collection of references to a document from some kind of numbered entity
* (either a target ID or batch ID). As references are added to or removed
* from the set, corresponding events are emitted to a registered garbage
* collector.
*
* Each reference is represented by a DocumentReference object. Each of them
* contains enough information to uniquely identify the reference. They are
* all stored primarily in a set sorted by key. A document is considered
* garbage if there are no references in that set (this can be efficiently
* checked thanks to sorting by key).
*
* ReferenceSet also keeps a secondary set that contains references sorted by
* IDs. This one is used to efficiently implement removal of all references by
* some target ID.
*/
class ReferenceSet {
constructor() {
// A set of outstanding references to a document sorted by key.
this.refsByKey = new SortedSet(DocReference.compareByKey);
// A set of outstanding references to a document sorted by target id.
this.refsByTarget = new SortedSet(DocReference.compareByTargetId);
}
/** Returns true if the reference set contains no references. */
isEmpty() {
return this.refsByKey.isEmpty();
}
/** Adds a reference to the given document key for the given ID. */
addReference(key, id) {
const ref = new DocReference(key, id);
this.refsByKey = this.refsByKey.add(ref);
this.refsByTarget = this.refsByTarget.add(ref);
}
/** Add references to the given document keys for the given ID. */
addReferences(keys, id) {
keys.forEach(key => this.addReference(key, id));
}
/**
* Removes a reference to the given document key for the given
* ID.
*/
removeReference(key, id) {
this.removeRef(new DocReference(key, id));
}
removeReferences(keys, id) {
keys.forEach(key => this.removeReference(key, id));
}
/**
* Clears all references with a given ID. Calls removeRef() for each key
* removed.
*/
removeReferencesForId(id) {
const emptyKey = new DocumentKey(new ResourcePath([]));
const startRef = new DocReference(emptyKey, id);
const endRef = new DocReference(emptyKey, id + 1);
const keys = [];
this.refsByTarget.forEachInRange([startRef, endRef], ref => {
this.removeRef(ref);
keys.push(ref.key);
});
return keys;
}
removeAllReferences() {
this.refsByKey.forEach(ref => this.removeRef(ref));
}
removeRef(ref) {
this.refsByKey = this.refsByKey.delete(ref);
this.refsByTarget = this.refsByTarget.delete(ref);
}
referencesForId(id) {
const emptyKey = new DocumentKey(new ResourcePath([]));
const startRef = new DocReference(emptyKey, id);
const endRef = new DocReference(emptyKey, id + 1);
let keys = documentKeySet();
this.refsByTarget.forEachInRange([startRef, endRef], ref => {
keys = keys.add(ref.key);
});
return keys;
}
containsKey(key) {
const ref = new DocReference(key, 0);
const firstRef = this.refsByKey.firstAfterOrEqual(ref);
return firstRef !== null && key.isEqual(firstRef.key);
}
}
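// Example (sketch; `keyA` and `keyB` are assumed DocumentKeys):
//   const refs = new ReferenceSet();
//   refs.addReference(keyA, /* targetId= */ 2);
//   refs.addReference(keyB, 2);
//   refs.containsKey(keyA);         // true
//   refs.removeReferencesForId(2);  // returns [keyA, keyB]
//   refs.isEmpty();                 // true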
class DocReference {
constructor(key, targetOrBatchId) {
this.key = key;
this.targetOrBatchId = targetOrBatchId;
}
/** Compare by key then by ID */
static compareByKey(left, right) {
return (DocumentKey.comparator(left.key, right.key) ||
primitiveComparator(left.targetOrBatchId, right.targetOrBatchId));
}
/** Compare by ID then by key */
static compareByTargetId(left, right) {
return (primitiveComparator(left.targetOrBatchId, right.targetOrBatchId) ||
DocumentKey.comparator(left.key, right.key));
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class MemoryMutationQueue {
constructor(indexManager, referenceDelegate) {
this.indexManager = indexManager;
this.referenceDelegate = referenceDelegate;
/**
* The set of all mutations that have been sent but not yet applied to
* the backend.
*/
this.mutationQueue = [];
/** Next value to use when assigning sequential IDs to each mutation batch. */
this.nextBatchId = 1;
/** An ordered mapping between documents and mutation batch IDs. */
this.batchesByDocumentKey = new SortedSet(DocReference.compareByKey);
}
checkEmpty(transaction) {
return PersistencePromise.resolve(this.mutationQueue.length === 0);
}
addMutationBatch(transaction, localWriteTime, baseMutations, mutations) {
const batchId = this.nextBatchId;
this.nextBatchId++;
// A debug-only assertion that the new batch sorts after the tail of the
// queue was compiled out of this build.
const batch = new MutationBatch(batchId, localWriteTime, baseMutations, mutations);
this.mutationQueue.push(batch);
// Track references by document key and index collection parents.
for (const mutation of mutations) {
this.batchesByDocumentKey = this.batchesByDocumentKey.add(new DocReference(mutation.key, batchId));
this.indexManager.addToCollectionParentIndex(transaction, mutation.key.path.popLast());
}
return PersistencePromise.resolve(batch);
}
lookupMutationBatch(transaction, batchId) {
return PersistencePromise.resolve(this.findMutationBatch(batchId));
}
getNextMutationBatchAfterBatchId(transaction, batchId) {
const nextBatchId = batchId + 1;
// The requested batchId may still be out of range, so normalize it to the
// start of the queue.
const rawIndex = this.indexOfBatchId(nextBatchId);
const index = rawIndex < 0 ? 0 : rawIndex;
return PersistencePromise.resolve(this.mutationQueue.length > index ? this.mutationQueue[index] : null);
}
getHighestUnacknowledgedBatchId() {
return PersistencePromise.resolve(this.mutationQueue.length === 0 ? BATCHID_UNKNOWN : this.nextBatchId - 1);
}
getAllMutationBatches(transaction) {
return PersistencePromise.resolve(this.mutationQueue.slice());
}
getAllMutationBatchesAffectingDocumentKey(transaction, documentKey) {
const start = new DocReference(documentKey, 0);
const end = new DocReference(documentKey, Number.POSITIVE_INFINITY);
const result = [];
this.batchesByDocumentKey.forEachInRange([start, end], ref => {
const batch = this.findMutationBatch(ref.targetOrBatchId);
result.push(batch);
});
return PersistencePromise.resolve(result);
}
getAllMutationBatchesAffectingDocumentKeys(transaction, documentKeys) {
let uniqueBatchIDs = new SortedSet(primitiveComparator);
documentKeys.forEach(documentKey => {
const start = new DocReference(documentKey, 0);
const end = new DocReference(documentKey, Number.POSITIVE_INFINITY);
this.batchesByDocumentKey.forEachInRange([start, end], ref => {
uniqueBatchIDs = uniqueBatchIDs.add(ref.targetOrBatchId);
});
});
return PersistencePromise.resolve(this.findMutationBatches(uniqueBatchIDs));
}
getAllMutationBatchesAffectingQuery(transaction, query) {
// Use the query path as a prefix for testing if a document matches the
// query.
const prefix = query.path;
const immediateChildrenPathLength = prefix.length + 1;
// Construct a document reference for actually scanning the index. Unlike
// the prefix the document key in this reference must have an even number of
// segments. The empty segment can be used as a suffix of the query path
// because it precedes all other segments in an ordered traversal.
let startPath = prefix;
if (!DocumentKey.isDocumentKey(startPath)) {
startPath = startPath.child('');
}
const start = new DocReference(new DocumentKey(startPath), 0);
// Find unique batchIDs referenced by all documents potentially matching the
// query.
let uniqueBatchIDs = new SortedSet(primitiveComparator);
this.batchesByDocumentKey.forEachWhile(ref => {
const rowKeyPath = ref.key.path;
if (!prefix.isPrefixOf(rowKeyPath)) {
return false;
}
else {
// Rows with document keys more than one segment longer than the query
// path can't be matches. For example, a query on 'rooms' can't match
// the document /rooms/abc/messages/xyx.
// TODO(mcg): we'll need a different scanner when we implement
// ancestor queries.
if (rowKeyPath.length === immediateChildrenPathLength) {
uniqueBatchIDs = uniqueBatchIDs.add(ref.targetOrBatchId);
}
return true;
}
}, start);
return PersistencePromise.resolve(this.findMutationBatches(uniqueBatchIDs));
}
findMutationBatches(batchIDs) {
// Construct an array of matching batches, sorted by batchID to ensure that
// multiple mutations affecting the same document key are applied in order.
const result = [];
batchIDs.forEach(batchId => {
const batch = this.findMutationBatch(batchId);
if (batch !== null) {
result.push(batch);
}
});
return result;
}
removeMutationBatch(transaction, batch) {
// Find the position of the first batch for removal.
const batchIndex = this.indexOfExistingBatchId(batch.batchId, 'removed');
hardAssert(batchIndex === 0);
this.mutationQueue.shift();
let references = this.batchesByDocumentKey;
return PersistencePromise.forEach(batch.mutations, (mutation) => {
const ref = new DocReference(mutation.key, batch.batchId);
references = references.delete(ref);
return this.referenceDelegate.markPotentiallyOrphaned(transaction, mutation.key);
}).next(() => {
this.batchesByDocumentKey = references;
});
}
removeCachedMutationKeys(batchId) {
// No-op since the memory mutation queue does not maintain a separate cache.
}
containsKey(txn, key) {
const ref = new DocReference(key, 0);
const firstRef = this.batchesByDocumentKey.firstAfterOrEqual(ref);
return PersistencePromise.resolve(firstRef !== null && key.isEqual(firstRef.key));
}
performConsistencyCheck(txn) {
// When the queue is empty, debug builds additionally assert that no stale
// document-key references remain; that check was compiled out of this build.
return PersistencePromise.resolve();
}
/**
* Finds the index of the given batchId in the mutation queue and, in debug
* builds, asserts that the resulting index is within the bounds of the queue.
*
* @param batchId - The batchId to search for
* @param action - A description of what the caller is doing, phrased in passive
* form (e.g. "acknowledged" in a routine that acknowledges batches).
*/
indexOfExistingBatchId(batchId, action) {
const index = this.indexOfBatchId(batchId);
return index;
}
/**
* Finds the index of the given batchId in the mutation queue. This operation
* is O(1).
*
* @returns The computed index of the batch with the given batchId, based on
* the state of the queue. Note this index can be negative if the requested
* batchId has already been removed from the queue, or past the end of the
* queue if the batchId is larger than the last added batch.
*/
indexOfBatchId(batchId) {
if (this.mutationQueue.length === 0) {
// As an index this is past the end of the queue
return 0;
}
// Examine the front of the queue to figure out the difference between the
// batchId and indexes in the array. Note that since the queue is ordered
// by batchId, if the first batch has a larger batchId then the requested
// batchId doesn't exist in the queue.
const firstBatchId = this.mutationQueue[0].batchId;
return batchId - firstBatchId;
}
/**
* A version of lookupMutationBatch that doesn't return a promise; this makes
* other functions that use this code easier to read and more efficient.
*/
findMutationBatch(batchId) {
const index = this.indexOfBatchId(batchId);
if (index < 0 || index >= this.mutationQueue.length) {
return null;
}
return this.mutationQueue[index];
}
}
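// Index-math sketch for indexOfBatchId(): if the queue holds batch IDs
// [5, 6, 7], then indexOfBatchId(6) === 6 - 5 === 1, indexOfBatchId(4) === -1
// (already removed), and indexOfBatchId(9) === 4 (past the end); both
// out-of-range results make findMutationBatch() return null.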
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function documentEntryMap() {
return new SortedMap(DocumentKey.comparator);
}
/**
* The memory-only RemoteDocumentCache. To construct, invoke
* `newMemoryRemoteDocumentCache()`.
*/
class MemoryRemoteDocumentCacheImpl {
/**
* @param sizer - Used to assess the size of a document. For eager GC, this is
* expected to just return 0 to avoid unnecessarily doing the work of
* calculating the size.
*/
constructor(sizer) {
this.sizer = sizer;
/** Underlying cache of documents and their read times. */
this.docs = documentEntryMap();
/** Size of all cached documents. */
this.size = 0;
}
setIndexManager(indexManager) {
this.indexManager = indexManager;
}
/**
* Adds the supplied entry to the cache and updates the cache size as appropriate.
*
* All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()`.
*/
addEntry(transaction, doc) {
const key = doc.key;
const entry = this.docs.get(key);
const previousSize = entry ? entry.size : 0;
const currentSize = this.sizer(doc);
this.docs = this.docs.insert(key, {
document: doc.mutableCopy(),
size: currentSize
});
this.size += currentSize - previousSize;
return this.indexManager.addToCollectionParentIndex(transaction, key.path.popLast());
}
/**
* Removes the specified entry from the cache and updates the cache size as appropriate.
*
* All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()`.
*/
removeEntry(documentKey) {
const entry = this.docs.get(documentKey);
if (entry) {
this.docs = this.docs.remove(documentKey);
this.size -= entry.size;
}
}
getEntry(transaction, documentKey) {
const entry = this.docs.get(documentKey);
return PersistencePromise.resolve(entry
? entry.document.mutableCopy()
: MutableDocument.newInvalidDocument(documentKey));
}
getEntries(transaction, documentKeys) {
let results = mutableDocumentMap();
documentKeys.forEach(documentKey => {
const entry = this.docs.get(documentKey);
results = results.insert(documentKey, entry
? entry.document.mutableCopy()
: MutableDocument.newInvalidDocument(documentKey));
});
return PersistencePromise.resolve(results);
}
getAllFromCollection(transaction, collectionPath, offset) {
let results = mutableDocumentMap();
// Documents are ordered by key, so we can use a prefix scan to narrow down
// the documents we need to match the query against.
const prefix = new DocumentKey(collectionPath.child(''));
const iterator = this.docs.getIteratorFrom(prefix);
while (iterator.hasNext()) {
const { key, value: { document } } = iterator.getNext();
if (!collectionPath.isPrefixOf(key.path)) {
break;
}
if (key.path.length > collectionPath.length + 1) {
// Exclude entries from subcollections.
continue;
}
if (indexOffsetComparator(newIndexOffsetFromDocument(document), offset) <= 0) {
// The document sorts at or before the offset, which is exclusive.
continue;
}
results = results.insert(document.key, document.mutableCopy());
}
return PersistencePromise.resolve(results);
}
getAllFromCollectionGroup(transaction, collectionGroup, offset, limit) {
// This method should only be called from the IndexBackfiller if persistence
// is enabled.
fail();
}
forEachDocumentKey(transaction, f) {
return PersistencePromise.forEach(this.docs, (key) => f(key));
}
newChangeBuffer(options) {
// `trackRemovals` is ignored since the MemoryRemoteDocumentCache keeps
// a separate changelog and does not need special handling for removals.
return new MemoryRemoteDocumentChangeBuffer(this);
}
getSize(txn) {
return PersistencePromise.resolve(this.size);
}
}
/**
* Creates a new memory-only RemoteDocumentCache.
*
* @param sizer - Used to assess the size of a document. For eager GC, this is
* expected to just return 0 to avoid unnecessarily doing the work of
* calculating the size.
*/
function newMemoryRemoteDocumentCache(sizer) {
return new MemoryRemoteDocumentCacheImpl(sizer);
}
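// Sizer sketch: eager GC skips size accounting entirely, while an LRU-style
// delegate would supply a real measurement (`estimateByteSize` is an assumed
// helper, not part of this file):
//   const eagerCache = newMemoryRemoteDocumentCache(() => 0);
//   const lruCache = newMemoryRemoteDocumentCache(doc => estimateByteSize(doc));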
/**
* Handles the details of adding and updating documents in the MemoryRemoteDocumentCache.
*/
class MemoryRemoteDocumentChangeBuffer extends RemoteDocumentChangeBuffer {
constructor(documentCache) {
super();
this.documentCache = documentCache;
}
applyChanges(transaction) {
const promises = [];
this.changes.forEach((key, doc) => {
if (doc.isValidDocument()) {
promises.push(this.documentCache.addEntry(transaction, doc));
}
else {
this.documentCache.removeEntry(key);
}
});
return PersistencePromise.waitFor(promises);
}
getFromCache(transaction, documentKey) {
return this.documentCache.getEntry(transaction, documentKey);
}
getAllFromCache(transaction, documentKeys) {
return this.documentCache.getEntries(transaction, documentKeys);
}
}
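// Buffering sketch: writes are staged in the change buffer and only hit the
// cache when the buffer is applied. `mutableDoc` and `missingKey` are assumed.
//   const buffer = cache.newChangeBuffer();
//   buffer.addEntry(mutableDoc);                             // staged only
//   buffer.removeEntry(missingKey, SnapshotVersion.min());
//   buffer.apply(txn);                                       // flush to cache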
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class MemoryTargetCache {
constructor(persistence) {
this.persistence = persistence;
/**
* Maps a target to the data about that target.
*/
this.targets = new ObjectMap(t => canonifyTarget(t), targetEquals);
/** The last received snapshot version. */
this.lastRemoteSnapshotVersion = SnapshotVersion.min();
/** The highest numbered target ID encountered. */
this.highestTargetId = 0;
/** The highest sequence number encountered. */
this.highestSequenceNumber = 0;
/**
* An ordered bidirectional mapping between documents and the remote target
* IDs.
*/
this.references = new ReferenceSet();
this.targetCount = 0;
this.targetIdGenerator = TargetIdGenerator.forTargetCache();
}
forEachTarget(txn, f) {
this.targets.forEach((_, targetData) => f(targetData));
return PersistencePromise.resolve();
}
getLastRemoteSnapshotVersion(transaction) {
return PersistencePromise.resolve(this.lastRemoteSnapshotVersion);
}
getHighestSequenceNumber(transaction) {
return PersistencePromise.resolve(this.highestSequenceNumber);
}
allocateTargetId(transaction) {
this.highestTargetId = this.targetIdGenerator.next();
return PersistencePromise.resolve(this.highestTargetId);
}
setTargetsMetadata(transaction, highestListenSequenceNumber, lastRemoteSnapshotVersion) {
if (lastRemoteSnapshotVersion) {
this.lastRemoteSnapshotVersion = lastRemoteSnapshotVersion;
}
if (highestListenSequenceNumber > this.highestSequenceNumber) {
this.highestSequenceNumber = highestListenSequenceNumber;
}
return PersistencePromise.resolve();
}
saveTargetData(targetData) {
this.targets.set(targetData.target, targetData);
const targetId = targetData.targetId;
if (targetId > this.highestTargetId) {
this.targetIdGenerator = new TargetIdGenerator(targetId);
this.highestTargetId = targetId;
}
if (targetData.sequenceNumber > this.highestSequenceNumber) {
this.highestSequenceNumber = targetData.sequenceNumber;
}
}
addTargetData(transaction, targetData) {
this.saveTargetData(targetData);
this.targetCount += 1;
return PersistencePromise.resolve();
}
updateTargetData(transaction, targetData) {
this.saveTargetData(targetData);
return PersistencePromise.resolve();
}
removeTargetData(transaction, targetData) {
this.targets.delete(targetData.target);
this.references.removeReferencesForId(targetData.targetId);
this.targetCount -= 1;
return PersistencePromise.resolve();
}
removeTargets(transaction, upperBound, activeTargetIds) {
let count = 0;
const removals = [];
this.targets.forEach((key, targetData) => {
if (targetData.sequenceNumber <= upperBound &&
activeTargetIds.get(targetData.targetId) === null) {
this.targets.delete(key);
removals.push(this.removeMatchingKeysForTargetId(transaction, targetData.targetId));
count++;
}
});
return PersistencePromise.waitFor(removals).next(() => count);
}
getTargetCount(transaction) {
return PersistencePromise.resolve(this.targetCount);
}
getTargetData(transaction, target) {
const targetData = this.targets.get(target) || null;
return PersistencePromise.resolve(targetData);
}
addMatchingKeys(txn, keys, targetId) {
this.references.addReferences(keys, targetId);
return PersistencePromise.resolve();
}
removeMatchingKeys(txn, keys, targetId) {
this.references.removeReferences(keys, targetId);
const referenceDelegate = this.persistence.referenceDelegate;
const promises = [];
if (referenceDelegate) {
keys.forEach(key => {
promises.push(referenceDelegate.markPotentiallyOrphaned(txn, key));
});
}
return PersistencePromise.waitFor(promises);
}
removeMatchingKeysForTargetId(txn, targetId) {
this.references.removeReferencesForId(targetId);
return PersistencePromise.resolve();
}
getMatchingKeysForTargetId(txn, targetId) {
const matchingKeys = this.references.referencesForId(targetId);
return PersistencePromise.resolve(matchingKeys);
}
containsKey(txn, key) {
return PersistencePromise.resolve(this.references.containsKey(key));
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$d = 'MemoryPersistence';
/**
* A memory-backed instance of Persistence. Data is stored only in RAM and
* not persisted across sessions.
*/
class MemoryPersistence {
/**
* The constructor accepts a factory for creating a reference delegate. This
* allows both the delegate and this instance to have strong references to
* each other without having nullable fields that would then need to be
* checked or asserted on every access.
*/
constructor(referenceDelegateFactory, serializer) {
this.mutationQueues = {};
this.overlays = {};
this.listenSequence = new ListenSequence(0);
this._started = true;
this.referenceDelegate = referenceDelegateFactory(this);
this.targetCache = new MemoryTargetCache(this);
const sizer = (doc) => this.referenceDelegate.documentSize(doc);
this.indexManager = new MemoryIndexManager();
this.remoteDocumentCache = newMemoryRemoteDocumentCache(sizer);
this.serializer = new LocalSerializer(serializer);
this.bundleCache = new MemoryBundleCache(this.serializer);
}
start() {
return Promise.resolve();
}
shutdown() {
// No durable state to ensure is closed on shutdown.
this._started = false;
return Promise.resolve();
}
get started() {
return this._started;
}
setDatabaseDeletedListener() {
// No op.
}
setNetworkEnabled() {
// No op.
}
getIndexManager(user) {
// We do not currently support indices for memory persistence, so we can
// return the same shared instance of the memory index manager.
return this.indexManager;
}
getDocumentOverlayCache(user) {
let overlay = this.overlays[user.toKey()];
if (!overlay) {
overlay = new MemoryDocumentOverlayCache();
this.overlays[user.toKey()] = overlay;
}
return overlay;
}
getMutationQueue(user, indexManager) {
let queue = this.mutationQueues[user.toKey()];
if (!queue) {
queue = new MemoryMutationQueue(indexManager, this.referenceDelegate);
this.mutationQueues[user.toKey()] = queue;
}
return queue;
}
getTargetCache() {
return this.targetCache;
}
getRemoteDocumentCache() {
return this.remoteDocumentCache;
}
getBundleCache() {
return this.bundleCache;
}
runTransaction(action, mode, transactionOperation) {
logDebug(LOG_TAG$d, 'Starting transaction:', action);
const txn = new MemoryTransaction(this.listenSequence.next());
this.referenceDelegate.onTransactionStarted();
return transactionOperation(txn)
.next(result => {
return this.referenceDelegate
.onTransactionCommitted(txn)
.next(() => result);
})
.toPromise()
.then(result => {
txn.raiseOnCommittedEvent();
return result;
});
}
mutationQueuesContainKey(transaction, key) {
return PersistencePromise.or(Object.values(this.mutationQueues).map(queue => () => queue.containsKey(transaction, key)));
}
}
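// Construction sketch (the remote serializer is assumed to come from the
// component provider):
//   const persistence = new MemoryPersistence(
//     MemoryEagerDelegate.factory,
//     remoteSerializer);
//   await persistence.start();
//   // ... run transactions ...
//   await persistence.shutdown();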
/**
* Memory persistence is not actually transactional, but future implementations
* may have transaction-scoped state.
*/
class MemoryTransaction extends PersistenceTransaction {
constructor(currentSequenceNumber) {
super();
this.currentSequenceNumber = currentSequenceNumber;
}
}
class MemoryEagerDelegate {
constructor(persistence) {
this.persistence = persistence;
/** Tracks all documents that are active in Query views. */
this.localViewReferences = new ReferenceSet();
/** The set of documents that are potentially GCed after each transaction. */
this._orphanedDocuments = null;
}
static factory(persistence) {
return new MemoryEagerDelegate(persistence);
}
get orphanedDocuments() {
if (!this._orphanedDocuments) {
throw fail();
}
else {
return this._orphanedDocuments;
}
}
addReference(txn, targetId, key) {
this.localViewReferences.addReference(key, targetId);
this.orphanedDocuments.delete(key.toString());
return PersistencePromise.resolve();
}
removeReference(txn, targetId, key) {
this.localViewReferences.removeReference(key, targetId);
this.orphanedDocuments.add(key.toString());
return PersistencePromise.resolve();
}
markPotentiallyOrphaned(txn, key) {
this.orphanedDocuments.add(key.toString());
return PersistencePromise.resolve();
}
removeTarget(txn, targetData) {
const orphaned = this.localViewReferences.removeReferencesForId(targetData.targetId);
orphaned.forEach(key => this.orphanedDocuments.add(key.toString()));
const cache = this.persistence.getTargetCache();
return cache
.getMatchingKeysForTargetId(txn, targetData.targetId)
.next(keys => {
keys.forEach(key => this.orphanedDocuments.add(key.toString()));
})
.next(() => cache.removeTargetData(txn, targetData));
}
onTransactionStarted() {
this._orphanedDocuments = new Set();
}
onTransactionCommitted(txn) {
// Remove newly orphaned documents.
const cache = this.persistence.getRemoteDocumentCache();
const changeBuffer = cache.newChangeBuffer();
return PersistencePromise.forEach(this.orphanedDocuments, (path) => {
const key = DocumentKey.fromPath(path);
return this.isReferenced(txn, key).next(isReferenced => {
if (!isReferenced) {
changeBuffer.removeEntry(key, SnapshotVersion.min());
}
});
}).next(() => {
this._orphanedDocuments = null;
return changeBuffer.apply(txn);
});
}
updateLimboDocument(txn, key) {
return this.isReferenced(txn, key).next(isReferenced => {
if (isReferenced) {
this.orphanedDocuments.delete(key.toString());
}
else {
this.orphanedDocuments.add(key.toString());
}
});
}
documentSize(doc) {
// For eager GC, we don't care about the document size; there are no size thresholds.
return 0;
}
isReferenced(txn, key) {
return PersistencePromise.or([
() => PersistencePromise.resolve(this.localViewReferences.containsKey(key)),
() => this.persistence.getTargetCache().containsKey(txn, key),
() => this.persistence.mutationQueuesContainKey(txn, key)
]);
}
}
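// Lifecycle sketch for eager GC: each transaction collects candidate keys and
// sweeps them on commit.
//   delegate.onTransactionStarted();             // fresh orphaned-key set
//   delegate.markPotentiallyOrphaned(txn, key);  // e.g. after a batch removal
//   delegate.onTransactionCommitted(txn);        // removes keys that have no
//                                                // target, view or mutation
//                                                // references left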
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Performs database creation and schema upgrades. */
class SchemaConverter {
constructor(serializer) {
this.serializer = serializer;
}
/**
* Performs database creation and schema upgrades.
*
* Note that in production, this method is only ever used to upgrade the schema
* to SCHEMA_VERSION. Different values of toVersion are only used for testing
* and local feature development.
*/
createOrUpgrade(db, txn, fromVersion, toVersion) {
const simpleDbTransaction = new SimpleDbTransaction('createOrUpgrade', txn);
if (fromVersion < 1 && toVersion >= 1) {
createPrimaryClientStore(db);
createMutationQueue(db);
createQueryCache(db);
createLegacyRemoteDocumentCache(db);
}
// Migration 2 to populate the targetGlobal object no longer needed since
// migration 3 unconditionally clears it.
let p = PersistencePromise.resolve();
if (fromVersion < 3 && toVersion >= 3) {
// Brand new clients don't need to drop and recreate the query cache; only
// clients that potentially have corrupt data do.
if (fromVersion !== 0) {
dropQueryCache(db);
createQueryCache(db);
}
p = p.next(() => writeEmptyTargetGlobalEntry(simpleDbTransaction));
}
if (fromVersion < 4 && toVersion >= 4) {
if (fromVersion !== 0) {
// Schema version 3 uses auto-generated keys to generate globally unique
// mutation batch IDs (this was previously ensured internally by the
// client). To migrate to the new schema, we have to read all mutations
// and write them back out. We preserve the existing batch IDs to guarantee
// consistency with other object stores. Any further mutation batch IDs will
// be auto-generated.
p = p.next(() => upgradeMutationBatchSchemaAndMigrateData(db, simpleDbTransaction));
}
p = p.next(() => {
createClientMetadataStore(db);
});
}
if (fromVersion < 5 && toVersion >= 5) {
p = p.next(() => this.removeAcknowledgedMutations(simpleDbTransaction));
}
if (fromVersion < 6 && toVersion >= 6) {
p = p.next(() => {
createDocumentGlobalStore(db);
return this.addDocumentGlobal(simpleDbTransaction);
});
}
if (fromVersion < 7 && toVersion >= 7) {
p = p.next(() => this.ensureSequenceNumbers(simpleDbTransaction));
}
if (fromVersion < 8 && toVersion >= 8) {
p = p.next(() => this.createCollectionParentIndex(db, simpleDbTransaction));
}
if (fromVersion < 9 && toVersion >= 9) {
p = p.next(() => {
// Multi-Tab used to manage its own changelog, but this has been moved
// to the DbRemoteDocument object store itself. Since the previous change
// log only contained transient data, we can drop its object store.
dropRemoteDocumentChangesStore(db);
// Note: Schema version 9 used to create a read time index for the
// RemoteDocumentCache. This is now done with schema version 13.
});
}
if (fromVersion < 10 && toVersion >= 10) {
p = p.next(() => this.rewriteCanonicalIds(simpleDbTransaction));
}
if (fromVersion < 11 && toVersion >= 11) {
p = p.next(() => {
createBundlesStore(db);
createNamedQueriesStore(db);
});
}
if (fromVersion < 12 && toVersion >= 12) {
p = p.next(() => {
createDocumentOverlayStore(db);
});
}
if (fromVersion < 13 && toVersion >= 13) {
p = p
.next(() => createRemoteDocumentCache(db))
.next(() => this.rewriteRemoteDocumentCache(db, simpleDbTransaction))
.next(() => db.deleteObjectStore(DbRemoteDocumentStore$1));
}
if (fromVersion < 14 && toVersion >= 14) {
p = p.next(() => this.runOverlayMigration(db, simpleDbTransaction));
}
if (fromVersion < 15 && toVersion >= 15) {
p = p.next(() => createFieldIndex(db));
}
return p;
}
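// Gating sketch: an upgrade from schema 3 to 15 runs every block whose range
// is crossed, in order (4, 5, ..., 15), while a fresh install
// (fromVersion === 0) skips the drop-and-rewrite steps that only repair
// pre-existing data:
//   schemaConverter.createOrUpgrade(db, txn, /* fromVersion= */ 3,
//                                   /* toVersion= */ 15);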
addDocumentGlobal(txn) {
let byteSize = 0;
return txn
.store(DbRemoteDocumentStore$1)
.iterate((_, doc) => {
byteSize += dbDocumentSize(doc);
})
.next(() => {
const metadata = { byteSize };
return txn
.store(DbRemoteDocumentGlobalStore)
.put(DbRemoteDocumentGlobalKey, metadata);
});
}
removeAcknowledgedMutations(txn) {
const queuesStore = txn.store(DbMutationQueueStore);
const mutationsStore = txn.store(DbMutationBatchStore);
return queuesStore.loadAll().next(queues => {
return PersistencePromise.forEach(queues, (queue) => {
const range = IDBKeyRange.bound([queue.userId, BATCHID_UNKNOWN], [queue.userId, queue.lastAcknowledgedBatchId]);
return mutationsStore
.loadAll(DbMutationBatchUserMutationsIndex, range)
.next(dbBatches => {
return PersistencePromise.forEach(dbBatches, (dbBatch) => {
hardAssert(dbBatch.userId === queue.userId);
const batch = fromDbMutationBatch(this.serializer, dbBatch);
return removeMutationBatch(txn, queue.userId, batch).next(() => { });
});
});
});
});
}
/**
* Ensures that every document in the remote document cache has a corresponding sentinel row
* with a sequence number. Missing rows are given the most recently used sequence number.
*/
ensureSequenceNumbers(txn) {
const documentTargetStore = txn.store(DbTargetDocumentStore);
const documentsStore = txn.store(DbRemoteDocumentStore$1);
const globalTargetStore = txn.store(DbTargetGlobalStore);
return globalTargetStore.get(DbTargetGlobalKey).next(metadata => {
const writeSentinelKey = (path) => {
return documentTargetStore.put({
targetId: 0,
path: encodeResourcePath(path),
sequenceNumber: metadata.highestListenSequenceNumber
});
};
const promises = [];
return documentsStore
.iterate((key, doc) => {
const path = new ResourcePath(key);
const docSentinelKey = sentinelKey(path);
promises.push(documentTargetStore.get(docSentinelKey).next(maybeSentinel => {
if (!maybeSentinel) {
return writeSentinelKey(path);
}
else {
return PersistencePromise.resolve();
}
}));
})
.next(() => PersistencePromise.waitFor(promises));
});
}
createCollectionParentIndex(db, txn) {
// Create the index.
db.createObjectStore(DbCollectionParentStore, {
keyPath: DbCollectionParentKeyPath
});
const collectionParentsStore = txn.store(DbCollectionParentStore);
// Helper to add an index entry iff we haven't already written it.
const cache = new MemoryCollectionParentIndex();
const addEntry = (collectionPath) => {
if (cache.add(collectionPath)) {
const collectionId = collectionPath.lastSegment();
const parentPath = collectionPath.popLast();
return collectionParentsStore.put({
collectionId,
parent: encodeResourcePath(parentPath)
});
}
};
// Index existing remote documents.
return txn
.store(DbRemoteDocumentStore$1)
.iterate({ keysOnly: true }, (pathSegments, _) => {
const path = new ResourcePath(pathSegments);
return addEntry(path.popLast());
})
.next(() => {
// Index existing mutations.
return txn
.store(DbDocumentMutationStore)
.iterate({ keysOnly: true }, ([userID, encodedPath, batchId], _) => {
const path = decodeResourcePath(encodedPath);
return addEntry(path.popLast());
});
});
}
rewriteCanonicalIds(txn) {
const targetStore = txn.store(DbTargetStore);
return targetStore.iterate((key, originalDbTarget) => {
const originalTargetData = fromDbTarget(originalDbTarget);
const updatedDbTarget = toDbTarget(this.serializer, originalTargetData);
return targetStore.put(updatedDbTarget);
});
}
rewriteRemoteDocumentCache(db, transaction) {
const legacyRemoteDocumentStore = transaction.store(DbRemoteDocumentStore$1);
const writes = [];
return legacyRemoteDocumentStore
.iterate((_, legacyDocument) => {
const remoteDocumentStore = transaction.store(DbRemoteDocumentStore);
const path = extractKey(legacyDocument).path.toArray();
const dbRemoteDocument = {
prefixPath: path.slice(0, path.length - 2),
collectionGroup: path[path.length - 2],
documentId: path[path.length - 1],
readTime: legacyDocument.readTime || [0, 0],
unknownDocument: legacyDocument.unknownDocument,
noDocument: legacyDocument.noDocument,
document: legacyDocument.document,
hasCommittedMutations: !!legacyDocument.hasCommittedMutations
};
writes.push(remoteDocumentStore.put(dbRemoteDocument));
})
.next(() => PersistencePromise.waitFor(writes));
}
runOverlayMigration(db, transaction) {
const mutationsStore = transaction.store(DbMutationBatchStore);
const remoteDocumentCache = newIndexedDbRemoteDocumentCache(this.serializer);
const memoryPersistence = new MemoryPersistence(MemoryEagerDelegate.factory, this.serializer.remoteSerializer);
return mutationsStore.loadAll().next(dbBatches => {
const userToDocumentSet = new Map();
dbBatches.forEach(dbBatch => {
let documentSet = userToDocumentSet.get(dbBatch.userId) || documentKeySet();
const batch = fromDbMutationBatch(this.serializer, dbBatch);
batch.keys().forEach(key => (documentSet = documentSet.add(key)));
userToDocumentSet.set(dbBatch.userId, documentSet);
});
return PersistencePromise.forEach(userToDocumentSet, (allDocumentKeysForUser, userId) => {
const user = new User(userId);
const documentOverlayCache = IndexedDbDocumentOverlayCache.forUser(this.serializer, user);
// NOTE: The index manager and the reference delegate are
// irrelevant for the purpose of recalculating and saving
// overlays. We can therefore simply use the memory
// implementation.
const indexManager = memoryPersistence.getIndexManager(user);
const mutationQueue = IndexedDbMutationQueue.forUser(user, this.serializer, indexManager, memoryPersistence.referenceDelegate);
const localDocumentsView = new LocalDocumentsView(remoteDocumentCache, mutationQueue, documentOverlayCache, indexManager);
return localDocumentsView
.recalculateAndSaveOverlaysForDocumentKeys(new IndexedDbTransaction(transaction, ListenSequence.INVALID), allDocumentKeysForUser)
.next();
});
});
}
}
function sentinelKey(path) {
return [0, encodeResourcePath(path)];
}
function createPrimaryClientStore(db) {
db.createObjectStore(DbPrimaryClientStore);
}
function createMutationQueue(db) {
db.createObjectStore(DbMutationQueueStore, {
keyPath: DbMutationQueueKeyPath
});
const mutationBatchesStore = db.createObjectStore(DbMutationBatchStore, {
keyPath: DbMutationBatchKeyPath,
autoIncrement: true
});
mutationBatchesStore.createIndex(DbMutationBatchUserMutationsIndex, DbMutationBatchUserMutationsKeyPath, { unique: true });
db.createObjectStore(DbDocumentMutationStore);
}
/**
* Upgrade function to migrate the 'mutations' store from V1 to V3. Loads
* and rewrites all data.
*/
function upgradeMutationBatchSchemaAndMigrateData(db, txn) {
const v1MutationsStore = txn.store(DbMutationBatchStore);
return v1MutationsStore.loadAll().next(existingMutations => {
db.deleteObjectStore(DbMutationBatchStore);
const mutationsStore = db.createObjectStore(DbMutationBatchStore, {
keyPath: DbMutationBatchKeyPath,
autoIncrement: true
});
mutationsStore.createIndex(DbMutationBatchUserMutationsIndex, DbMutationBatchUserMutationsKeyPath, { unique: true });
const v3MutationsStore = txn.store(DbMutationBatchStore);
const writeAll = existingMutations.map(mutation => v3MutationsStore.put(mutation));
return PersistencePromise.waitFor(writeAll);
});
}
function createLegacyRemoteDocumentCache(db) {
db.createObjectStore(DbRemoteDocumentStore$1);
}
function createRemoteDocumentCache(db) {
const remoteDocumentStore = db.createObjectStore(DbRemoteDocumentStore, {
keyPath: DbRemoteDocumentKeyPath
});
remoteDocumentStore.createIndex(DbRemoteDocumentDocumentKeyIndex, DbRemoteDocumentDocumentKeyIndexPath);
remoteDocumentStore.createIndex(DbRemoteDocumentCollectionGroupIndex, DbRemoteDocumentCollectionGroupIndexPath);
}
function createDocumentGlobalStore(db) {
db.createObjectStore(DbRemoteDocumentGlobalStore);
}
function createQueryCache(db) {
const targetDocumentsStore = db.createObjectStore(DbTargetDocumentStore, {
keyPath: DbTargetDocumentKeyPath
});
targetDocumentsStore.createIndex(DbTargetDocumentDocumentTargetsIndex, DbTargetDocumentDocumentTargetsKeyPath, { unique: true });
const targetStore = db.createObjectStore(DbTargetStore, {
keyPath: DbTargetKeyPath
});
// NOTE: This is unique only because the TargetId is the suffix.
targetStore.createIndex(DbTargetQueryTargetsIndexName, DbTargetQueryTargetsKeyPath, { unique: true });
db.createObjectStore(DbTargetGlobalStore);
}
function dropQueryCache(db) {
db.deleteObjectStore(DbTargetDocumentStore);
db.deleteObjectStore(DbTargetStore);
db.deleteObjectStore(DbTargetGlobalStore);
}
function dropRemoteDocumentChangesStore(db) {
if (db.objectStoreNames.contains('remoteDocumentChanges')) {
db.deleteObjectStore('remoteDocumentChanges');
}
}
/**
* Creates the target global singleton row.
*
* @param txn - The version upgrade transaction for IndexedDB.
*/
function writeEmptyTargetGlobalEntry(txn) {
const globalStore = txn.store(DbTargetGlobalStore);
const metadata = {
highestTargetId: 0,
highestListenSequenceNumber: 0,
lastRemoteSnapshotVersion: SnapshotVersion.min().toTimestamp(),
targetCount: 0
};
return globalStore.put(DbTargetGlobalKey, metadata);
}
function createClientMetadataStore(db) {
db.createObjectStore(DbClientMetadataStore, {
keyPath: DbClientMetadataKeyPath
});
}
function createBundlesStore(db) {
db.createObjectStore(DbBundleStore, {
keyPath: DbBundleKeyPath
});
}
function createNamedQueriesStore(db) {
db.createObjectStore(DbNamedQueryStore, {
keyPath: DbNamedQueryKeyPath
});
}
function createFieldIndex(db) {
const indexConfigurationStore = db.createObjectStore(DbIndexConfigurationStore, {
keyPath: DbIndexConfigurationKeyPath,
autoIncrement: true
});
indexConfigurationStore.createIndex(DbIndexConfigurationCollectionGroupIndex, DbIndexConfigurationCollectionGroupIndexPath, { unique: false });
const indexStateStore = db.createObjectStore(DbIndexStateStore, {
keyPath: DbIndexStateKeyPath
});
indexStateStore.createIndex(DbIndexStateSequenceNumberIndex, DbIndexStateSequenceNumberIndexPath, { unique: false });
const indexEntryStore = db.createObjectStore(DbIndexEntryStore, {
keyPath: DbIndexEntryKeyPath
});
indexEntryStore.createIndex(DbIndexEntryDocumentKeyIndex, DbIndexEntryDocumentKeyIndexPath, { unique: false });
}
function createDocumentOverlayStore(db) {
const documentOverlayStore = db.createObjectStore(DbDocumentOverlayStore, {
keyPath: DbDocumentOverlayKeyPath
});
documentOverlayStore.createIndex(DbDocumentOverlayCollectionPathOverlayIndex, DbDocumentOverlayCollectionPathOverlayIndexPath, { unique: false });
documentOverlayStore.createIndex(DbDocumentOverlayCollectionGroupOverlayIndex, DbDocumentOverlayCollectionGroupOverlayIndexPath, { unique: false });
}
function extractKey(remoteDoc) {
if (remoteDoc.document) {
return new DocumentKey(ResourcePath.fromString(remoteDoc.document.name).popFirst(5));
}
else if (remoteDoc.noDocument) {
return DocumentKey.fromSegments(remoteDoc.noDocument.path);
}
else if (remoteDoc.unknownDocument) {
return DocumentKey.fromSegments(remoteDoc.unknownDocument.path);
}
else {
return fail();
}
}
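// Path-math sketch for extractKey(): a v1 resource name such as
//   'projects/p/databases/d/documents/rooms/abc'
// has its first five segments ('projects', 'p', 'databases', 'd',
// 'documents') stripped by popFirst(5), leaving the document key 'rooms/abc'.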
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$c = 'IndexedDbPersistence';
/**
* Oldest acceptable age in milliseconds for client metadata before the client
* is considered inactive and its associated data is garbage collected.
*/
const MAX_CLIENT_AGE_MS = 30 * 60 * 1000; // 30 minutes
/**
* Oldest acceptable metadata age for clients that may participate in the
* primary lease election. Clients that have not updated their client metadata
* within 5 seconds are not eligible to receive a primary lease.
*/
const MAX_PRIMARY_ELIGIBLE_AGE_MS = 5000;
/**
* The interval at which clients will update their metadata, including
* refreshing their primary lease if held or potentially trying to acquire it if
* not held.
*
* Primary clients may opportunistically refresh their metadata earlier
* if they're already performing an IndexedDB operation.
*/
const CLIENT_METADATA_REFRESH_INTERVAL_MS = 4000;
/** User-facing error when the primary lease is required but not available. */
const PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG = 'Failed to obtain exclusive access to the persistence layer. To allow ' +
'shared access, multi-tab synchronization has to be enabled in all tabs. ' +
'If you are using `experimentalForceOwningTab:true`, make sure that only ' +
'one tab has persistence enabled at any given time.';
const UNSUPPORTED_PLATFORM_ERROR_MSG = 'This platform is either missing IndexedDB or is known to have ' +
'an incomplete implementation. Offline persistence has been disabled.';
// The format of the LocalStorage key that stores a zombied client is:
// firestore_zombie_<persistence_key>_<client_id>
const ZOMBIED_CLIENTS_KEY_PREFIX = 'firestore_zombie';
/**
* The name of the main (and currently only) IndexedDB database. This name is
* appended to the prefix provided to the IndexedDbPersistence constructor.
*/
const MAIN_DATABASE = 'main';
/**
* An IndexedDB-backed instance of Persistence. Data is stored persistently
* across sessions.
*
* On Web only, the Firestore SDK supports shared access to its persistence
* layer. This allows multiple browser tabs to read and write to IndexedDb and
* to synchronize state even without network connectivity. Shared access is
* currently optional and not enabled unless all clients invoke
* `enablePersistence()` with `{synchronizeTabs:true}`.
*
* In multi-tab mode, if multiple clients are active at the same time, the SDK
* will designate one client as the “primary client”. An effort is made to pick
* a visible, network-connected and active client, and this client is
* responsible for letting other clients know about its presence. The primary
* client writes a unique client-generated identifier (the client ID) to
* IndexedDb’s “owner” store every 4 seconds. If the primary client fails to
* update this entry, another client can acquire the lease and take over as
* primary.
*
* Some persistence operations in the SDK are designated as primary-client only
* operations. This includes the acknowledgment of mutations and all updates of
* remote documents. The effects of these operations are written to persistence
* and then broadcast to other tabs via LocalStorage (see
* `WebStorageSharedClientState`), which then refresh their state from
* persistence.
*
* Similarly, the primary client listens to notifications sent by secondary
* clients to discover persistence changes written by secondary clients, such as
* the addition of new mutations and query targets.
*
* If multi-tab is not enabled and another tab already obtained the primary
* lease, IndexedDbPersistence enters a failed state and all subsequent
* operations will automatically fail.
*
* Additionally, there is an optimization so that when a tab is closed, the
* primary lease is released immediately (this is especially important to make
* sure that a refreshed tab is able to immediately re-acquire the primary
* lease). Unfortunately, IndexedDB cannot be reliably used in window.unload
* since it is an asynchronous API. So in addition to attempting to give up the
* lease, the leaseholder writes its client ID to a "zombiedClient" entry in
* LocalStorage which acts as an indicator that another tab should go ahead and
* take the primary lease immediately regardless of the current lease timestamp.
*
* TODO(b/114226234): Remove `synchronizeTabs` section when multi-tab is no
* longer optional.
*/
class IndexedDbPersistence {
constructor(
/**
* Whether to synchronize the in-memory state of multiple tabs and share
* access to local persistence.
*/
allowTabSynchronization, persistenceKey, clientId, lruParams, queue, window, document, serializer, sequenceNumberSyncer,
/**
* If set to true, forcefully obtains database access. Existing tabs will
* no longer be able to access IndexedDB.
*/
forceOwningTab, schemaVersion = SCHEMA_VERSION) {
this.allowTabSynchronization = allowTabSynchronization;
this.persistenceKey = persistenceKey;
this.clientId = clientId;
this.queue = queue;
this.window = window;
this.document = document;
this.sequenceNumberSyncer = sequenceNumberSyncer;
this.forceOwningTab = forceOwningTab;
this.schemaVersion = schemaVersion;
this.listenSequence = null;
this._started = false;
this.isPrimary = false;
this.networkEnabled = true;
/** Our window.unload handler, if registered. */
this.windowUnloadHandler = null;
this.inForeground = false;
/** Our 'visibilitychange' listener if registered. */
this.documentVisibilityHandler = null;
/** The client metadata refresh task. */
this.clientMetadataRefresher = null;
/** The last time we garbage collected the client metadata object store. */
this.lastGarbageCollectionTime = Number.NEGATIVE_INFINITY;
/** A listener to notify on primary state changes. */
this.primaryStateListener = _ => Promise.resolve();
if (!IndexedDbPersistence.isAvailable()) {
throw new FirestoreError(Code.UNIMPLEMENTED, UNSUPPORTED_PLATFORM_ERROR_MSG);
}
this.referenceDelegate = new IndexedDbLruDelegateImpl(this, lruParams);
this.dbName = persistenceKey + MAIN_DATABASE;
this.serializer = new LocalSerializer(serializer);
this.simpleDb = new SimpleDb(this.dbName, this.schemaVersion, new SchemaConverter(this.serializer));
this.targetCache = new IndexedDbTargetCache(this.referenceDelegate, this.serializer);
this.remoteDocumentCache = newIndexedDbRemoteDocumentCache(this.serializer);
this.bundleCache = new IndexedDbBundleCache();
if (this.window && this.window.localStorage) {
this.webStorage = this.window.localStorage;
}
else {
this.webStorage = null;
if (forceOwningTab === false) {
logError(LOG_TAG$c, 'LocalStorage is unavailable. As a result, persistence may not work ' +
'reliably. In particular enablePersistence() could fail immediately ' +
'after refreshing the page.');
}
}
}
/**
* Attempt to start IndexedDb persistence.
*
* @returns Whether persistence was enabled.
*/
start() {
// NOTE: This is expected to fail sometimes (in the case of another tab
// already having the persistence lock), so it's the first thing we should
// do.
return this.updateClientMetadataAndTryBecomePrimary()
.then(() => {
if (!this.isPrimary && !this.allowTabSynchronization) {
// Fail `start()` if `synchronizeTabs` is disabled and we cannot
// obtain the primary lease.
throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
}
this.attachVisibilityHandler();
this.attachWindowUnloadHook();
this.scheduleClientMetadataAndPrimaryLeaseRefreshes();
return this.runTransaction('getHighestListenSequenceNumber', 'readonly', txn => this.targetCache.getHighestSequenceNumber(txn));
})
.then(highestListenSequenceNumber => {
this.listenSequence = new ListenSequence(highestListenSequenceNumber, this.sequenceNumberSyncer);
})
.then(() => {
this._started = true;
})
.catch(reason => {
this.simpleDb && this.simpleDb.close();
return Promise.reject(reason);
});
}
/**
* Registers a listener that gets called when the primary state of the
* instance changes. Upon registering, this listener is invoked immediately
* with the current primary state.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
setPrimaryStateListener(primaryStateListener) {
this.primaryStateListener = async (primaryState) => {
if (this.started) {
return primaryStateListener(primaryState);
}
};
return primaryStateListener(this.isPrimary);
}
/**
* Registers a listener that gets called when the database receives a
* version change event indicating that it has been deleted.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
setDatabaseDeletedListener(databaseDeletedListener) {
this.simpleDb.setVersionChangeListener(async (event) => {
// Check if an attempt is made to delete IndexedDB.
if (event.newVersion === null) {
await databaseDeletedListener();
}
});
}
/**
* Adjusts the current network state in the client's metadata, potentially
* affecting the primary lease.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
setNetworkEnabled(networkEnabled) {
if (this.networkEnabled !== networkEnabled) {
this.networkEnabled = networkEnabled;
// Schedule a primary lease refresh for immediate execution. The eventual
// lease update will be propagated via `primaryStateListener`.
this.queue.enqueueAndForget(async () => {
if (this.started) {
await this.updateClientMetadataAndTryBecomePrimary();
}
});
}
}
/**
* Updates the client metadata in IndexedDb and attempts to either obtain or
* extend the primary lease for the local client. Asynchronously notifies the
* primary state listener if the client either newly obtained or released its
* primary lease.
*/
updateClientMetadataAndTryBecomePrimary() {
return this.runTransaction('updateClientMetadataAndTryBecomePrimary', 'readwrite', txn => {
const metadataStore = clientMetadataStore(txn);
return metadataStore
.put({
clientId: this.clientId,
updateTimeMs: Date.now(),
networkEnabled: this.networkEnabled,
inForeground: this.inForeground
})
.next(() => {
if (this.isPrimary) {
return this.verifyPrimaryLease(txn).next(success => {
if (!success) {
this.isPrimary = false;
this.queue.enqueueRetryable(() => this.primaryStateListener(false));
}
});
}
})
.next(() => this.canActAsPrimary(txn))
.next(canActAsPrimary => {
if (this.isPrimary && !canActAsPrimary) {
return this.releasePrimaryLeaseIfHeld(txn).next(() => false);
}
else if (canActAsPrimary) {
return this.acquireOrExtendPrimaryLease(txn).next(() => true);
}
else {
return /* canActAsPrimary= */ false;
}
});
})
.catch(e => {
if (isIndexedDbTransactionError(e)) {
logDebug(LOG_TAG$c, 'Failed to extend owner lease: ', e);
// Proceed with the existing state. Any subsequent access to
// IndexedDB will verify the lease.
return this.isPrimary;
}
if (!this.allowTabSynchronization) {
throw e;
}
logDebug(LOG_TAG$c, 'Releasing owner lease after error during lease refresh', e);
return /* isPrimary= */ false;
})
.then(isPrimary => {
if (this.isPrimary !== isPrimary) {
this.queue.enqueueRetryable(() => this.primaryStateListener(isPrimary));
}
this.isPrimary = isPrimary;
});
}
verifyPrimaryLease(txn) {
const store = primaryClientStore(txn);
return store.get(DbPrimaryClientKey).next(primaryClient => {
return PersistencePromise.resolve(this.isLocalClient(primaryClient));
});
}
removeClientMetadata(txn) {
const metadataStore = clientMetadataStore(txn);
return metadataStore.delete(this.clientId);
}
/**
* If the garbage collection threshold has passed, prunes the
* RemoteDocumentChanges and the ClientMetadata store based on the last update
* time of all clients.
*/
async maybeGarbageCollectMultiClientState() {
if (this.isPrimary &&
!this.isWithinAge(this.lastGarbageCollectionTime, MAX_CLIENT_AGE_MS)) {
this.lastGarbageCollectionTime = Date.now();
const inactiveClients = await this.runTransaction('maybeGarbageCollectMultiClientState', 'readwrite-primary', txn => {
const metadataStore = getStore(txn, DbClientMetadataStore);
return metadataStore.loadAll().next(existingClients => {
const active = this.filterActiveClients(existingClients, MAX_CLIENT_AGE_MS);
const inactive = existingClients.filter(client => active.indexOf(client) === -1);
// Delete metadata for clients that are no longer considered active.
return PersistencePromise.forEach(inactive, (inactiveClient) => metadataStore.delete(inactiveClient.clientId)).next(() => inactive);
});
}).catch(() => {
// Ignore primary lease violations or any other type of error. The next
// primary will run `maybeGarbageCollectMultiClientState()` again.
// We don't use `ignoreIfPrimaryLeaseLoss()` since we don't want to depend
// on LocalStore.
return [];
});
// Delete potential leftover entries that may continue to mark the
// inactive clients as zombied in LocalStorage.
// Ideally we'd delete the IndexedDb and LocalStorage zombie entries for
// the client atomically, but we can't. So we opt to delete the IndexedDb
// entries first to avoid potentially reviving a zombied client.
if (this.webStorage) {
for (const inactiveClient of inactiveClients) {
this.webStorage.removeItem(this.zombiedClientLocalStorageKey(inactiveClient.clientId));
}
}
}
}
/**
* Schedules a recurring timer to update the client metadata and to either
* extend or acquire the primary lease if the client is eligible.
*/
scheduleClientMetadataAndPrimaryLeaseRefreshes() {
this.clientMetadataRefresher = this.queue.enqueueAfterDelay("client_metadata_refresh" /* TimerId.ClientMetadataRefresh */, CLIENT_METADATA_REFRESH_INTERVAL_MS, () => {
return this.updateClientMetadataAndTryBecomePrimary()
.then(() => this.maybeGarbageCollectMultiClientState())
.then(() => this.scheduleClientMetadataAndPrimaryLeaseRefreshes());
});
}
/** Checks whether `client` is the local client. */
isLocalClient(client) {
return client ? client.ownerId === this.clientId : false;
}
/**
* Evaluate the state of all active clients and determine whether the local
* client is or can act as the holder of the primary lease. Returns whether
* the client is eligible for the lease, but does not actually acquire it.
* May return 'false' even if there is no active leaseholder and another
* (foreground) client should become leaseholder instead.
*/
canActAsPrimary(txn) {
if (this.forceOwningTab) {
return PersistencePromise.resolve(true);
}
const store = primaryClientStore(txn);
return store
.get(DbPrimaryClientKey)
.next(currentPrimary => {
const currentLeaseIsValid = currentPrimary !== null &&
this.isWithinAge(currentPrimary.leaseTimestampMs, MAX_PRIMARY_ELIGIBLE_AGE_MS) &&
!this.isClientZombied(currentPrimary.ownerId);
// A client is eligible for the primary lease if:
// - its network is enabled and the client's tab is in the foreground.
// - its network is enabled and no other client's tab is in the
// foreground.
// - every client's network is disabled and the client's tab is in the
// foreground.
// - every client's network is disabled and no other client's tab is in
// the foreground.
// - the `forceOwningTab` setting was passed in.
if (currentLeaseIsValid) {
if (this.isLocalClient(currentPrimary) && this.networkEnabled) {
return true;
}
if (!this.isLocalClient(currentPrimary)) {
if (!currentPrimary.allowTabSynchronization) {
// Fail the `canActAsPrimary` check if the current leaseholder has
// not opted into multi-tab synchronization. If this happens at
// client startup, we reject the Promise returned by
// `enablePersistence()` and the user can continue to use Firestore
// with in-memory persistence.
// If this fails during a lease refresh, we will instead block the
// AsyncQueue from executing further operations. Note that this is
// acceptable since mixing & matching different `synchronizeTabs`
// settings is not supported.
//
// TODO(b/114226234): Remove this check when `synchronizeTabs` can
// no longer be turned off.
throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
}
return false;
}
}
if (this.networkEnabled && this.inForeground) {
return true;
}
return clientMetadataStore(txn)
.loadAll()
.next(existingClients => {
// Process all existing clients and determine whether at least one of
// them is better suited to obtain the primary lease.
const preferredCandidate = this.filterActiveClients(existingClients, MAX_PRIMARY_ELIGIBLE_AGE_MS).find(otherClient => {
if (this.clientId !== otherClient.clientId) {
const otherClientHasBetterNetworkState = !this.networkEnabled && otherClient.networkEnabled;
const otherClientHasBetterVisibility = !this.inForeground && otherClient.inForeground;
const otherClientHasSameNetworkState = this.networkEnabled === otherClient.networkEnabled;
if (otherClientHasBetterNetworkState ||
(otherClientHasBetterVisibility &&
otherClientHasSameNetworkState)) {
return true;
}
}
return false;
});
return preferredCandidate === undefined;
});
})
.next(canActAsPrimary => {
if (this.isPrimary !== canActAsPrimary) {
logDebug(LOG_TAG$c, `Client ${canActAsPrimary ? 'is' : 'is not'} eligible for a primary lease.`);
}
return canActAsPrimary;
});
}
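// Example (illustrative): if another tab holds a lease that was refreshed
// within the last MAX_PRIMARY_ELIGIBLE_AGE_MS (5 seconds) and that tab opted
// into multi-tab synchronization, canActAsPrimary() resolves to false for the
// local client, even for a networked foreground tab, until the lease expires
// or its owner is marked zombied.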
async shutdown() {
// The shutdown() operations are idempotent and can be called even when
// start() aborted (e.g. because it couldn't acquire the persistence lease).
this._started = false;
this.markClientZombied();
if (this.clientMetadataRefresher) {
this.clientMetadataRefresher.cancel();
this.clientMetadataRefresher = null;
}
this.detachVisibilityHandler();
this.detachWindowUnloadHook();
// Use `SimpleDb.runTransaction` directly to avoid failing if another tab
// has obtained the primary lease.
await this.simpleDb.runTransaction('shutdown', 'readwrite', [DbPrimaryClientStore, DbClientMetadataStore], simpleDbTxn => {
const persistenceTransaction = new IndexedDbTransaction(simpleDbTxn, ListenSequence.INVALID);
return this.releasePrimaryLeaseIfHeld(persistenceTransaction).next(() => this.removeClientMetadata(persistenceTransaction));
});
this.simpleDb.close();
// Remove the entry marking the client as zombied from LocalStorage since
// we successfully deleted its metadata from IndexedDb.
this.removeClientZombiedEntry();
}
/**
* Returns clients that are not zombied and have an updateTime within the
* provided threshold.
*/
filterActiveClients(clients, activityThresholdMs) {
return clients.filter(client => this.isWithinAge(client.updateTimeMs, activityThresholdMs) &&
!this.isClientZombied(client.clientId));
}
/**
* Returns the IDs of the clients that are currently active. If multi-tab
* is not supported, returns an array that only contains the local client's
* ID.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
getActiveClients() {
return this.runTransaction('getActiveClients', 'readonly', txn => {
return clientMetadataStore(txn)
.loadAll()
.next(clients => this.filterActiveClients(clients, MAX_CLIENT_AGE_MS).map(clientMetadata => clientMetadata.clientId));
});
}
get started() {
return this._started;
}
getMutationQueue(user, indexManager) {
return IndexedDbMutationQueue.forUser(user, this.serializer, indexManager, this.referenceDelegate);
}
getTargetCache() {
return this.targetCache;
}
getRemoteDocumentCache() {
return this.remoteDocumentCache;
}
getIndexManager(user) {
return new IndexedDbIndexManager(user, this.serializer.remoteSerializer.databaseId);
}
getDocumentOverlayCache(user) {
return IndexedDbDocumentOverlayCache.forUser(this.serializer, user);
}
getBundleCache() {
return this.bundleCache;
}
runTransaction(action, mode, transactionOperation) {
logDebug(LOG_TAG$c, 'Starting transaction:', action);
const simpleDbMode = mode === 'readonly' ? 'readonly' : 'readwrite';
const objectStores = getObjectStores(this.schemaVersion);
let persistenceTransaction;
// All transactions run against the full set of object stores;
// 'readwrite-primary' maps to IndexedDB's 'readwrite' mode, with the
// primary-lease checks layered on top below.
return this.simpleDb
.runTransaction(action, simpleDbMode, objectStores, simpleDbTxn => {
persistenceTransaction = new IndexedDbTransaction(simpleDbTxn, this.listenSequence
? this.listenSequence.next()
: ListenSequence.INVALID);
if (mode === 'readwrite-primary') {
// While we merely verify that we have (or can acquire) the lease
// immediately, we wait to extend the primary lease until after
// executing transactionOperation(). This ensures that even if the
// transactionOperation takes a long time, we'll use a recent
// leaseTimestampMs in the extended (or newly acquired) lease.
return this.verifyPrimaryLease(persistenceTransaction)
.next(holdsPrimaryLease => {
if (holdsPrimaryLease) {
return /* holdsPrimaryLease= */ true;
}
return this.canActAsPrimary(persistenceTransaction);
})
.next(holdsPrimaryLease => {
if (!holdsPrimaryLease) {
logError(`Failed to obtain primary lease for action '${action}'.`);
this.isPrimary = false;
this.queue.enqueueRetryable(() => this.primaryStateListener(false));
throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_LOST_ERROR_MSG);
}
return transactionOperation(persistenceTransaction);
})
.next(result => {
return this.acquireOrExtendPrimaryLease(persistenceTransaction).next(() => result);
});
}
else {
return this.verifyAllowTabSynchronization(persistenceTransaction).next(() => transactionOperation(persistenceTransaction));
}
})
.then(result => {
persistenceTransaction.raiseOnCommittedEvent();
return result;
});
}
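// Example of the mode semantics above (illustrative): a call such as
//   this.runTransaction('Acknowledge batch', 'readwrite-primary', txn => ...)
// verifies (or newly claims) the primary lease before running the operation
// and extends the lease afterwards, while a plain 'readwrite' transaction
// only checks `synchronizeTabs` compatibility via
// verifyAllowTabSynchronization().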
/**
* Verifies that the current tab is the primary leaseholder or alternatively
* that the leaseholder has opted into multi-tab synchronization.
*/
// TODO(b/114226234): Remove this check when `synchronizeTabs` can no longer
// be turned off.
verifyAllowTabSynchronization(txn) {
const store = primaryClientStore(txn);
return store.get(DbPrimaryClientKey).next(currentPrimary => {
const currentLeaseIsValid = currentPrimary !== null &&
this.isWithinAge(currentPrimary.leaseTimestampMs, MAX_PRIMARY_ELIGIBLE_AGE_MS) &&
!this.isClientZombied(currentPrimary.ownerId);
if (currentLeaseIsValid && !this.isLocalClient(currentPrimary)) {
if (!this.forceOwningTab &&
(!this.allowTabSynchronization ||
!currentPrimary.allowTabSynchronization)) {
throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
}
}
});
}
/**
* Obtains or extends the primary lease for the local client. This
* method does not verify that the client is eligible for this lease.
*/
acquireOrExtendPrimaryLease(txn) {
const newPrimary = {
ownerId: this.clientId,
allowTabSynchronization: this.allowTabSynchronization,
leaseTimestampMs: Date.now()
};
return primaryClientStore(txn).put(DbPrimaryClientKey, newPrimary);
}
static isAvailable() {
return SimpleDb.isAvailable();
}
/** Checks the primary lease and removes it if we are the current primary. */
releasePrimaryLeaseIfHeld(txn) {
const store = primaryClientStore(txn);
return store.get(DbPrimaryClientKey).next(primaryClient => {
if (this.isLocalClient(primaryClient)) {
logDebug(LOG_TAG$c, 'Releasing primary lease.');
return store.delete(DbPrimaryClientKey);
}
else {
return PersistencePromise.resolve();
}
});
}
/** Verifies that `updateTimeMs` is within `maxAgeMs`. */
isWithinAge(updateTimeMs, maxAgeMs) {
const now = Date.now();
const minAcceptable = now - maxAgeMs;
const maxAcceptable = now;
if (updateTimeMs < minAcceptable) {
return false;
}
else if (updateTimeMs > maxAcceptable) {
logError(`Detected an update time that is in the future: ${updateTimeMs} > ${maxAcceptable}`);
return false;
}
return true;
}
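// Worked example (illustrative): with Date.now() === 100000,
// isWithinAge(96000, 5000) returns true (within the 5 second window),
// isWithinAge(94000, 5000) returns false (too old), and
// isWithinAge(100500, 5000) logs a future-timestamp error and returns false.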
attachVisibilityHandler() {
if (this.document !== null &&
typeof this.document.addEventListener === 'function') {
this.documentVisibilityHandler = () => {
this.queue.enqueueAndForget(() => {
this.inForeground = this.document.visibilityState === 'visible';
return this.updateClientMetadataAndTryBecomePrimary();
});
};
this.document.addEventListener('visibilitychange', this.documentVisibilityHandler);
this.inForeground = this.document.visibilityState === 'visible';
}
}
detachVisibilityHandler() {
if (this.documentVisibilityHandler) {
this.document.removeEventListener('visibilitychange', this.documentVisibilityHandler);
this.documentVisibilityHandler = null;
}
}
/**
* Attaches a window.unload handler that will synchronously write our
* clientId to a "zombie client id" location in LocalStorage. This can be used
* by tabs trying to acquire the primary lease to determine that the lease
* is no longer valid even if the timestamp is recent. This is particularly
* important for the refresh case (so the tab correctly re-acquires the
* primary lease). LocalStorage is used for this rather than IndexedDb because
* it is a synchronous API and so can be used reliably from an unload
* handler.
*/
attachWindowUnloadHook() {
if (typeof this.window?.addEventListener === 'function') {
this.windowUnloadHandler = () => {
// Note: In theory, this should be scheduled on the AsyncQueue since it
// accesses internal state. We execute this code directly during shutdown
// to make sure it gets a chance to run.
this.markClientZombied();
if (util.isSafari() && navigator.appVersion.match(/Version\/1[45]/)) {
// On Safari 14 and 15, we do not run any cleanup actions as it might
// trigger a bug that prevents Safari from re-opening IndexedDB during
// the next page load.
// See https://bugs.webkit.org/show_bug.cgi?id=226547
this.queue.enterRestrictedMode(/* purgeExistingTasks= */ true);
}
this.queue.enqueueAndForget(() => {
// Attempt graceful shutdown (including releasing our primary lease),
// but there's no guarantee it will complete.
return this.shutdown();
});
};
this.window.addEventListener('pagehide', this.windowUnloadHandler);
}
}
detachWindowUnloadHook() {
if (this.windowUnloadHandler) {
this.window.removeEventListener('pagehide', this.windowUnloadHandler);
this.windowUnloadHandler = null;
}
}
/**
* Returns whether a client is "zombied" based on its LocalStorage entry.
* Clients become zombied when their tab closes without running all of the
* cleanup logic in `shutdown()`.
*/
isClientZombied(clientId) {
try {
const isZombied = this.webStorage?.getItem(this.zombiedClientLocalStorageKey(clientId)) !== null;
logDebug(LOG_TAG$c, `Client '${clientId}' ${isZombied ? 'is' : 'is not'} zombied in LocalStorage`);
return isZombied;
}
catch (e) {
// Gracefully handle if LocalStorage isn't working.
logError(LOG_TAG$c, 'Failed to get zombied client id.', e);
return false;
}
}
/**
* Record client as zombied (a client that had its tab closed). Zombied
* clients are ignored during primary tab selection.
*/
markClientZombied() {
if (!this.webStorage) {
return;
}
try {
this.webStorage.setItem(this.zombiedClientLocalStorageKey(this.clientId), String(Date.now()));
}
catch (e) {
// Gracefully handle if LocalStorage isn't available / working.
logError('Failed to set zombie client id.', e);
}
}
/** Removes the zombied client entry if it exists. */
removeClientZombiedEntry() {
if (!this.webStorage) {
return;
}
try {
this.webStorage.removeItem(this.zombiedClientLocalStorageKey(this.clientId));
}
catch (e) {
// Ignore
}
}
zombiedClientLocalStorageKey(clientId) {
return `${ZOMBIED_CLIENTS_KEY_PREFIX}_${this.persistenceKey}_${clientId}`;
}
}
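// Illustrative sketch (not part of the SDK's public surface): how the class
// above is typically wired up for multi-tab use. Constructor arguments are
// elided because the SDK assembles them internally.
//
//   const persistence = new IndexedDbPersistence(/* ... */);
//   await persistence.start();
//   await persistence.setPrimaryStateListener(async isPrimary => {
//     console.log(isPrimary ? 'Acting as primary tab.' : 'Secondary tab.');
//   });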
/**
* Helper to get a typed SimpleDbStore for the primary client object store.
*/
function primaryClientStore(txn) {
return getStore(txn, DbPrimaryClientStore);
}
/**
* Helper to get a typed SimpleDbStore for the client metadata object store.
*/
function clientMetadataStore(txn) {
return getStore(txn, DbClientMetadataStore);
}
/**
* Generates a string used as a prefix when storing data in IndexedDB and
* LocalStorage.
*/
function indexedDbStoragePrefix(databaseId, persistenceKey) {
// Use two different prefix formats:
//
// * firestore / persistenceKey / projectID . databaseID / ...
// * firestore / persistenceKey / projectID / ...
//
// projectIDs are DNS-compatible names and cannot contain dots
// so there's no danger of collisions.
let database = databaseId.projectId;
if (!databaseId.isDefaultDatabase) {
database += '.' + databaseId.database;
}
return 'firestore/' + persistenceKey + '/' + database + '/';
}
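// Example (hypothetical values): with persistenceKey '[DEFAULT]' and project
// 'my-project', the default database yields 'firestore/[DEFAULT]/my-project/'
// while a named database 'other' yields 'firestore/[DEFAULT]/my-project.other/'.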
async function indexedDbClearPersistence(persistenceKey) {
if (!SimpleDb.isAvailable()) {
return Promise.resolve();
}
const dbName = persistenceKey + MAIN_DATABASE;
await SimpleDb.delete(dbName);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Compares two arrays for equality using the given comparator. The method
* computes the
* intersection and invokes `onAdd` for every element that is in `after` but not
* `before`. `onRemove` is invoked for every element in `before` but missing
* from `after`.
*
* The method creates a copy of both `before` and `after` and runs in O(n log
* n), where n is the size of the two lists.
*
* @param before - The elements that exist in the original array.
* @param after - The elements to diff against the original array.
* @param comparator - The comparator for the elements in before and after.
* @param onAdd - A function to invoke for every element that is part of
* `after` but not `before`.
* @param onRemove - A function to invoke for every element that is part of
* `before` but not `after`.
*/
function diffArrays(before, after, comparator, onAdd, onRemove) {
before = [...before];
after = [...after];
before.sort(comparator);
after.sort(comparator);
const bLen = before.length;
const aLen = after.length;
let a = 0;
let b = 0;
while (a < aLen && b < bLen) {
const cmp = comparator(before[b], after[a]);
if (cmp < 0) {
// The element was removed if the next element in our ordered
// walkthrough is only in `before`.
onRemove(before[b++]);
}
else if (cmp > 0) {
// The element was added if the next element in our ordered walkthrough
// is only in `after`.
onAdd(after[a++]);
}
else {
a++;
b++;
}
}
while (a < aLen) {
onAdd(after[a++]);
}
while (b < bLen) {
onRemove(before[b++]);
}
}
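// Example: diffArrays([1, 3], [2, 3], (a, b) => a - b, onAdd, onRemove)
// invokes onRemove(1) and onAdd(2); 3 appears in both arrays and triggers
// neither callback.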
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$b = 'LocalStore';
/**
* The maximum time to leave a resume token buffered without writing it out.
* This value is arbitrary: it's long enough to avoid several writes
* (possibly indefinitely if updates come more frequently than this) but
* short enough that restarting after crashing will still have a pretty
* recent resume token.
*/
const RESUME_TOKEN_MAX_AGE_MICROS = 5 * 60 * 1e6;
/**
* Implements `LocalStore` interface.
*
* Note: some fields defined in this class have a public access level, but
* since the class is not exported they are only accessible from this module.
* This is useful to implement optional features (like bundles) in free
* functions, such that they are tree-shakeable.
*/
class LocalStoreImpl {
constructor(
/** Manages our in-memory or durable persistence. */
persistence, queryEngine, initialUser, serializer) {
this.persistence = persistence;
this.queryEngine = queryEngine;
this.serializer = serializer;
/**
* Maps a targetID to data about its target.
*
* PORTING NOTE: We are using an immutable data structure on Web to make re-runs
* of `applyRemoteEvent()` idempotent.
*/
this.targetDataByTarget = new SortedMap(primitiveComparator);
/** Maps a target to its targetID. */
// TODO(wuandy): Evaluate if TargetId can be part of Target.
this.targetIdByTarget = new ObjectMap(t => canonifyTarget(t), targetEquals);
/**
* A per collection group index of the last read time processed by
* `getNewDocumentChanges()`.
*
* PORTING NOTE: This is only used for multi-tab synchronization.
*/
this.collectionGroupReadTime = new Map();
this.remoteDocuments = persistence.getRemoteDocumentCache();
this.targetCache = persistence.getTargetCache();
this.bundleCache = persistence.getBundleCache();
this.initializeUserComponents(initialUser);
}
initializeUserComponents(user) {
// TODO(indexing): Add spec tests that test these components change after a
// user change
this.documentOverlayCache = this.persistence.getDocumentOverlayCache(user);
this.indexManager = this.persistence.getIndexManager(user);
this.mutationQueue = this.persistence.getMutationQueue(user, this.indexManager);
this.localDocuments = new LocalDocumentsView(this.remoteDocuments, this.mutationQueue, this.documentOverlayCache, this.indexManager);
this.remoteDocuments.setIndexManager(this.indexManager);
this.queryEngine.initialize(this.localDocuments, this.indexManager);
}
collectGarbage(garbageCollector) {
return this.persistence.runTransaction('Collect garbage', 'readwrite-primary', txn => garbageCollector.collect(txn, this.targetDataByTarget));
}
}
function newLocalStore(
/** Manages our in-memory or durable persistence. */
persistence, queryEngine, initialUser, serializer) {
return new LocalStoreImpl(persistence, queryEngine, initialUser, serializer);
}
/**
* Tells the LocalStore that the currently authenticated user has changed.
*
* In response the local store switches the mutation queue to the new user and
* returns any resulting document changes.
*/
// PORTING NOTE: Android and iOS only return the documents affected by the
// change.
async function localStoreHandleUserChange(localStore, user) {
const localStoreImpl = debugCast(localStore);
const result = await localStoreImpl.persistence.runTransaction('Handle user change', 'readonly', txn => {
// Swap out the mutation queue, grabbing the pending mutation batches
// before and after.
let oldBatches;
return localStoreImpl.mutationQueue
.getAllMutationBatches(txn)
.next(promisedOldBatches => {
oldBatches = promisedOldBatches;
localStoreImpl.initializeUserComponents(user);
return localStoreImpl.mutationQueue.getAllMutationBatches(txn);
})
.next(newBatches => {
const removedBatchIds = [];
const addedBatchIds = [];
// Union the old/new changed keys.
let changedKeys = documentKeySet();
for (const batch of oldBatches) {
removedBatchIds.push(batch.batchId);
for (const mutation of batch.mutations) {
changedKeys = changedKeys.add(mutation.key);
}
}
for (const batch of newBatches) {
addedBatchIds.push(batch.batchId);
for (const mutation of batch.mutations) {
changedKeys = changedKeys.add(mutation.key);
}
}
// Return the set of all (potentially) changed documents and the list
// of mutation batch IDs that were affected by change.
return localStoreImpl.localDocuments
.getDocuments(txn, changedKeys)
.next(affectedDocuments => {
return {
affectedDocuments,
removedBatchIds,
addedBatchIds
};
});
});
});
return result;
}
/** Accepts locally generated Mutations and commits them to storage. */
function localStoreWriteLocally(localStore, mutations) {
const localStoreImpl = debugCast(localStore);
const localWriteTime = Timestamp.now();
const keys = mutations.reduce((keys, m) => keys.add(m.key), documentKeySet());
let overlayedDocuments;
let mutationBatch;
return localStoreImpl.persistence
.runTransaction('Locally write mutations', 'readwrite', txn => {
// Figure out which keys do not have a remote version in the cache. This
// is needed to create the right overlay mutation: if no remote version
// is present, we do not need to create overlays as patch mutations.
// TODO(Overlay): Is there a better way to determine this? Using the
// document version does not work because local mutations set them back
// to 0.
let remoteDocs = mutableDocumentMap();
let docsWithoutRemoteVersion = documentKeySet();
return localStoreImpl.remoteDocuments
.getEntries(txn, keys)
.next(docs => {
remoteDocs = docs;
remoteDocs.forEach((key, doc) => {
if (!doc.isValidDocument()) {
docsWithoutRemoteVersion = docsWithoutRemoteVersion.add(key);
}
});
})
.next(() => {
// Load and apply all existing mutations. This lets us compute the
// current base state for all non-idempotent transforms before applying
// any additional user-provided writes.
return localStoreImpl.localDocuments.getOverlayedDocuments(txn, remoteDocs);
})
.next((docs) => {
overlayedDocuments = docs;
// For non-idempotent mutations (such as `FieldValue.increment()`),
// we record the base state in a separate patch mutation. This is
// later used to guarantee consistent values and prevents flicker
// even if the backend sends us an update that already includes our
// transform.
const baseMutations = [];
for (const mutation of mutations) {
const baseValue = mutationExtractBaseValue(mutation, overlayedDocuments.get(mutation.key).overlayedDocument);
if (baseValue != null) {
// NOTE: The base state should only be applied if there's some
// existing document to override, so use a Precondition of
// exists=true
baseMutations.push(new PatchMutation(mutation.key, baseValue, extractFieldMask(baseValue.value.mapValue), Precondition.exists(true)));
}
}
return localStoreImpl.mutationQueue.addMutationBatch(txn, localWriteTime, baseMutations, mutations);
})
.next(batch => {
mutationBatch = batch;
const overlays = batch.applyToLocalDocumentSet(overlayedDocuments, docsWithoutRemoteVersion);
return localStoreImpl.documentOverlayCache.saveOverlays(txn, batch.batchId, overlays);
});
})
.then(() => ({
batchId: mutationBatch.batchId,
changes: convertOverlayedDocumentMapToDocumentMap(overlayedDocuments)
}));
}
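// Worked example (illustrative): if the cached document holds { counter: 1 }
// and the user writes a FieldValue.increment(2) transform,
// mutationExtractBaseValue() yields { counter: 1 }, which is stored as a base
// PatchMutation with Precondition.exists(true). Recomputing the local view
// then always produces 1 + 2 = 3 until the server acknowledges the write,
// even if an intervening watch update already includes the transform result.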
/**
* Acknowledges the given batch.
*
* On the happy path when a batch is acknowledged, the local store will
*
* + remove the batch from the mutation queue;
* + apply the changes to the remote document cache;
* + recalculate the latency compensated view implied by those changes (there
* may be mutations in the queue that affect the documents but haven't been
* acknowledged yet); and
* + give the changed documents back to the sync engine
*
* @returns The resulting (modified) documents.
*/
function localStoreAcknowledgeBatch(localStore, batchResult) {
const localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Acknowledge batch', 'readwrite-primary', txn => {
const affected = batchResult.batch.keys();
const documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
});
return applyWriteToRemoteDocuments(localStoreImpl, txn, batchResult, documentBuffer)
.next(() => documentBuffer.apply(txn))
.next(() => localStoreImpl.mutationQueue.performConsistencyCheck(txn))
.next(() => localStoreImpl.documentOverlayCache.removeOverlaysForBatchId(txn, affected, batchResult.batch.batchId))
.next(() => localStoreImpl.localDocuments.recalculateAndSaveOverlaysForDocumentKeys(txn, getKeysWithTransformResults(batchResult)))
.next(() => localStoreImpl.localDocuments.getDocuments(txn, affected));
});
}
function getKeysWithTransformResults(batchResult) {
let result = documentKeySet();
for (let i = 0; i < batchResult.mutationResults.length; ++i) {
const mutationResult = batchResult.mutationResults[i];
if (mutationResult.transformResults.length > 0) {
result = result.add(batchResult.batch.mutations[i].key);
}
}
return result;
}
/**
* Removes mutations from the MutationQueue for the specified batch;
* LocalDocuments will be recalculated.
*
* @returns The resulting modified documents.
*/
function localStoreRejectBatch(localStore, batchId) {
const localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Reject batch', 'readwrite-primary', txn => {
let affectedKeys;
return localStoreImpl.mutationQueue
.lookupMutationBatch(txn, batchId)
.next((batch) => {
hardAssert(batch !== null);
affectedKeys = batch.keys();
return localStoreImpl.mutationQueue.removeMutationBatch(txn, batch);
})
.next(() => localStoreImpl.mutationQueue.performConsistencyCheck(txn))
.next(() => localStoreImpl.documentOverlayCache.removeOverlaysForBatchId(txn, affectedKeys, batchId))
.next(() => localStoreImpl.localDocuments.recalculateAndSaveOverlaysForDocumentKeys(txn, affectedKeys))
.next(() => localStoreImpl.localDocuments.getDocuments(txn, affectedKeys));
});
}
/**
* Returns the largest (latest) batch id in the mutation queue that is pending
* a server response.
*
* Returns `BATCHID_UNKNOWN` if the queue is empty.
*/
function localStoreGetHighestUnacknowledgedBatchId(localStore) {
const localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Get highest unacknowledged batch id', 'readonly', txn => localStoreImpl.mutationQueue.getHighestUnacknowledgedBatchId(txn));
}
/**
* Returns the last consistent snapshot processed (used by the RemoteStore to
* determine whether to buffer incoming snapshots from the backend).
*/
function localStoreGetLastRemoteSnapshotVersion(localStore) {
const localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Get last remote snapshot version', 'readonly', txn => localStoreImpl.targetCache.getLastRemoteSnapshotVersion(txn));
}
/**
* Updates the "ground-state" (remote) documents. We assume that the remote
* event reflects any write batches that have been acknowledged or rejected
* (i.e. we do not re-apply local mutations to updates from this event).
*
* LocalDocuments are re-calculated if there are remaining mutations in the
* queue.
*/
function localStoreApplyRemoteEventToLocalCache(localStore, remoteEvent) {
const localStoreImpl = debugCast(localStore);
const remoteVersion = remoteEvent.snapshotVersion;
let newTargetDataByTargetMap = localStoreImpl.targetDataByTarget;
return localStoreImpl.persistence
.runTransaction('Apply remote event', 'readwrite-primary', txn => {
const documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
});
// Reset newTargetDataByTargetMap in case this transaction gets re-run.
newTargetDataByTargetMap = localStoreImpl.targetDataByTarget;
const promises = [];
remoteEvent.targetChanges.forEach((change, targetId) => {
const oldTargetData = newTargetDataByTargetMap.get(targetId);
if (!oldTargetData) {
return;
}
// Only update the remote keys if the target is still active. This
// ensures that we can persist the updated target data along with
// the updated assignment.
promises.push(localStoreImpl.targetCache
.removeMatchingKeys(txn, change.removedDocuments, targetId)
.next(() => {
return localStoreImpl.targetCache.addMatchingKeys(txn, change.addedDocuments, targetId);
}));
let newTargetData = oldTargetData.withSequenceNumber(txn.currentSequenceNumber);
if (remoteEvent.targetMismatches.has(targetId)) {
newTargetData = newTargetData
.withResumeToken(ByteString.EMPTY_BYTE_STRING, SnapshotVersion.min())
.withLastLimboFreeSnapshotVersion(SnapshotVersion.min());
}
else if (change.resumeToken.approximateByteSize() > 0) {
newTargetData = newTargetData.withResumeToken(change.resumeToken, remoteVersion);
}
newTargetDataByTargetMap = newTargetDataByTargetMap.insert(targetId, newTargetData);
// Update the target data if there are target changes (or if
// sufficient time has passed since the last update).
if (shouldPersistTargetData(oldTargetData, newTargetData, change)) {
promises.push(localStoreImpl.targetCache.updateTargetData(txn, newTargetData));
}
});
let changedDocs = mutableDocumentMap();
let existenceChangedKeys = documentKeySet();
remoteEvent.documentUpdates.forEach(key => {
if (remoteEvent.resolvedLimboDocuments.has(key)) {
promises.push(localStoreImpl.persistence.referenceDelegate.updateLimboDocument(txn, key));
}
});
// Each loop iteration only affects its "own" doc, so it's safe to get all
// the remote documents in advance in a single call.
promises.push(populateDocumentChangeBuffer(txn, documentBuffer, remoteEvent.documentUpdates).next(result => {
changedDocs = result.changedDocuments;
existenceChangedKeys = result.existenceChangedKeys;
}));
// HACK: The only reason we allow a null snapshot version is so that we
// can synthesize remote events when we get permission denied errors while
// trying to resolve the state of a locally cached document that is in
// limbo.
if (!remoteVersion.isEqual(SnapshotVersion.min())) {
const updateRemoteVersion = localStoreImpl.targetCache
.getLastRemoteSnapshotVersion(txn)
.next(lastRemoteSnapshotVersion => {
return localStoreImpl.targetCache.setTargetsMetadata(txn, txn.currentSequenceNumber, remoteVersion);
});
promises.push(updateRemoteVersion);
}
return PersistencePromise.waitFor(promises)
.next(() => documentBuffer.apply(txn))
.next(() => localStoreImpl.localDocuments.getLocalViewOfDocuments(txn, changedDocs, existenceChangedKeys))
.next(() => changedDocs);
})
.then(changedDocs => {
localStoreImpl.targetDataByTarget = newTargetDataByTargetMap;
return changedDocs;
});
}
/**
* Populates the document change buffer with documents from the backend or a
* bundle. Returns the document changes resulting from applying those
* documents, and also a set of documents whose existence state changed as a
* result.
*
* @param txn - Transaction to use to read existing documents from storage.
* @param documentBuffer - Document buffer to collect the resulting changes to
* be applied to storage.
* @param documents - Documents to be applied.
*/
function populateDocumentChangeBuffer(txn, documentBuffer, documents) {
let updatedKeys = documentKeySet();
let existenceChangedKeys = documentKeySet();
documents.forEach(k => (updatedKeys = updatedKeys.add(k)));
return documentBuffer.getEntries(txn, updatedKeys).next(existingDocs => {
let changedDocuments = mutableDocumentMap();
documents.forEach((key, doc) => {
const existingDoc = existingDocs.get(key);
// Check to see if there is an existence state change for this document.
if (doc.isFoundDocument() !== existingDoc.isFoundDocument()) {
existenceChangedKeys = existenceChangedKeys.add(key);
}
// Note: The order of the steps below is important, since we want
// to ensure that rejected limbo resolutions (which fabricate
// NoDocuments with SnapshotVersion.min()) never add documents to
// cache.
if (doc.isNoDocument() && doc.version.isEqual(SnapshotVersion.min())) {
// NoDocuments with SnapshotVersion.min() are used in manufactured
// events. We remove these documents from cache since we lost
// access.
documentBuffer.removeEntry(key, doc.readTime);
changedDocuments = changedDocuments.insert(key, doc);
}
else if (!existingDoc.isValidDocument() ||
doc.version.compareTo(existingDoc.version) > 0 ||
(doc.version.compareTo(existingDoc.version) === 0 &&
existingDoc.hasPendingWrites)) {
documentBuffer.addEntry(doc);
changedDocuments = changedDocuments.insert(key, doc);
}
else {
logDebug(LOG_TAG$b, 'Ignoring outdated watch update for ', key, '. Current version:', existingDoc.version, ' Watch version:', doc.version);
}
});
return { changedDocuments, existenceChangedKeys };
});
}
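// Worked example (illustrative): given a cached document at version 4,
// - an incoming found document at version 5 (or at version 4 while the
//   cached copy has pending writes) is added to the buffer and reported as
//   changed;
// - an incoming update at version 3 is ignored as an outdated watch update;
// - an incoming NoDocument at SnapshotVersion.min() removes the entry, since
//   such documents only occur in manufactured events.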
/**
* Returns true if the newTargetData should be persisted during an update of
* an active target. TargetData should always be persisted when a target is
* being released; callers on that path should not use this function.
*
* While the target is active, TargetData updates can be omitted when nothing
* about the target has changed except metadata like the resume token or
* snapshot version. Occasionally it's worth the extra write to prevent these
* values from getting too stale after a crash, but this doesn't have to be
* too frequent.
*/
function shouldPersistTargetData(oldTargetData, newTargetData, change) {
// Always persist target data if we don't already have a resume token.
if (oldTargetData.resumeToken.approximateByteSize() === 0) {
return true;
}
// Don't allow resume token changes to be buffered indefinitely. This
// allows us to be reasonably up-to-date after a crash and avoids needing
// to loop over all active queries on shutdown. Especially in the browser
// we may not get time to do anything interesting while the current tab is
// closing.
const timeDelta = newTargetData.snapshotVersion.toMicroseconds() -
oldTargetData.snapshotVersion.toMicroseconds();
if (timeDelta >= RESUME_TOKEN_MAX_AGE_MICROS) {
return true;
}
// Otherwise if the only thing that has changed about a target is its resume
// token it's not worth persisting. Note that the RemoteStore keeps an
// in-memory view of the currently active targets which includes the current
// resume token, so stream failure or user changes will still use an
// up-to-date resume token regardless of what we do here.
const changes = change.addedDocuments.size +
change.modifiedDocuments.size +
change.removedDocuments.size;
return changes > 0;
}
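// Worked example (illustrative): with RESUME_TOKEN_MAX_AGE_MICROS at five
// minutes, a resume-token-only update arriving two minutes after the last
// persisted snapshot (timeDelta = 120e6) returns false and stays buffered,
// while the same update six minutes later (timeDelta = 360e6) returns true
// and is written out.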
/**
* Notifies local store of the changed views to locally pin documents.
*/
async function localStoreNotifyLocalViewChanges(localStore, viewChanges) {
const localStoreImpl = debugCast(localStore);
try {
await localStoreImpl.persistence.runTransaction('notifyLocalViewChanges', 'readwrite', txn => {
return PersistencePromise.forEach(viewChanges, (viewChange) => {
return PersistencePromise.forEach(viewChange.addedKeys, (key) => localStoreImpl.persistence.referenceDelegate.addReference(txn, viewChange.targetId, key)).next(() => PersistencePromise.forEach(viewChange.removedKeys, (key) => localStoreImpl.persistence.referenceDelegate.removeReference(txn, viewChange.targetId, key)));
});
});
}
catch (e) {
if (isIndexedDbTransactionError(e)) {
// If `notifyLocalViewChanges` fails, we did not advance the sequence
// number for the documents that were included in this transaction.
// This might trigger them to be deleted earlier than they otherwise
// would have, but it should not invalidate the integrity of the data.
logDebug(LOG_TAG$b, 'Failed to update sequence numbers: ' + e);
}
else {
throw e;
}
}
for (const viewChange of viewChanges) {
const targetId = viewChange.targetId;
if (!viewChange.fromCache) {
const targetData = localStoreImpl.targetDataByTarget.get(targetId);
// Advance the last limbo free snapshot version
const lastLimboFreeSnapshotVersion = targetData.snapshotVersion;
const updatedTargetData = targetData.withLastLimboFreeSnapshotVersion(lastLimboFreeSnapshotVersion);
localStoreImpl.targetDataByTarget =
localStoreImpl.targetDataByTarget.insert(targetId, updatedTargetData);
}
}
}
/**
* Gets the mutation batch after the passed-in batchId from the mutation
* queue, or null if the queue is empty.
* @param afterBatchId - If provided, the batch to search after.
* @returns The next mutation batch or null if there wasn't one.
*/
function localStoreGetNextMutationBatch(localStore, afterBatchId) {
const localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Get next mutation batch', 'readonly', txn => {
if (afterBatchId === undefined) {
afterBatchId = BATCHID_UNKNOWN;
}
return localStoreImpl.mutationQueue.getNextMutationBatchAfterBatchId(txn, afterBatchId);
});
}
/**
* Reads the current value of a Document with a given key or null if not
* found - used for testing.
*/
function localStoreReadDocument(localStore, key) {
const localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('read document', 'readonly', txn => localStoreImpl.localDocuments.getDocument(txn, key));
}
/**
* Assigns the given target an internal ID so that its results can be pinned so
* they don't get GC'd. A target must be allocated in the local store before
* the store can be used to manage its view.
*
* Allocating an already allocated `Target` will return the existing `TargetData`
* for that `Target`.
*/
function localStoreAllocateTarget(localStore, target) {
const localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence
.runTransaction('Allocate target', 'readwrite', txn => {
let targetData;
return localStoreImpl.targetCache
.getTargetData(txn, target)
.next((cached) => {
if (cached) {
// This target has been listened to previously, so reuse the
// previous targetID.
// TODO(mcg): freshen last accessed date?
targetData = cached;
return PersistencePromise.resolve(targetData);
}
else {
return localStoreImpl.targetCache
.allocateTargetId(txn)
.next(targetId => {
targetData = new TargetData(target, targetId, 0 /* TargetPurpose.Listen */, txn.currentSequenceNumber);
return localStoreImpl.targetCache
.addTargetData(txn, targetData)
.next(() => targetData);
});
}
});
})
.then(targetData => {
// If Multi-Tab is enabled, the existing target data may be newer than
// the in-memory data
const cachedTargetData = localStoreImpl.targetDataByTarget.get(targetData.targetId);
if (cachedTargetData === null ||
targetData.snapshotVersion.compareTo(cachedTargetData.snapshotVersion) >
0) {
localStoreImpl.targetDataByTarget =
localStoreImpl.targetDataByTarget.insert(targetData.targetId, targetData);
localStoreImpl.targetIdByTarget.set(target, targetData.targetId);
}
return targetData;
});
}
/**
* Returns the TargetData as seen by the LocalStore, including updates that may
* have not yet been persisted to the TargetCache.
*/
// Visible for testing.
function localStoreGetTargetData(localStore, transaction, target) {
const localStoreImpl = debugCast(localStore);
const targetId = localStoreImpl.targetIdByTarget.get(target);
if (targetId !== undefined) {
return PersistencePromise.resolve(localStoreImpl.targetDataByTarget.get(targetId));
}
else {
return localStoreImpl.targetCache.getTargetData(transaction, target);
}
}
/**
* Unpins all the documents associated with the given target. If
* `keepPersistedTargetData` is set to false and eager GC is enabled, the method
* directly removes the associated target data from the target cache.
*
* Releasing a non-existing `Target` is a no-op.
*/
// PORTING NOTE: `keepPersistedTargetData` is multi-tab only.
async function localStoreReleaseTarget(localStore, targetId, keepPersistedTargetData) {
const localStoreImpl = debugCast(localStore);
const targetData = localStoreImpl.targetDataByTarget.get(targetId);
const mode = keepPersistedTargetData ? 'readwrite' : 'readwrite-primary';
try {
if (!keepPersistedTargetData) {
await localStoreImpl.persistence.runTransaction('Release target', mode, txn => {
return localStoreImpl.persistence.referenceDelegate.removeTarget(txn, targetData);
});
}
}
catch (e) {
if (isIndexedDbTransactionError(e)) {
// All `releaseTarget` does is record the final metadata state for the
// target, but we've been recording this periodically during target
// activity. If we lose this write this could cause a very slight
// difference in the order of target deletion during GC, but we
// don't define exact LRU semantics so this is acceptable.
logDebug(LOG_TAG$b, `Failed to update sequence numbers for target ${targetId}: ${e}`);
}
else {
throw e;
}
}
localStoreImpl.targetDataByTarget =
localStoreImpl.targetDataByTarget.remove(targetId);
localStoreImpl.targetIdByTarget.delete(targetData.target);
}
/**
* Runs the specified query against the local store and returns the results,
* potentially taking advantage of query data from previous executions (such
* as the set of remote keys).
*
* @param usePreviousResults - Whether results from previous executions can
* be used to optimize this query execution.
*/
function localStoreExecuteQuery(localStore, query, usePreviousResults) {
const localStoreImpl = debugCast(localStore);
let lastLimboFreeSnapshotVersion = SnapshotVersion.min();
let remoteKeys = documentKeySet();
return localStoreImpl.persistence.runTransaction('Execute query', 'readonly', txn => {
return localStoreGetTargetData(localStoreImpl, txn, queryToTarget(query))
.next(targetData => {
if (targetData) {
lastLimboFreeSnapshotVersion =
targetData.lastLimboFreeSnapshotVersion;
return localStoreImpl.targetCache
.getMatchingKeysForTargetId(txn, targetData.targetId)
.next(result => {
remoteKeys = result;
});
}
})
.next(() => localStoreImpl.queryEngine.getDocumentsMatchingQuery(txn, query, usePreviousResults
? lastLimboFreeSnapshotVersion
: SnapshotVersion.min(), usePreviousResults ? remoteKeys : documentKeySet()))
.next(documents => {
setMaxReadTime(localStoreImpl, queryCollectionGroup(query), documents);
return { documents, remoteKeys };
});
});
}
function applyWriteToRemoteDocuments(localStoreImpl, txn, batchResult, documentBuffer) {
const batch = batchResult.batch;
const docKeys = batch.keys();
let promiseChain = PersistencePromise.resolve();
docKeys.forEach(docKey => {
promiseChain = promiseChain
.next(() => documentBuffer.getEntry(txn, docKey))
.next(doc => {
const ackVersion = batchResult.docVersions.get(docKey);
hardAssert(ackVersion !== null);
if (doc.version.compareTo(ackVersion) < 0) {
batch.applyToRemoteDocument(doc, batchResult);
if (doc.isValidDocument()) {
// We use the commitVersion as the readTime rather than the
// document's updateTime since the updateTime is not advanced
// for updates that do not modify the underlying document.
doc.setReadTime(batchResult.commitVersion);
documentBuffer.addEntry(doc);
}
}
});
});
return promiseChain.next(() => localStoreImpl.mutationQueue.removeMutationBatch(txn, batch));
}
/** Returns the local view of the documents affected by a mutation batch. */
// PORTING NOTE: Multi-Tab only.
function localStoreLookupMutationDocuments(localStore, batchId) {
const localStoreImpl = debugCast(localStore);
const mutationQueueImpl = debugCast(localStoreImpl.mutationQueue);
return localStoreImpl.persistence.runTransaction('Lookup mutation documents', 'readonly', txn => {
return mutationQueueImpl.lookupMutationKeys(txn, batchId).next(keys => {
if (keys) {
return localStoreImpl.localDocuments.getDocuments(txn, keys);
}
else {
return PersistencePromise.resolve(null);
}
});
});
}
// PORTING NOTE: Multi-Tab only.
function localStoreRemoveCachedMutationBatchMetadata(localStore, batchId) {
const mutationQueueImpl = debugCast(debugCast(localStore, LocalStoreImpl).mutationQueue);
mutationQueueImpl.removeCachedMutationKeys(batchId);
}
// PORTING NOTE: Multi-Tab only.
function localStoreGetActiveClients(localStore) {
const persistenceImpl = debugCast(debugCast(localStore, LocalStoreImpl).persistence);
return persistenceImpl.getActiveClients();
}
// PORTING NOTE: Multi-Tab only.
function localStoreGetCachedTarget(localStore, targetId) {
const localStoreImpl = debugCast(localStore);
const targetCacheImpl = debugCast(localStoreImpl.targetCache);
const cachedTargetData = localStoreImpl.targetDataByTarget.get(targetId);
if (cachedTargetData) {
return Promise.resolve(cachedTargetData.target);
}
else {
return localStoreImpl.persistence.runTransaction('Get target data', 'readonly', txn => {
return targetCacheImpl
.getTargetDataForTarget(txn, targetId)
.next(targetData => (targetData ? targetData.target : null));
});
}
}
/**
* Returns the set of documents that have been updated since the last call.
* If this is the first call, returns the set of changes since client
* initialization. Further invocations will return documents that have changed
* since the prior call.
*/
// PORTING NOTE: Multi-Tab only.
function localStoreGetNewDocumentChanges(localStore, collectionGroup) {
const localStoreImpl = debugCast(localStore);
// Get the current maximum read time for the collection. This should always
// exist, but to reduce the chance of regressions we default to
// SnapshotVersion.min().
// TODO(indexing): Consider removing the default value.
const readTime = localStoreImpl.collectionGroupReadTime.get(collectionGroup) ||
SnapshotVersion.min();
return localStoreImpl.persistence
.runTransaction('Get new document changes', 'readonly', txn => localStoreImpl.remoteDocuments.getAllFromCollectionGroup(txn, collectionGroup, newIndexOffsetSuccessorFromReadTime(readTime, INITIAL_LARGEST_BATCH_ID),
/* limit= */ Number.MAX_SAFE_INTEGER))
.then(changedDocs => {
setMaxReadTime(localStoreImpl, collectionGroup, changedDocs);
return changedDocs;
});
}
/** Sets the collection group's maximum read time from the given documents. */
// PORTING NOTE: Multi-Tab only.
function setMaxReadTime(localStoreImpl, collectionGroup, changedDocs) {
let readTime = localStoreImpl.collectionGroupReadTime.get(collectionGroup) ||
SnapshotVersion.min();
changedDocs.forEach((_, doc) => {
if (doc.readTime.compareTo(readTime) > 0) {
readTime = doc.readTime;
}
});
localStoreImpl.collectionGroupReadTime.set(collectionGroup, readTime);
}
/**
* Creates a new target using the given bundle name, which will be used to
* hold the keys of all documents from the bundle in query-document mappings.
* This ensures that the loaded documents do not get garbage collected
* right away.
*/
function umbrellaTarget(bundleName) {
// It is OK that the path used for the query is not a valid document path,
// because it will never be read or queried.
return queryToTarget(newQueryForPath(ResourcePath.fromString(`__bundle__/docs/${bundleName}`)));
}
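// For illustration (editor's note): umbrellaTarget('my-bundle') yields a
// target for the synthetic path '__bundle__/docs/my-bundle'. The path is
// deliberately not a real collection; the target exists only so that the
// bundled documents stay referenced and survive garbage collection.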
/**
* Applies the documents from a bundle to the "ground-state" (remote)
* documents.
*
* LocalDocuments are re-calculated if there are remaining mutations in the
* queue.
*/
async function localStoreApplyBundledDocuments(localStore, bundleConverter, documents, bundleName) {
const localStoreImpl = debugCast(localStore);
let documentKeys = documentKeySet();
let documentMap = mutableDocumentMap();
for (const bundleDoc of documents) {
const documentKey = bundleConverter.toDocumentKey(bundleDoc.metadata.name);
if (bundleDoc.document) {
documentKeys = documentKeys.add(documentKey);
}
const doc = bundleConverter.toMutableDocument(bundleDoc);
doc.setReadTime(bundleConverter.toSnapshotVersion(bundleDoc.metadata.readTime));
documentMap = documentMap.insert(documentKey, doc);
}
const documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
});
// Allocates a target to hold all document keys from the bundle, such that
// they will not get garbage collected right away.
const umbrellaTargetData = await localStoreAllocateTarget(localStoreImpl, umbrellaTarget(bundleName));
return localStoreImpl.persistence.runTransaction('Apply bundle documents', 'readwrite', txn => {
return populateDocumentChangeBuffer(txn, documentBuffer, documentMap)
.next(documentChangeResult => {
documentBuffer.apply(txn);
return documentChangeResult;
})
.next(documentChangeResult => {
return localStoreImpl.targetCache
.removeMatchingKeysForTargetId(txn, umbrellaTargetData.targetId)
.next(() => localStoreImpl.targetCache.addMatchingKeys(txn, documentKeys, umbrellaTargetData.targetId))
.next(() => localStoreImpl.localDocuments.getLocalViewOfDocuments(txn, documentChangeResult.changedDocuments, documentChangeResult.existenceChangedKeys))
.next(() => documentChangeResult.changedDocuments);
});
});
}
/**
* Returns a promise that resolves to true if the given bundle has already
* been loaded and its create time is newer than or equal to that of the
* bundle currently being loaded.
*/
function localStoreHasNewerBundle(localStore, bundleMetadata) {
const localStoreImpl = debugCast(localStore);
const currentReadTime = fromVersion(bundleMetadata.createTime);
return localStoreImpl.persistence
.runTransaction('hasNewerBundle', 'readonly', transaction => {
return localStoreImpl.bundleCache.getBundleMetadata(transaction, bundleMetadata.id);
})
.then(cached => {
return !!cached && cached.createTime.compareTo(currentReadTime) >= 0;
});
}
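// Example (editor's sketch, not part of this build): a hypothetical bundle
// loader can use this check to skip re-applying a bundle that is already
// cached. `bundleMetadata`, `converter` and `docs` are assumed to come from
// the surrounding bundle-reading code.
//
//   if (await localStoreHasNewerBundle(localStore, bundleMetadata)) {
//     return; // The cached bundle is at least as new; nothing to apply.
//   }
//   await localStoreApplyBundledDocuments(localStore, converter, docs, bundleMetadata.id);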
/**
* Saves the given `BundleMetadata` to local persistence.
*/
function localStoreSaveBundle(localStore, bundleMetadata) {
const localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Save bundle', 'readwrite', transaction => {
return localStoreImpl.bundleCache.saveBundleMetadata(transaction, bundleMetadata);
});
}
/**
* Returns a promise of a `NamedQuery` associated with the given query name.
* The promise resolves to undefined if no persisted data can be found.
*/
function localStoreGetNamedQuery(localStore, queryName) {
const localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Get named query', 'readonly', transaction => localStoreImpl.bundleCache.getNamedQuery(transaction, queryName));
}
/**
* Saves the given `NamedQuery` to local persistence.
*/
async function localStoreSaveNamedQuery(localStore, query, documents = documentKeySet()) {
// Allocate a target for the named query so that it can be resumed from its
// associated read time if users later listen to it.
// NOTE: This also means that if no corresponding target exists, the new
// target will remain active and will not be garbage collected unless users
// explicitly stop listening to the query.
const allocated = await localStoreAllocateTarget(localStore, queryToTarget(fromBundledQuery(query.bundledQuery)));
const localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Save named query', 'readwrite', transaction => {
const readTime = fromVersion(query.readTime);
// Simply save the query itself if it is older than what the SDK already
// has.
if (allocated.snapshotVersion.compareTo(readTime) >= 0) {
return localStoreImpl.bundleCache.saveNamedQuery(transaction, query);
}
// Update existing target data because the query from the bundle is newer.
const newTargetData = allocated.withResumeToken(ByteString.EMPTY_BYTE_STRING, readTime);
localStoreImpl.targetDataByTarget =
localStoreImpl.targetDataByTarget.insert(newTargetData.targetId, newTargetData);
return localStoreImpl.targetCache
.updateTargetData(transaction, newTargetData)
.next(() => localStoreImpl.targetCache.removeMatchingKeysForTargetId(transaction, allocated.targetId))
.next(() => localStoreImpl.targetCache.addMatchingKeys(transaction, documents, allocated.targetId))
.next(() => localStoreImpl.bundleCache.saveNamedQuery(transaction, query));
});
}
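// Example (editor's sketch, not part of this build): saving a named query
// read from a bundle together with the keys of the documents that the bundle
// contained, so the query can later be served from cache. `namedQuery` and
// `bundledDocumentKeys` are assumed to come from the bundle reader.
//
//   await localStoreSaveNamedQuery(localStore, namedQuery, bundledDocumentKeys);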
async function localStoreConfigureFieldIndexes(localStore, newFieldIndexes) {
const localStoreImpl = debugCast(localStore);
const indexManager = localStoreImpl.indexManager;
const promises = [];
return localStoreImpl.persistence.runTransaction('Configure indexes', 'readwrite', transaction => indexManager
.getFieldIndexes(transaction)
.next(oldFieldIndexes => diffArrays(oldFieldIndexes, newFieldIndexes, fieldIndexSemanticComparator, fieldIndex => {
promises.push(indexManager.addFieldIndex(transaction, fieldIndex));
}, fieldIndex => {
promises.push(indexManager.deleteFieldIndex(transaction, fieldIndex));
}))
.next(() => PersistencePromise.waitFor(promises)));
}
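// Editor's note: as called above, diffArrays compares the persisted indexes
// with the requested configuration using fieldIndexSemanticComparator; the
// first callback fires for entries present only in `newFieldIndexes` (added)
// and the second for entries present only in `oldFieldIndexes` (deleted). A
// hypothetical caller might apply a parsed configuration like this:
//
//   await localStoreConfigureFieldIndexes(localStore, parsedFieldIndexes);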
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The Firestore query engine.
*
* Firestore queries can be executed in three modes. The Query Engine determines
* what mode to use based on what data is persisted. The mode only determines
* the runtime complexity of the query - the result set is equivalent across all
* implementations.
*
* The query engine will use index-based execution if a user has configured
* any index that can be used to execute the query (via `setIndexConfiguration()`).
* Otherwise, the engine will try to optimize the query by re-using a previously
* persisted query result. If that is not possible, the query will be executed
* via a full collection scan.
*
* Index-based execution is the default when available. The query engine
* supports partially indexed execution and merges the result from the index
* lookup with documents that have not yet been indexed. The index evaluation
* matches the backend's format and as such, the SDK can use indexing for all
* queries that the backend supports.
*
* If no index exists, the query engine tries to take advantage of the target
* document mapping in the TargetCache. These mappings exist for all queries
* that have been synced with the backend at least once and allow the query
* engine to only read documents that previously matched a query plus any
* documents that were edited after the query was last listened to.
*
* There are some cases when this optimization is not guaranteed to produce
* the same results as full collection scans. In these cases, query
* processing falls back to full scans. These cases are:
*
* - Limit queries where a document that matched the query previously no longer
* matches the query.
*
* - Limit queries where a document edit may cause the document to sort below
* another document that is in the local cache.
*
* - Queries that have never been CURRENT or free of limbo documents.
*/
class QueryEngine {
constructor() {
this.initialized = false;
}
/** Sets the document view to query against. */
initialize(localDocuments, indexManager) {
this.localDocumentsView = localDocuments;
this.indexManager = indexManager;
this.initialized = true;
}
/** Returns all local documents matching the specified query. */
getDocumentsMatchingQuery(transaction, query, lastLimboFreeSnapshotVersion, remoteKeys) {
return this.performQueryUsingIndex(transaction, query)
.next(result => result
? result
: this.performQueryUsingRemoteKeys(transaction, query, remoteKeys, lastLimboFreeSnapshotVersion))
.next(result => result ? result : this.executeFullCollectionScan(transaction, query));
}
/**
* Performs an indexed query that evaluates the query based on a collection's
* persisted index values. Returns `null` if an index is not available.
*/
performQueryUsingIndex(transaction, query) {
if (queryMatchesAllDocuments(query)) {
// Queries that match all documents don't benefit from using
// key-based lookups. It is more efficient to scan all documents in a
// collection, rather than to perform individual lookups.
return PersistencePromise.resolve(null);
}
let target = queryToTarget(query);
return this.indexManager
.getIndexType(transaction, target)
.next(indexType => {
if (indexType === 0 /* IndexType.NONE */) {
// The target cannot be served from any index.
return null;
}
if (query.limit !== null && indexType === 1 /* IndexType.PARTIAL */) {
// We cannot apply a limit for targets that are served using a partial
// index. If a partial index will be used to serve the target, the
// query may return a superset of documents that match the target
// (e.g. if the index doesn't include all the target's filters), or
// may return the correct set of documents in the wrong order (e.g. if
// the index doesn't include a segment for one of the orderBys).
// Therefore, a limit should not be applied in such cases.
query = queryWithLimit(query, null, "F" /* LimitType.First */);
target = queryToTarget(query);
}
return this.indexManager
.getDocumentsMatchingTarget(transaction, target)
.next(keys => {
const sortedKeys = documentKeySet(...keys);
return this.localDocumentsView
.getDocuments(transaction, sortedKeys)
.next(indexedDocuments => {
return this.indexManager
.getMinOffset(transaction, target)
.next(offset => {
const previousResults = this.applyQuery(query, indexedDocuments);
if (this.needsRefill(query, previousResults, sortedKeys, offset.readTime)) {
// A limit query whose boundaries change due to local
// edits can be re-run against the cache by excluding the
// limit. This ensures that all documents that match the
// query's filters are included in the result set. The SDK
// can then apply the limit once all local edits are
// incorporated.
return this.performQueryUsingIndex(transaction, queryWithLimit(query, null, "F" /* LimitType.First */));
}
return this.appendRemainingResults(transaction, previousResults, query, offset);
});
});
});
});
}
/**
* Performs a query based on the target's persisted query mapping. Returns
* `null` if the mapping is not available or cannot be used.
*/
performQueryUsingRemoteKeys(transaction, query, remoteKeys, lastLimboFreeSnapshotVersion) {
if (queryMatchesAllDocuments(query)) {
// Queries that match all documents don't benefit from using
// key-based lookups. It is more efficient to scan all documents in a
// collection, rather than to perform individual lookups.
return this.executeFullCollectionScan(transaction, query);
}
// Queries that have never seen a limbo-free snapshot should also be run as
// a full collection scan.
if (lastLimboFreeSnapshotVersion.isEqual(SnapshotVersion.min())) {
return this.executeFullCollectionScan(transaction, query);
}
return this.localDocumentsView.getDocuments(transaction, remoteKeys).next(documents => {
const previousResults = this.applyQuery(query, documents);
if (this.needsRefill(query, previousResults, remoteKeys, lastLimboFreeSnapshotVersion)) {
return this.executeFullCollectionScan(transaction, query);
}
if (getLogLevel() <= logger.LogLevel.DEBUG) {
logDebug('QueryEngine', 'Re-using previous result from %s to execute query: %s', lastLimboFreeSnapshotVersion.toString(), stringifyQuery(query));
}
// Retrieve all results for documents that were updated since the last
// limbo-document free remote snapshot.
return this.appendRemainingResults(transaction, previousResults, query, newIndexOffsetSuccessorFromReadTime(lastLimboFreeSnapshotVersion, INITIAL_LARGEST_BATCH_ID));
});
}
/** Applies the query filter and sorting to the provided documents. */
applyQuery(query, documents) {
// Sort the documents and re-apply the query filter since previously
// matching documents do not necessarily still match the query.
let queryResults = new SortedSet(newQueryComparator(query));
documents.forEach((_, maybeDoc) => {
if (queryMatches(query, maybeDoc)) {
queryResults = queryResults.add(maybeDoc);
}
});
return queryResults;
}
/**
* Determines if a limit query needs to be refilled from cache, making it
* ineligible for index-free execution.
*
* @param query - The query.
* @param sortedPreviousResults - The documents that matched the query when it
* was last synchronized, sorted by the query's comparator.
* @param remoteKeys - The document keys that matched the query at the last
* snapshot.
* @param limboFreeSnapshotVersion - The version of the snapshot when the
* query was last synchronized.
*/
needsRefill(query, sortedPreviousResults, remoteKeys, limboFreeSnapshotVersion) {
if (query.limit === null) {
// Queries without limits do not need to be refilled.
return false;
}
if (remoteKeys.size !== sortedPreviousResults.size) {
// The query needs to be refilled if a previously matching document no
// longer matches.
return true;
}
// Limit queries are not eligible for index-free query execution if there is
// a potential that an older document from cache now sorts before a document
// that was previously part of the limit. This, however, can only happen if
// the document at the edge of the limit drops out of the limit.
// If a document that is not the limit boundary sorts differently,
// the boundary of the limit itself did not change and documents from cache
// will continue to be "rejected" by this boundary. Therefore, we can ignore
// any modifications that don't affect the last document.
const docAtLimitEdge = query.limitType === "F" /* LimitType.First */
? sortedPreviousResults.last()
: sortedPreviousResults.first();
if (!docAtLimitEdge) {
// We don't need to refill the query if there were already no documents.
return false;
}
return (docAtLimitEdge.hasPendingWrites ||
docAtLimitEdge.version.compareTo(limboFreeSnapshotVersion) > 0);
}
executeFullCollectionScan(transaction, query) {
if (getLogLevel() <= logger.LogLevel.DEBUG) {
logDebug('QueryEngine', 'Using full collection scan to execute query:', stringifyQuery(query));
}
return this.localDocumentsView.getDocumentsMatchingQuery(transaction, query, IndexOffset.min());
}
/**
* Combines the results from an indexed execution with the remaining documents
* that have not yet been indexed.
*/
appendRemainingResults(transaction, indexedResults, query, offset) {
// Retrieve all results for documents that were updated since the offset.
return this.localDocumentsView
.getDocumentsMatchingQuery(transaction, query, offset)
.next(remainingResults => {
// Merge with existing results
indexedResults.forEach(d => {
remainingResults = remainingResults.insert(d.key, d);
});
return remainingResults;
});
}
}
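// Example (editor's sketch, not part of this build): how the three execution
// modes above chain together. `txn`, `query`, `lastLimboFreeSnapshotVersion`
// and `remoteKeys` are assumed to be supplied by LocalStore, as in
// localStoreExecuteQuery above.
//
//   const engine = new QueryEngine();
//   engine.initialize(localDocumentsView, indexManager);
//   engine
//     .getDocumentsMatchingQuery(txn, query, lastLimboFreeSnapshotVersion, remoteKeys)
//     .next(results => {
//       // `results` came from the index, from the previous-result
//       // optimization, or from a full collection scan, whichever applied
//       // first.
//     });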
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The format of the LocalStorage key that stores the client state is:
// firestore_clients_<persistence_key>_<client_id>
const CLIENT_STATE_KEY_PREFIX = 'firestore_clients';
/** Assembles the key for a client state in WebStorage */
function createWebStorageClientStateKey(persistenceKey, clientId) {
return `${CLIENT_STATE_KEY_PREFIX}_${persistenceKey}_${clientId}`;
}
// The format of the WebStorage key that stores the mutation state is:
// firestore_mutations_<persistence_key>_<batch_id>
// (for unauthenticated users)
// or: firestore_mutations_<persistence_key>_<batch_id>_<user_uid>
//
// 'user_uid' is last to avoid needing to escape '_' characters that it might
// contain.
const MUTATION_BATCH_KEY_PREFIX = 'firestore_mutations';
/** Assembles the key for a mutation batch in WebStorage */
function createWebStorageMutationBatchKey(persistenceKey, user, batchId) {
let mutationKey = `${MUTATION_BATCH_KEY_PREFIX}_${persistenceKey}_${batchId}`;
if (user.isAuthenticated()) {
mutationKey += `_${user.uid}`;
}
return mutationKey;
}
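// For example, createWebStorageMutationBatchKey('my-db', user, 12) yields
// 'firestore_mutations_my-db_12' for an unauthenticated user, or
// 'firestore_mutations_my-db_12_<user_uid>' when user.isAuthenticated().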
// The format of the WebStorage key that stores a query target's metadata is:
// firestore_targets_<persistence_key>_<target_id>
const QUERY_TARGET_KEY_PREFIX = 'firestore_targets';
/** Assembles the key for a query state in WebStorage */
function createWebStorageQueryTargetMetadataKey(persistenceKey, targetId) {
return `${QUERY_TARGET_KEY_PREFIX}_${persistenceKey}_${targetId}`;
}
// The WebStorage prefix that stores the primary tab's online state. The
// format of the key is:
// firestore_online_state_<persistence_key>
const ONLINE_STATE_KEY_PREFIX = 'firestore_online_state';
/** Assembles the key for the online state of the primary tab. */
function createWebStorageOnlineStateKey(persistenceKey) {
return `${ONLINE_STATE_KEY_PREFIX}_${persistenceKey}`;
}
// The WebStorage prefix that acts as an event to indicate that the remote
// documents might have changed because a secondary tab loaded a bundle.
// The format of the key is:
// firestore_bundle_loaded_v2_<persistence_key>
// The version ending with "v2" stores the list of modified collection groups.
const BUNDLE_LOADED_KEY_PREFIX = 'firestore_bundle_loaded_v2';
function createBundleLoadedKey(persistenceKey) {
return `${BUNDLE_LOADED_KEY_PREFIX}_${persistenceKey}`;
}
// The WebStorage prefix for the key that stores the last allocated sequence
// number. The key looks like 'firestore_sequence_number_<persistence_key>'.
const SEQUENCE_NUMBER_KEY_PREFIX = 'firestore_sequence_number';
/** Assembles the key for the current sequence number. */
function createWebStorageSequenceNumberKey(persistenceKey) {
return `${SEQUENCE_NUMBER_KEY_PREFIX}_${persistenceKey}`;
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$a = 'SharedClientState';
/**
* Holds the state of a mutation batch, including its user ID, batch ID and
* whether the batch is 'pending', 'acknowledged' or 'rejected'.
*/
// Visible for testing
class MutationMetadata {
constructor(user, batchId, state, error) {
this.user = user;
this.batchId = batchId;
this.state = state;
this.error = error;
}
/**
* Parses a MutationMetadata from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/
static fromWebStorageEntry(user, batchId, value) {
const mutationBatch = JSON.parse(value);
let validData = typeof mutationBatch === 'object' &&
['pending', 'acknowledged', 'rejected'].indexOf(mutationBatch.state) !==
-1 &&
(mutationBatch.error === undefined ||
typeof mutationBatch.error === 'object');
let firestoreError = undefined;
if (validData && mutationBatch.error) {
validData =
typeof mutationBatch.error.message === 'string' &&
typeof mutationBatch.error.code === 'string';
if (validData) {
firestoreError = new FirestoreError(mutationBatch.error.code, mutationBatch.error.message);
}
}
if (validData) {
return new MutationMetadata(user, batchId, mutationBatch.state, firestoreError);
}
else {
logError(LOG_TAG$a, `Failed to parse mutation state for ID '${batchId}': ${value}`);
return null;
}
}
toWebStorageJSON() {
const batchMetadata = {
state: this.state,
updateTimeMs: Date.now() // Modify the existing value to trigger update.
};
if (this.error) {
batchMetadata.error = {
code: this.error.code,
message: this.error.message
};
}
return JSON.stringify(batchMetadata);
}
}
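// Editor's note: the WebStorage value produced by toWebStorageJSON() for a
// pending batch looks like
//   {"state":"pending","updateTimeMs":1234567890123}
// and a rejected batch additionally carries
//   "error":{"code":"...","message":"..."}
// which fromWebStorageEntry() parses back into a MutationMetadata.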
/**
* Holds the state of a query target, including its target ID and whether the
* target is 'not-current', 'current' or 'rejected'.
*/
// Visible for testing
class QueryTargetMetadata {
constructor(targetId, state, error) {
this.targetId = targetId;
this.state = state;
this.error = error;
}
/**
* Parses a QueryTargetMetadata from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/
static fromWebStorageEntry(targetId, value) {
const targetState = JSON.parse(value);
let validData = typeof targetState === 'object' &&
['not-current', 'current', 'rejected'].indexOf(targetState.state) !==
-1 &&
(targetState.error === undefined ||
typeof targetState.error === 'object');
let firestoreError = undefined;
if (validData && targetState.error) {
validData =
typeof targetState.error.message === 'string' &&
typeof targetState.error.code === 'string';
if (validData) {
firestoreError = new FirestoreError(targetState.error.code, targetState.error.message);
}
}
if (validData) {
return new QueryTargetMetadata(targetId, targetState.state, firestoreError);
}
else {
logError(LOG_TAG$a, `Failed to parse target state for ID '${targetId}': ${value}`);
return null;
}
}
toWebStorageJSON() {
const targetState = {
state: this.state,
updateTimeMs: Date.now() // Modify the existing value to trigger update.
};
if (this.error) {
targetState.error = {
code: this.error.code,
message: this.error.message
};
}
return JSON.stringify(targetState);
}
}
/**
* This class represents the immutable ClientState for a client read from
* WebStorage, containing the list of active query targets.
*/
class RemoteClientState {
constructor(clientId, activeTargetIds) {
this.clientId = clientId;
this.activeTargetIds = activeTargetIds;
}
/**
* Parses a RemoteClientState from the JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/
static fromWebStorageEntry(clientId, value) {
const clientState = JSON.parse(value);
let validData = typeof clientState === 'object' &&
clientState.activeTargetIds instanceof Array;
let activeTargetIdsSet = targetIdSet();
for (let i = 0; validData && i < clientState.activeTargetIds.length; ++i) {
validData = isSafeInteger(clientState.activeTargetIds[i]);
activeTargetIdsSet = activeTargetIdsSet.add(clientState.activeTargetIds[i]);
}
if (validData) {
return new RemoteClientState(clientId, activeTargetIdsSet);
}
else {
logError(LOG_TAG$a, `Failed to parse client data for instance '${clientId}': ${value}`);
return null;
}
}
}
/**
* This class represents the online state for all clients participating in
* multi-tab. The online state is only written to by the primary client, and
* used in secondary clients to update their query views.
*/
class SharedOnlineState {
constructor(clientId, onlineState) {
this.clientId = clientId;
this.onlineState = onlineState;
}
/**
* Parses a SharedOnlineState from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/
static fromWebStorageEntry(value) {
const onlineState = JSON.parse(value);
const validData = typeof onlineState === 'object' &&
['Unknown', 'Online', 'Offline'].indexOf(onlineState.onlineState) !==
-1 &&
typeof onlineState.clientId === 'string';
if (validData) {
return new SharedOnlineState(onlineState.clientId, onlineState.onlineState);
}
else {
logError(LOG_TAG$a, `Failed to parse online state: ${value}`);
return null;
}
}
}
/**
* Metadata state of the local client. Unlike `RemoteClientState`, this class is
* mutable and keeps track of all pending mutations, which allows us to
* update the range of pending mutation batch IDs as new mutations are added or
* removed.
*
* The data in `LocalClientState` is not read from WebStorage and instead
* updated via its instance methods. The updated state can be serialized via
* `toWebStorageJSON()`.
*/
// Visible for testing.
class LocalClientState {
constructor() {
this.activeTargetIds = targetIdSet();
}
addQueryTarget(targetId) {
this.activeTargetIds = this.activeTargetIds.add(targetId);
}
removeQueryTarget(targetId) {
this.activeTargetIds = this.activeTargetIds.delete(targetId);
}
/**
* Converts this entry into a JSON-encoded format we can use for WebStorage.
* Does not encode `clientId` as it is part of the key in WebStorage.
*/
toWebStorageJSON() {
const data = {
activeTargetIds: this.activeTargetIds.toArray(),
updateTimeMs: Date.now() // Modify the existing value to trigger update.
};
return JSON.stringify(data);
}
}
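// Editor's note: a local client with targets 1 and 2 active serializes to
//   {"activeTargetIds":[1,2],"updateTimeMs":1234567890123}
// The client ID is omitted because it is already part of the WebStorage key.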
/**
* `WebStorageSharedClientState` uses WebStorage (window.localStorage) as the
* backing store for the SharedClientState. It keeps track of all active
* clients and supports modifications of the local client's data.
*/
class WebStorageSharedClientState {
constructor(window, queue, persistenceKey, localClientId, initialUser) {
this.window = window;
this.queue = queue;
this.persistenceKey = persistenceKey;
this.localClientId = localClientId;
this.syncEngine = null;
this.onlineStateHandler = null;
this.sequenceNumberHandler = null;
this.storageListener = this.handleWebStorageEvent.bind(this);
this.activeClients = new SortedMap(primitiveComparator);
this.started = false;
/**
* Captures WebStorage events that occur before `start()` is called. These
* events are replayed once `WebStorageSharedClientState` is started.
*/
this.earlyEvents = [];
// Escape the special characters mentioned here:
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
const escapedPersistenceKey = persistenceKey.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
this.storage = this.window.localStorage;
this.currentUser = initialUser;
this.localClientStorageKey = createWebStorageClientStateKey(this.persistenceKey, this.localClientId);
this.sequenceNumberKey = createWebStorageSequenceNumberKey(this.persistenceKey);
this.activeClients = this.activeClients.insert(this.localClientId, new LocalClientState());
this.clientStateKeyRe = new RegExp(`^${CLIENT_STATE_KEY_PREFIX}_${escapedPersistenceKey}_([^_]*)$`);
this.mutationBatchKeyRe = new RegExp(`^${MUTATION_BATCH_KEY_PREFIX}_${escapedPersistenceKey}_(\\d+)(?:_(.*))?$`);
this.queryTargetKeyRe = new RegExp(`^${QUERY_TARGET_KEY_PREFIX}_${escapedPersistenceKey}_(\\d+)$`);
this.onlineStateKey = createWebStorageOnlineStateKey(this.persistenceKey);
this.bundleLoadedKey = createBundleLoadedKey(this.persistenceKey);
// Rather than adding the storage observer during start(), we add the
// storage observer during initialization. This ensures that we collect
// events before other components populate their initial state (during their
// respective start() calls). Otherwise, we might, for example, miss a
// mutation that is added after LocalStore's start() has processed the
// existing mutations but before we observe WebStorage events.
this.window.addEventListener('storage', this.storageListener);
}
/** Returns 'true' if WebStorage is available in the current environment. */
static isAvailable(window) {
return !!(window && window.localStorage);
}
async start() {
// Retrieve the list of existing clients to backfill the data in
// SharedClientState.
const existingClients = await this.syncEngine.getActiveClients();
for (const clientId of existingClients) {
if (clientId === this.localClientId) {
continue;
}
const storageItem = this.getItem(createWebStorageClientStateKey(this.persistenceKey, clientId));
if (storageItem) {
const clientState = RemoteClientState.fromWebStorageEntry(clientId, storageItem);
if (clientState) {
this.activeClients = this.activeClients.insert(clientState.clientId, clientState);
}
}
}
this.persistClientState();
// Check if there is an existing online state and call the callback handler
// if applicable.
const onlineStateJSON = this.storage.getItem(this.onlineStateKey);
if (onlineStateJSON) {
const onlineState = this.fromWebStorageOnlineState(onlineStateJSON);
if (onlineState) {
this.handleOnlineStateEvent(onlineState);
}
}
for (const event of this.earlyEvents) {
this.handleWebStorageEvent(event);
}
this.earlyEvents = [];
// Register a window unload hook to remove the client metadata entry from
// WebStorage even if `shutdown()` was not called.
this.window.addEventListener('pagehide', () => this.shutdown());
this.started = true;
}
writeSequenceNumber(sequenceNumber) {
this.setItem(this.sequenceNumberKey, JSON.stringify(sequenceNumber));
}
getAllActiveQueryTargets() {
return this.extractActiveQueryTargets(this.activeClients);
}
isActiveQueryTarget(targetId) {
let found = false;
this.activeClients.forEach((key, value) => {
if (value.activeTargetIds.has(targetId)) {
found = true;
}
});
return found;
}
addPendingMutation(batchId) {
this.persistMutationState(batchId, 'pending');
}
updateMutationState(batchId, state, error) {
this.persistMutationState(batchId, state, error);
// Once a final mutation result is observed by other clients, they no longer
// access the mutation's metadata entry. Since WebStorage replays events
// in order, it is safe to delete the entry right after updating it.
this.removeMutationState(batchId);
}
addLocalQueryTarget(targetId) {
let queryState = 'not-current';
// Look up an existing query state if the target ID was already registered
// by another tab.
if (this.isActiveQueryTarget(targetId)) {
const storageItem = this.storage.getItem(createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId));
if (storageItem) {
const metadata = QueryTargetMetadata.fromWebStorageEntry(targetId, storageItem);
if (metadata) {
queryState = metadata.state;
}
}
}
this.localClientState.addQueryTarget(targetId);
this.persistClientState();
return queryState;
}
removeLocalQueryTarget(targetId) {
this.localClientState.removeQueryTarget(targetId);
this.persistClientState();
}
isLocalQueryTarget(targetId) {
return this.localClientState.activeTargetIds.has(targetId);
}
clearQueryState(targetId) {
this.removeItem(createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId));
}
updateQueryState(targetId, state, error) {
this.persistQueryTargetState(targetId, state, error);
}
handleUserChange(user, removedBatchIds, addedBatchIds) {
removedBatchIds.forEach(batchId => {
this.removeMutationState(batchId);
});
this.currentUser = user;
addedBatchIds.forEach(batchId => {
this.addPendingMutation(batchId);
});
}
setOnlineState(onlineState) {
this.persistOnlineState(onlineState);
}
notifyBundleLoaded(collectionGroups) {
this.persistBundleLoadedState(collectionGroups);
}
shutdown() {
if (this.started) {
this.window.removeEventListener('storage', this.storageListener);
this.removeItem(this.localClientStorageKey);
this.started = false;
}
}
getItem(key) {
const value = this.storage.getItem(key);
logDebug(LOG_TAG$a, 'READ', key, value);
return value;
}
setItem(key, value) {
logDebug(LOG_TAG$a, 'SET', key, value);
this.storage.setItem(key, value);
}
removeItem(key) {
logDebug(LOG_TAG$a, 'REMOVE', key);
this.storage.removeItem(key);
}
handleWebStorageEvent(event) {
// Note: The function is typed to take Event to be interface-compatible with
// `Window.addEventListener`.
const storageEvent = event;
if (storageEvent.storageArea === this.storage) {
logDebug(LOG_TAG$a, 'EVENT', storageEvent.key, storageEvent.newValue);
if (storageEvent.key === this.localClientStorageKey) {
logError('Received WebStorage notification for local change. Another client might have ' +
'garbage-collected our state');
return;
}
this.queue.enqueueRetryable(async () => {
if (!this.started) {
this.earlyEvents.push(storageEvent);
return;
}
if (storageEvent.key === null) {
return;
}
if (this.clientStateKeyRe.test(storageEvent.key)) {
if (storageEvent.newValue != null) {
const clientState = this.fromWebStorageClientState(storageEvent.key, storageEvent.newValue);
if (clientState) {
return this.handleClientStateEvent(clientState.clientId, clientState);
}
}
else {
const clientId = this.fromWebStorageClientStateKey(storageEvent.key);
return this.handleClientStateEvent(clientId, null);
}
}
else if (this.mutationBatchKeyRe.test(storageEvent.key)) {
if (storageEvent.newValue !== null) {
const mutationMetadata = this.fromWebStorageMutationMetadata(storageEvent.key, storageEvent.newValue);
if (mutationMetadata) {
return this.handleMutationBatchEvent(mutationMetadata);
}
}
}
else if (this.queryTargetKeyRe.test(storageEvent.key)) {
if (storageEvent.newValue !== null) {
const queryTargetMetadata = this.fromWebStorageQueryTargetMetadata(storageEvent.key, storageEvent.newValue);
if (queryTargetMetadata) {
return this.handleQueryTargetEvent(queryTargetMetadata);
}
}
}
else if (storageEvent.key === this.onlineStateKey) {
if (storageEvent.newValue !== null) {
const onlineState = this.fromWebStorageOnlineState(storageEvent.newValue);
if (onlineState) {
return this.handleOnlineStateEvent(onlineState);
}
}
}
else if (storageEvent.key === this.sequenceNumberKey) {
const sequenceNumber = fromWebStorageSequenceNumber(storageEvent.newValue);
if (sequenceNumber !== ListenSequence.INVALID) {
this.sequenceNumberHandler(sequenceNumber);
}
}
else if (storageEvent.key === this.bundleLoadedKey) {
const collectionGroups = this.fromWebStorageBundleLoadedState(storageEvent.newValue);
await Promise.all(collectionGroups.map(cg => this.syncEngine.synchronizeWithChangedDocuments(cg)));
}
});
}
}
get localClientState() {
return this.activeClients.get(this.localClientId);
}
persistClientState() {
this.setItem(this.localClientStorageKey, this.localClientState.toWebStorageJSON());
}
persistMutationState(batchId, state, error) {
const mutationState = new MutationMetadata(this.currentUser, batchId, state, error);
const mutationKey = createWebStorageMutationBatchKey(this.persistenceKey, this.currentUser, batchId);
this.setItem(mutationKey, mutationState.toWebStorageJSON());
}
removeMutationState(batchId) {
const mutationKey = createWebStorageMutationBatchKey(this.persistenceKey, this.currentUser, batchId);
this.removeItem(mutationKey);
}
persistOnlineState(onlineState) {
const entry = {
clientId: this.localClientId,
onlineState
};
this.storage.setItem(this.onlineStateKey, JSON.stringify(entry));
}
persistQueryTargetState(targetId, state, error) {
const targetKey = createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId);
const targetMetadata = new QueryTargetMetadata(targetId, state, error);
this.setItem(targetKey, targetMetadata.toWebStorageJSON());
}
persistBundleLoadedState(collectionGroups) {
const json = JSON.stringify(Array.from(collectionGroups));
this.setItem(this.bundleLoadedKey, json);
}
/**
* Parses a client state key in WebStorage. Returns null if the key does not
* match the expected key format.
*/
fromWebStorageClientStateKey(key) {
const match = this.clientStateKeyRe.exec(key);
return match ? match[1] : null;
}
/**
* Parses a client state in WebStorage. Returns 'null' if the value could not
* be parsed.
*/
fromWebStorageClientState(key, value) {
const clientId = this.fromWebStorageClientStateKey(key);
return RemoteClientState.fromWebStorageEntry(clientId, value);
}
/**
* Parses a mutation batch state in WebStorage. Returns 'null' if the value
* could not be parsed.
*/
fromWebStorageMutationMetadata(key, value) {
const match = this.mutationBatchKeyRe.exec(key);
const batchId = Number(match[1]);
const userId = match[2] !== undefined ? match[2] : null;
return MutationMetadata.fromWebStorageEntry(new User(userId), batchId, value);
}
/**
* Parses a query target state from WebStorage. Returns 'null' if the value
* could not be parsed.
*/
fromWebStorageQueryTargetMetadata(key, value) {
const match = this.queryTargetKeyRe.exec(key);
const targetId = Number(match[1]);
return QueryTargetMetadata.fromWebStorageEntry(targetId, value);
}
/**
* Parses an online state from WebStorage. Returns 'null' if the value
* could not be parsed.
*/
fromWebStorageOnlineState(value) {
return SharedOnlineState.fromWebStorageEntry(value);
}
fromWebStorageBundleLoadedState(value) {
return JSON.parse(value);
}
async handleMutationBatchEvent(mutationBatch) {
if (mutationBatch.user.uid !== this.currentUser.uid) {
logDebug(LOG_TAG$a, `Ignoring mutation for non-active user ${mutationBatch.user.uid}`);
return;
}
return this.syncEngine.applyBatchState(mutationBatch.batchId, mutationBatch.state, mutationBatch.error);
}
handleQueryTargetEvent(targetMetadata) {
return this.syncEngine.applyTargetState(targetMetadata.targetId, targetMetadata.state, targetMetadata.error);
}
handleClientStateEvent(clientId, clientState) {
const updatedClients = clientState
? this.activeClients.insert(clientId, clientState)
: this.activeClients.remove(clientId);
const existingTargets = this.extractActiveQueryTargets(this.activeClients);
const newTargets = this.extractActiveQueryTargets(updatedClients);
const addedTargets = [];
const removedTargets = [];
newTargets.forEach(targetId => {
if (!existingTargets.has(targetId)) {
addedTargets.push(targetId);
}
});
existingTargets.forEach(targetId => {
if (!newTargets.has(targetId)) {
removedTargets.push(targetId);
}
});
return this.syncEngine.applyActiveTargetsChange(addedTargets, removedTargets).then(() => {
this.activeClients = updatedClients;
});
}
handleOnlineStateEvent(onlineState) {
// We check whether the client that wrote this online state is still active
// by comparing its client ID to the list of clients kept active in
// IndexedDb. If a client does not update their IndexedDb client state
// within 5 seconds, it is considered inactive and we don't emit an online
// state event.
if (this.activeClients.get(onlineState.clientId)) {
this.onlineStateHandler(onlineState.onlineState);
}
}
extractActiveQueryTargets(clients) {
let activeTargets = targetIdSet();
clients.forEach((key, value) => {
activeTargets = activeTargets.unionWith(value.activeTargetIds);
});
return activeTargets;
}
}
function fromWebStorageSequenceNumber(seqString) {
let sequenceNumber = ListenSequence.INVALID;
if (seqString != null) {
try {
const parsed = JSON.parse(seqString);
hardAssert(typeof parsed === 'number');
sequenceNumber = parsed;
}
catch (e) {
logError(LOG_TAG$a, 'Failed to read sequence number from WebStorage', e);
}
}
return sequenceNumber;
}
/**
* `MemorySharedClientState` is a simple implementation of SharedClientState for
* clients using memory persistence. The state in this class remains fully
* isolated and no synchronization is performed.
*/
class MemorySharedClientState {
constructor() {
this.localState = new LocalClientState();
this.queryState = {};
this.onlineStateHandler = null;
this.sequenceNumberHandler = null;
}
addPendingMutation(batchId) {
// No op.
}
updateMutationState(batchId, state, error) {
// No op.
}
addLocalQueryTarget(targetId) {
this.localState.addQueryTarget(targetId);
return this.queryState[targetId] || 'not-current';
}
updateQueryState(targetId, state, error) {
this.queryState[targetId] = state;
}
removeLocalQueryTarget(targetId) {
this.localState.removeQueryTarget(targetId);
}
isLocalQueryTarget(targetId) {
return this.localState.activeTargetIds.has(targetId);
}
clearQueryState(targetId) {
delete this.queryState[targetId];
}
getAllActiveQueryTargets() {
return this.localState.activeTargetIds;
}
isActiveQueryTarget(targetId) {
return this.localState.activeTargetIds.has(targetId);
}
start() {
this.localState = new LocalClientState();
return Promise.resolve();
}
handleUserChange(user, removedBatchIds, addedBatchIds) {
// No op.
}
setOnlineState(onlineState) {
// No op.
}
shutdown() { }
writeSequenceNumber(sequenceNumber) { }
notifyBundleLoaded(collectionGroups) {
// No op.
}
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class NoopConnectivityMonitor {
addCallback(callback) {
// No-op.
}
shutdown() {
// No-op.
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides a simple helper class that implements the Stream interface to
* bridge to other implementations that are streams but do not implement the
* interface. The stream callbacks are invoked with the callOn... methods.
*/
class StreamBridge {
constructor(args) {
this.sendFn = args.sendFn;
this.closeFn = args.closeFn;
}
onOpen(callback) {
this.wrappedOnOpen = callback;
}
onClose(callback) {
this.wrappedOnClose = callback;
}
onMessage(callback) {
this.wrappedOnMessage = callback;
}
close() {
this.closeFn();
}
send(msg) {
this.sendFn(msg);
}
callOnOpen() {
this.wrappedOnOpen();
}
callOnClose(err) {
this.wrappedOnClose(err);
}
callOnMessage(msg) {
this.wrappedOnMessage(msg);
}
}
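// Example (editor's sketch, not part of this build): bridging a hypothetical
// underlying duplex connection `conn` (with send/close methods and
// open/close/message events) into the Stream interface, mirroring how
// GrpcConnection.openStream() uses this class below.
//
//   const stream = new StreamBridge({
//     sendFn: msg => conn.send(msg),
//     closeFn: () => conn.close()
//   });
//   conn.on('open', () => stream.callOnOpen());
//   conn.on('close', err => stream.callOnClose(err));
//   conn.on('message', msg => stream.callOnMessage(msg));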
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Utilities for dealing with node.js-style APIs. See nodePromise for more
* details.
*/
/**
* Creates a node-style callback that resolves or rejects a new Promise. The
* callback is passed to the given action which can then use the callback as
* a parameter to a node-style function.
*
* The intent is to directly bridge a node-style function (which takes a
* callback) into a Promise without manually converting between the node-style
* callback and the promise at each call.
*
* In effect it allows you to convert:
*
* @example
* new Promise((resolve: (value?: fs.Stats) => void,
* reject: (error?: any) => void) => {
* fs.stat(path, (error?: any, stat?: fs.Stats) => {
* if (error) {
* reject(error);
* } else {
* resolve(stat);
* }
* });
* });
*
* Into
* @example
* nodePromise((callback: NodeCallback) => {
* fs.stat(path, callback);
* });
*
* @param action - a function that takes a node-style callback as an argument
* and then uses that callback to invoke some node-style API.
* @returns a new Promise which will be rejected if the callback is given the
* first Error parameter or will resolve to the value given otherwise.
*/
function nodePromise(action) {
return new Promise((resolve, reject) => {
action((error, value) => {
if (error) {
reject(error);
}
else {
resolve(value);
}
});
});
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// TODO: Fetch runtime version from grpc-js/package.json instead
// when there's a cleaner way to dynamically require JSON in both Node ESM and CJS
const grpcVersion = '1.7.3';
const LOG_TAG$9 = 'Connection';
const X_GOOG_API_CLIENT_VALUE = `gl-node/${process.versions.node} fire/${SDK_VERSION} grpc/${grpcVersion}`;
function createMetadata(databasePath, authToken, appCheckToken, appId) {
hardAssert(authToken === null || authToken.type === 'OAuth');
const metadata = new grpc__namespace.Metadata();
if (authToken) {
authToken.headers.forEach((value, key) => metadata.set(key, value));
}
if (appCheckToken) {
appCheckToken.headers.forEach((value, key) => metadata.set(key, value));
}
if (appId) {
metadata.set('X-Firebase-GMPID', appId);
}
metadata.set('X-Goog-Api-Client', X_GOOG_API_CLIENT_VALUE);
// These headers are used to improve routing and project isolation by the
// backend.
// TODO(b/199767712): We are keeping 'Google-Cloud-Resource-Prefix' until Emulators can be
// released with cl/428820046. Currently blocked because Emulators are now built with Java
// 11 from Google3.
metadata.set('Google-Cloud-Resource-Prefix', databasePath);
metadata.set('x-goog-request-params', databasePath);
return metadata;
}
/**
* A Connection implemented by GRPC-Node.
*/
class GrpcConnection {
constructor(protos, databaseInfo) {
this.databaseInfo = databaseInfo;
// We cache stubs for the most-recently-used token.
this.cachedStub = null;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
this.firestore = protos['google']['firestore']['v1'];
this.databasePath = `projects/${databaseInfo.databaseId.projectId}/databases/${databaseInfo.databaseId.database}`;
}
get shouldResourcePathBeIncludedInRequest() {
// Both `invokeRPC()` and `invokeStreamingRPC()` ignore their `path` arguments, and expect
// the "path" to be part of the given `request`.
return true;
}
ensureActiveStub() {
if (!this.cachedStub) {
logDebug(LOG_TAG$9, 'Creating Firestore stub.');
const credentials = this.databaseInfo.ssl
? grpc__namespace.credentials.createSsl()
: grpc__namespace.credentials.createInsecure();
this.cachedStub = new this.firestore.Firestore(this.databaseInfo.host, credentials);
}
return this.cachedStub;
}
invokeRPC(rpcName, path, request, authToken, appCheckToken) {
const stub = this.ensureActiveStub();
const metadata = createMetadata(this.databasePath, authToken, appCheckToken, this.databaseInfo.appId);
const jsonRequest = Object.assign({ database: this.databasePath }, request);
return nodePromise((callback) => {
logDebug(LOG_TAG$9, `RPC '${rpcName}' invoked with request:`, request);
return stub[rpcName](jsonRequest, metadata, (grpcError, value) => {
if (grpcError) {
logDebug(LOG_TAG$9, `RPC '${rpcName}' failed with error:`, grpcError);
callback(new FirestoreError(mapCodeFromRpcCode(grpcError.code), grpcError.message));
}
else {
logDebug(LOG_TAG$9, `RPC '${rpcName}' completed with response:`, value);
callback(undefined, value);
}
});
});
}
invokeStreamingRPC(rpcName, path, request, authToken, appCheckToken, expectedResponseCount) {
const results = [];
const responseDeferred = new Deferred();
logDebug(LOG_TAG$9, `RPC '${rpcName}' invoked (streaming) with request:`, request);
const stub = this.ensureActiveStub();
const metadata = createMetadata(this.databasePath, authToken, appCheckToken, this.databaseInfo.appId);
const jsonRequest = Object.assign(Object.assign({}, request), { database: this.databasePath });
const stream = stub[rpcName](jsonRequest, metadata);
let callbackFired = false;
stream.on('data', (response) => {
logDebug(LOG_TAG$9, `RPC ${rpcName} received result:`, response);
results.push(response);
if (expectedResponseCount !== undefined &&
results.length === expectedResponseCount) {
callbackFired = true;
responseDeferred.resolve(results);
}
});
stream.on('end', () => {
logDebug(LOG_TAG$9, `RPC '${rpcName}' completed.`);
if (!callbackFired) {
callbackFired = true;
responseDeferred.resolve(results);
}
});
stream.on('error', (grpcError) => {
logDebug(LOG_TAG$9, `RPC '${rpcName}' failed with error:`, grpcError);
const code = mapCodeFromRpcCode(grpcError.code);
responseDeferred.reject(new FirestoreError(code, grpcError.message));
});
return responseDeferred.promise;
}
// TODO(mikelehen): This "method" is a monster. Should be refactored.
openStream(rpcName, authToken, appCheckToken) {
const stub = this.ensureActiveStub();
const metadata = createMetadata(this.databasePath, authToken, appCheckToken, this.databaseInfo.appId);
const grpcStream = stub[rpcName](metadata);
let closed = false;
const close = (err) => {
if (!closed) {
closed = true;
stream.callOnClose(err);
grpcStream.end();
}
};
const stream = new StreamBridge({
sendFn: (msg) => {
if (!closed) {
logDebug(LOG_TAG$9, 'GRPC stream sending:', msg);
try {
grpcStream.write(msg);
}
catch (e) {
// This probably means we didn't conform to the proto. Make sure to
// log the message we sent.
logError('Failure sending:', msg);
logError('Error:', e);
throw e;
}
}
else {
logDebug(LOG_TAG$9, 'Not sending because gRPC stream is closed:', msg);
}
},
closeFn: () => {
logDebug(LOG_TAG$9, 'GRPC stream closed locally via close().');
close();
}
});
grpcStream.on('data', (msg) => {
if (!closed) {
logDebug(LOG_TAG$9, 'GRPC stream received:', msg);
stream.callOnMessage(msg);
}
});
grpcStream.on('end', () => {
logDebug(LOG_TAG$9, 'GRPC stream ended.');
close();
});
grpcStream.on('error', (grpcError) => {
if (!closed) {
logWarn(LOG_TAG$9, 'GRPC stream error. Code:', grpcError.code, 'Message:', grpcError.message);
const code = mapCodeFromRpcCode(grpcError.code);
close(new FirestoreError(code, grpcError.message));
}
});
logDebug(LOG_TAG$9, 'Opening GRPC stream');
// TODO(dimond): Since grpc has no explicit open status (or does it?) we
// simulate an onOpen on the next event-loop tick, after the stream has had
// its listeners registered.
setTimeout(() => {
stream.callOnOpen();
}, 0);
return stream;
}
}
const nested = {
google: {
nested: {
protobuf: {
options: {
csharp_namespace: "Google.Protobuf.WellKnownTypes",
go_package: "github.com/golang/protobuf/ptypes/wrappers",
java_package: "com.google.protobuf",
java_outer_classname: "WrappersProto",
java_multiple_files: true,
objc_class_prefix: "GPB",
cc_enable_arenas: true,
optimize_for: "SPEED"
},
nested: {
Timestamp: {
fields: {
seconds: {
type: "int64",
id: 1
},
nanos: {
type: "int32",
id: 2
}
}
},
FileDescriptorSet: {
fields: {
file: {
rule: "repeated",
type: "FileDescriptorProto",
id: 1
}
}
},
FileDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
"package": {
type: "string",
id: 2
},
dependency: {
rule: "repeated",
type: "string",
id: 3
},
publicDependency: {
rule: "repeated",
type: "int32",
id: 10,
options: {
packed: false
}
},
weakDependency: {
rule: "repeated",
type: "int32",
id: 11,
options: {
packed: false
}
},
messageType: {
rule: "repeated",
type: "DescriptorProto",
id: 4
},
enumType: {
rule: "repeated",
type: "EnumDescriptorProto",
id: 5
},
service: {
rule: "repeated",
type: "ServiceDescriptorProto",
id: 6
},
extension: {
rule: "repeated",
type: "FieldDescriptorProto",
id: 7
},
options: {
type: "FileOptions",
id: 8
},
sourceCodeInfo: {
type: "SourceCodeInfo",
id: 9
},
syntax: {
type: "string",
id: 12
}
}
},
DescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
field: {
rule: "repeated",
type: "FieldDescriptorProto",
id: 2
},
extension: {
rule: "repeated",
type: "FieldDescriptorProto",
id: 6
},
nestedType: {
rule: "repeated",
type: "DescriptorProto",
id: 3
},
enumType: {
rule: "repeated",
type: "EnumDescriptorProto",
id: 4
},
extensionRange: {
rule: "repeated",
type: "ExtensionRange",
id: 5
},
oneofDecl: {
rule: "repeated",
type: "OneofDescriptorProto",
id: 8
},
options: {
type: "MessageOptions",
id: 7
},
reservedRange: {
rule: "repeated",
type: "ReservedRange",
id: 9
},
reservedName: {
rule: "repeated",
type: "string",
id: 10
}
},
nested: {
ExtensionRange: {
fields: {
start: {
type: "int32",
id: 1
},
end: {
type: "int32",
id: 2
}
}
},
ReservedRange: {
fields: {
start: {
type: "int32",
id: 1
},
end: {
type: "int32",
id: 2
}
}
}
}
},
FieldDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
number: {
type: "int32",
id: 3
},
label: {
type: "Label",
id: 4
},
type: {
type: "Type",
id: 5
},
typeName: {
type: "string",
id: 6
},
extendee: {
type: "string",
id: 2
},
defaultValue: {
type: "string",
id: 7
},
oneofIndex: {
type: "int32",
id: 9
},
jsonName: {
type: "string",
id: 10
},
options: {
type: "FieldOptions",
id: 8
}
},
nested: {
Type: {
values: {
TYPE_DOUBLE: 1,
TYPE_FLOAT: 2,
TYPE_INT64: 3,
TYPE_UINT64: 4,
TYPE_INT32: 5,
TYPE_FIXED64: 6,
TYPE_FIXED32: 7,
TYPE_BOOL: 8,
TYPE_STRING: 9,
TYPE_GROUP: 10,
TYPE_MESSAGE: 11,
TYPE_BYTES: 12,
TYPE_UINT32: 13,
TYPE_ENUM: 14,
TYPE_SFIXED32: 15,
TYPE_SFIXED64: 16,
TYPE_SINT32: 17,
TYPE_SINT64: 18
}
},
Label: {
values: {
LABEL_OPTIONAL: 1,
LABEL_REQUIRED: 2,
LABEL_REPEATED: 3
}
}
}
},
OneofDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
options: {
type: "OneofOptions",
id: 2
}
}
},
EnumDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
value: {
rule: "repeated",
type: "EnumValueDescriptorProto",
id: 2
},
options: {
type: "EnumOptions",
id: 3
}
}
},
EnumValueDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
number: {
type: "int32",
id: 2
},
options: {
type: "EnumValueOptions",
id: 3
}
}
},
ServiceDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
method: {
rule: "repeated",
type: "MethodDescriptorProto",
id: 2
},
options: {
type: "ServiceOptions",
id: 3
}
}
},
MethodDescriptorProto: {
fields: {
name: {
type: "string",
id: 1
},
inputType: {
type: "string",
id: 2
},
outputType: {
type: "string",
id: 3
},
options: {
type: "MethodOptions",
id: 4
},
clientStreaming: {
type: "bool",
id: 5
},
serverStreaming: {
type: "bool",
id: 6
}
}
},
FileOptions: {
fields: {
javaPackage: {
type: "string",
id: 1
},
javaOuterClassname: {
type: "string",
id: 8
},
javaMultipleFiles: {
type: "bool",
id: 10
},
javaGenerateEqualsAndHash: {
type: "bool",
id: 20,
options: {
deprecated: true
}
},
javaStringCheckUtf8: {
type: "bool",
id: 27
},
optimizeFor: {
type: "OptimizeMode",
id: 9,
options: {
"default": "SPEED"
}
},
goPackage: {
type: "string",
id: 11
},
ccGenericServices: {
type: "bool",
id: 16
},
javaGenericServices: {
type: "bool",
id: 17
},
pyGenericServices: {
type: "bool",
id: 18
},
deprecated: {
type: "bool",
id: 23
},
ccEnableArenas: {
type: "bool",
id: 31
},
objcClassPrefix: {
type: "string",
id: 36
},
csharpNamespace: {
type: "string",
id: 37
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
],
reserved: [
[
38,
38
]
],
nested: {
OptimizeMode: {
values: {
SPEED: 1,
CODE_SIZE: 2,
LITE_RUNTIME: 3
}
}
}
},
MessageOptions: {
fields: {
messageSetWireFormat: {
type: "bool",
id: 1
},
noStandardDescriptorAccessor: {
type: "bool",
id: 2
},
deprecated: {
type: "bool",
id: 3
},
mapEntry: {
type: "bool",
id: 7
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
],
reserved: [
[
8,
8
]
]
},
FieldOptions: {
fields: {
ctype: {
type: "CType",
id: 1,
options: {
"default": "STRING"
}
},
packed: {
type: "bool",
id: 2
},
jstype: {
type: "JSType",
id: 6,
options: {
"default": "JS_NORMAL"
}
},
lazy: {
type: "bool",
id: 5
},
deprecated: {
type: "bool",
id: 3
},
weak: {
type: "bool",
id: 10
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
],
reserved: [
[
4,
4
]
],
nested: {
CType: {
values: {
STRING: 0,
CORD: 1,
STRING_PIECE: 2
}
},
JSType: {
values: {
JS_NORMAL: 0,
JS_STRING: 1,
JS_NUMBER: 2
}
}
}
},
OneofOptions: {
fields: {
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
]
},
EnumOptions: {
fields: {
allowAlias: {
type: "bool",
id: 2
},
deprecated: {
type: "bool",
id: 3
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
]
},
EnumValueOptions: {
fields: {
deprecated: {
type: "bool",
id: 1
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
]
},
ServiceOptions: {
fields: {
deprecated: {
type: "bool",
id: 33
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
]
},
MethodOptions: {
fields: {
deprecated: {
type: "bool",
id: 33
},
uninterpretedOption: {
rule: "repeated",
type: "UninterpretedOption",
id: 999
}
},
extensions: [
[
1000,
536870911
]
]
},
UninterpretedOption: {
fields: {
name: {
rule: "repeated",
type: "NamePart",
id: 2
},
identifierValue: {
type: "string",
id: 3
},
positiveIntValue: {
type: "uint64",
id: 4
},
negativeIntValue: {
type: "int64",
id: 5
},
doubleValue: {
type: "double",
id: 6
},
stringValue: {
type: "bytes",
id: 7
},
aggregateValue: {
type: "string",
id: 8
}
},
nested: {
NamePart: {
fields: {
namePart: {
rule: "required",
type: "string",
id: 1
},
isExtension: {
rule: "required",
type: "bool",
id: 2
}
}
}
}
},
SourceCodeInfo: {
fields: {
location: {
rule: "repeated",
type: "Location",
id: 1
}
},
nested: {
Location: {
fields: {
path: {
rule: "repeated",
type: "int32",
id: 1
},
span: {
rule: "repeated",
type: "int32",
id: 2
},
leadingComments: {
type: "string",
id: 3
},
trailingComments: {
type: "string",
id: 4
},
leadingDetachedComments: {
rule: "repeated",
type: "string",
id: 6
}
}
}
}
},
GeneratedCodeInfo: {
fields: {
annotation: {
rule: "repeated",
type: "Annotation",
id: 1
}
},
nested: {
Annotation: {
fields: {
path: {
rule: "repeated",
type: "int32",
id: 1
},
sourceFile: {
type: "string",
id: 2
},
begin: {
type: "int32",
id: 3
},
end: {
type: "int32",
id: 4
}
}
}
}
},
Struct: {
fields: {
fields: {
keyType: "string",
type: "Value",
id: 1
}
}
},
Value: {
oneofs: {
kind: {
oneof: [
"nullValue",
"numberValue",
"stringValue",
"boolValue",
"structValue",
"listValue"
]
}
},
fields: {
nullValue: {
type: "NullValue",
id: 1
},
numberValue: {
type: "double",
id: 2
},
stringValue: {
type: "string",
id: 3
},
boolValue: {
type: "bool",
id: 4
},
structValue: {
type: "Struct",
id: 5
},
listValue: {
type: "ListValue",
id: 6
}
}
},
NullValue: {
values: {
NULL_VALUE: 0
}
},
ListValue: {
fields: {
values: {
rule: "repeated",
type: "Value",
id: 1
}
}
},
Empty: {
fields: {
}
},
DoubleValue: {
fields: {
value: {
type: "double",
id: 1
}
}
},
FloatValue: {
fields: {
value: {
type: "float",
id: 1
}
}
},
Int64Value: {
fields: {
value: {
type: "int64",
id: 1
}
}
},
UInt64Value: {
fields: {
value: {
type: "uint64",
id: 1
}
}
},
Int32Value: {
fields: {
value: {
type: "int32",
id: 1
}
}
},
UInt32Value: {
fields: {
value: {
type: "uint32",
id: 1
}
}
},
BoolValue: {
fields: {
value: {
type: "bool",
id: 1
}
}
},
StringValue: {
fields: {
value: {
type: "string",
id: 1
}
}
},
BytesValue: {
fields: {
value: {
type: "bytes",
id: 1
}
}
},
Any: {
fields: {
typeUrl: {
type: "string",
id: 1
},
value: {
type: "bytes",
id: 2
}
}
}
}
},
firestore: {
nested: {
v1: {
options: {
csharp_namespace: "Google.Cloud.Firestore.V1",
go_package: "google.golang.org/genproto/googleapis/firestore/v1;firestore",
java_multiple_files: true,
java_outer_classname: "WriteProto",
java_package: "com.google.firestore.v1",
objc_class_prefix: "GCFS",
php_namespace: "Google\\Cloud\\Firestore\\V1",
ruby_package: "Google::Cloud::Firestore::V1"
},
nested: {
AggregationResult: {
fields: {
aggregateFields: {
keyType: "string",
type: "Value",
id: 2
}
}
},
DocumentMask: {
fields: {
fieldPaths: {
rule: "repeated",
type: "string",
id: 1
}
}
},
Precondition: {
oneofs: {
conditionType: {
oneof: [
"exists",
"updateTime"
]
}
},
fields: {
exists: {
type: "bool",
id: 1
},
updateTime: {
type: "google.protobuf.Timestamp",
id: 2
}
}
},
TransactionOptions: {
oneofs: {
mode: {
oneof: [
"readOnly",
"readWrite"
]
}
},
fields: {
readOnly: {
type: "ReadOnly",
id: 2
},
readWrite: {
type: "ReadWrite",
id: 3
}
},
nested: {
ReadWrite: {
fields: {
retryTransaction: {
type: "bytes",
id: 1
}
}
},
ReadOnly: {
oneofs: {
consistencySelector: {
oneof: [
"readTime"
]
}
},
fields: {
readTime: {
type: "google.protobuf.Timestamp",
id: 2
}
}
}
}
},
Document: {
fields: {
name: {
type: "string",
id: 1
},
fields: {
keyType: "string",
type: "Value",
id: 2
},
createTime: {
type: "google.protobuf.Timestamp",
id: 3
},
updateTime: {
type: "google.protobuf.Timestamp",
id: 4
}
}
},
Value: {
oneofs: {
valueType: {
oneof: [
"nullValue",
"booleanValue",
"integerValue",
"doubleValue",
"timestampValue",
"stringValue",
"bytesValue",
"referenceValue",
"geoPointValue",
"arrayValue",
"mapValue"
]
}
},
fields: {
nullValue: {
type: "google.protobuf.NullValue",
id: 11
},
booleanValue: {
type: "bool",
id: 1
},
integerValue: {
type: "int64",
id: 2
},
doubleValue: {
type: "double",
id: 3
},
timestampValue: {
type: "google.protobuf.Timestamp",
id: 10
},
stringValue: {
type: "string",
id: 17
},
bytesValue: {
type: "bytes",
id: 18
},
referenceValue: {
type: "string",
id: 5
},
geoPointValue: {
type: "google.type.LatLng",
id: 8
},
arrayValue: {
type: "ArrayValue",
id: 9
},
mapValue: {
type: "MapValue",
id: 6
}
}
},
ArrayValue: {
fields: {
values: {
rule: "repeated",
type: "Value",
id: 1
}
}
},
MapValue: {
fields: {
fields: {
keyType: "string",
type: "Value",
id: 1
}
}
},
Firestore: {
options: {
"(google.api.default_host)": "firestore.googleapis.com",
"(google.api.oauth_scopes)": "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/datastore"
},
methods: {
GetDocument: {
requestType: "GetDocumentRequest",
responseType: "Document",
options: {
"(google.api.http).get": "/v1/{name=projects/*/databases/*/documents/*/**}"
},
parsedOptions: [
{
"(google.api.http)": {
get: "/v1/{name=projects/*/databases/*/documents/*/**}"
}
}
]
},
ListDocuments: {
requestType: "ListDocumentsRequest",
responseType: "ListDocumentsResponse",
options: {
"(google.api.http).get": "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}"
},
parsedOptions: [
{
"(google.api.http)": {
get: "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}"
}
}
]
},
UpdateDocument: {
requestType: "UpdateDocumentRequest",
responseType: "Document",
options: {
"(google.api.http).patch": "/v1/{document.name=projects/*/databases/*/documents/*/**}",
"(google.api.http).body": "document",
"(google.api.method_signature)": "document,update_mask"
},
parsedOptions: [
{
"(google.api.http)": {
patch: "/v1/{document.name=projects/*/databases/*/documents/*/**}",
body: "document"
}
},
{
"(google.api.method_signature)": "document,update_mask"
}
]
},
DeleteDocument: {
requestType: "DeleteDocumentRequest",
responseType: "google.protobuf.Empty",
options: {
"(google.api.http).delete": "/v1/{name=projects/*/databases/*/documents/*/**}",
"(google.api.method_signature)": "name"
},
parsedOptions: [
{
"(google.api.http)": {
"delete": "/v1/{name=projects/*/databases/*/documents/*/**}"
}
},
{
"(google.api.method_signature)": "name"
}
]
},
BatchGetDocuments: {
requestType: "BatchGetDocumentsRequest",
responseType: "BatchGetDocumentsResponse",
responseStream: true,
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:batchGet",
"(google.api.http).body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:batchGet",
body: "*"
}
}
]
},
BeginTransaction: {
requestType: "BeginTransactionRequest",
responseType: "BeginTransactionResponse",
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:beginTransaction",
"(google.api.http).body": "*",
"(google.api.method_signature)": "database"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:beginTransaction",
body: "*"
}
},
{
"(google.api.method_signature)": "database"
}
]
},
Commit: {
requestType: "CommitRequest",
responseType: "CommitResponse",
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:commit",
"(google.api.http).body": "*",
"(google.api.method_signature)": "database,writes"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:commit",
body: "*"
}
},
{
"(google.api.method_signature)": "database,writes"
}
]
},
Rollback: {
requestType: "RollbackRequest",
responseType: "google.protobuf.Empty",
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:rollback",
"(google.api.http).body": "*",
"(google.api.method_signature)": "database,transaction"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:rollback",
body: "*"
}
},
{
"(google.api.method_signature)": "database,transaction"
}
]
},
RunQuery: {
requestType: "RunQueryRequest",
responseType: "RunQueryResponse",
responseStream: true,
options: {
"(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:runQuery",
"(google.api.http).body": "*",
"(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery",
"(google.api.http).additional_bindings.body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{parent=projects/*/databases/*/documents}:runQuery",
body: "*",
additional_bindings: {
post: "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery",
body: "*"
}
}
}
]
},
RunAggregationQuery: {
requestType: "RunAggregationQueryRequest",
responseType: "RunAggregationQueryResponse",
responseStream: true,
options: {
"(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery",
"(google.api.http).body": "*",
"(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery",
"(google.api.http).additional_bindings.body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery",
body: "*",
additional_bindings: {
post: "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery",
body: "*"
}
}
}
]
},
PartitionQuery: {
requestType: "PartitionQueryRequest",
responseType: "PartitionQueryResponse",
options: {
"(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:partitionQuery",
"(google.api.http).body": "*",
"(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery",
"(google.api.http).additional_bindings.body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{parent=projects/*/databases/*/documents}:partitionQuery",
body: "*",
additional_bindings: {
post: "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery",
body: "*"
}
}
}
]
},
Write: {
requestType: "WriteRequest",
requestStream: true,
responseType: "WriteResponse",
responseStream: true,
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:write",
"(google.api.http).body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:write",
body: "*"
}
}
]
},
Listen: {
requestType: "ListenRequest",
requestStream: true,
responseType: "ListenResponse",
responseStream: true,
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:listen",
"(google.api.http).body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:listen",
body: "*"
}
}
]
},
ListCollectionIds: {
requestType: "ListCollectionIdsRequest",
responseType: "ListCollectionIdsResponse",
options: {
"(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds",
"(google.api.http).body": "*",
"(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds",
"(google.api.http).additional_bindings.body": "*",
"(google.api.method_signature)": "parent"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds",
body: "*",
additional_bindings: {
post: "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds",
body: "*"
}
}
},
{
"(google.api.method_signature)": "parent"
}
]
},
BatchWrite: {
requestType: "BatchWriteRequest",
responseType: "BatchWriteResponse",
options: {
"(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:batchWrite",
"(google.api.http).body": "*"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{database=projects/*/databases/*}/documents:batchWrite",
body: "*"
}
}
]
},
CreateDocument: {
requestType: "CreateDocumentRequest",
responseType: "Document",
options: {
"(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}",
"(google.api.http).body": "document"
},
parsedOptions: [
{
"(google.api.http)": {
post: "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}",
body: "document"
}
}
]
}
}
},
GetDocumentRequest: {
oneofs: {
consistencySelector: {
oneof: [
"transaction",
"readTime"
]
}
},
fields: {
name: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
mask: {
type: "DocumentMask",
id: 2
},
transaction: {
type: "bytes",
id: 3
},
readTime: {
type: "google.protobuf.Timestamp",
id: 5
}
}
},
ListDocumentsRequest: {
oneofs: {
consistencySelector: {
oneof: [
"transaction",
"readTime"
]
}
},
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
collectionId: {
type: "string",
id: 2,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
pageSize: {
type: "int32",
id: 3
},
pageToken: {
type: "string",
id: 4
},
orderBy: {
type: "string",
id: 6
},
mask: {
type: "DocumentMask",
id: 7
},
transaction: {
type: "bytes",
id: 8
},
readTime: {
type: "google.protobuf.Timestamp",
id: 10
},
showMissing: {
type: "bool",
id: 12
}
}
},
ListDocumentsResponse: {
fields: {
documents: {
rule: "repeated",
type: "Document",
id: 1
},
nextPageToken: {
type: "string",
id: 2
}
}
},
CreateDocumentRequest: {
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
collectionId: {
type: "string",
id: 2,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
documentId: {
type: "string",
id: 3
},
document: {
type: "Document",
id: 4,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
mask: {
type: "DocumentMask",
id: 5
}
}
},
UpdateDocumentRequest: {
fields: {
document: {
type: "Document",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
updateMask: {
type: "DocumentMask",
id: 2
},
mask: {
type: "DocumentMask",
id: 3
},
currentDocument: {
type: "Precondition",
id: 4
}
}
},
DeleteDocumentRequest: {
fields: {
name: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
currentDocument: {
type: "Precondition",
id: 2
}
}
},
BatchGetDocumentsRequest: {
oneofs: {
consistencySelector: {
oneof: [
"transaction",
"newTransaction",
"readTime"
]
}
},
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
documents: {
rule: "repeated",
type: "string",
id: 2
},
mask: {
type: "DocumentMask",
id: 3
},
transaction: {
type: "bytes",
id: 4
},
newTransaction: {
type: "TransactionOptions",
id: 5
},
readTime: {
type: "google.protobuf.Timestamp",
id: 7
}
}
},
BatchGetDocumentsResponse: {
oneofs: {
result: {
oneof: [
"found",
"missing"
]
}
},
fields: {
found: {
type: "Document",
id: 1
},
missing: {
type: "string",
id: 2
},
transaction: {
type: "bytes",
id: 3
},
readTime: {
type: "google.protobuf.Timestamp",
id: 4
}
}
},
BeginTransactionRequest: {
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
options: {
type: "TransactionOptions",
id: 2
}
}
},
BeginTransactionResponse: {
fields: {
transaction: {
type: "bytes",
id: 1
}
}
},
CommitRequest: {
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
writes: {
rule: "repeated",
type: "Write",
id: 2
},
transaction: {
type: "bytes",
id: 3
}
}
},
CommitResponse: {
fields: {
writeResults: {
rule: "repeated",
type: "WriteResult",
id: 1
},
commitTime: {
type: "google.protobuf.Timestamp",
id: 2
}
}
},
RollbackRequest: {
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
transaction: {
type: "bytes",
id: 2,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
}
}
},
RunQueryRequest: {
oneofs: {
queryType: {
oneof: [
"structuredQuery"
]
},
consistencySelector: {
oneof: [
"transaction",
"newTransaction",
"readTime"
]
}
},
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
structuredQuery: {
type: "StructuredQuery",
id: 2
},
transaction: {
type: "bytes",
id: 5
},
newTransaction: {
type: "TransactionOptions",
id: 6
},
readTime: {
type: "google.protobuf.Timestamp",
id: 7
}
}
},
RunQueryResponse: {
fields: {
transaction: {
type: "bytes",
id: 2
},
document: {
type: "Document",
id: 1
},
readTime: {
type: "google.protobuf.Timestamp",
id: 3
},
skippedResults: {
type: "int32",
id: 4
}
}
},
RunAggregationQueryRequest: {
oneofs: {
queryType: {
oneof: [
"structuredAggregationQuery"
]
},
consistencySelector: {
oneof: [
"transaction",
"newTransaction",
"readTime"
]
}
},
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
structuredAggregationQuery: {
type: "StructuredAggregationQuery",
id: 2
},
transaction: {
type: "bytes",
id: 4
},
newTransaction: {
type: "TransactionOptions",
id: 5
},
readTime: {
type: "google.protobuf.Timestamp",
id: 6
}
}
},
RunAggregationQueryResponse: {
fields: {
result: {
type: "AggregationResult",
id: 1
},
transaction: {
type: "bytes",
id: 2
},
readTime: {
type: "google.protobuf.Timestamp",
id: 3
}
}
},
PartitionQueryRequest: {
oneofs: {
queryType: {
oneof: [
"structuredQuery"
]
}
},
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
structuredQuery: {
type: "StructuredQuery",
id: 2
},
partitionCount: {
type: "int64",
id: 3
},
pageToken: {
type: "string",
id: 4
},
pageSize: {
type: "int32",
id: 5
}
}
},
PartitionQueryResponse: {
fields: {
partitions: {
rule: "repeated",
type: "Cursor",
id: 1
},
nextPageToken: {
type: "string",
id: 2
}
}
},
WriteRequest: {
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
streamId: {
type: "string",
id: 2
},
writes: {
rule: "repeated",
type: "Write",
id: 3
},
streamToken: {
type: "bytes",
id: 4
},
labels: {
keyType: "string",
type: "string",
id: 5
}
}
},
WriteResponse: {
fields: {
streamId: {
type: "string",
id: 1
},
streamToken: {
type: "bytes",
id: 2
},
writeResults: {
rule: "repeated",
type: "WriteResult",
id: 3
},
commitTime: {
type: "google.protobuf.Timestamp",
id: 4
}
}
},
ListenRequest: {
oneofs: {
targetChange: {
oneof: [
"addTarget",
"removeTarget"
]
}
},
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
addTarget: {
type: "Target",
id: 2
},
removeTarget: {
type: "int32",
id: 3
},
labels: {
keyType: "string",
type: "string",
id: 4
}
}
},
ListenResponse: {
oneofs: {
responseType: {
oneof: [
"targetChange",
"documentChange",
"documentDelete",
"documentRemove",
"filter"
]
}
},
fields: {
targetChange: {
type: "TargetChange",
id: 2
},
documentChange: {
type: "DocumentChange",
id: 3
},
documentDelete: {
type: "DocumentDelete",
id: 4
},
documentRemove: {
type: "DocumentRemove",
id: 6
},
filter: {
type: "ExistenceFilter",
id: 5
}
}
},
Target: {
oneofs: {
targetType: {
oneof: [
"query",
"documents"
]
},
resumeType: {
oneof: [
"resumeToken",
"readTime"
]
}
},
fields: {
query: {
type: "QueryTarget",
id: 2
},
documents: {
type: "DocumentsTarget",
id: 3
},
resumeToken: {
type: "bytes",
id: 4
},
readTime: {
type: "google.protobuf.Timestamp",
id: 11
},
targetId: {
type: "int32",
id: 5
},
once: {
type: "bool",
id: 6
}
},
nested: {
DocumentsTarget: {
fields: {
documents: {
rule: "repeated",
type: "string",
id: 2
}
}
},
QueryTarget: {
oneofs: {
queryType: {
oneof: [
"structuredQuery"
]
}
},
fields: {
parent: {
type: "string",
id: 1
},
structuredQuery: {
type: "StructuredQuery",
id: 2
}
}
}
}
},
TargetChange: {
fields: {
targetChangeType: {
type: "TargetChangeType",
id: 1
},
targetIds: {
rule: "repeated",
type: "int32",
id: 2
},
cause: {
type: "google.rpc.Status",
id: 3
},
resumeToken: {
type: "bytes",
id: 4
},
readTime: {
type: "google.protobuf.Timestamp",
id: 6
}
},
nested: {
TargetChangeType: {
values: {
NO_CHANGE: 0,
ADD: 1,
REMOVE: 2,
CURRENT: 3,
RESET: 4
}
}
}
},
ListCollectionIdsRequest: {
fields: {
parent: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
pageSize: {
type: "int32",
id: 2
},
pageToken: {
type: "string",
id: 3
}
}
},
ListCollectionIdsResponse: {
fields: {
collectionIds: {
rule: "repeated",
type: "string",
id: 1
},
nextPageToken: {
type: "string",
id: 2
}
}
},
BatchWriteRequest: {
fields: {
database: {
type: "string",
id: 1,
options: {
"(google.api.field_behavior)": "REQUIRED"
}
},
writes: {
rule: "repeated",
type: "Write",
id: 2
},
labels: {
keyType: "string",
type: "string",
id: 3
}
}
},
BatchWriteResponse: {
fields: {
writeResults: {
rule: "repeated",
type: "WriteResult",
id: 1
},
status: {
rule: "repeated",
type: "google.rpc.Status",
id: 2
}
}
},
StructuredQuery: {
fields: {
select: {
type: "Projection",
id: 1
},
from: {
rule: "repeated",
type: "CollectionSelector",
id: 2
},
where: {
type: "Filter",
id: 3
},
orderBy: {
rule: "repeated",
type: "Order",
id: 4
},
startAt: {
type: "Cursor",
id: 7
},
endAt: {
type: "Cursor",
id: 8
},
offset: {
type: "int32",
id: 6
},
limit: {
type: "google.protobuf.Int32Value",
id: 5
}
},
nested: {
CollectionSelector: {
fields: {
collectionId: {
type: "string",
id: 2
},
allDescendants: {
type: "bool",
id: 3
}
}
},
Filter: {
oneofs: {
filterType: {
oneof: [
"compositeFilter",
"fieldFilter",
"unaryFilter"
]
}
},
fields: {
compositeFilter: {
type: "CompositeFilter",
id: 1
},
fieldFilter: {
type: "FieldFilter",
id: 2
},
unaryFilter: {
type: "UnaryFilter",
id: 3
}
}
},
CompositeFilter: {
fields: {
op: {
type: "Operator",
id: 1
},
filters: {
rule: "repeated",
type: "Filter",
id: 2
}
},
nested: {
Operator: {
values: {
OPERATOR_UNSPECIFIED: 0,
AND: 1,
OR: 2
}
}
}
},
FieldFilter: {
fields: {
field: {
type: "FieldReference",
id: 1
},
op: {
type: "Operator",
id: 2
},
value: {
type: "Value",
id: 3
}
},
nested: {
Operator: {
values: {
OPERATOR_UNSPECIFIED: 0,
LESS_THAN: 1,
LESS_THAN_OR_EQUAL: 2,
GREATER_THAN: 3,
GREATER_THAN_OR_EQUAL: 4,
EQUAL: 5,
NOT_EQUAL: 6,
ARRAY_CONTAINS: 7,
IN: 8,
ARRAY_CONTAINS_ANY: 9,
NOT_IN: 10
}
}
}
},
UnaryFilter: {
oneofs: {
operandType: {
oneof: [
"field"
]
}
},
fields: {
op: {
type: "Operator",
id: 1
},
field: {
type: "FieldReference",
id: 2
}
},
nested: {
Operator: {
values: {
OPERATOR_UNSPECIFIED: 0,
IS_NAN: 2,
IS_NULL: 3,
IS_NOT_NAN: 4,
IS_NOT_NULL: 5
}
}
}
},
Order: {
fields: {
field: {
type: "FieldReference",
id: 1
},
direction: {
type: "Direction",
id: 2
}
}
},
FieldReference: {
fields: {
fieldPath: {
type: "string",
id: 2
}
}
},
Projection: {
fields: {
fields: {
rule: "repeated",
type: "FieldReference",
id: 2
}
}
},
Direction: {
values: {
DIRECTION_UNSPECIFIED: 0,
ASCENDING: 1,
DESCENDING: 2
}
}
}
},
StructuredAggregationQuery: {
oneofs: {
queryType: {
oneof: [
"structuredQuery"
]
}
},
fields: {
structuredQuery: {
type: "StructuredQuery",
id: 1
},
aggregations: {
rule: "repeated",
type: "Aggregation",
id: 3
}
},
nested: {
Aggregation: {
oneofs: {
operator: {
oneof: [
"count"
]
}
},
fields: {
count: {
type: "Count",
id: 1
},
alias: {
type: "string",
id: 7
}
},
nested: {
Count: {
fields: {
upTo: {
type: "google.protobuf.Int64Value",
id: 1
}
}
}
}
}
}
},
Cursor: {
fields: {
values: {
rule: "repeated",
type: "Value",
id: 1
},
before: {
type: "bool",
id: 2
}
}
},
Write: {
oneofs: {
operation: {
oneof: [
"update",
"delete",
"verify",
"transform"
]
}
},
fields: {
update: {
type: "Document",
id: 1
},
"delete": {
type: "string",
id: 2
},
verify: {
type: "string",
id: 5
},
transform: {
type: "DocumentTransform",
id: 6
},
updateMask: {
type: "DocumentMask",
id: 3
},
updateTransforms: {
rule: "repeated",
type: "DocumentTransform.FieldTransform",
id: 7
},
currentDocument: {
type: "Precondition",
id: 4
}
}
},
DocumentTransform: {
fields: {
document: {
type: "string",
id: 1
},
fieldTransforms: {
rule: "repeated",
type: "FieldTransform",
id: 2
}
},
nested: {
FieldTransform: {
oneofs: {
transformType: {
oneof: [
"setToServerValue",
"increment",
"maximum",
"minimum",
"appendMissingElements",
"removeAllFromArray"
]
}
},
fields: {
fieldPath: {
type: "string",
id: 1
},
setToServerValue: {
type: "ServerValue",
id: 2
},
increment: {
type: "Value",
id: 3
},
maximum: {
type: "Value",
id: 4
},
minimum: {
type: "Value",
id: 5
},
appendMissingElements: {
type: "ArrayValue",
id: 6
},
removeAllFromArray: {
type: "ArrayValue",
id: 7
}
},
nested: {
ServerValue: {
values: {
SERVER_VALUE_UNSPECIFIED: 0,
REQUEST_TIME: 1
}
}
}
}
}
},
WriteResult: {
fields: {
updateTime: {
type: "google.protobuf.Timestamp",
id: 1
},
transformResults: {
rule: "repeated",
type: "Value",
id: 2
}
}
},
DocumentChange: {
fields: {
document: {
type: "Document",
id: 1
},
targetIds: {
rule: "repeated",
type: "int32",
id: 5
},
removedTargetIds: {
rule: "repeated",
type: "int32",
id: 6
}
}
},
DocumentDelete: {
fields: {
document: {
type: "string",
id: 1
},
removedTargetIds: {
rule: "repeated",
type: "int32",
id: 6
},
readTime: {
type: "google.protobuf.Timestamp",
id: 4
}
}
},
DocumentRemove: {
fields: {
document: {
type: "string",
id: 1
},
removedTargetIds: {
rule: "repeated",
type: "int32",
id: 2
},
readTime: {
type: "google.protobuf.Timestamp",
id: 4
}
}
},
ExistenceFilter: {
fields: {
targetId: {
type: "int32",
id: 1
},
count: {
type: "int32",
id: 2
}
}
}
}
}
}
},
api: {
options: {
go_package: "google.golang.org/genproto/googleapis/api/annotations;annotations",
java_multiple_files: true,
java_outer_classname: "HttpProto",
java_package: "com.google.api",
objc_class_prefix: "GAPI",
cc_enable_arenas: true
},
nested: {
http: {
type: "HttpRule",
id: 72295728,
extend: "google.protobuf.MethodOptions"
},
Http: {
fields: {
rules: {
rule: "repeated",
type: "HttpRule",
id: 1
}
}
},
HttpRule: {
oneofs: {
pattern: {
oneof: [
"get",
"put",
"post",
"delete",
"patch",
"custom"
]
}
},
fields: {
get: {
type: "string",
id: 2
},
put: {
type: "string",
id: 3
},
post: {
type: "string",
id: 4
},
"delete": {
type: "string",
id: 5
},
patch: {
type: "string",
id: 6
},
custom: {
type: "CustomHttpPattern",
id: 8
},
selector: {
type: "string",
id: 1
},
body: {
type: "string",
id: 7
},
additionalBindings: {
rule: "repeated",
type: "HttpRule",
id: 11
}
}
},
CustomHttpPattern: {
fields: {
kind: {
type: "string",
id: 1
},
path: {
type: "string",
id: 2
}
}
},
methodSignature: {
rule: "repeated",
type: "string",
id: 1051,
extend: "google.protobuf.MethodOptions"
},
defaultHost: {
type: "string",
id: 1049,
extend: "google.protobuf.ServiceOptions"
},
oauthScopes: {
type: "string",
id: 1050,
extend: "google.protobuf.ServiceOptions"
},
fieldBehavior: {
rule: "repeated",
type: "google.api.FieldBehavior",
id: 1052,
extend: "google.protobuf.FieldOptions"
},
FieldBehavior: {
values: {
FIELD_BEHAVIOR_UNSPECIFIED: 0,
OPTIONAL: 1,
REQUIRED: 2,
OUTPUT_ONLY: 3,
INPUT_ONLY: 4,
IMMUTABLE: 5,
UNORDERED_LIST: 6,
NON_EMPTY_DEFAULT: 7
}
}
}
},
type: {
options: {
cc_enable_arenas: true,
go_package: "google.golang.org/genproto/googleapis/type/latlng;latlng",
java_multiple_files: true,
java_outer_classname: "LatLngProto",
java_package: "com.google.type",
objc_class_prefix: "GTP"
},
nested: {
LatLng: {
fields: {
latitude: {
type: "double",
id: 1
},
longitude: {
type: "double",
id: 2
}
}
}
}
},
rpc: {
options: {
cc_enable_arenas: true,
go_package: "google.golang.org/genproto/googleapis/rpc/status;status",
java_multiple_files: true,
java_outer_classname: "StatusProto",
java_package: "com.google.rpc",
objc_class_prefix: "RPC"
},
nested: {
Status: {
fields: {
code: {
type: "int32",
id: 1
},
message: {
type: "string",
id: 2
},
details: {
rule: "repeated",
type: "google.protobuf.Any",
id: 3
}
}
}
}
}
}
}
};
var protos = {
nested: nested
};
var protos$1 = /*#__PURE__*/Object.freeze({
__proto__: null,
nested: nested,
'default': protos
});
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Used by tests so we can match @grpc/proto-loader behavior. */
const protoLoaderOptions = {
longs: String,
enums: String,
defaults: true,
oneofs: false
};
/**
* Loads the protocol buffer definitions for Firestore.
*
* @returns The GrpcObject representing our protos.
*/
function loadProtos() {
const packageDefinition = protoLoader__namespace.fromJSON(protos$1, protoLoaderOptions);
return grpc__namespace.loadPackageDefinition(packageDefinition);
}
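/*
 * Illustrative sketch (not part of the SDK's runtime; `address` and
 * `credentials` are hypothetical placeholders): the GrpcObject returned by
 * loadProtos() mirrors the package structure of the `nested` descriptor
 * above, so the Firestore service constructor can be reached by walking the
 * package path:
 *
 *   const loaded = loadProtos();
 *   const FirestoreService = loaded.google.firestore.v1.Firestore;
 *   // const stub = new FirestoreService(address, credentials);
 */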
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Loads the GRPC stack */
function newConnection(databaseInfo) {
const protos = loadProtos();
return new GrpcConnection(protos, databaseInfo);
}
/** Return the Platform-specific connectivity monitor. */
function newConnectivityMonitor() {
return new NoopConnectivityMonitor();
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** The Platform's 'window' implementation or null if not available. */
function getWindow() {
if (process.env.USE_MOCK_PERSISTENCE === 'YES') {
// eslint-disable-next-line no-restricted-globals
return window;
}
return null;
}
/** The Platform's 'document' implementation or null if not available. */
function getDocument() {
return null;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function newSerializer(databaseId) {
return new JsonProtoSerializer(databaseId, /* useProto3Json= */ false);
}
/**
* An instance of the Platform's 'TextEncoder' implementation.
*/
function newTextEncoder() {
return new util$1.TextEncoder();
}
/**
* An instance of the Platform's 'TextDecoder' implementation.
*/
function newTextDecoder() {
return new util$1.TextDecoder('utf-8');
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$8 = 'ExponentialBackoff';
/**
* Initial backoff time in milliseconds after an error.
* Set to 1s according to https://cloud.google.com/apis/design/errors.
*/
const DEFAULT_BACKOFF_INITIAL_DELAY_MS = 1000;
const DEFAULT_BACKOFF_FACTOR = 1.5;
/** Maximum backoff time in milliseconds */
const DEFAULT_BACKOFF_MAX_DELAY_MS = 60 * 1000;
/**
* A helper for running delayed tasks following an exponential backoff curve
* between attempts.
*
* Each delay is made up of a "base" delay which follows the exponential
* backoff curve, and a +/- 50% "jitter" that is calculated and added to the
* base delay. This prevents clients from accidentally synchronizing their
* delays causing spikes of load to the backend.
*/
class ExponentialBackoff {
constructor(
/**
* The AsyncQueue to run backoff operations on.
*/
queue,
/**
* The ID to use when scheduling backoff operations on the AsyncQueue.
*/
timerId,
/**
* The initial delay (used as the base delay on the first retry attempt).
* Note that jitter will still be applied, so the actual delay could be as
* little as 0.5*initialDelayMs.
*/
initialDelayMs = DEFAULT_BACKOFF_INITIAL_DELAY_MS,
/**
* The multiplier to use to determine the extended base delay after each
* attempt.
*/
backoffFactor = DEFAULT_BACKOFF_FACTOR,
/**
* The maximum base delay after which no further backoff is performed.
* Note that jitter will still be applied, so the actual delay could be as
* much as 1.5*maxDelayMs.
*/
maxDelayMs = DEFAULT_BACKOFF_MAX_DELAY_MS) {
this.queue = queue;
this.timerId = timerId;
this.initialDelayMs = initialDelayMs;
this.backoffFactor = backoffFactor;
this.maxDelayMs = maxDelayMs;
this.currentBaseMs = 0;
this.timerPromise = null;
/** The last backoff attempt, as epoch milliseconds. */
this.lastAttemptTime = Date.now();
this.reset();
}
/**
* Resets the backoff delay.
*
* The very next backoffAndRun() will have no delay. If it is called again
* (i.e. due to an error), initialDelayMs (plus jitter) will be used, and
* subsequent ones will increase according to the backoffFactor.
*/
reset() {
this.currentBaseMs = 0;
}
/**
* Resets the backoff delay to the maximum delay (e.g. for use after a
* RESOURCE_EXHAUSTED error).
*/
resetToMax() {
this.currentBaseMs = this.maxDelayMs;
}
/**
* Waits for currentDelayMs on the AsyncQueue, then runs the given operation
* and increases the delay for any subsequent attempts. If there was a pending
* backoff operation already, it will be canceled.
*/
backoffAndRun(op) {
// Cancel any pending backoff operation.
this.cancel();
// First schedule using the current base (which may be 0 and should be
// honored as such).
const desiredDelayWithJitterMs = Math.floor(this.currentBaseMs + this.jitterDelayMs());
// Guard against lastAttemptTime being in the future due to a clock change.
const delaySoFarMs = Math.max(0, Date.now() - this.lastAttemptTime);
// Guard against the backoff delay already being past.
const remainingDelayMs = Math.max(0, desiredDelayWithJitterMs - delaySoFarMs);
if (remainingDelayMs > 0) {
logDebug(LOG_TAG$8, `Backing off for ${remainingDelayMs} ms ` +
`(base delay: ${this.currentBaseMs} ms, ` +
`delay with jitter: ${desiredDelayWithJitterMs} ms, ` +
`last attempt: ${delaySoFarMs} ms ago)`);
}
this.timerPromise = this.queue.enqueueAfterDelay(this.timerId, remainingDelayMs, () => {
this.lastAttemptTime = Date.now();
return op();
});
// Apply backoff factor to determine next delay and ensure it is within
// bounds.
this.currentBaseMs *= this.backoffFactor;
if (this.currentBaseMs < this.initialDelayMs) {
this.currentBaseMs = this.initialDelayMs;
}
if (this.currentBaseMs > this.maxDelayMs) {
this.currentBaseMs = this.maxDelayMs;
}
}
skipBackoff() {
if (this.timerPromise !== null) {
this.timerPromise.skipDelay();
this.timerPromise = null;
}
}
cancel() {
if (this.timerPromise !== null) {
this.timerPromise.cancel();
this.timerPromise = null;
}
}
/** Returns a random value in the range [-currentBaseMs/2, currentBaseMs/2] */
jitterDelayMs() {
return (Math.random() - 0.5) * this.currentBaseMs;
}
}
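/*
 * Worked example (a sketch of the defaults above, not executed by the SDK):
 * after reset(), the base delay grows per failed attempt as
 *
 *   attempt 1: 0 ms       (currentBaseMs starts at 0)
 *   attempt 2: 1000 ms    (clamped up to initialDelayMs)
 *   attempt 3: 1500 ms    (1000 * 1.5)
 *   attempt 4: 2250 ms    (1500 * 1.5)
 *   ...capped at 60000 ms (maxDelayMs)
 *
 * Each scheduled delay is the base plus jitterDelayMs(), i.e. a value in
 * [0.5 * base, 1.5 * base], minus any time already elapsed since the last
 * attempt.
 */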
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$7 = 'PersistentStream';
/** The time a stream stays open after it is marked idle. */
const IDLE_TIMEOUT_MS = 60 * 1000;
/** The time a stream stays open until we consider it healthy. */
const HEALTHY_TIMEOUT_MS = 10 * 1000;
/**
* A PersistentStream is an abstract base class that represents a streaming RPC
* to the Firestore backend. It's built on top of the connection's own support
* for streaming RPCs, and adds several critical features for our clients:
*
* - Exponential backoff on failure
* - Authentication via CredentialsProvider
* - Dispatching all callbacks into the shared worker queue
* - Closing idle streams after 60 seconds of inactivity
*
* Subclasses of PersistentStream implement serialization of models to and
* from the JSON representation of the protocol buffers for a specific
* streaming RPC.
*
* ## Starting and Stopping
*
* Streaming RPCs are stateful and need to be start()ed before messages can
* be sent and received. The PersistentStream will call the onOpen() function
* of the listener once the stream is ready to accept requests.
*
* Should a start() fail, PersistentStream will call the registered onClose()
* listener with a FirestoreError indicating what went wrong.
*
* A PersistentStream can be started and stopped repeatedly.
*
* Generic types:
* SendType: The type of the outgoing message of the underlying
* connection stream
* ReceiveType: The type of the incoming message of the underlying
* connection stream
* ListenerType: The type of the listener that will be used for callbacks
*/
class PersistentStream {
constructor(queue, connectionTimerId, idleTimerId, healthTimerId, connection, authCredentialsProvider, appCheckCredentialsProvider, listener) {
this.queue = queue;
this.idleTimerId = idleTimerId;
this.healthTimerId = healthTimerId;
this.connection = connection;
this.authCredentialsProvider = authCredentialsProvider;
this.appCheckCredentialsProvider = appCheckCredentialsProvider;
this.listener = listener;
this.state = 0 /* PersistentStreamState.Initial */;
/**
* A close count that's incremented every time the stream is closed; used by
* getCloseGuardedDispatcher() to invalidate callbacks that happen after
* close.
*/
this.closeCount = 0;
this.idleTimer = null;
this.healthCheck = null;
this.stream = null;
this.backoff = new ExponentialBackoff(queue, connectionTimerId);
}
/**
* Returns true if start() has been called and no error has occurred. True
* indicates the stream is open or in the process of opening (which
* encompasses respecting backoff, getting auth tokens, and starting the
* actual RPC). Use isOpen() to determine if the stream is open and ready for
* outbound requests.
*/
isStarted() {
return (this.state === 1 /* PersistentStreamState.Starting */ ||
this.state === 5 /* PersistentStreamState.Backoff */ ||
this.isOpen());
}
/**
* Returns true if the underlying RPC is open (the onOpen() listener has been
* called) and the stream is ready for outbound requests.
*/
isOpen() {
return (this.state === 2 /* PersistentStreamState.Open */ ||
this.state === 3 /* PersistentStreamState.Healthy */);
}
/**
* Starts the RPC. Only allowed if isStarted() returns false. The stream is
* not immediately ready for use: onOpen() will be invoked when the RPC is
* ready for outbound requests, at which point isOpen() will return true.
*
* When start returns, isStarted() will return true.
*/
start() {
if (this.state === 4 /* PersistentStreamState.Error */) {
this.performBackoff();
return;
}
this.auth();
}
/**
* Stops the RPC. This call is idempotent and allowed regardless of the
* current isStarted() state.
*
* When stop returns, isStarted() and isOpen() will both return false.
*/
async stop() {
if (this.isStarted()) {
await this.close(0 /* PersistentStreamState.Initial */);
}
}
/**
* After an error the stream will usually back off on the next attempt to
* start it. If the error warrants an immediate restart of the stream, the
* sender can use this to indicate that the receiver should not back off.
*
* Each error will call the onClose() listener. That function can decide to
* inhibit backoff if required.
*/
inhibitBackoff() {
this.state = 0 /* PersistentStreamState.Initial */;
this.backoff.reset();
}
/**
* Marks this stream as idle. If no further actions are performed on the
* stream for one minute, the stream will automatically close itself and
* notify the stream's onClose() handler with Status.OK. The stream will then
* be in a !isStarted() state, requiring the caller to start the stream again
* before further use.
*
* Only streams that are in state 'Open' can be marked idle, as all other
* states imply pending network operations.
*/
markIdle() {
// Starts the idle timer if we are in state 'Open' and are not already
// running one (in which case the previous idle timeout still applies).
if (this.isOpen() && this.idleTimer === null) {
this.idleTimer = this.queue.enqueueAfterDelay(this.idleTimerId, IDLE_TIMEOUT_MS, () => this.handleIdleCloseTimer());
}
}
/** Sends a message to the underlying stream. */
sendRequest(msg) {
this.cancelIdleCheck();
this.stream.send(msg);
}
/** Called by the idle timer when the stream should close due to inactivity. */
async handleIdleCloseTimer() {
if (this.isOpen()) {
// When timing out an idle stream there's no reason to force the stream into backoff when
// it restarts, so set the stream state to Initial instead of Error.
return this.close(0 /* PersistentStreamState.Initial */);
}
}
/** Marks the stream as active again. */
cancelIdleCheck() {
if (this.idleTimer) {
this.idleTimer.cancel();
this.idleTimer = null;
}
}
/** Cancels the health check delayed operation. */
cancelHealthCheck() {
if (this.healthCheck) {
this.healthCheck.cancel();
this.healthCheck = null;
}
}
/**
* Closes the stream and cleans up as necessary:
*
* * closes the underlying GRPC stream;
* * calls the onClose handler with the given 'error';
* * sets internal stream state to 'finalState';
* * adjusts the backoff timer based on the error
*
* A new stream can be opened by calling start().
*
* @param finalState - the intended state of the stream after closing.
* @param error - the error the connection was closed with.
*/
async close(finalState, error) {
// Cancel any outstanding timers (they're guaranteed not to execute).
this.cancelIdleCheck();
this.cancelHealthCheck();
this.backoff.cancel();
// Invalidates any stream-related callbacks (e.g. from auth or the
// underlying stream), guaranteeing they won't execute.
this.closeCount++;
if (finalState !== 4 /* PersistentStreamState.Error */) {
// If this is an intentional close, ensure we don't delay our next connection attempt.
this.backoff.reset();
}
else if (error && error.code === Code.RESOURCE_EXHAUSTED) {
// Log the error. (Probably either 'quota exceeded' or 'max queue length reached'.)
logError(error.toString());
logError('Using maximum backoff delay to prevent overloading the backend.');
this.backoff.resetToMax();
}
else if (error &&
error.code === Code.UNAUTHENTICATED &&
this.state !== 3 /* PersistentStreamState.Healthy */) {
// "unauthenticated" error means the token was rejected. This should rarely
// happen since both Auth and AppCheck ensure a sufficient TTL when we
// request a token. If a user manually resets their system clock, however,
// this can fail. In that case, we should get a Code.UNAUTHENTICATED error
// before we receive the first message, and we need to invalidate the token
// to ensure that we fetch a new one.
this.authCredentialsProvider.invalidateToken();
this.appCheckCredentialsProvider.invalidateToken();
}
// Clean up the underlying stream because we are no longer interested in events.
if (this.stream !== null) {
this.tearDown();
this.stream.close();
this.stream = null;
}
// This state must be assigned before calling onClose() to allow the callback to
// inhibit backoff or otherwise manipulate the state in its non-started state.
this.state = finalState;
// Notify the listener that the stream closed.
await this.listener.onClose(error);
}
/**
* Can be overridden to perform additional cleanup before the stream is closed.
* Calling super.tearDown() is not required.
*/
tearDown() { }
auth() {
this.state = 1 /* PersistentStreamState.Starting */;
const dispatchIfNotClosed = this.getCloseGuardedDispatcher(this.closeCount);
// TODO(mikelehen): Just use dispatchIfNotClosed, but see TODO below.
const closeCount = this.closeCount;
Promise.all([
this.authCredentialsProvider.getToken(),
this.appCheckCredentialsProvider.getToken()
]).then(([authToken, appCheckToken]) => {
// Stream can be stopped while waiting for authentication.
// TODO(mikelehen): We really should just use dispatchIfNotClosed
// and let this dispatch onto the queue, but that opened a spec test can
// of worms that I don't want to deal with in this PR.
if (this.closeCount === closeCount) {
// Normally we'd have to schedule the callback on the AsyncQueue.
// However, the following calls are safe to be called outside the
// AsyncQueue since they don't chain asynchronous calls
this.startStream(authToken, appCheckToken);
}
}, (error) => {
dispatchIfNotClosed(() => {
const rpcError = new FirestoreError(Code.UNKNOWN, 'Fetching auth token failed: ' + error.message);
return this.handleStreamClose(rpcError);
});
});
}
startStream(authToken, appCheckToken) {
const dispatchIfNotClosed = this.getCloseGuardedDispatcher(this.closeCount);
this.stream = this.startRpc(authToken, appCheckToken);
this.stream.onOpen(() => {
dispatchIfNotClosed(() => {
this.state = 2 /* PersistentStreamState.Open */;
this.healthCheck = this.queue.enqueueAfterDelay(this.healthTimerId, HEALTHY_TIMEOUT_MS, () => {
if (this.isOpen()) {
this.state = 3 /* PersistentStreamState.Healthy */;
}
return Promise.resolve();
});
return this.listener.onOpen();
});
});
this.stream.onClose((error) => {
dispatchIfNotClosed(() => {
return this.handleStreamClose(error);
});
});
this.stream.onMessage((msg) => {
dispatchIfNotClosed(() => {
return this.onMessage(msg);
});
});
}
performBackoff() {
this.state = 5 /* PersistentStreamState.Backoff */;
this.backoff.backoffAndRun(async () => {
this.state = 0 /* PersistentStreamState.Initial */;
this.start();
});
}
// Visible for tests
handleStreamClose(error) {
logDebug(LOG_TAG$7, `close with error: ${error}`);
this.stream = null;
// In theory the stream could close cleanly; however, in our current model
// we never expect this to happen because if we stop a stream ourselves,
// this callback will never be called. To prevent cases where we accidentally
// retry without backoff, we set the stream to error in all cases.
return this.close(4 /* PersistentStreamState.Error */, error);
}
/**
* Returns a "dispatcher" function that dispatches operations onto the
* AsyncQueue but only runs them if closeCount remains unchanged. This allows
* us to turn auth / stream callbacks into no-ops if the stream is closed /
* re-opened, etc.
*/
getCloseGuardedDispatcher(startCloseCount) {
return (fn) => {
this.queue.enqueueAndForget(() => {
if (this.closeCount === startCloseCount) {
return fn();
}
else {
logDebug(LOG_TAG$7, 'stream callback skipped by getCloseGuardedDispatcher.');
return Promise.resolve();
}
});
};
}
}
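/*
 * Lifecycle sketch (illustrative only; `stream` stands in for a concrete
 * subclass wired up with an AsyncQueue, credentials and a listener):
 *
 *   stream.start();         // isStarted() === true; onOpen() fires later
 *   // once open:           // isOpen() === true
 *   stream.markIdle();      // closes itself after 60s of inactivity
 *   await stream.stop();    // isStarted() === false, isOpen() === false
 */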
/**
* A PersistentStream that implements the Listen RPC.
*
* Once the Listen stream has called the onOpen() listener, any number of
* watch() and unwatch() calls can be made to control what changes will be
* sent from the server for ListenResponses.
*/
class PersistentListenStream extends PersistentStream {
constructor(queue, connection, authCredentials, appCheckCredentials, serializer, listener) {
super(queue, "listen_stream_connection_backoff" /* TimerId.ListenStreamConnectionBackoff */, "listen_stream_idle" /* TimerId.ListenStreamIdle */, "health_check_timeout" /* TimerId.HealthCheckTimeout */, connection, authCredentials, appCheckCredentials, listener);
this.serializer = serializer;
}
startRpc(authToken, appCheckToken) {
return this.connection.openStream('Listen', authToken, appCheckToken);
}
onMessage(watchChangeProto) {
// A successful response means the stream is healthy
this.backoff.reset();
const watchChange = fromWatchChange(this.serializer, watchChangeProto);
const snapshot = versionFromListenResponse(watchChangeProto);
return this.listener.onWatchChange(watchChange, snapshot);
}
/**
* Registers interest in the results of the given target. If the target
* includes a resumeToken it will be included in the request. Results that
* affect the target will be streamed back as WatchChange messages that
* reference the targetId.
*/
watch(targetData) {
const request = {};
request.database = getEncodedDatabaseId(this.serializer);
request.addTarget = toTarget(this.serializer, targetData);
const labels = toListenRequestLabels(this.serializer, targetData);
if (labels) {
request.labels = labels;
}
this.sendRequest(request);
}
/**
* Unregisters interest in the results of the target associated with the
* given targetId.
*/
unwatch(targetId) {
const request = {};
request.database = getEncodedDatabaseId(this.serializer);
request.removeTarget = targetId;
this.sendRequest(request);
}
}
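/*
 * Listen protocol sketch (illustrative only; `listenStream`, `targetData`
 * and `targetId` are hypothetical): once onOpen() has fired, targets can be
 * added and removed freely on the same stream.
 *
 *   listenStream.watch(targetData);  // server starts streaming matching changes
 *   listenStream.unwatch(targetId);  // server stops for that target
 */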
/**
* A Stream that implements the Write RPC.
*
* The Write RPC requires the caller to maintain special streamToken
* state in between calls, to help the server understand which responses the
* client has processed by the time the next request is made. Every response
* will contain a streamToken; this value must be passed to the next
* request.
*
* After calling start() on this stream, the next request must be a handshake,
* containing whatever streamToken is on hand. Once a response to this
* request is received, all pending mutations may be submitted. When
* submitting multiple batches of mutations at the same time, it's
* okay to use the same streamToken for the calls to writeMutations.
*
* TODO(b/33271235): Use proto types
*/
class PersistentWriteStream extends PersistentStream {
constructor(queue, connection, authCredentials, appCheckCredentials, serializer, listener) {
super(queue, "write_stream_connection_backoff" /* TimerId.WriteStreamConnectionBackoff */, "write_stream_idle" /* TimerId.WriteStreamIdle */, "health_check_timeout" /* TimerId.HealthCheckTimeout */, connection, authCredentials, appCheckCredentials, listener);
this.serializer = serializer;
this.handshakeComplete_ = false;
}
/**
* Tracks whether or not a handshake has been successfully exchanged and
* the stream is ready to accept mutations.
*/
get handshakeComplete() {
return this.handshakeComplete_;
}
// Override of PersistentStream.start
start() {
this.handshakeComplete_ = false;
this.lastStreamToken = undefined;
super.start();
}
tearDown() {
if (this.handshakeComplete_) {
this.writeMutations([]);
}
}
startRpc(authToken, appCheckToken) {
return this.connection.openStream('Write', authToken, appCheckToken);
}
onMessage(responseProto) {
// Always capture the last stream token.
hardAssert(!!responseProto.streamToken);
this.lastStreamToken = responseProto.streamToken;
if (!this.handshakeComplete_) {
// The first response is always the handshake response
hardAssert(!responseProto.writeResults || responseProto.writeResults.length === 0);
this.handshakeComplete_ = true;
return this.listener.onHandshakeComplete();
}
else {
// A successful first write response means the stream is healthy.
// Note that we could consider a successful handshake healthy; however,
// the write itself might be causing an error we want to back off from.
this.backoff.reset();
const results = fromWriteResults(responseProto.writeResults, responseProto.commitTime);
const commitVersion = fromVersion(responseProto.commitTime);
return this.listener.onMutationResult(commitVersion, results);
}
}
/**
* Sends an initial streamToken to the server, performing the handshake
* required to make the StreamingWrite RPC work. Subsequent
* calls should wait until onHandshakeComplete has been called.
*/
writeHandshake() {
// TODO(dimond): Support stream resumption. We intentionally do not set the
// stream token on the handshake, ignoring any stream token we might have.
const request = {};
request.database = getEncodedDatabaseId(this.serializer);
this.sendRequest(request);
}
/** Sends a group of mutations to the Firestore backend to apply. */
writeMutations(mutations) {
const request = {
streamToken: this.lastStreamToken,
writes: mutations.map(mutation => toMutation(this.serializer, mutation))
};
this.sendRequest(request);
}
}
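/*
 * Write protocol sketch (illustrative only; `writeStream` and `mutations`
 * are hypothetical): the first request after start() must be the handshake,
 * and mutations may only be sent once it completes.
 *
 *   writeStream.start();
 *   // in listener.onOpen():
 *   writeStream.writeHandshake();
 *   // in listener.onHandshakeComplete():
 *   writeStream.writeMutations(mutations); // echoes lastStreamToken back
 */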
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Datastore and its related methods provide a wrapper around the external
 * Google Cloud Datastore gRPC API, exposing an interface that is more convenient
* for the rest of the client SDK architecture to consume.
*/
class Datastore {
}
/**
* An implementation of Datastore that exposes additional state for internal
* consumption.
*/
class DatastoreImpl extends Datastore {
constructor(authCredentials, appCheckCredentials, connection, serializer) {
super();
this.authCredentials = authCredentials;
this.appCheckCredentials = appCheckCredentials;
this.connection = connection;
this.serializer = serializer;
this.terminated = false;
}
verifyInitialized() {
if (this.terminated) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'The client has already been terminated.');
}
}
/** Invokes the provided RPC with auth and AppCheck tokens. */
invokeRPC(rpcName, path, request) {
this.verifyInitialized();
return Promise.all([
this.authCredentials.getToken(),
this.appCheckCredentials.getToken()
])
.then(([authToken, appCheckToken]) => {
return this.connection.invokeRPC(rpcName, path, request, authToken, appCheckToken);
})
.catch((error) => {
if (error.name === 'FirebaseError') {
if (error.code === Code.UNAUTHENTICATED) {
this.authCredentials.invalidateToken();
this.appCheckCredentials.invalidateToken();
}
throw error;
}
else {
throw new FirestoreError(Code.UNKNOWN, error.toString());
}
});
}
    /** Invokes the provided RPC, returning streamed results, with auth and AppCheck tokens. */
invokeStreamingRPC(rpcName, path, request, expectedResponseCount) {
this.verifyInitialized();
return Promise.all([
this.authCredentials.getToken(),
this.appCheckCredentials.getToken()
])
.then(([authToken, appCheckToken]) => {
return this.connection.invokeStreamingRPC(rpcName, path, request, authToken, appCheckToken, expectedResponseCount);
})
.catch((error) => {
if (error.name === 'FirebaseError') {
if (error.code === Code.UNAUTHENTICATED) {
this.authCredentials.invalidateToken();
this.appCheckCredentials.invalidateToken();
}
throw error;
}
else {
throw new FirestoreError(Code.UNKNOWN, error.toString());
}
});
}
terminate() {
this.terminated = true;
}
}
// TODO(firestorexp): Make sure there is only one Datastore instance per
// firestore-exp client.
function newDatastore(authCredentials, appCheckCredentials, connection, serializer) {
return new DatastoreImpl(authCredentials, appCheckCredentials, connection, serializer);
}
async function invokeCommitRpc(datastore, mutations) {
const datastoreImpl = debugCast(datastore);
const path = getEncodedDatabaseId(datastoreImpl.serializer) + '/documents';
const request = {
writes: mutations.map(m => toMutation(datastoreImpl.serializer, m))
};
await datastoreImpl.invokeRPC('Commit', path, request);
}
async function invokeBatchGetDocumentsRpc(datastore, keys) {
const datastoreImpl = debugCast(datastore);
const path = getEncodedDatabaseId(datastoreImpl.serializer) + '/documents';
const request = {
documents: keys.map(k => toName(datastoreImpl.serializer, k))
};
const response = await datastoreImpl.invokeStreamingRPC('BatchGetDocuments', path, request, keys.length);
const docs = new Map();
response.forEach(proto => {
const doc = fromBatchGetDocumentsResponse(datastoreImpl.serializer, proto);
docs.set(doc.key.toString(), doc);
});
const result = [];
keys.forEach(key => {
const doc = docs.get(key.toString());
hardAssert(!!doc);
result.push(doc);
});
return result;
}
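/*
 * Illustrative sketch (not part of the SDK): fetching documents via the
 * one-shot BatchGetDocuments RPC. The backend's response order is not
 * guaranteed, which is why the helper above re-sorts the results to match the
 * order of the requested keys. `datastore` and `keys` are assumed to exist,
 * and isFoundDocument() is assumed from the SDK's MutableDocument API.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
async function exampleBatchGet(datastore, keys) {
    const docs = await invokeBatchGetDocumentsRpc(datastore, keys);
    // docs[i] corresponds to keys[i]; missing documents come back as
    // "no document" entries rather than being omitted.
    docs.forEach((doc, i) => {
        logDebug('example', `${keys[i].toString()} exists: ${doc.isFoundDocument()}`);
    });
}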
async function invokeRunAggregationQueryRpc(datastore, query) {
const datastoreImpl = debugCast(datastore);
const request = toRunAggregationQueryRequest(datastoreImpl.serializer, queryToTarget(query));
const parent = request.parent;
if (!datastoreImpl.connection.shouldResourcePathBeIncludedInRequest) {
delete request.parent;
}
const response = await datastoreImpl.invokeStreamingRPC('RunAggregationQuery', parent, request, /*expectedResponseCount=*/ 1);
return (response
    // Omit RunAggregationQueryResponses that only contain readTimes.
.filter(proto => !!proto.result)
.map(proto => proto.result.aggregateFields));
}
function newPersistentWriteStream(datastore, queue, listener) {
const datastoreImpl = debugCast(datastore);
datastoreImpl.verifyInitialized();
return new PersistentWriteStream(queue, datastoreImpl.connection, datastoreImpl.authCredentials, datastoreImpl.appCheckCredentials, datastoreImpl.serializer, listener);
}
function newPersistentWatchStream(datastore, queue, listener) {
const datastoreImpl = debugCast(datastore);
datastoreImpl.verifyInitialized();
return new PersistentListenStream(queue, datastoreImpl.connection, datastoreImpl.authCredentials, datastoreImpl.appCheckCredentials, datastoreImpl.serializer, listener);
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$6 = 'OnlineStateTracker';
// To deal with transient failures, we allow multiple stream attempts before
// giving up and transitioning from OnlineState.Unknown to Offline.
// TODO(mikelehen): This used to be set to 2 as a mitigation for b/66228394.
// @jdimond thinks that bug is sufficiently fixed so that we can set this back
// to 1. If that works okay, we could potentially remove this logic entirely.
const MAX_WATCH_STREAM_FAILURES = 1;
// To deal with stream attempts that don't succeed or fail in a timely manner,
// we have a timeout for OnlineState to reach Online or Offline.
// If the timeout is reached, we transition to Offline rather than waiting
// indefinitely.
const ONLINE_STATE_TIMEOUT_MS = 10 * 1000;
/**
* A component used by the RemoteStore to track the OnlineState (that is,
* whether or not the client as a whole should be considered to be online or
* offline), implementing the appropriate heuristics.
*
* In particular, when the client is trying to connect to the backend, we
* allow up to MAX_WATCH_STREAM_FAILURES within ONLINE_STATE_TIMEOUT_MS for
* a connection to succeed. If we have too many failures or the timeout elapses,
* then we set the OnlineState to Offline, and the client will behave as if
* it is offline (get()s will return cached data, etc.).
*/
class OnlineStateTracker {
constructor(asyncQueue, onlineStateHandler) {
this.asyncQueue = asyncQueue;
this.onlineStateHandler = onlineStateHandler;
/** The current OnlineState. */
this.state = "Unknown" /* OnlineState.Unknown */;
/**
* A count of consecutive failures to open the stream. If it reaches the
* maximum defined by MAX_WATCH_STREAM_FAILURES, we'll set the OnlineState to
* Offline.
*/
this.watchStreamFailures = 0;
/**
* A timer that elapses after ONLINE_STATE_TIMEOUT_MS, at which point we
* transition from OnlineState.Unknown to OnlineState.Offline without waiting
* for the stream to actually fail (MAX_WATCH_STREAM_FAILURES times).
*/
this.onlineStateTimer = null;
/**
* Whether the client should log a warning message if it fails to connect to
* the backend (initially true, cleared after a successful stream, or if we've
* logged the message already).
*/
this.shouldWarnClientIsOffline = true;
}
/**
* Called by RemoteStore when a watch stream is started (including on each
* backoff attempt).
*
* If this is the first attempt, it sets the OnlineState to Unknown and starts
* the onlineStateTimer.
*/
handleWatchStreamStart() {
if (this.watchStreamFailures === 0) {
this.setAndBroadcast("Unknown" /* OnlineState.Unknown */);
this.onlineStateTimer = this.asyncQueue.enqueueAfterDelay("online_state_timeout" /* TimerId.OnlineStateTimeout */, ONLINE_STATE_TIMEOUT_MS, () => {
this.onlineStateTimer = null;
this.logClientOfflineWarningIfNecessary(`Backend didn't respond within ${ONLINE_STATE_TIMEOUT_MS / 1000} ` +
`seconds.`);
this.setAndBroadcast("Offline" /* OnlineState.Offline */);
                // NOTE: handleWatchStreamFailure() will continue to increment
                // watchStreamFailures even though we are already marked Offline,
                // but this is harmless.
return Promise.resolve();
});
}
}
/**
* Updates our OnlineState as appropriate after the watch stream reports a
* failure. The first failure moves us to the 'Unknown' state. We then may
* allow multiple failures (based on MAX_WATCH_STREAM_FAILURES) before we
* actually transition to the 'Offline' state.
*/
handleWatchStreamFailure(error) {
if (this.state === "Online" /* OnlineState.Online */) {
this.setAndBroadcast("Unknown" /* OnlineState.Unknown */);
}
else {
this.watchStreamFailures++;
if (this.watchStreamFailures >= MAX_WATCH_STREAM_FAILURES) {
this.clearOnlineStateTimer();
this.logClientOfflineWarningIfNecessary(`Connection failed ${MAX_WATCH_STREAM_FAILURES} ` +
`times. Most recent error: ${error.toString()}`);
this.setAndBroadcast("Offline" /* OnlineState.Offline */);
}
}
}
/**
* Explicitly sets the OnlineState to the specified state.
*
* Note that this resets our timers / failure counters, etc. used by our
     * Offline heuristics, so it must not be used in place of
* handleWatchStreamStart() and handleWatchStreamFailure().
*/
set(newState) {
this.clearOnlineStateTimer();
this.watchStreamFailures = 0;
if (newState === "Online" /* OnlineState.Online */) {
// We've connected to watch at least once. Don't warn the developer
// about being offline going forward.
this.shouldWarnClientIsOffline = false;
}
this.setAndBroadcast(newState);
}
setAndBroadcast(newState) {
if (newState !== this.state) {
this.state = newState;
this.onlineStateHandler(newState);
}
}
logClientOfflineWarningIfNecessary(details) {
const message = `Could not reach Cloud Firestore backend. ${details}\n` +
`This typically indicates that your device does not have a healthy ` +
`Internet connection at the moment. The client will operate in offline ` +
`mode until it is able to successfully connect to the backend.`;
if (this.shouldWarnClientIsOffline) {
logError(message);
this.shouldWarnClientIsOffline = false;
}
else {
logDebug(LOG_TAG$6, message);
}
}
clearOnlineStateTimer() {
if (this.onlineStateTimer !== null) {
this.onlineStateTimer.cancel();
this.onlineStateTimer = null;
}
}
}
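/*
 * Illustrative sketch (not part of the SDK): the Offline heuristics above in
 * sequence, as a never-invoked example. `asyncQueue` is assumed to be the
 * client's AsyncQueue.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function exampleOnlineStateTracking(asyncQueue) {
    const tracker = new OnlineStateTracker(asyncQueue, state => logDebug('example', `OnlineState is now: ${state}`));
    // First attempt: state stays/becomes Unknown and the 10 second timer starts.
    tracker.handleWatchStreamStart();
    // With MAX_WATCH_STREAM_FAILURES === 1, a single failure while Unknown is
    // enough to broadcast Offline and cancel the timer.
    tracker.handleWatchStreamFailure(new FirestoreError(Code.UNAVAILABLE, 'connection reset'));
    // A later successful connection resets counters and suppresses the
    // offline warning going forward.
    tracker.set("Online" /* OnlineState.Online */);
}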
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$5 = 'RemoteStore';
// TODO(b/35853402): Negotiate this with the stream.
const MAX_PENDING_WRITES = 10;
class RemoteStoreImpl {
constructor(
/**
* The local store, used to fill the write pipeline with outbound mutations.
*/
localStore,
/** The client-side proxy for interacting with the backend. */
datastore, asyncQueue, onlineStateHandler, connectivityMonitor) {
this.localStore = localStore;
this.datastore = datastore;
this.asyncQueue = asyncQueue;
this.remoteSyncer = {};
/**
* A list of up to MAX_PENDING_WRITES writes that we have fetched from the
* LocalStore via fillWritePipeline() and have or will send to the write
* stream.
*
* Whenever writePipeline.length > 0 the RemoteStore will attempt to start or
* restart the write stream. When the stream is established the writes in the
* pipeline will be sent in order.
*
* Writes remain in writePipeline until they are acknowledged by the backend
* and thus will automatically be re-sent if the stream is interrupted /
* restarted before they're acknowledged.
*
* Write responses from the backend are linked to their originating request
* purely based on order, and so we can just shift() writes from the front of
* the writePipeline as we receive responses.
*/
this.writePipeline = [];
/**
         * A mapping of watched targets that the client cares about tracking, i.e.
         * targets for which the user has explicitly called 'listen'.
*
* These targets may or may not have been sent to or acknowledged by the
* server. On re-establishing the listen stream, these targets should be sent
* to the server. The targets removed with unlistens are removed eagerly
* without waiting for confirmation from the listen stream.
*/
this.listenTargets = new Map();
/**
* A set of reasons for why the RemoteStore may be offline. If empty, the
* RemoteStore may start its network connections.
*/
this.offlineCauses = new Set();
/**
* Event handlers that get called when the network is disabled or enabled.
*
* PORTING NOTE: These functions are used on the Web client to create the
* underlying streams (to support tree-shakeable streams). On Android and iOS,
* the streams are created during construction of RemoteStore.
*/
this.onNetworkStatusChange = [];
this.connectivityMonitor = connectivityMonitor;
this.connectivityMonitor.addCallback((_) => {
asyncQueue.enqueueAndForget(async () => {
// Porting Note: Unlike iOS, `restartNetwork()` is called even when the
// network becomes unreachable as we don't have any other way to tear
// down our streams.
if (canUseNetwork(this)) {
logDebug(LOG_TAG$5, 'Restarting streams for network reachability change.');
await restartNetwork(this);
}
});
});
this.onlineStateTracker = new OnlineStateTracker(asyncQueue, onlineStateHandler);
}
}
function newRemoteStore(localStore, datastore, asyncQueue, onlineStateHandler, connectivityMonitor) {
return new RemoteStoreImpl(localStore, datastore, asyncQueue, onlineStateHandler, connectivityMonitor);
}
/** Re-enables the network. Idempotent. */
function remoteStoreEnableNetwork(remoteStore) {
const remoteStoreImpl = debugCast(remoteStore);
remoteStoreImpl.offlineCauses.delete(0 /* OfflineCause.UserDisabled */);
return enableNetworkInternal(remoteStoreImpl);
}
async function enableNetworkInternal(remoteStoreImpl) {
if (canUseNetwork(remoteStoreImpl)) {
for (const networkStatusHandler of remoteStoreImpl.onNetworkStatusChange) {
await networkStatusHandler(/* enabled= */ true);
}
}
}
/**
* Temporarily disables the network. The network can be re-enabled using
* enableNetwork().
*/
async function remoteStoreDisableNetwork(remoteStore) {
const remoteStoreImpl = debugCast(remoteStore);
remoteStoreImpl.offlineCauses.add(0 /* OfflineCause.UserDisabled */);
await disableNetworkInternal(remoteStoreImpl);
// Set the OnlineState to Offline so get()s return from cache, etc.
remoteStoreImpl.onlineStateTracker.set("Offline" /* OnlineState.Offline */);
}
async function disableNetworkInternal(remoteStoreImpl) {
for (const networkStatusHandler of remoteStoreImpl.onNetworkStatusChange) {
await networkStatusHandler(/* enabled= */ false);
}
}
async function remoteStoreShutdown(remoteStore) {
const remoteStoreImpl = debugCast(remoteStore);
logDebug(LOG_TAG$5, 'RemoteStore shutting down.');
remoteStoreImpl.offlineCauses.add(5 /* OfflineCause.Shutdown */);
await disableNetworkInternal(remoteStoreImpl);
remoteStoreImpl.connectivityMonitor.shutdown();
// Set the OnlineState to Unknown (rather than Offline) to avoid potentially
// triggering spurious listener events with cached data, etc.
remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
}
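/*
 * Illustrative sketch (not part of the SDK): `offlineCauses` acts as a set of
 * independent reasons to keep the network down; streams may only start once
 * the set is empty again. `remoteStore` is assumed to be a started
 * RemoteStore.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
async function exampleNetworkToggle(remoteStore) {
    await remoteStoreDisableNetwork(remoteStore); // adds OfflineCause.UserDisabled
    logDebug('example', `canUseNetwork: ${canUseNetwork(remoteStore)}`); // false
    await remoteStoreEnableNetwork(remoteStore); // removes the cause; streams may restart
}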
/**
 * Starts a new listen for the given target. Uses the resume token if provided.
 * It is a no-op if the target of the given `TargetData` is already being
 * listened to.
*/
function remoteStoreListen(remoteStore, targetData) {
const remoteStoreImpl = debugCast(remoteStore);
if (remoteStoreImpl.listenTargets.has(targetData.targetId)) {
return;
}
// Mark this as something the client is currently listening for.
remoteStoreImpl.listenTargets.set(targetData.targetId, targetData);
if (shouldStartWatchStream(remoteStoreImpl)) {
// The listen will be sent in onWatchStreamOpen
startWatchStream(remoteStoreImpl);
}
else if (ensureWatchStream(remoteStoreImpl).isOpen()) {
sendWatchRequest(remoteStoreImpl, targetData);
}
}
/**
 * Removes the listen from the server. It is a no-op if the given target id is
* not being listened to.
*/
function remoteStoreUnlisten(remoteStore, targetId) {
const remoteStoreImpl = debugCast(remoteStore);
const watchStream = ensureWatchStream(remoteStoreImpl);
remoteStoreImpl.listenTargets.delete(targetId);
if (watchStream.isOpen()) {
sendUnwatchRequest(remoteStoreImpl, targetId);
}
if (remoteStoreImpl.listenTargets.size === 0) {
if (watchStream.isOpen()) {
watchStream.markIdle();
}
else if (canUseNetwork(remoteStoreImpl)) {
// Revert to OnlineState.Unknown if the watch stream is not open and we
// have no listeners, since without any listens to send we cannot
// confirm if the stream is healthy and upgrade to OnlineState.Online.
remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
}
}
}
/**
 * We need to increment the expected number of pending responses we're due
 * from watch so we wait for the ack before processing any messages from this
 * target.
*/
function sendWatchRequest(remoteStoreImpl, targetData) {
remoteStoreImpl.watchChangeAggregator.recordPendingTargetRequest(targetData.targetId);
ensureWatchStream(remoteStoreImpl).watch(targetData);
}
/**
* We need to increment the expected number of pending responses we're due
* from watch so we wait for the removal on the server before we process any
* messages from this target.
*/
function sendUnwatchRequest(remoteStoreImpl, targetId) {
remoteStoreImpl.watchChangeAggregator.recordPendingTargetRequest(targetId);
ensureWatchStream(remoteStoreImpl).unwatch(targetId);
}
function startWatchStream(remoteStoreImpl) {
remoteStoreImpl.watchChangeAggregator = new WatchChangeAggregator({
getRemoteKeysForTarget: targetId => remoteStoreImpl.remoteSyncer.getRemoteKeysForTarget(targetId),
getTargetDataForTarget: targetId => remoteStoreImpl.listenTargets.get(targetId) || null
});
ensureWatchStream(remoteStoreImpl).start();
remoteStoreImpl.onlineStateTracker.handleWatchStreamStart();
}
/**
* Returns whether the watch stream should be started because it's necessary
* and has not yet been started.
*/
function shouldStartWatchStream(remoteStoreImpl) {
return (canUseNetwork(remoteStoreImpl) &&
!ensureWatchStream(remoteStoreImpl).isStarted() &&
remoteStoreImpl.listenTargets.size > 0);
}
function canUseNetwork(remoteStore) {
const remoteStoreImpl = debugCast(remoteStore);
return remoteStoreImpl.offlineCauses.size === 0;
}
function cleanUpWatchStreamState(remoteStoreImpl) {
remoteStoreImpl.watchChangeAggregator = undefined;
}
async function onWatchStreamOpen(remoteStoreImpl) {
remoteStoreImpl.listenTargets.forEach((targetData, targetId) => {
sendWatchRequest(remoteStoreImpl, targetData);
});
}
async function onWatchStreamClose(remoteStoreImpl, error) {
cleanUpWatchStreamState(remoteStoreImpl);
// If we still need the watch stream, retry the connection.
if (shouldStartWatchStream(remoteStoreImpl)) {
remoteStoreImpl.onlineStateTracker.handleWatchStreamFailure(error);
startWatchStream(remoteStoreImpl);
}
else {
// No need to restart watch stream because there are no active targets.
// The online state is set to unknown because there is no active attempt
        // at establishing a connection.
remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
}
}
async function onWatchStreamChange(remoteStoreImpl, watchChange, snapshotVersion) {
// Mark the client as online since we got a message from the server
remoteStoreImpl.onlineStateTracker.set("Online" /* OnlineState.Online */);
if (watchChange instanceof WatchTargetChange &&
watchChange.state === 2 /* WatchTargetChangeState.Removed */ &&
watchChange.cause) {
// There was an error on a target, don't wait for a consistent snapshot
// to raise events
try {
await handleTargetError(remoteStoreImpl, watchChange);
}
catch (e) {
logDebug(LOG_TAG$5, 'Failed to remove targets %s: %s ', watchChange.targetIds.join(','), e);
await disableNetworkUntilRecovery(remoteStoreImpl, e);
}
return;
}
if (watchChange instanceof DocumentWatchChange) {
remoteStoreImpl.watchChangeAggregator.handleDocumentChange(watchChange);
}
else if (watchChange instanceof ExistenceFilterChange) {
remoteStoreImpl.watchChangeAggregator.handleExistenceFilter(watchChange);
}
else {
remoteStoreImpl.watchChangeAggregator.handleTargetChange(watchChange);
}
if (!snapshotVersion.isEqual(SnapshotVersion.min())) {
try {
const lastRemoteSnapshotVersion = await localStoreGetLastRemoteSnapshotVersion(remoteStoreImpl.localStore);
if (snapshotVersion.compareTo(lastRemoteSnapshotVersion) >= 0) {
// We have received a target change with a global snapshot if the snapshot
// version is not equal to SnapshotVersion.min().
await raiseWatchSnapshot(remoteStoreImpl, snapshotVersion);
}
}
catch (e) {
logDebug(LOG_TAG$5, 'Failed to raise snapshot:', e);
await disableNetworkUntilRecovery(remoteStoreImpl, e);
}
}
}
/**
* Recovery logic for IndexedDB errors that takes the network offline until
* `op` succeeds. Retries are scheduled with backoff using
* `enqueueRetryable()`. If `op()` is not provided, IndexedDB access is
* validated via a generic operation.
*
* The returned Promise is resolved once the network is disabled and before
* any retry attempt.
*/
async function disableNetworkUntilRecovery(remoteStoreImpl, e, op) {
if (isIndexedDbTransactionError(e)) {
remoteStoreImpl.offlineCauses.add(1 /* OfflineCause.IndexedDbFailed */);
// Disable network and raise offline snapshots
await disableNetworkInternal(remoteStoreImpl);
remoteStoreImpl.onlineStateTracker.set("Offline" /* OnlineState.Offline */);
if (!op) {
// Use a simple read operation to determine if IndexedDB recovered.
// Ideally, we would expose a health check directly on SimpleDb, but
// RemoteStore only has access to persistence through LocalStore.
op = () => localStoreGetLastRemoteSnapshotVersion(remoteStoreImpl.localStore);
}
// Probe IndexedDB periodically and re-enable network
remoteStoreImpl.asyncQueue.enqueueRetryable(async () => {
logDebug(LOG_TAG$5, 'Retrying IndexedDB access');
await op();
remoteStoreImpl.offlineCauses.delete(1 /* OfflineCause.IndexedDbFailed */);
await enableNetworkInternal(remoteStoreImpl);
});
}
else {
throw e;
}
}
/**
* Executes `op`. If `op` fails, takes the network offline until `op`
* succeeds. Returns after the first attempt.
*/
function executeWithRecovery(remoteStoreImpl, op) {
return op().catch(e => disableNetworkUntilRecovery(remoteStoreImpl, e, op));
}
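/*
 * Illustrative sketch (not part of the SDK): wrapping a LocalStore read with
 * the recovery helper above, so that an IndexedDB failure takes the network
 * offline and schedules retries instead of surfacing the error.
 * `remoteStoreImpl` is assumed to be a RemoteStoreImpl.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function exampleExecuteWithRecovery(remoteStoreImpl) {
    return executeWithRecovery(remoteStoreImpl, () => localStoreGetLastRemoteSnapshotVersion(remoteStoreImpl.localStore));
}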
/**
* Takes a batch of changes from the Datastore, repackages them as a
* RemoteEvent, and passes that on to the listener, which is typically the
* SyncEngine.
*/
function raiseWatchSnapshot(remoteStoreImpl, snapshotVersion) {
const remoteEvent = remoteStoreImpl.watchChangeAggregator.createRemoteEvent(snapshotVersion);
// Update in-memory resume tokens. LocalStore will update the
// persistent view of these when applying the completed RemoteEvent.
remoteEvent.targetChanges.forEach((change, targetId) => {
if (change.resumeToken.approximateByteSize() > 0) {
const targetData = remoteStoreImpl.listenTargets.get(targetId);
// A watched target might have been removed already.
if (targetData) {
remoteStoreImpl.listenTargets.set(targetId, targetData.withResumeToken(change.resumeToken, snapshotVersion));
}
}
});
// Re-establish listens for the targets that have been invalidated by
// existence filter mismatches.
remoteEvent.targetMismatches.forEach(targetId => {
const targetData = remoteStoreImpl.listenTargets.get(targetId);
if (!targetData) {
// A watched target might have been removed already.
return;
}
// Clear the resume token for the target, since we're in a known mismatch
// state.
remoteStoreImpl.listenTargets.set(targetId, targetData.withResumeToken(ByteString.EMPTY_BYTE_STRING, targetData.snapshotVersion));
// Cause a hard reset by unwatching and rewatching immediately, but
// deliberately don't send a resume token so that we get a full update.
sendUnwatchRequest(remoteStoreImpl, targetId);
// Mark the target we send as being on behalf of an existence filter
// mismatch, but don't actually retain that in listenTargets. This ensures
// that we flag the first re-listen this way without impacting future
// listens of this target (that might happen e.g. on reconnect).
const requestTargetData = new TargetData(targetData.target, targetId, 1 /* TargetPurpose.ExistenceFilterMismatch */, targetData.sequenceNumber);
sendWatchRequest(remoteStoreImpl, requestTargetData);
});
return remoteStoreImpl.remoteSyncer.applyRemoteEvent(remoteEvent);
}
/** Handles an error on a target */
async function handleTargetError(remoteStoreImpl, watchChange) {
const error = watchChange.cause;
for (const targetId of watchChange.targetIds) {
// A watched target might have been removed already.
if (remoteStoreImpl.listenTargets.has(targetId)) {
await remoteStoreImpl.remoteSyncer.rejectListen(targetId, error);
remoteStoreImpl.listenTargets.delete(targetId);
remoteStoreImpl.watchChangeAggregator.removeTarget(targetId);
}
}
}
/**
* Attempts to fill our write pipeline with writes from the LocalStore.
*
* Called internally to bootstrap or refill the write pipeline and by
* SyncEngine whenever there are new mutations to process.
*
* Starts the write stream if necessary.
*/
async function fillWritePipeline(remoteStore) {
const remoteStoreImpl = debugCast(remoteStore);
const writeStream = ensureWriteStream(remoteStoreImpl);
let lastBatchIdRetrieved = remoteStoreImpl.writePipeline.length > 0
? remoteStoreImpl.writePipeline[remoteStoreImpl.writePipeline.length - 1]
.batchId
: BATCHID_UNKNOWN;
while (canAddToWritePipeline(remoteStoreImpl)) {
try {
const batch = await localStoreGetNextMutationBatch(remoteStoreImpl.localStore, lastBatchIdRetrieved);
if (batch === null) {
if (remoteStoreImpl.writePipeline.length === 0) {
writeStream.markIdle();
}
break;
}
else {
lastBatchIdRetrieved = batch.batchId;
addToWritePipeline(remoteStoreImpl, batch);
}
}
catch (e) {
await disableNetworkUntilRecovery(remoteStoreImpl, e);
}
}
if (shouldStartWriteStream(remoteStoreImpl)) {
startWriteStream(remoteStoreImpl);
}
}
/**
* Returns true if we can add to the write pipeline (i.e. the network is
* enabled and the write pipeline is not full).
*/
function canAddToWritePipeline(remoteStoreImpl) {
return (canUseNetwork(remoteStoreImpl) &&
remoteStoreImpl.writePipeline.length < MAX_PENDING_WRITES);
}
/**
* Queues additional writes to be sent to the write stream, sending them
* immediately if the write stream is established.
*/
function addToWritePipeline(remoteStoreImpl, batch) {
remoteStoreImpl.writePipeline.push(batch);
const writeStream = ensureWriteStream(remoteStoreImpl);
if (writeStream.isOpen() && writeStream.handshakeComplete) {
writeStream.writeMutations(batch.mutations);
}
}
function shouldStartWriteStream(remoteStoreImpl) {
return (canUseNetwork(remoteStoreImpl) &&
!ensureWriteStream(remoteStoreImpl).isStarted() &&
remoteStoreImpl.writePipeline.length > 0);
}
function startWriteStream(remoteStoreImpl) {
ensureWriteStream(remoteStoreImpl).start();
}
async function onWriteStreamOpen(remoteStoreImpl) {
ensureWriteStream(remoteStoreImpl).writeHandshake();
}
async function onWriteHandshakeComplete(remoteStoreImpl) {
const writeStream = ensureWriteStream(remoteStoreImpl);
// Send the write pipeline now that the stream is established.
for (const batch of remoteStoreImpl.writePipeline) {
writeStream.writeMutations(batch.mutations);
}
}
async function onMutationResult(remoteStoreImpl, commitVersion, results) {
const batch = remoteStoreImpl.writePipeline.shift();
const success = MutationBatchResult.from(batch, commitVersion, results);
await executeWithRecovery(remoteStoreImpl, () => remoteStoreImpl.remoteSyncer.applySuccessfulWrite(success));
// It's possible that with the completion of this mutation another
// slot has freed up.
await fillWritePipeline(remoteStoreImpl);
}
async function onWriteStreamClose(remoteStoreImpl, error) {
    // If the write stream closed after the write handshake completed, a write
    // operation failed and we fail the pending operation.
if (error && ensureWriteStream(remoteStoreImpl).handshakeComplete) {
// This error affects the actual write.
await handleWriteError(remoteStoreImpl, error);
}
// The write stream might have been started by refilling the write
    // pipeline for failed writes.
if (shouldStartWriteStream(remoteStoreImpl)) {
startWriteStream(remoteStoreImpl);
}
}
async function handleWriteError(remoteStoreImpl, error) {
// Only handle permanent errors here. If it's transient, just let the retry
// logic kick in.
if (isPermanentWriteError(error.code)) {
// This was a permanent error, the request itself was the problem
// so it's not going to succeed if we resend it.
const batch = remoteStoreImpl.writePipeline.shift();
// In this case it's also unlikely that the server itself is melting
// down -- this was just a bad request so inhibit backoff on the next
// restart.
ensureWriteStream(remoteStoreImpl).inhibitBackoff();
await executeWithRecovery(remoteStoreImpl, () => remoteStoreImpl.remoteSyncer.rejectFailedWrite(batch.batchId, error));
// It's possible that with the completion of this mutation
// another slot has freed up.
await fillWritePipeline(remoteStoreImpl);
}
}
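/*
 * Illustrative sketch (not part of the SDK): the write pipeline is a plain
 * FIFO. Because the backend acknowledges writes strictly in the order they
 * were sent, shift() in onMutationResult()/handleWriteError() always pops
 * exactly the batch the response refers to.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function exampleWritePipelineOrdering() {
    const writePipeline = [];
    writePipeline.push({ batchId: 1 }, { batchId: 2 }); // sent in this order
    const acked = writePipeline.shift(); // the first response acks batchId 1
    return acked.batchId === 1; // always true under the ordering guarantee
}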
async function restartNetwork(remoteStore) {
const remoteStoreImpl = debugCast(remoteStore);
remoteStoreImpl.offlineCauses.add(4 /* OfflineCause.ConnectivityChange */);
await disableNetworkInternal(remoteStoreImpl);
remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
remoteStoreImpl.offlineCauses.delete(4 /* OfflineCause.ConnectivityChange */);
await enableNetworkInternal(remoteStoreImpl);
}
async function remoteStoreHandleCredentialChange(remoteStore, user) {
const remoteStoreImpl = debugCast(remoteStore);
remoteStoreImpl.asyncQueue.verifyOperationInProgress();
logDebug(LOG_TAG$5, 'RemoteStore received new credentials');
const usesNetwork = canUseNetwork(remoteStoreImpl);
// Tear down and re-create our network streams. This will ensure we get a
// fresh auth token for the new user and re-fill the write pipeline with
// new mutations from the LocalStore (since mutations are per-user).
remoteStoreImpl.offlineCauses.add(3 /* OfflineCause.CredentialChange */);
await disableNetworkInternal(remoteStoreImpl);
if (usesNetwork) {
// Don't set the network status to Unknown if we are offline.
remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
}
await remoteStoreImpl.remoteSyncer.handleCredentialChange(user);
remoteStoreImpl.offlineCauses.delete(3 /* OfflineCause.CredentialChange */);
await enableNetworkInternal(remoteStoreImpl);
}
/**
* Toggles the network state when the client gains or loses its primary lease.
*/
async function remoteStoreApplyPrimaryState(remoteStore, isPrimary) {
const remoteStoreImpl = debugCast(remoteStore);
if (isPrimary) {
remoteStoreImpl.offlineCauses.delete(2 /* OfflineCause.IsSecondary */);
await enableNetworkInternal(remoteStoreImpl);
}
else if (!isPrimary) {
remoteStoreImpl.offlineCauses.add(2 /* OfflineCause.IsSecondary */);
await disableNetworkInternal(remoteStoreImpl);
remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
}
}
/**
* If not yet initialized, registers the WatchStream and its network state
* callback with `remoteStoreImpl`. Returns the existing stream if one is
* already available.
*
* PORTING NOTE: On iOS and Android, the WatchStream gets registered on startup.
* This is not done on Web to allow it to be tree-shaken.
*/
function ensureWatchStream(remoteStoreImpl) {
if (!remoteStoreImpl.watchStream) {
// Create stream (but note that it is not started yet).
remoteStoreImpl.watchStream = newPersistentWatchStream(remoteStoreImpl.datastore, remoteStoreImpl.asyncQueue, {
onOpen: onWatchStreamOpen.bind(null, remoteStoreImpl),
onClose: onWatchStreamClose.bind(null, remoteStoreImpl),
onWatchChange: onWatchStreamChange.bind(null, remoteStoreImpl)
});
remoteStoreImpl.onNetworkStatusChange.push(async (enabled) => {
if (enabled) {
remoteStoreImpl.watchStream.inhibitBackoff();
if (shouldStartWatchStream(remoteStoreImpl)) {
startWatchStream(remoteStoreImpl);
}
else {
remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
}
}
else {
await remoteStoreImpl.watchStream.stop();
cleanUpWatchStreamState(remoteStoreImpl);
}
});
}
return remoteStoreImpl.watchStream;
}
/**
* If not yet initialized, registers the WriteStream and its network state
* callback with `remoteStoreImpl`. Returns the existing stream if one is
* already available.
*
* PORTING NOTE: On iOS and Android, the WriteStream gets registered on startup.
* This is not done on Web to allow it to be tree-shaken.
*/
function ensureWriteStream(remoteStoreImpl) {
if (!remoteStoreImpl.writeStream) {
// Create stream (but note that it is not started yet).
remoteStoreImpl.writeStream = newPersistentWriteStream(remoteStoreImpl.datastore, remoteStoreImpl.asyncQueue, {
onOpen: onWriteStreamOpen.bind(null, remoteStoreImpl),
onClose: onWriteStreamClose.bind(null, remoteStoreImpl),
onHandshakeComplete: onWriteHandshakeComplete.bind(null, remoteStoreImpl),
onMutationResult: onMutationResult.bind(null, remoteStoreImpl)
});
remoteStoreImpl.onNetworkStatusChange.push(async (enabled) => {
if (enabled) {
remoteStoreImpl.writeStream.inhibitBackoff();
// This will start the write stream if necessary.
await fillWritePipeline(remoteStoreImpl);
}
else {
await remoteStoreImpl.writeStream.stop();
if (remoteStoreImpl.writePipeline.length > 0) {
logDebug(LOG_TAG$5, `Stopping write stream with ${remoteStoreImpl.writePipeline.length} pending writes`);
remoteStoreImpl.writePipeline = [];
}
}
});
}
return remoteStoreImpl.writeStream;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$4 = 'AsyncQueue';
/**
* Represents an operation scheduled to be run in the future on an AsyncQueue.
*
* It is created via DelayedOperation.createAndSchedule().
*
* Supports cancellation (via cancel()) and early execution (via skipDelay()).
*
* Note: We implement `PromiseLike` instead of `Promise`, as the `Promise` type
* in newer versions of TypeScript defines `finally`, which is not available in
* IE.
*/
class DelayedOperation {
constructor(asyncQueue, timerId, targetTimeMs, op, removalCallback) {
this.asyncQueue = asyncQueue;
this.timerId = timerId;
this.targetTimeMs = targetTimeMs;
this.op = op;
this.removalCallback = removalCallback;
this.deferred = new Deferred();
this.then = this.deferred.promise.then.bind(this.deferred.promise);
        // It's normal for the deferred promise to be rejected (due to cancellation),
        // so we attach a dummy catch callback to avoid
// 'UnhandledPromiseRejectionWarning' log spam.
        this.deferred.promise.catch(() => { });
}
/**
* Creates and returns a DelayedOperation that has been scheduled to be
* executed on the provided asyncQueue after the provided delayMs.
*
* @param asyncQueue - The queue to schedule the operation on.
* @param id - A Timer ID identifying the type of operation this is.
* @param delayMs - The delay (ms) before the operation should be scheduled.
* @param op - The operation to run.
* @param removalCallback - A callback to be called synchronously once the
* operation is executed or canceled, notifying the AsyncQueue to remove it
* from its delayedOperations list.
* PORTING NOTE: This exists to prevent making removeDelayedOperation() and
* the DelayedOperation class public.
*/
static createAndSchedule(asyncQueue, timerId, delayMs, op, removalCallback) {
const targetTime = Date.now() + delayMs;
const delayedOp = new DelayedOperation(asyncQueue, timerId, targetTime, op, removalCallback);
delayedOp.start(delayMs);
return delayedOp;
}
/**
* Starts the timer. This is called immediately after construction by
* createAndSchedule().
*/
start(delayMs) {
this.timerHandle = setTimeout(() => this.handleDelayElapsed(), delayMs);
}
/**
* Queues the operation to run immediately (if it hasn't already been run or
* canceled).
*/
skipDelay() {
return this.handleDelayElapsed();
}
/**
* Cancels the operation if it hasn't already been executed or canceled. The
* promise will be rejected.
*
* As long as the operation has not yet been run, calling cancel() provides a
* guarantee that the operation will not be run.
*/
cancel(reason) {
if (this.timerHandle !== null) {
this.clearTimeout();
this.deferred.reject(new FirestoreError(Code.CANCELLED, 'Operation cancelled' + (reason ? ': ' + reason : '')));
}
}
handleDelayElapsed() {
this.asyncQueue.enqueueAndForget(() => {
if (this.timerHandle !== null) {
this.clearTimeout();
return this.op().then(result => {
return this.deferred.resolve(result);
});
}
else {
return Promise.resolve();
}
});
}
clearTimeout() {
if (this.timerHandle !== null) {
this.removalCallback(this);
clearTimeout(this.timerHandle);
this.timerHandle = null;
}
}
}
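/*
 * Illustrative sketch (not part of the SDK): scheduling and cancelling a
 * DelayedOperation, as a never-invoked example. `asyncQueue` is assumed to be
 * an AsyncQueue; the removal callback is a no-op here.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function exampleDelayedOperation(asyncQueue) {
    const op = DelayedOperation.createAndSchedule(asyncQueue, "health_check_timeout" /* TimerId.HealthCheckTimeout */, 1000, () => Promise.resolve('ran'), () => { });
    // As long as the op has not run yet, cancel() guarantees it never will;
    // the promise is rejected with Code.CANCELLED.
    op.cancel('no longer needed');
    return op.then(undefined, err => logDebug('example', `${err}`));
}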
/**
* Returns a FirestoreError that can be surfaced to the user if the provided
* error is an IndexedDbTransactionError. Re-throws the error otherwise.
*/
function wrapInUserErrorIfRecoverable(e, msg) {
logError(LOG_TAG$4, `${msg}: ${e}`);
if (isIndexedDbTransactionError(e)) {
return new FirestoreError(Code.UNAVAILABLE, `${msg}: ${e}`);
}
else {
throw e;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* DocumentSet is an immutable (copy-on-write) collection that holds documents
* in order specified by the provided comparator. We always add a document key
* comparator on top of what is provided to guarantee document equality based on
* the key.
*/
class DocumentSet {
/** The default ordering is by key if the comparator is omitted */
constructor(comp) {
// We are adding document key comparator to the end as it's the only
// guaranteed unique property of a document.
if (comp) {
this.comparator = (d1, d2) => comp(d1, d2) || DocumentKey.comparator(d1.key, d2.key);
}
else {
this.comparator = (d1, d2) => DocumentKey.comparator(d1.key, d2.key);
}
this.keyedMap = documentMap();
this.sortedSet = new SortedMap(this.comparator);
}
/**
* Returns an empty copy of the existing DocumentSet, using the same
* comparator.
*/
static emptySet(oldSet) {
return new DocumentSet(oldSet.comparator);
}
has(key) {
return this.keyedMap.get(key) != null;
}
get(key) {
return this.keyedMap.get(key);
}
first() {
return this.sortedSet.minKey();
}
last() {
return this.sortedSet.maxKey();
}
isEmpty() {
return this.sortedSet.isEmpty();
}
/**
* Returns the index of the provided key in the document set, or -1 if the
     * document key is not present in the set.
*/
indexOf(key) {
const doc = this.keyedMap.get(key);
return doc ? this.sortedSet.indexOf(doc) : -1;
}
get size() {
return this.sortedSet.size;
}
    /** Iterates over the documents in the order defined by the comparator. */
forEach(cb) {
this.sortedSet.inorderTraversal((k, v) => {
cb(k);
return false;
});
}
/** Inserts or updates a document with the same key */
add(doc) {
// First remove the element if we have it.
const set = this.delete(doc.key);
return set.copy(set.keyedMap.insert(doc.key, doc), set.sortedSet.insert(doc, null));
}
/** Deletes a document with a given key */
delete(key) {
const doc = this.get(key);
if (!doc) {
return this;
}
return this.copy(this.keyedMap.remove(key), this.sortedSet.remove(doc));
}
isEqual(other) {
if (!(other instanceof DocumentSet)) {
return false;
}
if (this.size !== other.size) {
return false;
}
const thisIt = this.sortedSet.getIterator();
const otherIt = other.sortedSet.getIterator();
while (thisIt.hasNext()) {
const thisDoc = thisIt.getNext().key;
const otherDoc = otherIt.getNext().key;
if (!thisDoc.isEqual(otherDoc)) {
return false;
}
}
return true;
}
toString() {
const docStrings = [];
this.forEach(doc => {
docStrings.push(doc.toString());
});
if (docStrings.length === 0) {
return 'DocumentSet ()';
}
else {
return 'DocumentSet (\n ' + docStrings.join(' \n') + '\n)';
}
}
copy(keyedMap, sortedSet) {
const newSet = new DocumentSet();
newSet.comparator = this.comparator;
newSet.keyedMap = keyedMap;
newSet.sortedSet = sortedSet;
return newSet;
}
}
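/*
 * Illustrative sketch (not part of the SDK): DocumentSet is copy-on-write, so
 * add() and delete() return new sets and never mutate the receiver. `doc` is
 * assumed to be a Document.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function exampleDocumentSetCopyOnWrite(doc) {
    const empty = new DocumentSet(); // default ordering: by key
    const one = empty.add(doc);
    const none = one.delete(doc.key);
    // The earlier sets are untouched by either operation:
    return empty.isEmpty() && one.size === 1 && none.isEmpty();
}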
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* DocumentChangeSet keeps track of a set of changes to docs in a query, merging
* duplicate events for the same doc.
*/
class DocumentChangeSet {
constructor() {
this.changeMap = new SortedMap(DocumentKey.comparator);
}
track(change) {
const key = change.doc.key;
const oldChange = this.changeMap.get(key);
if (!oldChange) {
this.changeMap = this.changeMap.insert(key, change);
return;
}
// Merge the new change with the existing change.
if (change.type !== 0 /* ChangeType.Added */ &&
oldChange.type === 3 /* ChangeType.Metadata */) {
this.changeMap = this.changeMap.insert(key, change);
}
else if (change.type === 3 /* ChangeType.Metadata */ &&
oldChange.type !== 1 /* ChangeType.Removed */) {
this.changeMap = this.changeMap.insert(key, {
type: oldChange.type,
doc: change.doc
});
}
else if (change.type === 2 /* ChangeType.Modified */ &&
oldChange.type === 2 /* ChangeType.Modified */) {
this.changeMap = this.changeMap.insert(key, {
type: 2 /* ChangeType.Modified */,
doc: change.doc
});
}
else if (change.type === 2 /* ChangeType.Modified */ &&
oldChange.type === 0 /* ChangeType.Added */) {
this.changeMap = this.changeMap.insert(key, {
type: 0 /* ChangeType.Added */,
doc: change.doc
});
}
else if (change.type === 1 /* ChangeType.Removed */ &&
oldChange.type === 0 /* ChangeType.Added */) {
this.changeMap = this.changeMap.remove(key);
}
else if (change.type === 1 /* ChangeType.Removed */ &&
oldChange.type === 2 /* ChangeType.Modified */) {
this.changeMap = this.changeMap.insert(key, {
type: 1 /* ChangeType.Removed */,
doc: oldChange.doc
});
}
else if (change.type === 0 /* ChangeType.Added */ &&
oldChange.type === 1 /* ChangeType.Removed */) {
this.changeMap = this.changeMap.insert(key, {
type: 2 /* ChangeType.Modified */,
doc: change.doc
});
}
else {
// This includes these cases, which don't make sense:
// Added->Added
// Removed->Removed
// Modified->Added
// Removed->Modified
// Metadata->Added
// Removed->Metadata
fail();
}
}
getChanges() {
const changes = [];
this.changeMap.inorderTraversal((key, change) => {
changes.push(change);
});
return changes;
}
}
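/*
 * Illustrative sketch (not part of the SDK): duplicate events for one
 * document collapse according to the merge rules above; an Added followed by
 * a Removed cancels out entirely. `doc` is assumed to be a Document.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function exampleChangeMerging(doc) {
    const changeSet = new DocumentChangeSet();
    changeSet.track({ type: 0 /* ChangeType.Added */, doc });
    changeSet.track({ type: 1 /* ChangeType.Removed */, doc });
    return changeSet.getChanges(); // [] -- the Added/Removed pair cancelled out
}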
class ViewSnapshot {
constructor(query, docs, oldDocs, docChanges, mutatedKeys, fromCache, syncStateChanged, excludesMetadataChanges, hasCachedResults) {
this.query = query;
this.docs = docs;
this.oldDocs = oldDocs;
this.docChanges = docChanges;
this.mutatedKeys = mutatedKeys;
this.fromCache = fromCache;
this.syncStateChanged = syncStateChanged;
this.excludesMetadataChanges = excludesMetadataChanges;
this.hasCachedResults = hasCachedResults;
}
/** Returns a view snapshot as if all documents in the snapshot were added. */
static fromInitialDocuments(query, documents, mutatedKeys, fromCache, hasCachedResults) {
const changes = [];
documents.forEach(doc => {
changes.push({ type: 0 /* ChangeType.Added */, doc });
});
return new ViewSnapshot(query, documents, DocumentSet.emptySet(documents), changes, mutatedKeys, fromCache,
/* syncStateChanged= */ true,
/* excludesMetadataChanges= */ false, hasCachedResults);
}
get hasPendingWrites() {
return !this.mutatedKeys.isEmpty();
}
isEqual(other) {
if (this.fromCache !== other.fromCache ||
this.hasCachedResults !== other.hasCachedResults ||
this.syncStateChanged !== other.syncStateChanged ||
!this.mutatedKeys.isEqual(other.mutatedKeys) ||
!queryEquals(this.query, other.query) ||
!this.docs.isEqual(other.docs) ||
!this.oldDocs.isEqual(other.oldDocs)) {
return false;
}
const changes = this.docChanges;
const otherChanges = other.docChanges;
if (changes.length !== otherChanges.length) {
return false;
}
for (let i = 0; i < changes.length; i++) {
if (changes[i].type !== otherChanges[i].type ||
!changes[i].doc.isEqual(otherChanges[i].doc)) {
return false;
}
}
return true;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Holds the listeners and the last received ViewSnapshot for a query being
* tracked by EventManager.
*/
class QueryListenersInfo {
constructor() {
this.viewSnap = undefined;
this.listeners = [];
}
}
function newEventManager() {
return new EventManagerImpl();
}
class EventManagerImpl {
constructor() {
this.queries = new ObjectMap(q => canonifyQuery(q), queryEquals);
this.onlineState = "Unknown" /* OnlineState.Unknown */;
this.snapshotsInSyncListeners = new Set();
}
}
async function eventManagerListen(eventManager, listener) {
const eventManagerImpl = debugCast(eventManager);
const query = listener.query;
let firstListen = false;
let queryInfo = eventManagerImpl.queries.get(query);
if (!queryInfo) {
firstListen = true;
queryInfo = new QueryListenersInfo();
}
if (firstListen) {
try {
queryInfo.viewSnap = await eventManagerImpl.onListen(query);
}
catch (e) {
const firestoreError = wrapInUserErrorIfRecoverable(e, `Initialization of query '${stringifyQuery(listener.query)}' failed`);
listener.onError(firestoreError);
return;
}
}
eventManagerImpl.queries.set(query, queryInfo);
queryInfo.listeners.push(listener);
// Run global snapshot listeners if a consistent snapshot has been emitted.
listener.applyOnlineStateChange(eventManagerImpl.onlineState);
if (queryInfo.viewSnap) {
const raisedEvent = listener.onViewSnapshot(queryInfo.viewSnap);
if (raisedEvent) {
raiseSnapshotsInSyncEvent(eventManagerImpl);
}
}
}
async function eventManagerUnlisten(eventManager, listener) {
const eventManagerImpl = debugCast(eventManager);
const query = listener.query;
let lastListen = false;
const queryInfo = eventManagerImpl.queries.get(query);
if (queryInfo) {
const i = queryInfo.listeners.indexOf(listener);
if (i >= 0) {
queryInfo.listeners.splice(i, 1);
lastListen = queryInfo.listeners.length === 0;
}
}
if (lastListen) {
eventManagerImpl.queries.delete(query);
return eventManagerImpl.onUnlisten(query);
}
}
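/*
 * Illustrative sketch (not part of the SDK): only the first listen for a
 * query runs onListen(); later listeners reuse the cached snapshot, and only
 * removing the last listener triggers onUnlisten(). `eventManager` (with its
 * onListen/onUnlisten wiring in place) and two QueryListeners for the same
 * query are assumed.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
async function exampleSharedListens(eventManager, listenerA, listenerB) {
    await eventManagerListen(eventManager, listenerA); // first listen: onListen() runs
    await eventManagerListen(eventManager, listenerB); // served from cached viewSnap
    await eventManagerUnlisten(eventManager, listenerA); // not the last: no onUnlisten()
    await eventManagerUnlisten(eventManager, listenerB); // last listener: onUnlisten() runs
}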
function eventManagerOnWatchChange(eventManager, viewSnaps) {
const eventManagerImpl = debugCast(eventManager);
let raisedEvent = false;
for (const viewSnap of viewSnaps) {
const query = viewSnap.query;
const queryInfo = eventManagerImpl.queries.get(query);
if (queryInfo) {
for (const listener of queryInfo.listeners) {
if (listener.onViewSnapshot(viewSnap)) {
raisedEvent = true;
}
}
queryInfo.viewSnap = viewSnap;
}
}
if (raisedEvent) {
raiseSnapshotsInSyncEvent(eventManagerImpl);
}
}
function eventManagerOnWatchError(eventManager, query, error) {
const eventManagerImpl = debugCast(eventManager);
const queryInfo = eventManagerImpl.queries.get(query);
if (queryInfo) {
for (const listener of queryInfo.listeners) {
listener.onError(error);
}
}
// Remove all listeners. NOTE: We don't need to call syncEngine.unlisten()
// after an error.
eventManagerImpl.queries.delete(query);
}
function eventManagerOnOnlineStateChange(eventManager, onlineState) {
const eventManagerImpl = debugCast(eventManager);
eventManagerImpl.onlineState = onlineState;
let raisedEvent = false;
eventManagerImpl.queries.forEach((_, queryInfo) => {
for (const listener of queryInfo.listeners) {
// Run global snapshot listeners if a consistent snapshot has been emitted.
if (listener.applyOnlineStateChange(onlineState)) {
raisedEvent = true;
}
}
});
if (raisedEvent) {
raiseSnapshotsInSyncEvent(eventManagerImpl);
}
}
function addSnapshotsInSyncListener(eventManager, observer) {
const eventManagerImpl = debugCast(eventManager);
eventManagerImpl.snapshotsInSyncListeners.add(observer);
// Immediately fire an initial event, indicating all existing listeners
// are in-sync.
observer.next();
}
function removeSnapshotsInSyncListener(eventManager, observer) {
const eventManagerImpl = debugCast(eventManager);
eventManagerImpl.snapshotsInSyncListeners.delete(observer);
}
// Call all global snapshot listeners that have been set.
function raiseSnapshotsInSyncEvent(eventManagerImpl) {
eventManagerImpl.snapshotsInSyncListeners.forEach(observer => {
observer.next();
});
}
/**
* QueryListener takes a series of internal view snapshots and determines
* when to raise the event.
*
* It uses an Observer to dispatch events.
*/
class QueryListener {
constructor(query, queryObserver, options) {
this.query = query;
this.queryObserver = queryObserver;
/**
* Initial snapshots (e.g. from cache) may not be propagated to the wrapped
* observer. This flag is set to true once we've actually raised an event.
*/
this.raisedInitialEvent = false;
this.snap = null;
this.onlineState = "Unknown" /* OnlineState.Unknown */;
this.options = options || {};
}
/**
* Applies the new ViewSnapshot to this listener, raising a user-facing event
* if applicable (depending on what changed, whether the user has opted into
* metadata-only changes, etc.). Returns true if a user-facing event was
* indeed raised.
*/
onViewSnapshot(snap) {
if (!this.options.includeMetadataChanges) {
// Remove the metadata only changes.
const docChanges = [];
for (const docChange of snap.docChanges) {
if (docChange.type !== 3 /* ChangeType.Metadata */) {
docChanges.push(docChange);
}
}
snap = new ViewSnapshot(snap.query, snap.docs, snap.oldDocs, docChanges, snap.mutatedKeys, snap.fromCache, snap.syncStateChanged,
/* excludesMetadataChanges= */ true, snap.hasCachedResults);
}
let raisedEvent = false;
if (!this.raisedInitialEvent) {
if (this.shouldRaiseInitialEvent(snap, this.onlineState)) {
this.raiseInitialEvent(snap);
raisedEvent = true;
}
}
else if (this.shouldRaiseEvent(snap)) {
this.queryObserver.next(snap);
raisedEvent = true;
}
this.snap = snap;
return raisedEvent;
}
onError(error) {
this.queryObserver.error(error);
}
/** Returns whether a snapshot was raised. */
applyOnlineStateChange(onlineState) {
this.onlineState = onlineState;
let raisedEvent = false;
if (this.snap &&
!this.raisedInitialEvent &&
this.shouldRaiseInitialEvent(this.snap, onlineState)) {
this.raiseInitialEvent(this.snap);
raisedEvent = true;
}
return raisedEvent;
}
shouldRaiseInitialEvent(snap, onlineState) {
// Always raise the first event when we're synced
if (!snap.fromCache) {
return true;
}
// NOTE: We consider OnlineState.Unknown as online (it should become Offline
// or Online if we wait long enough).
const maybeOnline = onlineState !== "Offline" /* OnlineState.Offline */;
// Don't raise the event if we're online, aren't synced yet (checked
// above) and are waiting for a sync.
if (this.options.waitForSyncWhenOnline && maybeOnline) {
return false;
}
        // Raise data from cache if we have any documents, have cached results
        // from before, or we are offline.
return (!snap.docs.isEmpty() ||
snap.hasCachedResults ||
onlineState === "Offline" /* OnlineState.Offline */);
}
shouldRaiseEvent(snap) {
// We don't need to handle includeDocumentMetadataChanges here because
// the Metadata only changes have already been stripped out if needed.
// At this point the only changes we will see are the ones we should
// propagate.
if (snap.docChanges.length > 0) {
return true;
}
const hasPendingWritesChanged = this.snap && this.snap.hasPendingWrites !== snap.hasPendingWrites;
if (snap.syncStateChanged || hasPendingWritesChanged) {
return this.options.includeMetadataChanges === true;
}
// Generally we should have hit one of the cases above, but it's possible
// to get here if there were only metadata docChanges and they got
// stripped out.
return false;
}
raiseInitialEvent(snap) {
snap = ViewSnapshot.fromInitialDocuments(snap.query, snap.docs, snap.mutatedKeys, snap.fromCache, snap.hasCachedResults);
this.raisedInitialEvent = true;
this.queryObserver.next(snap);
}
}
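/*
 * Illustrative sketch (not part of the SDK): the two listener options
 * interpreted by shouldRaiseEvent()/shouldRaiseInitialEvent() above. `query`
 * and `observer` are assumed to exist.
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function exampleQueryListenerOptions(query, observer) {
    return new QueryListener(query, observer, {
        // Also raise events when only hasPendingWrites/fromCache change:
        includeMetadataChanges: true,
        // Suppress the initial cache-only snapshot while we may still be online:
        waitForSyncWhenOnline: true
    });
}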
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A set of changes to what documents are currently in view and out of view for
* a given query. These changes are sent to the LocalStore by the View (via
* the SyncEngine) and are used to pin / unpin documents as appropriate.
*/
class LocalViewChanges {
constructor(targetId, fromCache, addedKeys, removedKeys) {
this.targetId = targetId;
this.fromCache = fromCache;
this.addedKeys = addedKeys;
this.removedKeys = removedKeys;
}
static fromSnapshot(targetId, viewSnapshot) {
let addedKeys = documentKeySet();
let removedKeys = documentKeySet();
for (const docChange of viewSnapshot.docChanges) {
switch (docChange.type) {
case 0 /* ChangeType.Added */:
addedKeys = addedKeys.add(docChange.doc.key);
break;
case 1 /* ChangeType.Removed */:
removedKeys = removedKeys.add(docChange.doc.key);
break;
                default: // Modified and Metadata changes don't affect pinning; do nothing.
}
}
return new LocalViewChanges(targetId, viewSnapshot.fromCache, addedKeys, removedKeys);
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Helper to convert objects from bundles to model objects in the SDK.
*/
class BundleConverterImpl {
constructor(serializer) {
this.serializer = serializer;
}
toDocumentKey(name) {
return fromName(this.serializer, name);
}
/**
* Converts a BundleDocument to a MutableDocument.
*/
toMutableDocument(bundledDoc) {
if (bundledDoc.metadata.exists) {
return fromDocument(this.serializer, bundledDoc.document, false);
}
else {
return MutableDocument.newNoDocument(this.toDocumentKey(bundledDoc.metadata.name), this.toSnapshotVersion(bundledDoc.metadata.readTime));
}
}
toSnapshotVersion(time) {
return fromVersion(time);
}
}
/**
 * A class to process the elements from a bundle, load them into local
 * storage, and provide progress updates while loading.
*/
class BundleLoader {
constructor(bundleMetadata, localStore, serializer) {
this.bundleMetadata = bundleMetadata;
this.localStore = localStore;
this.serializer = serializer;
/** Batched queries to be saved into storage */
this.queries = [];
/** Batched documents to be saved into storage */
this.documents = [];
/** The collection groups affected by this bundle. */
this.collectionGroups = new Set();
this.progress = bundleInitialProgress(bundleMetadata);
}
/**
* Adds an element from the bundle to the loader.
*
* Returns the updated progress if adding the element changed the loading
* progress; otherwise returns null.
*/
addSizedElement(element) {
this.progress.bytesLoaded += element.byteLength;
let documentsLoaded = this.progress.documentsLoaded;
if (element.payload.namedQuery) {
this.queries.push(element.payload.namedQuery);
}
else if (element.payload.documentMetadata) {
this.documents.push({ metadata: element.payload.documentMetadata });
if (!element.payload.documentMetadata.exists) {
++documentsLoaded;
}
const path = ResourcePath.fromString(element.payload.documentMetadata.name);
this.collectionGroups.add(path.get(path.length - 2));
}
else if (element.payload.document) {
this.documents[this.documents.length - 1].document =
element.payload.document;
++documentsLoaded;
}
if (documentsLoaded !== this.progress.documentsLoaded) {
this.progress.documentsLoaded = documentsLoaded;
return Object.assign({}, this.progress);
}
return null;
}
getQueryDocumentMapping(documents) {
const queryDocumentMap = new Map();
const bundleConverter = new BundleConverterImpl(this.serializer);
for (const bundleDoc of documents) {
if (bundleDoc.metadata.queries) {
const documentKey = bundleConverter.toDocumentKey(bundleDoc.metadata.name);
for (const queryName of bundleDoc.metadata.queries) {
const documentKeys = (queryDocumentMap.get(queryName) || documentKeySet()).add(documentKey);
queryDocumentMap.set(queryName, documentKeys);
}
}
}
return queryDocumentMap;
}
/**
* Applies the batched documents and named queries to local storage, updates
* the progress to 'Success', and returns the final load result.
*/
async complete() {
const changedDocs = await localStoreApplyBundledDocuments(this.localStore, new BundleConverterImpl(this.serializer), this.documents, this.bundleMetadata.id);
const queryDocumentMap = this.getQueryDocumentMapping(this.documents);
for (const q of this.queries) {
await localStoreSaveNamedQuery(this.localStore, q, queryDocumentMap.get(q.name));
}
this.progress.taskState = 'Success';
return {
progress: this.progress,
changedCollectionGroups: this.collectionGroups,
changedDocs
};
}
}
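// Illustrative sketch (comment only, not executed): the intended driving loop for
// BundleLoader, mirroring `loadBundleImpl` further below. `metadata`, `localStore`,
// `serializer`, `elements` and `reportProgress` are hypothetical inputs; the code
// assumes an enclosing async function.
//
//   const loader = new BundleLoader(metadata, localStore, serializer);
//   for (const element of elements) {
//     const progress = loader.addSizedElement(element);
//     if (progress) reportProgress(progress); // non-null only when documentsLoaded advanced
//   }
//   const { progress, changedCollectionGroups, changedDocs } = await loader.complete();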
/**
* Returns a `LoadBundleTaskProgress` representing the initial progress of
* loading a bundle.
*/
function bundleInitialProgress(metadata) {
return {
taskState: 'Running',
documentsLoaded: 0,
bytesLoaded: 0,
totalDocuments: metadata.totalDocuments,
totalBytes: metadata.totalBytes
};
}
/**
* Returns a `LoadBundleTaskProgress` representing a successfully completed
* bundle load.
*/
function bundleSuccessProgress(metadata) {
return {
taskState: 'Success',
documentsLoaded: metadata.totalDocuments,
bytesLoaded: metadata.totalBytes,
totalDocuments: metadata.totalDocuments,
totalBytes: metadata.totalBytes
};
}
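// For a hypothetical bundle metadata of { totalDocuments: 2, totalBytes: 100 },
// the two helpers above evaluate to:
//
//   bundleInitialProgress(metadata)
//   // => { taskState: 'Running', documentsLoaded: 0, bytesLoaded: 0,
//   //      totalDocuments: 2, totalBytes: 100 }
//   bundleSuccessProgress(metadata)
//   // => { taskState: 'Success', documentsLoaded: 2, bytesLoaded: 100,
//   //      totalDocuments: 2, totalBytes: 100 }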
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class AddedLimboDocument {
constructor(key) {
this.key = key;
}
}
class RemovedLimboDocument {
constructor(key) {
this.key = key;
}
}
/**
* View is responsible for computing the final merged truth of what docs are in
* a query. It gets notified of local and remote changes to docs, and applies
* the query filters and limits to determine the most correct possible results.
*/
class View {
constructor(query,
/** Documents included in the remote target */
_syncedDocuments) {
this.query = query;
this._syncedDocuments = _syncedDocuments;
this.syncState = null;
this.hasCachedResults = false;
/**
* A flag indicating whether the view is current with the backend. A view is considered
* current after it has seen the current flag from the backend and did not
* lose consistency within the watch stream (e.g. because of an existence
* filter mismatch).
*/
this.current = false;
/** Documents in the view but not in the remote target */
this.limboDocuments = documentKeySet();
/** Document Keys that have local changes */
this.mutatedKeys = documentKeySet();
this.docComparator = newQueryComparator(query);
this.documentSet = new DocumentSet(this.docComparator);
}
/**
* The set of remote documents that the server has told us belong to the target associated with
* this view.
*/
get syncedDocuments() {
return this._syncedDocuments;
}
/**
* Iterates over a set of doc changes, applies the query limit, and computes
* what the new results should be, what the changes were, and whether we may
* need to go back to the local cache for more results. Does not make any
* changes to the view.
* @param docChanges - The doc changes to apply to this view.
* @param previousChanges - If this is being called with a refill, then start
* with this set of docs and changes instead of the current view.
* @returns a new set of docs, changes, and refill flag.
*/
computeDocChanges(docChanges, previousChanges) {
const changeSet = previousChanges
? previousChanges.changeSet
: new DocumentChangeSet();
const oldDocumentSet = previousChanges
? previousChanges.documentSet
: this.documentSet;
let newMutatedKeys = previousChanges
? previousChanges.mutatedKeys
: this.mutatedKeys;
let newDocumentSet = oldDocumentSet;
let needsRefill = false;
// Track the last doc in a (full) limit. This is necessary, because some
// update (a delete, or an update moving a doc past the old limit) might
// mean there is some other document in the local cache that either should
// come (1) between the old last limit doc and the new last document, in the
// case of updates, or (2) after the new last document, in the case of
// deletes. So we keep this doc at the old limit to compare the updates to.
//
// Note that this should never get used in a refill (when previousChanges is
// set), because there will only be adds -- no deletes or updates.
const lastDocInLimit = this.query.limitType === "F" /* LimitType.First */ &&
oldDocumentSet.size === this.query.limit
? oldDocumentSet.last()
: null;
const firstDocInLimit = this.query.limitType === "L" /* LimitType.Last */ &&
oldDocumentSet.size === this.query.limit
? oldDocumentSet.first()
: null;
docChanges.inorderTraversal((key, entry) => {
const oldDoc = oldDocumentSet.get(key);
const newDoc = queryMatches(this.query, entry) ? entry : null;
const oldDocHadPendingMutations = oldDoc
? this.mutatedKeys.has(oldDoc.key)
: false;
const newDocHasPendingMutations = newDoc
? newDoc.hasLocalMutations ||
// We only consider committed mutations for documents that were
// mutated during the lifetime of the view.
(this.mutatedKeys.has(newDoc.key) && newDoc.hasCommittedMutations)
: false;
let changeApplied = false;
// Calculate change
if (oldDoc && newDoc) {
const docsEqual = oldDoc.data.isEqual(newDoc.data);
if (!docsEqual) {
if (!this.shouldWaitForSyncedDocument(oldDoc, newDoc)) {
changeSet.track({
type: 2 /* ChangeType.Modified */,
doc: newDoc
});
changeApplied = true;
if ((lastDocInLimit &&
this.docComparator(newDoc, lastDocInLimit) > 0) ||
(firstDocInLimit &&
this.docComparator(newDoc, firstDocInLimit) < 0)) {
// This doc moved from inside the limit to outside the limit.
// That means there may be some other doc in the local cache
// that should be included instead.
needsRefill = true;
}
}
}
else if (oldDocHadPendingMutations !== newDocHasPendingMutations) {
changeSet.track({ type: 3 /* ChangeType.Metadata */, doc: newDoc });
changeApplied = true;
}
}
else if (!oldDoc && newDoc) {
changeSet.track({ type: 0 /* ChangeType.Added */, doc: newDoc });
changeApplied = true;
}
else if (oldDoc && !newDoc) {
changeSet.track({ type: 1 /* ChangeType.Removed */, doc: oldDoc });
changeApplied = true;
if (lastDocInLimit || firstDocInLimit) {
// A doc was removed from a full limit query. We'll need to
// requery from the local cache to see if we know about some other
// doc that should be in the results.
needsRefill = true;
}
}
if (changeApplied) {
if (newDoc) {
newDocumentSet = newDocumentSet.add(newDoc);
if (newDocHasPendingMutations) {
newMutatedKeys = newMutatedKeys.add(key);
}
else {
newMutatedKeys = newMutatedKeys.delete(key);
}
}
else {
newDocumentSet = newDocumentSet.delete(key);
newMutatedKeys = newMutatedKeys.delete(key);
}
}
});
// Drop documents out to meet limit/limitToLast requirement.
if (this.query.limit !== null) {
while (newDocumentSet.size > this.query.limit) {
const oldDoc = this.query.limitType === "F" /* LimitType.First */
? newDocumentSet.last()
: newDocumentSet.first();
newDocumentSet = newDocumentSet.delete(oldDoc.key);
newMutatedKeys = newMutatedKeys.delete(oldDoc.key);
changeSet.track({ type: 1 /* ChangeType.Removed */, doc: oldDoc });
}
}
return {
documentSet: newDocumentSet,
changeSet,
needsRefill,
mutatedKeys: newMutatedKeys
};
}
shouldWaitForSyncedDocument(oldDoc, newDoc) {
// We suppress the initial change event for documents that were modified as
// part of a write acknowledgment (e.g. when the value of a server transform
// is applied) as Watch will send us the same document again.
// By suppressing the event, we only raise two user visible events (one with
// `hasPendingWrites` and the final state of the document) instead of three
// (one with `hasPendingWrites`, the modified document with
// `hasPendingWrites` and the final state of the document).
return (oldDoc.hasLocalMutations &&
newDoc.hasCommittedMutations &&
!newDoc.hasLocalMutations);
}
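// Worked example (comment only): a write sets a serverTimestamp field and is then
// acked by the backend. The document transitions through three local states:
//
//   1. { hasLocalMutations: true }      -> raised with hasPendingWrites
//   2. { hasCommittedMutations: true }  -> suppressed by the check above
//   3. synced document from Watch       -> raised as the final state
//
// so the user observes two events instead of three.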
/**
* Updates the view with the given ViewDocumentChanges and optionally updates
* limbo docs and sync state from the provided target change.
* @param docChanges - The set of changes to make to the view's docs.
* @param updateLimboDocuments - Whether to update limbo documents based on
* this change.
* @param targetChange - A target change to apply for computing limbo docs and
* sync state.
* @returns A new ViewChange with the given docs, changes, and sync state.
*/
// PORTING NOTE: The iOS/Android clients always compute limbo document changes.
applyChanges(docChanges, updateLimboDocuments, targetChange) {
const oldDocs = this.documentSet;
this.documentSet = docChanges.documentSet;
this.mutatedKeys = docChanges.mutatedKeys;
// Sort changes based on type and query comparator
const changes = docChanges.changeSet.getChanges();
changes.sort((c1, c2) => {
return (compareChangeType(c1.type, c2.type) ||
this.docComparator(c1.doc, c2.doc));
});
this.applyTargetChange(targetChange);
const limboChanges = updateLimboDocuments
? this.updateLimboDocuments()
: [];
const synced = this.limboDocuments.size === 0 && this.current;
const newSyncState = synced ? 1 /* SyncState.Synced */ : 0 /* SyncState.Local */;
const syncStateChanged = newSyncState !== this.syncState;
this.syncState = newSyncState;
if (changes.length === 0 && !syncStateChanged) {
// no changes
return { limboChanges };
}
else {
const snap = new ViewSnapshot(this.query, docChanges.documentSet, oldDocs, changes, docChanges.mutatedKeys, newSyncState === 0 /* SyncState.Local */, syncStateChanged,
/* excludesMetadataChanges= */ false, targetChange
? targetChange.resumeToken.approximateByteSize() > 0
: false);
return {
snapshot: snap,
limboChanges
};
}
}
/**
* Applies an OnlineState change to the view, potentially generating a
* ViewChange if the view's syncState changes as a result.
*/
applyOnlineStateChange(onlineState) {
if (this.current && onlineState === "Offline" /* OnlineState.Offline */) {
// If we're offline, set `current` to false and then call applyChanges()
// to refresh our syncState and generate a ViewChange as appropriate. We
// are guaranteed to get a new TargetChange that sets `current` back to
// true once the client is back online.
this.current = false;
return this.applyChanges({
documentSet: this.documentSet,
changeSet: new DocumentChangeSet(),
mutatedKeys: this.mutatedKeys,
needsRefill: false
},
/* updateLimboDocuments= */ false);
}
else {
// No effect, just return a no-op ViewChange.
return { limboChanges: [] };
}
}
/**
* Returns whether the doc for the given key should be in limbo.
*/
shouldBeInLimbo(key) {
// If the remote end says it's part of this query, it's not in limbo.
if (this._syncedDocuments.has(key)) {
return false;
}
// The local store doesn't think it's a result, so it shouldn't be in limbo.
if (!this.documentSet.has(key)) {
return false;
}
// If there are local changes to the doc, they might explain why the server
// doesn't know that it's part of the query. So don't put it in limbo.
// TODO(klimt): Ideally, we would only consider changes that might actually
// affect this specific query.
if (this.documentSet.get(key).hasLocalMutations) {
return false;
}
// Everything else is in limbo.
return true;
}
/**
* Updates syncedDocuments, current, and limbo docs based on the given change.
* Returns the list of changes to which docs are in limbo.
*/
applyTargetChange(targetChange) {
if (targetChange) {
targetChange.addedDocuments.forEach(key => (this._syncedDocuments = this._syncedDocuments.add(key)));
targetChange.modifiedDocuments.forEach(key => {
// No-op: a modified document must already be a member of
// `_syncedDocuments`, so there is nothing to add or remove here.
});
targetChange.removedDocuments.forEach(key => (this._syncedDocuments = this._syncedDocuments.delete(key)));
this.current = targetChange.current;
}
}
updateLimboDocuments() {
// We can only determine limbo documents when we're in-sync with the server.
if (!this.current) {
return [];
}
// TODO(klimt): Do this incrementally so that it's not quadratic when
// updating many documents.
const oldLimboDocuments = this.limboDocuments;
this.limboDocuments = documentKeySet();
this.documentSet.forEach(doc => {
if (this.shouldBeInLimbo(doc.key)) {
this.limboDocuments = this.limboDocuments.add(doc.key);
}
});
// Diff the new limbo docs with the old limbo docs.
const changes = [];
oldLimboDocuments.forEach(key => {
if (!this.limboDocuments.has(key)) {
changes.push(new RemovedLimboDocument(key));
}
});
this.limboDocuments.forEach(key => {
if (!oldLimboDocuments.has(key)) {
changes.push(new AddedLimboDocument(key));
}
});
return changes;
}
/**
* Update the in-memory state of the current view with the state read from
* persistence.
*
* We update the query view whenever a client's primary status changes:
* - When a client transitions from primary to secondary, it can miss
* LocalStorage updates and its query views may temporarily not be
* synchronized with the state on disk.
* - For secondary to primary transitions, the client needs to update the list
* of `syncedDocuments` since secondary clients update their query views
* based purely on synthesized RemoteEvents.
*
* @param queryResult.documents - The documents that match the query according
* to the LocalStore.
* @param queryResult.remoteKeys - The keys of the documents that match the
* query according to the backend.
*
* @returns The ViewChange that resulted from this synchronization.
*/
// PORTING NOTE: Multi-tab only.
synchronizeWithPersistedState(queryResult) {
this._syncedDocuments = queryResult.remoteKeys;
this.limboDocuments = documentKeySet();
const docChanges = this.computeDocChanges(queryResult.documents);
return this.applyChanges(docChanges, /*updateLimboDocuments=*/ true);
}
/**
* Returns a view snapshot as if this query was just listened to. Contains
* a document add for every existing document and the `fromCache` and
* `hasPendingWrites` status of the already established view.
*/
// PORTING NOTE: Multi-tab only.
computeInitialSnapshot() {
return ViewSnapshot.fromInitialDocuments(this.query, this.documentSet, this.mutatedKeys, this.syncState === 0 /* SyncState.Local */, this.hasCachedResults);
}
}
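// Illustrative sketch (comment only, not executed): the typical View lifecycle as
// driven by the SyncEngine below. `query`, `remoteKeys`, `documents` and
// `targetChange` are hypothetical inputs.
//
//   const view = new View(query, remoteKeys);
//   const docChanges = view.computeDocChanges(documents); // pure; view unchanged
//   // if docChanges.needsRefill, re-query the local cache and recompute:
//   //   view.computeDocChanges(refilledDocuments, docChanges)
//   const { snapshot, limboChanges } =
//     view.applyChanges(docChanges, /* updateLimboDocuments= */ true, targetChange);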
function compareChangeType(c1, c2) {
const order = (change) => {
switch (change) {
case 0 /* ChangeType.Added */:
return 1;
case 2 /* ChangeType.Modified */:
return 2;
case 3 /* ChangeType.Metadata */:
// A metadata change is converted to a modified change at the public
// api layer. Since we sort by document key and then change type,
// metadata and modified changes must be sorted equivalently.
return 2;
case 1 /* ChangeType.Removed */:
return 0;
default:
return fail();
}
};
return order(c1) - order(c2);
}
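// The resulting ordering is Removed < Added < Modified/Metadata. For example:
//
//   [2 /* Modified */, 1 /* Removed */, 0 /* Added */].sort(compareChangeType)
//   // => [1 /* Removed */, 0 /* Added */, 2 /* Modified */]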
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$3 = 'SyncEngine';
/**
* QueryView contains all of the data that SyncEngine needs to keep track of for
* a particular query.
*/
class QueryView {
constructor(
/**
* The query itself.
*/
query,
/**
* The target number created by the client that is used in the watch
* stream to identify this query.
*/
targetId,
/**
* The view is responsible for computing the final merged truth of what
* docs are in the query. It gets notified of local and remote changes,
* and applies the query filters and limits to determine the most correct
* possible results.
*/
view) {
this.query = query;
this.targetId = targetId;
this.view = view;
}
}
/** Tracks a limbo resolution. */
class LimboResolution {
constructor(key) {
this.key = key;
/**
* Set to true once we've received a document. This is used in
* getRemoteKeysForTarget() and ultimately used by WatchChangeAggregator to
* decide whether it needs to manufacture a delete event for the target once
* the target is CURRENT.
*/
this.receivedDocument = false;
}
}
/**
* An implementation of `SyncEngine` coordinating with other parts of the SDK.
*
* The parts of SyncEngine that act as a callback to RemoteStore need to be
* registered individually. This is done in `syncEngineWrite()` and
* `syncEngineListen()` (as well as `applyPrimaryState()`) as these methods
* serve as entry points to RemoteStore's functionality.
*
* Note: some fields defined in this class might have a public access level,
* but the class is not exported, so they are only accessible from this module.
* This is useful for implementing optional features (like bundles) in free
* functions, such that they are tree-shakeable.
*/
class SyncEngineImpl {
constructor(localStore, remoteStore, eventManager,
// PORTING NOTE: Manages state synchronization in multi-tab environments.
sharedClientState, currentUser, maxConcurrentLimboResolutions) {
this.localStore = localStore;
this.remoteStore = remoteStore;
this.eventManager = eventManager;
this.sharedClientState = sharedClientState;
this.currentUser = currentUser;
this.maxConcurrentLimboResolutions = maxConcurrentLimboResolutions;
this.syncEngineListener = {};
this.queryViewsByQuery = new ObjectMap(q => canonifyQuery(q), queryEquals);
this.queriesByTarget = new Map();
/**
* The keys of documents that are in limbo for which we haven't yet started a
* limbo resolution query. The strings in this set are the result of calling
* `key.path.canonicalString()` where `key` is a `DocumentKey` object.
*
* The `Set` type was chosen because it provides efficient lookup and removal
* of arbitrary elements and it also maintains insertion order, providing the
* desired queue-like FIFO semantics.
*/
this.enqueuedLimboResolutions = new Set();
/**
* Keeps track of the target ID for each document that is in limbo with an
* active target.
*/
this.activeLimboTargetsByKey = new SortedMap(DocumentKey.comparator);
/**
* Keeps track of the information about an active limbo resolution for each
* active target ID that was started for the purpose of limbo resolution.
*/
this.activeLimboResolutionsByTarget = new Map();
this.limboDocumentRefs = new ReferenceSet();
/** Stores user completion handlers, indexed by User and BatchId. */
this.mutationUserCallbacks = {};
/** Stores user callbacks waiting for all pending writes to be acknowledged. */
this.pendingWritesCallbacks = new Map();
this.limboTargetIdGenerator = TargetIdGenerator.forSyncEngine();
this.onlineState = "Unknown" /* OnlineState.Unknown */;
// The primary state is set to `true` or `false` immediately after Firestore
// startup. In the interim, a client should only be considered primary if
// `isPrimary` is true.
this._isPrimaryClient = undefined;
}
get isPrimaryClient() {
return this._isPrimaryClient === true;
}
}
function newSyncEngine(localStore, remoteStore, eventManager,
// PORTING NOTE: Manages state synchronization in multi-tab environments.
sharedClientState, currentUser, maxConcurrentLimboResolutions, isPrimary) {
const syncEngine = new SyncEngineImpl(localStore, remoteStore, eventManager, sharedClientState, currentUser, maxConcurrentLimboResolutions);
if (isPrimary) {
syncEngine._isPrimaryClient = true;
}
return syncEngine;
}
/**
* Initiates a new listen and resolves the returned promise once the listen has
* been enqueued to the server. All subsequent view snapshots or errors are
* sent to the subscribed handlers. Returns the initial snapshot.
*/
async function syncEngineListen(syncEngine, query) {
const syncEngineImpl = ensureWatchCallbacks(syncEngine);
let targetId;
let viewSnapshot;
const queryView = syncEngineImpl.queryViewsByQuery.get(query);
if (queryView) {
// PORTING NOTE: With Multi-Tab Web, it is possible that a query view
// already exists when EventManager calls us for the first time. This
// happens when the primary tab is already listening to this query on
// behalf of another tab and the user of the primary also starts listening
// to the query. EventManager will not have an assigned target ID in this
// case and calls `listen` to obtain this ID.
targetId = queryView.targetId;
syncEngineImpl.sharedClientState.addLocalQueryTarget(targetId);
viewSnapshot = queryView.view.computeInitialSnapshot();
}
else {
const targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, queryToTarget(query));
if (syncEngineImpl.isPrimaryClient) {
remoteStoreListen(syncEngineImpl.remoteStore, targetData);
}
const status = syncEngineImpl.sharedClientState.addLocalQueryTarget(targetData.targetId);
targetId = targetData.targetId;
viewSnapshot = await initializeViewAndComputeSnapshot(syncEngineImpl, query, targetId, status === 'current', targetData.resumeToken);
}
return viewSnapshot;
}
/**
* Registers a view for a previously unknown query and computes its initial
* snapshot.
*/
async function initializeViewAndComputeSnapshot(syncEngineImpl, query, targetId, current, resumeToken) {
// PORTING NOTE: On Web only, we inject the code that registers new Limbo
// targets based on view changes. This allows us to only depend on Limbo
// changes when user code includes queries.
syncEngineImpl.applyDocChanges = (queryView, changes, remoteEvent) => applyDocChanges(syncEngineImpl, queryView, changes, remoteEvent);
const queryResult = await localStoreExecuteQuery(syncEngineImpl.localStore, query,
/* usePreviousResults= */ true);
const view = new View(query, queryResult.remoteKeys);
const viewDocChanges = view.computeDocChanges(queryResult.documents);
const synthesizedTargetChange = TargetChange.createSynthesizedTargetChangeForCurrentChange(targetId, current && syncEngineImpl.onlineState !== "Offline" /* OnlineState.Offline */, resumeToken);
const viewChange = view.applyChanges(viewDocChanges,
/* updateLimboDocuments= */ syncEngineImpl.isPrimaryClient, synthesizedTargetChange);
updateTrackedLimbos(syncEngineImpl, targetId, viewChange.limboChanges);
const data = new QueryView(query, targetId, view);
syncEngineImpl.queryViewsByQuery.set(query, data);
if (syncEngineImpl.queriesByTarget.has(targetId)) {
syncEngineImpl.queriesByTarget.get(targetId).push(query);
}
else {
syncEngineImpl.queriesByTarget.set(targetId, [query]);
}
return viewChange.snapshot;
}
/** Stops listening to the query. */
async function syncEngineUnlisten(syncEngine, query) {
const syncEngineImpl = debugCast(syncEngine);
const queryView = syncEngineImpl.queryViewsByQuery.get(query);
// Only clean up the query view and target if this is the only query mapped
// to the target.
const queries = syncEngineImpl.queriesByTarget.get(queryView.targetId);
if (queries.length > 1) {
syncEngineImpl.queriesByTarget.set(queryView.targetId, queries.filter(q => !queryEquals(q, query)));
syncEngineImpl.queryViewsByQuery.delete(query);
return;
}
// No other queries are mapped to the target, clean up the query and the target.
if (syncEngineImpl.isPrimaryClient) {
// We need to remove the local query target first to allow us to verify
// whether any other client is still interested in this target.
syncEngineImpl.sharedClientState.removeLocalQueryTarget(queryView.targetId);
const targetRemainsActive = syncEngineImpl.sharedClientState.isActiveQueryTarget(queryView.targetId);
if (!targetRemainsActive) {
await localStoreReleaseTarget(syncEngineImpl.localStore, queryView.targetId,
/*keepPersistedTargetData=*/ false)
.then(() => {
syncEngineImpl.sharedClientState.clearQueryState(queryView.targetId);
remoteStoreUnlisten(syncEngineImpl.remoteStore, queryView.targetId);
removeAndCleanupTarget(syncEngineImpl, queryView.targetId);
})
.catch(ignoreIfPrimaryLeaseLoss);
}
}
else {
removeAndCleanupTarget(syncEngineImpl, queryView.targetId);
await localStoreReleaseTarget(syncEngineImpl.localStore, queryView.targetId,
/*keepPersistedTargetData=*/ true);
}
}
/**
* Initiates the write of a local mutation batch, which involves adding the
* writes to the mutation queue, notifying the remote store about new
* mutations and raising events for any changes this write caused.
*
* The promise returned by this call is resolved when the above steps
* have completed, *not* when the write was acked by the backend. The
* userCallback is resolved once the write was acked/rejected by the
* backend (or failed locally for any other reason).
*/
async function syncEngineWrite(syncEngine, batch, userCallback) {
const syncEngineImpl = syncEngineEnsureWriteCallbacks(syncEngine);
try {
const result = await localStoreWriteLocally(syncEngineImpl.localStore, batch);
syncEngineImpl.sharedClientState.addPendingMutation(result.batchId);
addMutationCallback(syncEngineImpl, result.batchId, userCallback);
await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, result.changes);
await fillWritePipeline(syncEngineImpl.remoteStore);
}
catch (e) {
// If we can't persist the mutation, we reject the user callback and
// don't send the mutation. The user can then retry the write.
const error = wrapInUserErrorIfRecoverable(e, `Failed to persist write`);
userCallback.reject(error);
}
}
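// Illustrative sketch (comment only, not executed): callers hand in a deferred-style
// `userCallback` with resolve/reject. The promise returned by syncEngineWrite settles
// once the batch is persisted locally, while `userCallback` settles on backend
// ack/rejection. `syncEngine`, `batch` and `Deferred` are hypothetical here.
//
//   const userCallback = new Deferred();
//   await syncEngineWrite(syncEngine, batch, userCallback); // persisted locally
//   await userCallback.promise;                             // acked (or rejected)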
/**
* Applies one remote event to the sync engine, notifying any views of the
* changes, and releasing any pending mutation batches that would become
* visible because of the snapshot version the remote event contains.
*/
async function syncEngineApplyRemoteEvent(syncEngine, remoteEvent) {
const syncEngineImpl = debugCast(syncEngine);
try {
const changes = await localStoreApplyRemoteEventToLocalCache(syncEngineImpl.localStore, remoteEvent);
// Update `receivedDocument` as appropriate for any limbo targets.
remoteEvent.targetChanges.forEach((targetChange, targetId) => {
const limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
if (limboResolution) {
// Since this is a limbo resolution lookup, it's for a single document
// and it could be added, modified, or removed, but not a combination.
hardAssert(targetChange.addedDocuments.size +
targetChange.modifiedDocuments.size +
targetChange.removedDocuments.size <=
1);
if (targetChange.addedDocuments.size > 0) {
limboResolution.receivedDocument = true;
}
else if (targetChange.modifiedDocuments.size > 0) {
hardAssert(limboResolution.receivedDocument);
}
else if (targetChange.removedDocuments.size > 0) {
hardAssert(limboResolution.receivedDocument);
limboResolution.receivedDocument = false;
}
else {
// This was probably just a CURRENT targetChange or similar.
}
}
});
await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes, remoteEvent);
}
catch (error) {
await ignoreIfPrimaryLeaseLoss(error);
}
}
/**
* Applies an OnlineState change to the sync engine and notifies any views of
* the change.
*/
function syncEngineApplyOnlineStateChange(syncEngine, onlineState, source) {
const syncEngineImpl = debugCast(syncEngine);
// If we are the secondary client, we explicitly ignore the remote store's
// online state (the local client may go offline, even though the primary
// tab remains online) and only apply the primary tab's online state from
// SharedClientState.
if ((syncEngineImpl.isPrimaryClient &&
source === 0 /* OnlineStateSource.RemoteStore */) ||
(!syncEngineImpl.isPrimaryClient &&
source === 1 /* OnlineStateSource.SharedClientState */)) {
const newViewSnapshots = [];
syncEngineImpl.queryViewsByQuery.forEach((query, queryView) => {
const viewChange = queryView.view.applyOnlineStateChange(onlineState);
if (viewChange.snapshot) {
newViewSnapshots.push(viewChange.snapshot);
}
});
eventManagerOnOnlineStateChange(syncEngineImpl.eventManager, onlineState);
if (newViewSnapshots.length) {
syncEngineImpl.syncEngineListener.onWatchChange(newViewSnapshots);
}
syncEngineImpl.onlineState = onlineState;
if (syncEngineImpl.isPrimaryClient) {
syncEngineImpl.sharedClientState.setOnlineState(onlineState);
}
}
}
/**
* Rejects the listen for the given targetID. This can be triggered by the
* backend for any active target.
*
* @param syncEngine - The sync engine implementation.
* @param targetId - The targetID corresponding to a listen previously initiated
* by the user as part of TargetData passed to listen() on RemoteStore.
* @param err - A description of the condition that has forced the rejection.
* Nearly always this will be an indication that the user is no longer
* authorized to see the data matching the target.
*/
async function syncEngineRejectListen(syncEngine, targetId, err) {
const syncEngineImpl = debugCast(syncEngine);
// PORTING NOTE: Multi-tab only.
syncEngineImpl.sharedClientState.updateQueryState(targetId, 'rejected', err);
const limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
const limboKey = limboResolution && limboResolution.key;
if (limboKey) {
// TODO(klimt): We really only should do the following on permission
// denied errors, but we don't have the cause code here.
// It's a limbo doc. Create a synthetic event saying it was deleted.
// This is kind of a hack. Ideally, we would have a method in the local
// store to purge a document. However, it would be tricky to keep all of
// the local store's invariants with another method.
let documentUpdates = new SortedMap(DocumentKey.comparator);
// TODO(b/217189216): This limbo document should ideally have a read time,
// so that it is picked up by any read-time based scans. The backend,
// however, does not send a read time for target removals.
documentUpdates = documentUpdates.insert(limboKey, MutableDocument.newNoDocument(limboKey, SnapshotVersion.min()));
const resolvedLimboDocuments = documentKeySet().add(limboKey);
const event = new RemoteEvent(SnapshotVersion.min(),
/* targetChanges= */ new Map(),
/* targetMismatches= */ new SortedSet(primitiveComparator), documentUpdates, resolvedLimboDocuments);
await syncEngineApplyRemoteEvent(syncEngineImpl, event);
// Since this query failed, we won't want to manually unlisten to it.
// We only remove it from bookkeeping after we successfully applied the
// RemoteEvent. If `applyRemoteEvent()` throws, we want to re-listen to
// this query when the RemoteStore restarts the Watch stream, which should
// re-trigger the target failure.
syncEngineImpl.activeLimboTargetsByKey =
syncEngineImpl.activeLimboTargetsByKey.remove(limboKey);
syncEngineImpl.activeLimboResolutionsByTarget.delete(targetId);
pumpEnqueuedLimboResolutions(syncEngineImpl);
}
else {
await localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
/* keepPersistedTargetData */ false)
.then(() => removeAndCleanupTarget(syncEngineImpl, targetId, err))
.catch(ignoreIfPrimaryLeaseLoss);
}
}
async function syncEngineApplySuccessfulWrite(syncEngine, mutationBatchResult) {
const syncEngineImpl = debugCast(syncEngine);
const batchId = mutationBatchResult.batch.batchId;
try {
const changes = await localStoreAcknowledgeBatch(syncEngineImpl.localStore, mutationBatchResult);
// The local store may or may not be able to apply the write result and
// raise events immediately (depending on whether the watcher is caught
// up), so we raise user callbacks first so that they consistently happen
// before listen events.
processUserCallback(syncEngineImpl, batchId, /*error=*/ null);
triggerPendingWritesCallbacks(syncEngineImpl, batchId);
syncEngineImpl.sharedClientState.updateMutationState(batchId, 'acknowledged');
await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes);
}
catch (error) {
await ignoreIfPrimaryLeaseLoss(error);
}
}
async function syncEngineRejectFailedWrite(syncEngine, batchId, error) {
const syncEngineImpl = debugCast(syncEngine);
try {
const changes = await localStoreRejectBatch(syncEngineImpl.localStore, batchId);
// The local store may or may not be able to apply the write result and
// raise events immediately (depending on whether the watcher is caught up),
// so we raise user callbacks first so that they consistently happen before
// listen events.
processUserCallback(syncEngineImpl, batchId, error);
triggerPendingWritesCallbacks(syncEngineImpl, batchId);
syncEngineImpl.sharedClientState.updateMutationState(batchId, 'rejected', error);
await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes);
}
catch (error) {
await ignoreIfPrimaryLeaseLoss(error);
}
}
/**
* Registers a user callback that resolves when all pending mutations at the
* moment of calling are acknowledged.
*/
async function syncEngineRegisterPendingWritesCallback(syncEngine, callback) {
const syncEngineImpl = debugCast(syncEngine);
if (!canUseNetwork(syncEngineImpl.remoteStore)) {
logDebug(LOG_TAG$3, 'The network is disabled. The task returned by ' +
"'awaitPendingWrites()' will not complete until the network is enabled.");
}
try {
const highestBatchId = await localStoreGetHighestUnacknowledgedBatchId(syncEngineImpl.localStore);
if (highestBatchId === BATCHID_UNKNOWN) {
// Trigger the callback right away if there are no pending writes at the moment.
callback.resolve();
return;
}
const callbacks = syncEngineImpl.pendingWritesCallbacks.get(highestBatchId) || [];
callbacks.push(callback);
syncEngineImpl.pendingWritesCallbacks.set(highestBatchId, callbacks);
}
catch (e) {
const firestoreError = wrapInUserErrorIfRecoverable(e, 'Initialization of waitForPendingWrites() operation failed');
callback.reject(firestoreError);
}
}
/**
* Triggers the callbacks that are waiting for this batch id to get acknowledged by the server,
* if there are any.
*/
function triggerPendingWritesCallbacks(syncEngineImpl, batchId) {
(syncEngineImpl.pendingWritesCallbacks.get(batchId) || []).forEach(callback => {
callback.resolve();
});
syncEngineImpl.pendingWritesCallbacks.delete(batchId);
}
/** Reject all outstanding callbacks waiting for pending writes to complete. */
function rejectOutstandingPendingWritesCallbacks(syncEngineImpl, errorMessage) {
syncEngineImpl.pendingWritesCallbacks.forEach(callbacks => {
callbacks.forEach(callback => {
callback.reject(new FirestoreError(Code.CANCELLED, errorMessage));
});
});
syncEngineImpl.pendingWritesCallbacks.clear();
}
function addMutationCallback(syncEngineImpl, batchId, callback) {
let newCallbacks = syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()];
if (!newCallbacks) {
newCallbacks = new SortedMap(primitiveComparator);
}
newCallbacks = newCallbacks.insert(batchId, callback);
syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()] =
newCallbacks;
}
/**
* Resolves or rejects the user callback for the given batch and then discards
* it.
*/
function processUserCallback(syncEngine, batchId, error) {
const syncEngineImpl = debugCast(syncEngine);
let newCallbacks = syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()];
// NOTE: Mutations restored from persistence won't have callbacks, so it's
// okay for there to be no callback for this ID.
if (newCallbacks) {
const callback = newCallbacks.get(batchId);
if (callback) {
if (error) {
callback.reject(error);
}
else {
callback.resolve();
}
newCallbacks = newCallbacks.remove(batchId);
}
syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()] =
newCallbacks;
}
}
function removeAndCleanupTarget(syncEngineImpl, targetId, error = null) {
syncEngineImpl.sharedClientState.removeLocalQueryTarget(targetId);
for (const query of syncEngineImpl.queriesByTarget.get(targetId)) {
syncEngineImpl.queryViewsByQuery.delete(query);
if (error) {
syncEngineImpl.syncEngineListener.onWatchError(query, error);
}
}
syncEngineImpl.queriesByTarget.delete(targetId);
if (syncEngineImpl.isPrimaryClient) {
const limboKeys = syncEngineImpl.limboDocumentRefs.removeReferencesForId(targetId);
limboKeys.forEach(limboKey => {
const isReferenced = syncEngineImpl.limboDocumentRefs.containsKey(limboKey);
if (!isReferenced) {
// We removed the last reference for this key
removeLimboTarget(syncEngineImpl, limboKey);
}
});
}
}
function removeLimboTarget(syncEngineImpl, key) {
syncEngineImpl.enqueuedLimboResolutions.delete(key.path.canonicalString());
// It's possible that the target already got removed because the query failed. In that case,
// the key won't exist in `activeLimboTargetsByKey`. Only do the cleanup if we still have the target.
const limboTargetId = syncEngineImpl.activeLimboTargetsByKey.get(key);
if (limboTargetId === null) {
// This target already got removed, because the query failed.
return;
}
remoteStoreUnlisten(syncEngineImpl.remoteStore, limboTargetId);
syncEngineImpl.activeLimboTargetsByKey =
syncEngineImpl.activeLimboTargetsByKey.remove(key);
syncEngineImpl.activeLimboResolutionsByTarget.delete(limboTargetId);
pumpEnqueuedLimboResolutions(syncEngineImpl);
}
function updateTrackedLimbos(syncEngineImpl, targetId, limboChanges) {
for (const limboChange of limboChanges) {
if (limboChange instanceof AddedLimboDocument) {
syncEngineImpl.limboDocumentRefs.addReference(limboChange.key, targetId);
trackLimboChange(syncEngineImpl, limboChange);
}
else if (limboChange instanceof RemovedLimboDocument) {
logDebug(LOG_TAG$3, 'Document no longer in limbo: ' + limboChange.key);
syncEngineImpl.limboDocumentRefs.removeReference(limboChange.key, targetId);
const isReferenced = syncEngineImpl.limboDocumentRefs.containsKey(limboChange.key);
if (!isReferenced) {
// We removed the last reference for this key
removeLimboTarget(syncEngineImpl, limboChange.key);
}
}
else {
fail();
}
}
}
function trackLimboChange(syncEngineImpl, limboChange) {
const key = limboChange.key;
const keyString = key.path.canonicalString();
if (!syncEngineImpl.activeLimboTargetsByKey.get(key) &&
!syncEngineImpl.enqueuedLimboResolutions.has(keyString)) {
logDebug(LOG_TAG$3, 'New document in limbo: ' + key);
syncEngineImpl.enqueuedLimboResolutions.add(keyString);
pumpEnqueuedLimboResolutions(syncEngineImpl);
}
}
/**
* Starts listens for documents in limbo that are enqueued for resolution,
* subject to a maximum number of concurrent resolutions.
*
* Without bounding the number of concurrent resolutions, the server can fail
* with "resource exhausted" errors which can lead to pathological client
* behavior as seen in https://github.com/firebase/firebase-js-sdk/issues/2683.
*/
function pumpEnqueuedLimboResolutions(syncEngineImpl) {
while (syncEngineImpl.enqueuedLimboResolutions.size > 0 &&
syncEngineImpl.activeLimboTargetsByKey.size <
syncEngineImpl.maxConcurrentLimboResolutions) {
const keyString = syncEngineImpl.enqueuedLimboResolutions
.values()
.next().value;
syncEngineImpl.enqueuedLimboResolutions.delete(keyString);
const key = new DocumentKey(ResourcePath.fromString(keyString));
const limboTargetId = syncEngineImpl.limboTargetIdGenerator.next();
syncEngineImpl.activeLimboResolutionsByTarget.set(limboTargetId, new LimboResolution(key));
syncEngineImpl.activeLimboTargetsByKey =
syncEngineImpl.activeLimboTargetsByKey.insert(key, limboTargetId);
remoteStoreListen(syncEngineImpl.remoteStore, new TargetData(queryToTarget(newQueryForPath(key.path)), limboTargetId, 2 /* TargetPurpose.LimboResolution */, ListenSequence.INVALID));
}
}
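// Note on the queue above: `enqueuedLimboResolutions` is a Set used as a FIFO
// queue. JavaScript Sets iterate in insertion order, so
// `set.values().next().value` dequeues the oldest key while `has()`/`delete()`
// stay O(1) for the membership checks in `trackLimboChange`.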
async function syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngine, changes, remoteEvent) {
const syncEngineImpl = debugCast(syncEngine);
const newSnaps = [];
const docChangesInAllViews = [];
const queriesProcessed = [];
if (syncEngineImpl.queryViewsByQuery.isEmpty()) {
// Return early since `onWatchChange()` might not have been assigned yet.
return;
}
syncEngineImpl.queryViewsByQuery.forEach((_, queryView) => {
queriesProcessed.push(syncEngineImpl
.applyDocChanges(queryView, changes, remoteEvent)
.then(viewSnapshot => {
// If there are changes, or we are handling a global snapshot, notify
// secondary clients to update query state.
if (viewSnapshot || remoteEvent) {
if (syncEngineImpl.isPrimaryClient) {
syncEngineImpl.sharedClientState.updateQueryState(queryView.targetId, (viewSnapshot === null || viewSnapshot === void 0 ? void 0 : viewSnapshot.fromCache) ? 'not-current' : 'current');
}
}
// Update views if there are actual changes.
if (viewSnapshot) {
newSnaps.push(viewSnapshot);
const docChanges = LocalViewChanges.fromSnapshot(queryView.targetId, viewSnapshot);
docChangesInAllViews.push(docChanges);
}
}));
});
await Promise.all(queriesProcessed);
syncEngineImpl.syncEngineListener.onWatchChange(newSnaps);
await localStoreNotifyLocalViewChanges(syncEngineImpl.localStore, docChangesInAllViews);
}
async function applyDocChanges(syncEngineImpl, queryView, changes, remoteEvent) {
let viewDocChanges = queryView.view.computeDocChanges(changes);
if (viewDocChanges.needsRefill) {
// The query has a limit and some docs were removed, so we need
// to re-run the query against the local store to make sure we
// didn't lose any good docs that had been past the limit.
viewDocChanges = await localStoreExecuteQuery(syncEngineImpl.localStore, queryView.query,
/* usePreviousResults= */ false).then(({ documents }) => {
return queryView.view.computeDocChanges(documents, viewDocChanges);
});
}
const targetChange = remoteEvent && remoteEvent.targetChanges.get(queryView.targetId);
const viewChange = queryView.view.applyChanges(viewDocChanges,
/* updateLimboDocuments= */ syncEngineImpl.isPrimaryClient, targetChange);
updateTrackedLimbos(syncEngineImpl, queryView.targetId, viewChange.limboChanges);
return viewChange.snapshot;
}
async function syncEngineHandleCredentialChange(syncEngine, user) {
const syncEngineImpl = debugCast(syncEngine);
const userChanged = !syncEngineImpl.currentUser.isEqual(user);
if (userChanged) {
logDebug(LOG_TAG$3, 'User change. New user:', user.toKey());
const result = await localStoreHandleUserChange(syncEngineImpl.localStore, user);
syncEngineImpl.currentUser = user;
// Fails tasks waiting for pending writes requested by previous user.
rejectOutstandingPendingWritesCallbacks(syncEngineImpl, "'waitForPendingWrites' promise is rejected due to a user change.");
// TODO(b/114226417): Consider calling this only in the primary tab.
syncEngineImpl.sharedClientState.handleUserChange(user, result.removedBatchIds, result.addedBatchIds);
await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, result.affectedDocuments);
}
}
function syncEngineGetRemoteKeysForTarget(syncEngine, targetId) {
const syncEngineImpl = debugCast(syncEngine);
const limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
if (limboResolution && limboResolution.receivedDocument) {
return documentKeySet().add(limboResolution.key);
}
else {
let keySet = documentKeySet();
const queries = syncEngineImpl.queriesByTarget.get(targetId);
if (!queries) {
return keySet;
}
for (const query of queries) {
const queryView = syncEngineImpl.queryViewsByQuery.get(query);
keySet = keySet.unionWith(queryView.view.syncedDocuments);
}
return keySet;
}
}
/**
* Reconcile the list of synced documents in an existing view with those
* from persistence.
*/
async function synchronizeViewAndComputeSnapshot(syncEngine, queryView) {
const syncEngineImpl = debugCast(syncEngine);
const queryResult = await localStoreExecuteQuery(syncEngineImpl.localStore, queryView.query,
/* usePreviousResults= */ true);
const viewSnapshot = queryView.view.synchronizeWithPersistedState(queryResult);
if (syncEngineImpl.isPrimaryClient) {
updateTrackedLimbos(syncEngineImpl, queryView.targetId, viewSnapshot.limboChanges);
}
return viewSnapshot;
}
/**
* Retrieves newly changed documents from the remote document cache and raises
* snapshots if needed.
*/
// PORTING NOTE: Multi-Tab only.
async function syncEngineSynchronizeWithChangedDocuments(syncEngine, collectionGroup) {
const syncEngineImpl = debugCast(syncEngine);
return localStoreGetNewDocumentChanges(syncEngineImpl.localStore, collectionGroup).then(changes => syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes));
}
/** Applies a mutation state to an existing batch. */
// PORTING NOTE: Multi-Tab only.
async function syncEngineApplyBatchState(syncEngine, batchId, batchState, error) {
const syncEngineImpl = debugCast(syncEngine);
const documents = await localStoreLookupMutationDocuments(syncEngineImpl.localStore, batchId);
if (documents === null) {
// A throttled tab may not have seen the mutation before it was completed
// and removed from the mutation queue, in which case we won't have cached
// the affected documents. In this case we can safely ignore the update
// since that means we didn't apply the mutation locally at all (if we
// had, we would have cached the affected documents), and so we will just
// see any resulting document changes via normal remote document updates
// as applicable.
logDebug(LOG_TAG$3, 'Cannot apply mutation batch with id: ' + batchId);
return;
}
if (batchState === 'pending') {
// If we are the primary client, we need to send this write to the
// backend. Secondary clients will ignore these writes since their remote
// connection is disabled.
await fillWritePipeline(syncEngineImpl.remoteStore);
}
else if (batchState === 'acknowledged' || batchState === 'rejected') {
// NOTE: Both these methods are no-ops for batches that originated from
// other clients.
processUserCallback(syncEngineImpl, batchId, error ? error : null);
triggerPendingWritesCallbacks(syncEngineImpl, batchId);
localStoreRemoveCachedMutationBatchMetadata(syncEngineImpl.localStore, batchId);
}
else {
fail();
}
await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, documents);
}
/** Applies a primary-lease state change, promoting or demoting this tab. */
// PORTING NOTE: Multi-Tab only.
async function syncEngineApplyPrimaryState(syncEngine, isPrimary) {
const syncEngineImpl = debugCast(syncEngine);
ensureWatchCallbacks(syncEngineImpl);
syncEngineEnsureWriteCallbacks(syncEngineImpl);
if (isPrimary === true && syncEngineImpl._isPrimaryClient !== true) {
// Secondary tabs only maintain Views for their local listeners and the
// Views internal state may not be 100% populated (in particular
// secondary tabs don't track syncedDocuments, the set of documents the
// server considers to be in the target). So when a secondary becomes
// primary, we need to make sure that all views for all targets
// match the state on disk.
const activeTargets = syncEngineImpl.sharedClientState.getAllActiveQueryTargets();
const activeQueries = await synchronizeQueryViewsAndRaiseSnapshots(syncEngineImpl, activeTargets.toArray());
syncEngineImpl._isPrimaryClient = true;
await remoteStoreApplyPrimaryState(syncEngineImpl.remoteStore, true);
for (const targetData of activeQueries) {
remoteStoreListen(syncEngineImpl.remoteStore, targetData);
}
}
else if (isPrimary === false && syncEngineImpl._isPrimaryClient !== false) {
const activeTargets = [];
let p = Promise.resolve();
syncEngineImpl.queriesByTarget.forEach((_, targetId) => {
if (syncEngineImpl.sharedClientState.isLocalQueryTarget(targetId)) {
activeTargets.push(targetId);
}
else {
p = p.then(() => {
removeAndCleanupTarget(syncEngineImpl, targetId);
return localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
/*keepPersistedTargetData=*/ true);
});
}
remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
});
await p;
await synchronizeQueryViewsAndRaiseSnapshots(syncEngineImpl, activeTargets);
resetLimboDocuments(syncEngineImpl);
syncEngineImpl._isPrimaryClient = false;
await remoteStoreApplyPrimaryState(syncEngineImpl.remoteStore, false);
}
}
// PORTING NOTE: Multi-Tab only.
function resetLimboDocuments(syncEngine) {
const syncEngineImpl = debugCast(syncEngine);
syncEngineImpl.activeLimboResolutionsByTarget.forEach((_, targetId) => {
remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
});
syncEngineImpl.limboDocumentRefs.removeAllReferences();
syncEngineImpl.activeLimboResolutionsByTarget = new Map();
syncEngineImpl.activeLimboTargetsByKey = new SortedMap(DocumentKey.comparator);
}
/**
* Reconcile the query views of the provided query targets with the state from
* persistence. Raises snapshots for any changes that affect the local
* client and returns the updated state of all targets' query data.
*
* @param syncEngine - The sync engine implementation
* @param targets - the list of targets with views that need to be recomputed
* @param transitionToPrimary - `true` iff the tab transitions from a secondary
* tab to a primary tab
*/
// PORTING NOTE: Multi-Tab only.
async function synchronizeQueryViewsAndRaiseSnapshots(syncEngine, targets, transitionToPrimary) {
const syncEngineImpl = debugCast(syncEngine);
const activeQueries = [];
const newViewSnapshots = [];
for (const targetId of targets) {
let targetData;
const queries = syncEngineImpl.queriesByTarget.get(targetId);
if (queries && queries.length !== 0) {
// For queries that have a local View, we fetch their current state
// from LocalStore (as the resume token and the snapshot version
// might have changed) and reconcile their views with the persisted
// state (the list of syncedDocuments may have gotten out of sync).
targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, queryToTarget(queries[0]));
for (const query of queries) {
const queryView = syncEngineImpl.queryViewsByQuery.get(query);
const viewChange = await synchronizeViewAndComputeSnapshot(syncEngineImpl, queryView);
if (viewChange.snapshot) {
newViewSnapshots.push(viewChange.snapshot);
}
}
}
else {
// For queries that never executed on this client, we need to
// allocate the target in LocalStore and initialize a new View.
const target = await localStoreGetCachedTarget(syncEngineImpl.localStore, targetId);
targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, target);
await initializeViewAndComputeSnapshot(syncEngineImpl, synthesizeTargetToQuery(target), targetId,
/*current=*/ false, targetData.resumeToken);
}
activeQueries.push(targetData);
}
syncEngineImpl.syncEngineListener.onWatchChange(newViewSnapshots);
return activeQueries;
}
/**
* Creates a `Query` object from the specified `Target`. There is no way to
* obtain the original `Query`, so we synthesize a `Query` from the `Target`
* object.
*
* The synthesized result might be different from the original `Query`, but
* since the synthesized `Query` should return the same results as the
* original one (only the presentation of results might differ), the potential
* difference will not cause issues.
*/
// PORTING NOTE: Multi-Tab only.
function synthesizeTargetToQuery(target) {
return newQuery(target.path, target.collectionGroup, target.orderBy, target.filters, target.limit, "F" /* LimitType.First */, target.startAt, target.endAt);
}
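// For example, a limitToLast query is stored as a Target with inverted orderBy
// constraints; since the synthesized query always uses LimitType.First ("F"),
// it can present the same result set in a different order than the original
// query, which is the "presentation" difference noted above.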
/** Returns the IDs of the clients that are currently active. */
// PORTING NOTE: Multi-Tab only.
function syncEngineGetActiveClients(syncEngine) {
const syncEngineImpl = debugCast(syncEngine);
return localStoreGetActiveClients(syncEngineImpl.localStore);
}
/** Applies a query target change from a different tab. */
// PORTING NOTE: Multi-Tab only.
async function syncEngineApplyTargetState(syncEngine, targetId, state, error) {
const syncEngineImpl = debugCast(syncEngine);
if (syncEngineImpl._isPrimaryClient) {
// If we receive a target state notification via WebStorage, we are
// either already secondary or another tab has taken the primary lease.
logDebug(LOG_TAG$3, 'Ignoring unexpected query state notification.');
return;
}
const query = syncEngineImpl.queriesByTarget.get(targetId);
if (query && query.length > 0) {
switch (state) {
case 'current':
case 'not-current': {
const changes = await localStoreGetNewDocumentChanges(syncEngineImpl.localStore, queryCollectionGroup(query[0]));
const synthesizedRemoteEvent = RemoteEvent.createSynthesizedRemoteEventForCurrentChange(targetId, state === 'current', ByteString.EMPTY_BYTE_STRING);
await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes, synthesizedRemoteEvent);
break;
}
case 'rejected': {
await localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
/* keepPersistedTargetData */ true);
removeAndCleanupTarget(syncEngineImpl, targetId, error);
break;
}
default:
fail();
}
}
}
/** Adds or removes Watch targets for queries from different tabs. */
async function syncEngineApplyActiveTargetsChange(syncEngine, added, removed) {
const syncEngineImpl = ensureWatchCallbacks(syncEngine);
if (!syncEngineImpl._isPrimaryClient) {
return;
}
for (const targetId of added) {
if (syncEngineImpl.queriesByTarget.has(targetId)) {
// A target might have been added in a previous attempt
logDebug(LOG_TAG$3, 'Adding an already active target ' + targetId);
continue;
}
const target = await localStoreGetCachedTarget(syncEngineImpl.localStore, targetId);
const targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, target);
await initializeViewAndComputeSnapshot(syncEngineImpl, synthesizeTargetToQuery(target), targetData.targetId,
/*current=*/ false, targetData.resumeToken);
remoteStoreListen(syncEngineImpl.remoteStore, targetData);
}
for (const targetId of removed) {
// Check that the target is still active since the target might have been
// removed if it has been rejected by the backend.
if (!syncEngineImpl.queriesByTarget.has(targetId)) {
continue;
}
// Release queries that are still active.
await localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
/* keepPersistedTargetData */ false)
.then(() => {
remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
removeAndCleanupTarget(syncEngineImpl, targetId);
})
.catch(ignoreIfPrimaryLeaseLoss);
}
}
function ensureWatchCallbacks(syncEngine) {
const syncEngineImpl = debugCast(syncEngine);
syncEngineImpl.remoteStore.remoteSyncer.applyRemoteEvent =
syncEngineApplyRemoteEvent.bind(null, syncEngineImpl);
syncEngineImpl.remoteStore.remoteSyncer.getRemoteKeysForTarget =
syncEngineGetRemoteKeysForTarget.bind(null, syncEngineImpl);
syncEngineImpl.remoteStore.remoteSyncer.rejectListen =
syncEngineRejectListen.bind(null, syncEngineImpl);
syncEngineImpl.syncEngineListener.onWatchChange =
eventManagerOnWatchChange.bind(null, syncEngineImpl.eventManager);
syncEngineImpl.syncEngineListener.onWatchError =
eventManagerOnWatchError.bind(null, syncEngineImpl.eventManager);
return syncEngineImpl;
}
function syncEngineEnsureWriteCallbacks(syncEngine) {
const syncEngineImpl = debugCast(syncEngine);
syncEngineImpl.remoteStore.remoteSyncer.applySuccessfulWrite =
syncEngineApplySuccessfulWrite.bind(null, syncEngineImpl);
syncEngineImpl.remoteStore.remoteSyncer.rejectFailedWrite =
syncEngineRejectFailedWrite.bind(null, syncEngineImpl);
return syncEngineImpl;
}
/**
* Loads a Firestore bundle into the SDK. The returned promise resolves when
* the bundle has finished loading.
*
* @param syncEngine - SyncEngine to use.
* @param bundleReader - Bundle to load into the SDK.
* @param task - LoadBundleTask used to update the loading progress to public API.
*/
function syncEngineLoadBundle(syncEngine, bundleReader, task) {
const syncEngineImpl = debugCast(syncEngine);
// eslint-disable-next-line @typescript-eslint/no-floating-promises
loadBundleImpl(syncEngineImpl, bundleReader, task).then(collectionGroups => {
syncEngineImpl.sharedClientState.notifyBundleLoaded(collectionGroups);
});
}
/** Loads a bundle and returns the list of affected collection groups. */
async function loadBundleImpl(syncEngine, reader, task) {
try {
const metadata = await reader.getMetadata();
const skip = await localStoreHasNewerBundle(syncEngine.localStore, metadata);
if (skip) {
await reader.close();
task._completeWith(bundleSuccessProgress(metadata));
return Promise.resolve(new Set());
}
task._updateProgress(bundleInitialProgress(metadata));
const loader = new BundleLoader(metadata, syncEngine.localStore, reader.serializer);
let element = await reader.nextElement();
while (element) {
const progress = await loader.addSizedElement(element);
if (progress) {
task._updateProgress(progress);
}
element = await reader.nextElement();
}
const result = await loader.complete();
await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngine, result.changedDocs,
/* remoteEvent */ undefined);
// Save the metadata so that loading the same bundle again will be skipped.
await localStoreSaveBundle(syncEngine.localStore, metadata);
task._completeWith(result.progress);
return Promise.resolve(result.changedCollectionGroups);
}
catch (e) {
logWarn(LOG_TAG$3, `Loading bundle failed with ${e}`);
task._failWith(e);
return Promise.resolve(new Set());
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides all components needed for Firestore with in-memory persistence.
* Uses EagerGC garbage collection.
*/
class MemoryOfflineComponentProvider {
constructor() {
this.synchronizeTabs = false;
}
async initialize(cfg) {
this.serializer = newSerializer(cfg.databaseInfo.databaseId);
this.sharedClientState = this.createSharedClientState(cfg);
this.persistence = this.createPersistence(cfg);
await this.persistence.start();
this.localStore = this.createLocalStore(cfg);
this.gcScheduler = this.createGarbageCollectionScheduler(cfg, this.localStore);
this.indexBackfillerScheduler = this.createIndexBackfillerScheduler(cfg, this.localStore);
}
createGarbageCollectionScheduler(cfg, localStore) {
return null;
}
createIndexBackfillerScheduler(cfg, localStore) {
return null;
}
createLocalStore(cfg) {
return newLocalStore(this.persistence, new QueryEngine(), cfg.initialUser, this.serializer);
}
createPersistence(cfg) {
return new MemoryPersistence(MemoryEagerDelegate.factory, this.serializer);
}
createSharedClientState(cfg) {
return new MemorySharedClientState();
}
async terminate() {
if (this.gcScheduler) {
this.gcScheduler.stop();
}
await this.sharedClientState.shutdown();
await this.persistence.shutdown();
}
}
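// Hedged sketch of the provider lifecycle (the `cfg` object is assumed to
// come from `FirestoreClient.getConfiguration()` further below):
//
//   const provider = new MemoryOfflineComponentProvider();
//   await provider.initialize(cfg); // starts persistence, builds the LocalStore
//   // ... use provider.localStore, provider.persistence ...
//   await provider.terminate();     // stops GC and shuts components down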
/**
* Provides all components needed for Firestore with IndexedDB persistence.
*/
class IndexedDbOfflineComponentProvider extends MemoryOfflineComponentProvider {
constructor(onlineComponentProvider, cacheSizeBytes, forceOwnership) {
super();
this.onlineComponentProvider = onlineComponentProvider;
this.cacheSizeBytes = cacheSizeBytes;
this.forceOwnership = forceOwnership;
this.synchronizeTabs = false;
}
async initialize(cfg) {
await super.initialize(cfg);
await this.onlineComponentProvider.initialize(this, cfg);
// Enqueue writes from a previous session
await syncEngineEnsureWriteCallbacks(this.onlineComponentProvider.syncEngine);
await fillWritePipeline(this.onlineComponentProvider.remoteStore);
// NOTE: This will immediately call the listener, so we make sure to
// set it after localStore / remoteStore are started.
await this.persistence.setPrimaryStateListener(() => {
if (this.gcScheduler && !this.gcScheduler.started) {
this.gcScheduler.start();
}
if (this.indexBackfillerScheduler &&
!this.indexBackfillerScheduler.started) {
this.indexBackfillerScheduler.start();
}
return Promise.resolve();
});
}
createLocalStore(cfg) {
return newLocalStore(this.persistence, new QueryEngine(), cfg.initialUser, this.serializer);
}
createGarbageCollectionScheduler(cfg, localStore) {
const garbageCollector = this.persistence.referenceDelegate.garbageCollector;
return new LruScheduler(garbageCollector, cfg.asyncQueue, localStore);
}
createIndexBackfillerScheduler(cfg, localStore) {
const indexBackfiller = new IndexBackfiller(localStore, this.persistence);
return new IndexBackfillerScheduler(cfg.asyncQueue, indexBackfiller);
}
createPersistence(cfg) {
const persistenceKey = indexedDbStoragePrefix(cfg.databaseInfo.databaseId, cfg.databaseInfo.persistenceKey);
const lruParams = this.cacheSizeBytes !== undefined
? LruParams.withCacheSize(this.cacheSizeBytes)
: LruParams.DEFAULT;
return new IndexedDbPersistence(this.synchronizeTabs, persistenceKey, cfg.clientId, lruParams, cfg.asyncQueue, getWindow(), getDocument(), this.serializer, this.sharedClientState, !!this.forceOwnership);
}
createSharedClientState(cfg) {
return new MemorySharedClientState();
}
}
/**
* Provides all components needed for Firestore with multi-tab IndexedDB
* persistence.
*
* In the legacy client, this provider is used to provide both multi-tab and
* non-multi-tab persistence since we cannot tell at build time whether
* `synchronizeTabs` will be enabled.
*/
class MultiTabOfflineComponentProvider extends IndexedDbOfflineComponentProvider {
constructor(onlineComponentProvider, cacheSizeBytes) {
super(onlineComponentProvider, cacheSizeBytes, /* forceOwnership= */ false);
this.onlineComponentProvider = onlineComponentProvider;
this.cacheSizeBytes = cacheSizeBytes;
this.synchronizeTabs = true;
}
async initialize(cfg) {
await super.initialize(cfg);
const syncEngine = this.onlineComponentProvider.syncEngine;
if (this.sharedClientState instanceof WebStorageSharedClientState) {
this.sharedClientState.syncEngine = {
applyBatchState: syncEngineApplyBatchState.bind(null, syncEngine),
applyTargetState: syncEngineApplyTargetState.bind(null, syncEngine),
applyActiveTargetsChange: syncEngineApplyActiveTargetsChange.bind(null, syncEngine),
getActiveClients: syncEngineGetActiveClients.bind(null, syncEngine),
synchronizeWithChangedDocuments: syncEngineSynchronizeWithChangedDocuments.bind(null, syncEngine)
};
await this.sharedClientState.start();
}
// NOTE: This will immediately call the listener, so we make sure to
// set it after localStore / remoteStore are started.
await this.persistence.setPrimaryStateListener(async (isPrimary) => {
await syncEngineApplyPrimaryState(this.onlineComponentProvider.syncEngine, isPrimary);
if (this.gcScheduler) {
if (isPrimary && !this.gcScheduler.started) {
this.gcScheduler.start();
}
else if (!isPrimary) {
this.gcScheduler.stop();
}
}
if (this.indexBackfillerScheduler) {
if (isPrimary && !this.indexBackfillerScheduler.started) {
this.indexBackfillerScheduler.start();
}
else if (!isPrimary) {
this.indexBackfillerScheduler.stop();
}
}
});
}
createSharedClientState(cfg) {
const window = getWindow();
if (!WebStorageSharedClientState.isAvailable(window)) {
throw new FirestoreError(Code.UNIMPLEMENTED, 'IndexedDB persistence is only available on platforms that support LocalStorage.');
}
const persistenceKey = indexedDbStoragePrefix(cfg.databaseInfo.databaseId, cfg.databaseInfo.persistenceKey);
return new WebStorageSharedClientState(window, cfg.asyncQueue, persistenceKey, cfg.clientId, cfg.initialUser);
}
}
/**
* Initializes and wires the components that are needed to interface with the
* network.
*/
class OnlineComponentProvider {
async initialize(offlineComponentProvider, cfg) {
if (this.localStore) {
// OnlineComponentProvider may get initialized multiple times if
// multi-tab persistence is used.
return;
}
this.localStore = offlineComponentProvider.localStore;
this.sharedClientState = offlineComponentProvider.sharedClientState;
this.datastore = this.createDatastore(cfg);
this.remoteStore = this.createRemoteStore(cfg);
this.eventManager = this.createEventManager(cfg);
this.syncEngine = this.createSyncEngine(cfg,
/* startAsPrimary=*/ !offlineComponentProvider.synchronizeTabs);
this.sharedClientState.onlineStateHandler = onlineState => syncEngineApplyOnlineStateChange(this.syncEngine, onlineState, 1 /* OnlineStateSource.SharedClientState */);
this.remoteStore.remoteSyncer.handleCredentialChange =
syncEngineHandleCredentialChange.bind(null, this.syncEngine);
await remoteStoreApplyPrimaryState(this.remoteStore, this.syncEngine.isPrimaryClient);
}
createEventManager(cfg) {
return newEventManager();
}
createDatastore(cfg) {
const serializer = newSerializer(cfg.databaseInfo.databaseId);
const connection = newConnection(cfg.databaseInfo);
return newDatastore(cfg.authCredentials, cfg.appCheckCredentials, connection, serializer);
}
createRemoteStore(cfg) {
return newRemoteStore(this.localStore, this.datastore, cfg.asyncQueue, onlineState => syncEngineApplyOnlineStateChange(this.syncEngine, onlineState, 0 /* OnlineStateSource.RemoteStore */), newConnectivityMonitor());
}
createSyncEngine(cfg, startAsPrimary) {
return newSyncEngine(this.localStore, this.remoteStore, this.eventManager, this.sharedClientState, cfg.initialUser, cfg.maxConcurrentLimboResolutions, startAsPrimary);
}
terminate() {
return remoteStoreShutdown(this.remoteStore);
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* How many bytes to read each time `ReadableStreamReader.read()` is
* called. Only applicable for byte streams that we control (e.g. those
* backed by a Uint8Array).
*/
const DEFAULT_BYTES_PER_READ = 10240;
/**
* Builds a `ByteStreamReader` from a Uint8Array.
* @param source - The data source to use.
* @param bytesPerRead - How many bytes each `read()` from the returned reader
* will read.
*/
function toByteStreamReaderHelper(source, bytesPerRead = DEFAULT_BYTES_PER_READ) {
let readFrom = 0;
// The TypeScript definition for ReadableStreamReader changed. We use
// `any` here to allow this code to compile with different versions.
// See https://github.com/microsoft/TypeScript/issues/42970
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const reader = {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
async read() {
if (readFrom < source.byteLength) {
const result = {
value: source.slice(readFrom, readFrom + bytesPerRead),
done: false
};
readFrom += bytesPerRead;
return result;
}
return { done: true };
},
async cancel() { },
releaseLock() { },
closed: Promise.reject('unimplemented')
};
return reader;
}
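// Worked example of the chunking behavior above (illustrative only):
// reading a 5-byte source two bytes at a time yields three data chunks and
// then a done signal, because `slice` clamps at the end of the source.
//
//   const reader = toByteStreamReaderHelper(new Uint8Array([1, 2, 3, 4, 5]), 2);
//   await reader.read(); // { value: Uint8Array [1, 2], done: false }
//   await reader.read(); // { value: Uint8Array [3, 4], done: false }
//   await reader.read(); // { value: Uint8Array [5], done: false }
//   await reader.read(); // { done: true }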
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function validateNonEmptyArgument(functionName, argumentName, argument) {
if (!argument) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${functionName}() cannot be called with an empty ${argumentName}.`);
}
}
/**
* Validates that two boolean options are not set at the same time.
* @internal
*/
function validateIsNotUsedTogether(optionName1, argument1, optionName2, argument2) {
if (argument1 === true && argument2 === true) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `${optionName1} and ${optionName2} cannot be used together.`);
}
}
/**
* Validates that `path` refers to a document (indicated by the fact it contains
* an even number of segments).
*/
function validateDocumentPath(path) {
if (!DocumentKey.isDocumentKey(path)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid document reference. Document references must have an even number of segments, but ${path} has ${path.length}.`);
}
}
/**
* Validates that `path` refers to a collection (indicated by the fact it
* contains an odd number of segments).
*/
function validateCollectionPath(path) {
if (DocumentKey.isDocumentKey(path)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid collection reference. Collection references must have an odd number of segments, but ${path} has ${path.length}.`);
}
}
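// Concrete illustration of the even/odd rule (assuming the ResourcePath
// helper defined elsewhere in this file):
//
//   validateDocumentPath(ResourcePath.fromString('users/alice')); // ok: 2 segments
//   validateCollectionPath(ResourcePath.fromString('users'));     // ok: 1 segment
//   validateDocumentPath(ResourcePath.fromString('users'));       // throws INVALID_ARGUMENT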
/**
* Returns true if it's a non-null object without a custom prototype
* (i.e. excludes Array, Date, etc.).
*/
function isPlainObject(input) {
return (typeof input === 'object' &&
input !== null &&
(Object.getPrototypeOf(input) === Object.prototype ||
Object.getPrototypeOf(input) === null));
}
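// For instance (illustrative):
//
//   isPlainObject({ a: 1 });            // true
//   isPlainObject(Object.create(null)); // true  (null prototype)
//   isPlainObject([1, 2, 3]);           // false (Array prototype)
//   isPlainObject(new Date());          // false (custom prototype)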
/** Returns a string describing the type / value of the provided input. */
function valueDescription(input) {
if (input === undefined) {
return 'undefined';
}
else if (input === null) {
return 'null';
}
else if (typeof input === 'string') {
if (input.length > 20) {
input = `${input.substring(0, 20)}...`;
}
return JSON.stringify(input);
}
else if (typeof input === 'number' || typeof input === 'boolean') {
return '' + input;
}
else if (typeof input === 'object') {
if (input instanceof Array) {
return 'an array';
}
else {
const customObjectName = tryGetCustomObjectType(input);
if (customObjectName) {
return `a custom ${customObjectName} object`;
}
else {
return 'an object';
}
}
}
else if (typeof input === 'function') {
return 'a function';
}
else {
return fail();
}
}
/** Try to get the constructor name for an object. */
function tryGetCustomObjectType(input) {
if (input.constructor) {
return input.constructor.name;
}
return null;
}
/**
* Casts `obj` to `T`, optionally unwrapping Compat types to expose the
* underlying instance. Throws if `obj` is not an instance of `T`.
*
* This cast is used in the Lite and Full SDK to verify instance types for
* arguments passed to the public API.
* @internal
*/
function cast(obj,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
constructor) {
if ('_delegate' in obj) {
// Unwrap Compat types
// eslint-disable-next-line @typescript-eslint/no-explicit-any
obj = obj._delegate;
}
if (!(obj instanceof constructor)) {
if (constructor.name === obj.constructor.name) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Type does not match the expected instance. Did you pass a ' +
`reference from a different Firestore SDK?`);
}
else {
const description = valueDescription(obj);
throw new FirestoreError(Code.INVALID_ARGUMENT, `Expected type '${constructor.name}', but it was: ${description}`);
}
}
return obj;
}
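// Hedged sketch of the behavior above (`compatRef` is a hypothetical Compat
// wrapper carrying a `_delegate` field):
//
//   cast(compatRef, DocumentReference); // returns compatRef._delegate if it matches
//   cast({}, DocumentReference);        // throws: "Expected type 'DocumentReference', ..."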
function validatePositiveNumber(functionName, n) {
if (n <= 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${functionName}() requires a positive number, but it was: ${n}.`);
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* On Node, the only supported data source is a `Uint8Array` for now.
*/
function toByteStreamReader(source, bytesPerRead) {
if (!(source instanceof Uint8Array)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `NodePlatform.toByteStreamReader expects source to be Uint8Array, got ${valueDescription(source)}`);
}
return toByteStreamReaderHelper(source, bytesPerRead);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* A wrapper implementation of Observer that dispatches events
* asynchronously. To allow immediate silencing, a mute() call is provided
* that prevents already-scheduled events from being raised.
*/
class AsyncObserver {
constructor(observer) {
this.observer = observer;
/**
* When set to true, will not raise future events. Necessary to deal with
* async detachment of listener.
*/
this.muted = false;
}
next(value) {
if (this.observer.next) {
this.scheduleEvent(this.observer.next, value);
}
}
error(error) {
if (this.observer.error) {
this.scheduleEvent(this.observer.error, error);
}
else {
logError('Uncaught Error in snapshot listener:', error.toString());
}
}
mute() {
this.muted = true;
}
scheduleEvent(eventHandler, event) {
if (!this.muted) {
setTimeout(() => {
if (!this.muted) {
eventHandler(event);
}
}, 0);
}
}
}
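// Minimal sketch of the mute semantics (the observer literal is
// hypothetical): events fire on a later tick, so muting before the tick
// silently drops anything still queued.
//
//   const wrapped = new AsyncObserver({ next: v => console.log(v) });
//   wrapped.next(42); // schedules console.log(42) via setTimeout(..., 0)
//   wrapped.mute();   // the queued event above is never raised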
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A complete element in the bundle stream, together with the byte length it
* occupies in the stream.
*/
class SizedBundleElement {
constructor(payload,
// How many bytes this element takes to store in the bundle.
byteLength) {
this.payload = payload;
this.byteLength = byteLength;
}
isBundleMetadata() {
return 'metadata' in this.payload;
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A class representing a bundle.
*
* Takes a bundle stream or buffer, and presents abstractions to read bundled
* elements out of the underlying content.
*/
class BundleReaderImpl {
constructor(
/** The reader to read from underlying binary bundle data source. */
reader, serializer) {
this.reader = reader;
this.serializer = serializer;
/** Cached bundle metadata. */
this.metadata = new Deferred();
/**
* Internal buffer to hold bundle content, accumulating incomplete element
* content.
*/
this.buffer = new Uint8Array();
this.textDecoder = newTextDecoder();
// Read the metadata (which is the first element).
this.nextElementImpl().then(element => {
if (element && element.isBundleMetadata()) {
this.metadata.resolve(element.payload.metadata);
}
else {
this.metadata.reject(new Error(`The first element of the bundle is not bundle metadata; it is
${JSON.stringify(element === null || element === void 0 ? void 0 : element.payload)}`));
}
}, error => this.metadata.reject(error));
}
close() {
return this.reader.cancel();
}
async getMetadata() {
return this.metadata.promise;
}
async nextElement() {
// Makes sure metadata is read before proceeding.
await this.getMetadata();
return this.nextElementImpl();
}
/**
* Reads from the head of the internal buffer, pulling more data from the
* underlying stream until a complete element (including the prefixed
* length and the JSON string) is found.
*
* Once a complete element is read, it is dropped from the internal buffer.
*
* Returns either the bundled element, or null if we have reached the end
* of the stream.
*/
async nextElementImpl() {
const lengthBuffer = await this.readLength();
if (lengthBuffer === null) {
return null;
}
const lengthString = this.textDecoder.decode(lengthBuffer);
const length = Number(lengthString);
if (isNaN(length)) {
this.raiseError(`length string (${lengthString}) is not a valid number`);
}
const jsonString = await this.readJsonString(length);
return new SizedBundleElement(JSON.parse(jsonString), lengthBuffer.length + length);
}
/** First index of '{' in the internal buffer. */
indexOfOpenBracket() {
return this.buffer.findIndex(v => v === '{'.charCodeAt(0));
}
/**
* Reads from the beginning of the internal buffer, until the first '{',
* and returns the content.
*
* Returns null if the end of the stream has been reached.
*/
async readLength() {
while (this.indexOfOpenBracket() < 0) {
const done = await this.pullMoreDataToBuffer();
if (done) {
break;
}
}
// Broke out of the loop because the underlying stream is closed, and
// there happens to be no more data to process.
if (this.buffer.length === 0) {
return null;
}
const position = this.indexOfOpenBracket();
// Broke out of the loop because the underlying stream is closed, but we
// still cannot find an open bracket.
if (position < 0) {
this.raiseError('Reached the end of bundle when a length string is expected.');
}
const result = this.buffer.slice(0, position);
// Update the internal buffer to drop the read length.
this.buffer = this.buffer.slice(position);
return result;
}
/**
* Reads a specified number of bytes from the beginning of the internal
* buffer, pulling more data from the underlying stream if needed.
*
* Returns a string decoded from the read bytes.
*/
async readJsonString(length) {
while (this.buffer.length < length) {
const done = await this.pullMoreDataToBuffer();
if (done) {
this.raiseError('Reached the end of bundle when more is expected.');
}
}
const result = this.textDecoder.decode(this.buffer.slice(0, length));
// Update the internal buffer to drop the read json string.
this.buffer = this.buffer.slice(length);
return result;
}
raiseError(message) {
// eslint-disable-next-line @typescript-eslint/no-floating-promises
this.reader.cancel();
throw new Error(`Invalid bundle format: ${message}`);
}
/**
* Pulls more data from underlying stream to internal buffer.
* Returns a boolean indicating whether the stream is finished.
*/
async pullMoreDataToBuffer() {
const result = await this.reader.read();
if (!result.done) {
const newBuffer = new Uint8Array(this.buffer.length + result.value.length);
newBuffer.set(this.buffer);
newBuffer.set(result.value, this.buffer.length);
this.buffer = newBuffer;
}
return result.done;
}
}
function newBundleReader(reader, serializer) {
return new BundleReaderImpl(reader, serializer);
}
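// The wire format consumed above is a sequence of length-prefixed JSON
// objects: a decimal byte length, then that many bytes of JSON starting at
// '{'. A schematic element (payload shortened for illustration):
//
//   7{"a":1}  =>  SizedBundleElement(payload = {a: 1}, byteLength = 1 + 7 = 8)
//
// `readLength` consumes the digits before each '{'; `readJsonString`
// consumes the JSON payload itself.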
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents an aggregation that can be performed by Firestore.
*/
// eslint-disable-next-line @typescript-eslint/no-unused-vars
class AggregateField {
constructor() {
/** A type string to uniquely identify instances of this class. */
this.type = 'AggregateField';
}
}
/**
* The results of executing an aggregation query.
*/
class AggregateQuerySnapshot {
/** @hideconstructor */
constructor(query, _data) {
this._data = _data;
/** A type string to uniquely identify instances of this class. */
this.type = 'AggregateQuerySnapshot';
this.query = query;
}
/**
* Returns the results of the aggregations performed over the underlying
* query.
*
* The keys of the returned object will be the same as those of the
* `AggregateSpec` object specified to the aggregation method, and the values
* will be the corresponding aggregation result.
*
* @returns The results of the aggregations performed over the underlying
* query.
*/
data() {
return this._data;
}
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* CountQueryRunner encapsulates the logic needed to run count aggregation
* queries.
*/
class CountQueryRunner {
constructor(query, datastore, userDataWriter) {
this.query = query;
this.datastore = datastore;
this.userDataWriter = userDataWriter;
}
run() {
return invokeRunAggregationQueryRpc(this.datastore, this.query._query).then(result => {
hardAssert(result[0] !== undefined);
const counts = Object.entries(result[0])
.filter(([key, value]) => key === 'count_alias')
.map(([key, value]) => this.userDataWriter.convertValue(value));
const countValue = counts[0];
hardAssert(typeof countValue === 'number');
return Promise.resolve(new AggregateQuerySnapshot(this.query, {
count: countValue
}));
});
}
}
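// Hedged sketch of the result shape (`query`, `datastore` and
// `userDataWriter` are assumed to already exist): the runner resolves to an
// AggregateQuerySnapshot whose data() carries the count under the `count` key.
//
//   const snapshot = await new CountQueryRunner(query, datastore, userDataWriter).run();
//   snapshot.data().count; // e.g. 42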
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal transaction object responsible for accumulating the mutations to
* perform and the base versions for any documents read.
*/
class Transaction$2 {
constructor(datastore) {
this.datastore = datastore;
// The version of each document that was read during this transaction.
this.readVersions = new Map();
this.mutations = [];
this.committed = false;
/**
* A deferred usage error that occurred previously in this transaction and
* that will cause the transaction to fail once it actually commits.
*/
this.lastWriteError = null;
/**
* Set of documents that have been written in the transaction.
*
* When there's more than one write to the same key in a transaction, any
* writes after the first are handled differently.
*/
this.writtenDocs = new Set();
}
async lookup(keys) {
this.ensureCommitNotCalled();
if (this.mutations.length > 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Firestore transactions require all reads to be executed before all writes.');
}
const docs = await invokeBatchGetDocumentsRpc(this.datastore, keys);
docs.forEach(doc => this.recordVersion(doc));
return docs;
}
set(key, data) {
this.write(data.toMutation(key, this.precondition(key)));
this.writtenDocs.add(key.toString());
}
update(key, data) {
try {
this.write(data.toMutation(key, this.preconditionForUpdate(key)));
}
catch (e) {
this.lastWriteError = e;
}
this.writtenDocs.add(key.toString());
}
delete(key) {
this.write(new DeleteMutation(key, this.precondition(key)));
this.writtenDocs.add(key.toString());
}
async commit() {
this.ensureCommitNotCalled();
if (this.lastWriteError) {
throw this.lastWriteError;
}
const unwritten = this.readVersions;
// For each mutation, note that the doc was written.
this.mutations.forEach(mutation => {
unwritten.delete(mutation.key.toString());
});
// For each document that was read but not written to, we want to perform
// a `verify` operation.
unwritten.forEach((_, path) => {
const key = DocumentKey.fromPath(path);
this.mutations.push(new VerifyMutation(key, this.precondition(key)));
});
await invokeCommitRpc(this.datastore, this.mutations);
this.committed = true;
}
recordVersion(doc) {
let docVersion;
if (doc.isFoundDocument()) {
docVersion = doc.version;
}
else if (doc.isNoDocument()) {
// Represent a deleted doc using SnapshotVersion.min().
docVersion = SnapshotVersion.min();
}
else {
throw fail();
}
const existingVersion = this.readVersions.get(doc.key.toString());
if (existingVersion) {
if (!docVersion.isEqual(existingVersion)) {
// This transaction will fail no matter what.
throw new FirestoreError(Code.ABORTED, 'Document version changed between two reads.');
}
}
else {
this.readVersions.set(doc.key.toString(), docVersion);
}
}
/**
* Returns a precondition based on the version of the document as it was
* read in this transaction, or no precondition if it was not read.
*/
precondition(key) {
const version = this.readVersions.get(key.toString());
if (!this.writtenDocs.has(key.toString()) && version) {
if (version.isEqual(SnapshotVersion.min())) {
return Precondition.exists(false);
}
else {
return Precondition.updateTime(version);
}
}
else {
return Precondition.none();
}
}
/**
* Returns the precondition for a document if the operation is an update.
*/
preconditionForUpdate(key) {
const version = this.readVersions.get(key.toString());
// The first time a document is written, we want to take into account the
// read time and existence
if (!this.writtenDocs.has(key.toString()) && version) {
if (version.isEqual(SnapshotVersion.min())) {
// The document doesn't exist, so fail the transaction.
// This has to be validated locally because you can't send a
// precondition that a document does not exist without changing the
// semantics of the backend write to be an insert. This is the reverse
// of what we want, since we want to assert that the document doesn't
// exist but then send the update and have it fail. Since we can't
// express that to the backend, we have to validate locally.
// Note: this can change once we can send separate verify writes in the
// transaction.
throw new FirestoreError(Code.INVALID_ARGUMENT, "Can't update a document that doesn't exist.");
}
// Document exists, base precondition on document update time.
return Precondition.updateTime(version);
}
else {
// Document was not read, so we just use the preconditions for a blind
// update.
return Precondition.exists(true);
}
}
write(mutation) {
this.ensureCommitNotCalled();
this.mutations.push(mutation);
}
ensureCommitNotCalled() {
}
}
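// Hedged internal usage sketch (`datastore`, `key` and `parsedSetData` are
// hypothetical): all reads must precede writes, and commit() appends a
// VerifyMutation for every document that was read but never written.
//
//   const txn = new Transaction$2(datastore);
//   const [doc] = await txn.lookup([key]); // records the read version
//   txn.set(key, parsedSetData);           // queues a mutation with a precondition
//   await txn.commit();                    // sends mutations (+ verifies) in one RPC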
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* TransactionRunner encapsulates the logic needed to run and retry transactions
* with backoff.
*/
class TransactionRunner {
constructor(asyncQueue, datastore, options, updateFunction, deferred) {
this.asyncQueue = asyncQueue;
this.datastore = datastore;
this.options = options;
this.updateFunction = updateFunction;
this.deferred = deferred;
this.attemptsRemaining = options.maxAttempts;
this.backoff = new ExponentialBackoff(this.asyncQueue, "transaction_retry" /* TimerId.TransactionRetry */);
}
/** Runs the transaction and sets the result on deferred. */
run() {
this.attemptsRemaining -= 1;
this.runWithBackOff();
}
runWithBackOff() {
this.backoff.backoffAndRun(async () => {
const transaction = new Transaction$2(this.datastore);
const userPromise = this.tryRunUpdateFunction(transaction);
if (userPromise) {
userPromise
.then(result => {
this.asyncQueue.enqueueAndForget(() => {
return transaction
.commit()
.then(() => {
this.deferred.resolve(result);
})
.catch(commitError => {
this.handleTransactionError(commitError);
});
});
})
.catch(userPromiseError => {
this.handleTransactionError(userPromiseError);
});
}
});
}
tryRunUpdateFunction(transaction) {
try {
const userPromise = this.updateFunction(transaction);
if (isNullOrUndefined(userPromise) ||
!userPromise.catch ||
!userPromise.then) {
this.deferred.reject(Error('Transaction callback must return a Promise'));
return null;
}
return userPromise;
}
catch (error) {
// Do not retry errors thrown by user provided updateFunction.
this.deferred.reject(error);
return null;
}
}
handleTransactionError(error) {
if (this.attemptsRemaining > 0 && this.isRetryableTransactionError(error)) {
this.attemptsRemaining -= 1;
this.asyncQueue.enqueueAndForget(() => {
this.runWithBackOff();
return Promise.resolve();
});
}
else {
this.deferred.reject(error);
}
}
isRetryableTransactionError(error) {
if (error.name === 'FirebaseError') {
// In transactions, the backend will fail outdated reads with FAILED_PRECONDITION and
// non-matching document versions with ABORTED. These errors should be retried.
const code = error.code;
return (code === 'aborted' ||
code === 'failed-precondition' ||
code === 'already-exists' ||
!isPermanentError(code));
}
return false;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$2 = 'FirestoreClient';
const MAX_CONCURRENT_LIMBO_RESOLUTIONS = 100;
/**
* FirestoreClient is a top-level class that constructs and owns all of the
* pieces of the client SDK architecture. It is responsible for creating the
* async queue that is shared by all of the other components in the system.
*/
class FirestoreClient {
constructor(authCredentials, appCheckCredentials,
/**
* Asynchronous queue responsible for all of our internal processing. When
* we get incoming work from the user (via public API) or the network
* (incoming GRPC messages), we should always schedule onto this queue.
* This ensures all of our work is properly serialized (e.g. we don't
* start processing a new operation while the previous one is waiting for
* an async I/O to complete).
*/
asyncQueue, databaseInfo) {
this.authCredentials = authCredentials;
this.appCheckCredentials = appCheckCredentials;
this.asyncQueue = asyncQueue;
this.databaseInfo = databaseInfo;
this.user = User.UNAUTHENTICATED;
this.clientId = AutoId.newId();
this.authCredentialListener = () => Promise.resolve();
this.appCheckCredentialListener = () => Promise.resolve();
this.authCredentials.start(asyncQueue, async (user) => {
logDebug(LOG_TAG$2, 'Received user=', user.uid);
await this.authCredentialListener(user);
this.user = user;
});
this.appCheckCredentials.start(asyncQueue, newAppCheckToken => {
logDebug(LOG_TAG$2, 'Received new app check token=', newAppCheckToken);
return this.appCheckCredentialListener(newAppCheckToken, this.user);
});
}
async getConfiguration() {
return {
asyncQueue: this.asyncQueue,
databaseInfo: this.databaseInfo,
clientId: this.clientId,
authCredentials: this.authCredentials,
appCheckCredentials: this.appCheckCredentials,
initialUser: this.user,
maxConcurrentLimboResolutions: MAX_CONCURRENT_LIMBO_RESOLUTIONS
};
}
setCredentialChangeListener(listener) {
this.authCredentialListener = listener;
}
setAppCheckTokenChangeListener(listener) {
this.appCheckCredentialListener = listener;
}
/**
* Checks that the client has not been terminated. Ensures that other methods on
* this class cannot be called after the client is terminated.
*/
verifyNotTerminated() {
if (this.asyncQueue.isShuttingDown) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'The client has already been terminated.');
}
}
terminate() {
this.asyncQueue.enterRestrictedMode();
const deferred = new Deferred();
this.asyncQueue.enqueueAndForgetEvenWhileRestricted(async () => {
try {
if (this.onlineComponents) {
await this.onlineComponents.terminate();
}
if (this.offlineComponents) {
await this.offlineComponents.terminate();
}
// The credentials provider must be terminated after shutting down the
// RemoteStore as it will prevent the RemoteStore from retrieving auth
// tokens.
this.authCredentials.shutdown();
this.appCheckCredentials.shutdown();
deferred.resolve();
}
catch (e) {
const firestoreError = wrapInUserErrorIfRecoverable(e, `Failed to shutdown persistence`);
deferred.reject(firestoreError);
}
});
return deferred.promise;
}
}
async function setOfflineComponentProvider(client, offlineComponentProvider) {
client.asyncQueue.verifyOperationInProgress();
logDebug(LOG_TAG$2, 'Initializing OfflineComponentProvider');
const configuration = await client.getConfiguration();
await offlineComponentProvider.initialize(configuration);
let currentUser = configuration.initialUser;
client.setCredentialChangeListener(async (user) => {
if (!currentUser.isEqual(user)) {
await localStoreHandleUserChange(offlineComponentProvider.localStore, user);
currentUser = user;
}
});
// When a user calls clearPersistence() in one client, all other clients
// need to be terminated to allow the delete to succeed.
offlineComponentProvider.persistence.setDatabaseDeletedListener(() => client.terminate());
client.offlineComponents = offlineComponentProvider;
}
async function setOnlineComponentProvider(client, onlineComponentProvider) {
client.asyncQueue.verifyOperationInProgress();
const offlineComponentProvider = await ensureOfflineComponents(client);
logDebug(LOG_TAG$2, 'Initializing OnlineComponentProvider');
const configuration = await client.getConfiguration();
await onlineComponentProvider.initialize(offlineComponentProvider, configuration);
// The CredentialChangeListener of the online component provider takes
// precedence over the offline component provider.
client.setCredentialChangeListener(user => remoteStoreHandleCredentialChange(onlineComponentProvider.remoteStore, user));
client.setAppCheckTokenChangeListener((_, user) => remoteStoreHandleCredentialChange(onlineComponentProvider.remoteStore, user));
client.onlineComponents = onlineComponentProvider;
}
async function ensureOfflineComponents(client) {
if (!client.offlineComponents) {
logDebug(LOG_TAG$2, 'Using default OfflineComponentProvider');
await setOfflineComponentProvider(client, new MemoryOfflineComponentProvider());
}
return client.offlineComponents;
}
async function ensureOnlineComponents(client) {
if (!client.onlineComponents) {
logDebug(LOG_TAG$2, 'Using default OnlineComponentProvider');
await setOnlineComponentProvider(client, new OnlineComponentProvider());
}
return client.onlineComponents;
}
function getPersistence(client) {
return ensureOfflineComponents(client).then(c => c.persistence);
}
function getLocalStore(client) {
return ensureOfflineComponents(client).then(c => c.localStore);
}
function getRemoteStore(client) {
return ensureOnlineComponents(client).then(c => c.remoteStore);
}
function getSyncEngine(client) {
return ensureOnlineComponents(client).then(c => c.syncEngine);
}
function getDatastore(client) {
return ensureOnlineComponents(client).then(c => c.datastore);
}
async function getEventManager(client) {
const onlineComponentProvider = await ensureOnlineComponents(client);
const eventManager = onlineComponentProvider.eventManager;
eventManager.onListen = syncEngineListen.bind(null, onlineComponentProvider.syncEngine);
eventManager.onUnlisten = syncEngineUnlisten.bind(null, onlineComponentProvider.syncEngine);
return eventManager;
}
/** Enables the network connection and re-enqueues all pending operations. */
function firestoreClientEnableNetwork(client) {
return client.asyncQueue.enqueue(async () => {
const persistence = await getPersistence(client);
const remoteStore = await getRemoteStore(client);
persistence.setNetworkEnabled(true);
return remoteStoreEnableNetwork(remoteStore);
});
}
/** Disables the network connection. Pending operations will not complete. */
function firestoreClientDisableNetwork(client) {
return client.asyncQueue.enqueue(async () => {
const persistence = await getPersistence(client);
const remoteStore = await getRemoteStore(client);
persistence.setNetworkEnabled(false);
return remoteStoreDisableNetwork(remoteStore);
});
}
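// Hedged sketch: the two helpers above are symmetric and back the public
// enableNetwork()/disableNetwork() APIs.
//
//   await firestoreClientDisableNetwork(client); // reads now served from cache
//   await firestoreClientEnableNetwork(client);  // streams reconnect, writes resume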
/**
* Returns a Promise that resolves when all writes that were pending at the time
* this method was called received server acknowledgement. An acknowledgement
* can be either acceptance or rejection.
*/
function firestoreClientWaitForPendingWrites(client) {
const deferred = new Deferred();
client.asyncQueue.enqueueAndForget(async () => {
const syncEngine = await getSyncEngine(client);
return syncEngineRegisterPendingWritesCallback(syncEngine, deferred);
});
return deferred.promise;
}
function firestoreClientListen(client, query, options, observer) {
const wrappedObserver = new AsyncObserver(observer);
const listener = new QueryListener(query, wrappedObserver, options);
client.asyncQueue.enqueueAndForget(async () => {
const eventManager = await getEventManager(client);
return eventManagerListen(eventManager, listener);
});
return () => {
wrappedObserver.mute();
client.asyncQueue.enqueueAndForget(async () => {
const eventManager = await getEventManager(client);
return eventManagerUnlisten(eventManager, listener);
});
};
}
function firestoreClientGetDocumentFromLocalCache(client, docKey) {
const deferred = new Deferred();
client.asyncQueue.enqueueAndForget(async () => {
const localStore = await getLocalStore(client);
return readDocumentFromCache(localStore, docKey, deferred);
});
return deferred.promise;
}
function firestoreClientGetDocumentViaSnapshotListener(client, key, options = {}) {
const deferred = new Deferred();
client.asyncQueue.enqueueAndForget(async () => {
const eventManager = await getEventManager(client);
return readDocumentViaSnapshotListener(eventManager, client.asyncQueue, key, options, deferred);
});
return deferred.promise;
}
function firestoreClientGetDocumentsFromLocalCache(client, query) {
const deferred = new Deferred();
client.asyncQueue.enqueueAndForget(async () => {
const localStore = await getLocalStore(client);
return executeQueryFromCache(localStore, query, deferred);
});
return deferred.promise;
}
function firestoreClientGetDocumentsViaSnapshotListener(client, query, options = {}) {
const deferred = new Deferred();
client.asyncQueue.enqueueAndForget(async () => {
const eventManager = await getEventManager(client);
return executeQueryViaSnapshotListener(eventManager, client.asyncQueue, query, options, deferred);
});
return deferred.promise;
}
function firestoreClientWrite(client, mutations) {
const deferred = new Deferred();
client.asyncQueue.enqueueAndForget(async () => {
const syncEngine = await getSyncEngine(client);
return syncEngineWrite(syncEngine, mutations, deferred);
});
return deferred.promise;
}
function firestoreClientAddSnapshotsInSyncListener(client, observer) {
const wrappedObserver = new AsyncObserver(observer);
client.asyncQueue.enqueueAndForget(async () => {
const eventManager = await getEventManager(client);
return addSnapshotsInSyncListener(eventManager, wrappedObserver);
});
return () => {
wrappedObserver.mute();
client.asyncQueue.enqueueAndForget(async () => {
const eventManager = await getEventManager(client);
return removeSnapshotsInSyncListener(eventManager, wrappedObserver);
});
};
}
/**
* Takes an updateFunction in which a set of reads and writes can be performed
* atomically. In the updateFunction, the client can read and write values
* using the supplied transaction object. After the updateFunction, all
* changes will be committed. If a retryable error occurs (ex: some other
* client has changed any of the data referenced), then the updateFunction
* will be called again after a backoff. If the updateFunction still fails
* after all retries, then the transaction will be rejected.
*
* The transaction object passed to the updateFunction contains methods for
* accessing documents and collections. Unlike other datastore access, data
* accessed with the transaction will not reflect local changes that have not
* been committed. For this reason, it is required that all reads are
* performed before any writes. Transactions must be performed while online.
*/
function firestoreClientTransaction(client, updateFunction, options) {
const deferred = new Deferred();
client.asyncQueue.enqueueAndForget(async () => {
const datastore = await getDatastore(client);
new TransactionRunner(client.asyncQueue, datastore, options, updateFunction, deferred).run();
});
return deferred.promise;
}
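// Hedged usage sketch (`key` and `parsedSetData` are hypothetical; `options`
// carries maxAttempts): the update function receives the internal
// Transaction and must complete all reads before the first write.
//
//   const version = await firestoreClientTransaction(client, async txn => {
//     const [doc] = await txn.lookup([key]);
//     txn.set(key, parsedSetData);
//     return doc.version;
//   }, options);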
function firestoreClientRunCountQuery(client, query, userDataWriter) {
const deferred = new Deferred();
client.asyncQueue.enqueueAndForget(async () => {
try {
const remoteStore = await getRemoteStore(client);
if (!canUseNetwork(remoteStore)) {
deferred.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get count result because the client is offline.'));
}
else {
const datastore = await getDatastore(client);
const result = new CountQueryRunner(query, datastore, userDataWriter).run();
deferred.resolve(result);
}
}
catch (e) {
deferred.reject(e);
}
});
return deferred.promise;
}
async function readDocumentFromCache(localStore, docKey, result) {
try {
const document = await localStoreReadDocument(localStore, docKey);
if (document.isFoundDocument()) {
result.resolve(document);
}
else if (document.isNoDocument()) {
result.resolve(null);
}
else {
result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document from cache. (However, this document may ' +
"exist on the server. Run again without setting 'source' in " +
'the GetOptions to attempt to retrieve the document from the ' +
'server.)'));
}
}
catch (e) {
const firestoreError = wrapInUserErrorIfRecoverable(e, `Failed to get document '${docKey}' from cache`);
result.reject(firestoreError);
}
}
/**
* Retrieves a latency-compensated document from the backend via a
* SnapshotListener.
*/
function readDocumentViaSnapshotListener(eventManager, asyncQueue, key, options, result) {
const wrappedObserver = new AsyncObserver({
next: (snap) => {
// Remove query first before passing event to user to avoid
// user actions affecting the now stale query.
asyncQueue.enqueueAndForget(() => eventManagerUnlisten(eventManager, listener));
const exists = snap.docs.has(key);
if (!exists && snap.fromCache) {
// TODO(dimond): If we're online and the document doesn't
// exist then we resolve with a doc.exists set to false. If
// we're offline however, we reject the Promise in this
// case. Two options: 1) Cache the negative response from
// the server so we can deliver that even when you're
// offline 2) Actually reject the Promise in the online case
// if the document doesn't exist.
result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document because the client is offline.'));
}
else if (exists &&
snap.fromCache &&
options &&
options.source === 'server') {
result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document from server. (However, this ' +
'document does exist in the local cache. Run again ' +
'without setting source to "server" to ' +
'retrieve the cached document.)'));
}
else {
result.resolve(snap);
}
},
error: e => result.reject(e)
});
const listener = new QueryListener(newQueryForPath(key.path), wrappedObserver, {
includeMetadataChanges: true,
waitForSyncWhenOnline: true
});
return eventManagerListen(eventManager, listener);
}
async function executeQueryFromCache(localStore, query, result) {
try {
const queryResult = await localStoreExecuteQuery(localStore, query,
/* usePreviousResults= */ true);
const view = new View(query, queryResult.remoteKeys);
const viewDocChanges = view.computeDocChanges(queryResult.documents);
const viewChange = view.applyChanges(viewDocChanges,
/* updateLimboDocuments= */ false);
result.resolve(viewChange.snapshot);
}
catch (e) {
const firestoreError = wrapInUserErrorIfRecoverable(e, `Failed to execute query '${query}' against cache`);
result.reject(firestoreError);
}
}
/**
* Retrieves a latency-compensated query snapshot from the backend via a
* SnapshotListener.
*/
function executeQueryViaSnapshotListener(eventManager, asyncQueue, query, options, result) {
const wrappedObserver = new AsyncObserver({
next: snapshot => {
// Remove query first before passing event to user to avoid
// user actions affecting the now stale query.
asyncQueue.enqueueAndForget(() => eventManagerUnlisten(eventManager, listener));
if (snapshot.fromCache && options.source === 'server') {
result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get documents from server. (However, these ' +
'documents may exist in the local cache. Run again ' +
'without setting source to "server" to ' +
'retrieve the cached documents.)'));
}
else {
result.resolve(snapshot);
}
},
error: e => result.reject(e)
});
const listener = new QueryListener(query, wrappedObserver, {
includeMetadataChanges: true,
waitForSyncWhenOnline: true
});
return eventManagerListen(eventManager, listener);
}
function firestoreClientLoadBundle(client, databaseId, data, resultTask) {
const reader = createBundleReader(data, newSerializer(databaseId));
client.asyncQueue.enqueueAndForget(async () => {
syncEngineLoadBundle(await getSyncEngine(client), reader, resultTask);
});
}
function firestoreClientGetNamedQuery(client, queryName) {
return client.asyncQueue.enqueue(async () => localStoreGetNamedQuery(await getLocalStore(client), queryName));
}
function createBundleReader(data, serializer) {
let content;
if (typeof data === 'string') {
content = newTextEncoder().encode(data);
}
else {
content = data;
}
return newBundleReader(toByteStreamReader(content), serializer);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG$1 = 'ComponentProvider';
/**
* An instance map that ensures only one Datastore exists per Firestore
* instance.
*/
const datastoreInstances = new Map();
/**
* Removes all components associated with the provided instance. Must be called
* when the `Firestore` instance is terminated.
*/
function removeComponents(firestore) {
const datastore = datastoreInstances.get(firestore);
if (datastore) {
logDebug(LOG_TAG$1, 'Removing Datastore');
datastoreInstances.delete(firestore);
datastore.terminate();
}
}
function makeDatabaseInfo(databaseId, appId, persistenceKey, settings) {
return new DatabaseInfo(databaseId, appId, persistenceKey, settings.host, settings.ssl, settings.experimentalForceLongPolling, settings.experimentalAutoDetectLongPolling, settings.useFetchStreams);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// settings() defaults:
const DEFAULT_HOST = 'firestore.googleapis.com';
const DEFAULT_SSL = true;
/**
* A concrete type describing all the values that can be applied via a
* user-supplied `FirestoreSettings` object. This is a separate type so that
* defaults can be supplied and the value can be checked for equality.
*/
class FirestoreSettingsImpl {
constructor(settings) {
var _a;
if (settings.host === undefined) {
if (settings.ssl !== undefined) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Can't provide ssl option if host option is not set");
}
this.host = DEFAULT_HOST;
this.ssl = DEFAULT_SSL;
}
else {
this.host = settings.host;
this.ssl = (_a = settings.ssl) !== null && _a !== void 0 ? _a : DEFAULT_SSL;
}
this.credentials = settings.credentials;
this.ignoreUndefinedProperties = !!settings.ignoreUndefinedProperties;
if (settings.cacheSizeBytes === undefined) {
this.cacheSizeBytes = LRU_DEFAULT_CACHE_SIZE_BYTES;
}
else {
if (settings.cacheSizeBytes !== LRU_COLLECTION_DISABLED &&
settings.cacheSizeBytes < LRU_MINIMUM_CACHE_SIZE_BYTES) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `cacheSizeBytes must be at least ${LRU_MINIMUM_CACHE_SIZE_BYTES}`);
}
else {
this.cacheSizeBytes = settings.cacheSizeBytes;
}
}
this.experimentalForceLongPolling = !!settings.experimentalForceLongPolling;
this.experimentalAutoDetectLongPolling =
!!settings.experimentalAutoDetectLongPolling;
this.useFetchStreams = !!settings.useFetchStreams;
validateIsNotUsedTogether('experimentalForceLongPolling', settings.experimentalForceLongPolling, 'experimentalAutoDetectLongPolling', settings.experimentalAutoDetectLongPolling);
}
isEqual(other) {
return (this.host === other.host &&
this.ssl === other.ssl &&
this.credentials === other.credentials &&
this.cacheSizeBytes === other.cacheSizeBytes &&
this.experimentalForceLongPolling ===
other.experimentalForceLongPolling &&
this.experimentalAutoDetectLongPolling ===
other.experimentalAutoDetectLongPolling &&
this.ignoreUndefinedProperties === other.ignoreUndefinedProperties &&
this.useFetchStreams === other.useFetchStreams);
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The Cloud Firestore service interface.
*
* Do not call this constructor directly. Instead, use {@link (getFirestore:1)}.
*/
class Firestore$1 {
/** @hideconstructor */
constructor(_authCredentials, _appCheckCredentials, _databaseId, _app) {
this._authCredentials = _authCredentials;
this._appCheckCredentials = _appCheckCredentials;
this._databaseId = _databaseId;
this._app = _app;
/**
* Whether it's a Firestore or Firestore Lite instance.
*/
this.type = 'firestore-lite';
this._persistenceKey = '(lite)';
this._settings = new FirestoreSettingsImpl({});
this._settingsFrozen = false;
}
/**
* The {@link @firebase/app#FirebaseApp} associated with this `Firestore` service
* instance.
*/
get app() {
if (!this._app) {
throw new FirestoreError(Code.FAILED_PRECONDITION, "Firestore was not initialized using the Firebase SDK. 'app' is " +
'not available');
}
return this._app;
}
get _initialized() {
return this._settingsFrozen;
}
get _terminated() {
return this._terminateTask !== undefined;
}
_setSettings(settings) {
if (this._settingsFrozen) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'Firestore has already been started and its settings can no longer ' +
'be changed. You can only modify settings before calling any other ' +
'methods on a Firestore object.');
}
this._settings = new FirestoreSettingsImpl(settings);
if (settings.credentials !== undefined) {
this._authCredentials = makeAuthCredentialsProvider(settings.credentials);
}
}
_getSettings() {
return this._settings;
}
_freezeSettings() {
this._settingsFrozen = true;
return this._settings;
}
_delete() {
if (!this._terminateTask) {
this._terminateTask = this._terminate();
}
return this._terminateTask;
}
/** Returns a JSON-serializable representation of this `Firestore` instance. */
toJSON() {
return {
app: this._app,
databaseId: this._databaseId,
settings: this._settings
};
}
/**
* Terminates all components used by this client. Subclasses can override
* this method to clean up their own dependencies, but must also call this
* method.
*
* Only ever called once.
*/
_terminate() {
removeComponents(this);
return Promise.resolve();
}
}
/**
* Modify this instance to communicate with the Cloud Firestore emulator.
*
* Note: This must be called before this instance has been used to do any
* operations.
*
* @param firestore - The `Firestore` instance to configure to connect to the
* emulator.
 * @param host - The emulator host (for example, localhost).
 * @param port - The emulator port (for example, 9000).
 * @param options.mockUserToken - The mock auth token to use for unit testing
 * Security Rules.
*/
function connectFirestoreEmulator(firestore, host, port, options = {}) {
var _a;
firestore = cast(firestore, Firestore$1);
const settings = firestore._getSettings();
if (settings.host !== DEFAULT_HOST && settings.host !== host) {
logWarn('Host has been set in both settings() and useEmulator(); the ' +
'emulator host will be used');
}
firestore._setSettings(Object.assign(Object.assign({}, settings), { host: `${host}:${port}`, ssl: false }));
if (options.mockUserToken) {
let token;
let user;
if (typeof options.mockUserToken === 'string') {
token = options.mockUserToken;
user = User.MOCK_USER;
}
else {
// Let createMockUserToken validate first (catches common mistakes like
// an invalid field "uid" or a missing field "sub" / "user_id").
token = util.createMockUserToken(options.mockUserToken, (_a = firestore._app) === null || _a === void 0 ? void 0 : _a.options.projectId);
const uid = options.mockUserToken.sub || options.mockUserToken.user_id;
if (!uid) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "mockUserToken must contain 'sub' or 'user_id' field!");
}
user = new User(uid);
}
firestore._authCredentials = new EmulatorAuthCredentialsProvider(new OAuthToken(token, user));
}
}
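/*
 * Illustrative usage sketch (not part of the library): wiring a client to the
 * local emulator with a mock user. Assumes `initializeApp` and `getFirestore`
 * from the public SDK entry points; the host/port values are examples only.
 *
 *   const app = initializeApp({ projectId: 'demo-project' });
 *   const db = getFirestore(app);
 *   // Must run before any reads or writes on `db`.
 *   connectFirestoreEmulator(db, 'localhost', 8080, {
 *     mockUserToken: { sub: 'alice' } // becomes the authenticated uid in Rules
 *   });
 */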
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A `DocumentReference` refers to a document location in a Firestore database
* and can be used to write, read, or listen to the location. The document at
* the referenced location may or may not exist.
*/
class DocumentReference {
/** @hideconstructor */
constructor(firestore,
/**
* If provided, the `FirestoreDataConverter` associated with this instance.
*/
converter, _key) {
this.converter = converter;
this._key = _key;
/** The type of this Firestore reference. */
this.type = 'document';
this.firestore = firestore;
}
get _path() {
return this._key.path;
}
/**
* The document's identifier within its collection.
*/
get id() {
return this._key.path.lastSegment();
}
/**
* A string representing the path of the referenced document (relative
* to the root of the database).
*/
get path() {
return this._key.path.canonicalString();
}
/**
* The collection this `DocumentReference` belongs to.
*/
get parent() {
return new CollectionReference(this.firestore, this.converter, this._key.path.popLast());
}
withConverter(converter) {
return new DocumentReference(this.firestore, converter, this._key);
}
}
/**
* A `Query` refers to a query which you can read or listen to. You can also
* construct refined `Query` objects by adding filters and ordering.
*/
class Query {
// This is the lite version of the Query class in the main SDK.
/** @hideconstructor protected */
constructor(firestore,
/**
* If provided, the `FirestoreDataConverter` associated with this instance.
*/
converter, _query) {
this.converter = converter;
this._query = _query;
/** The type of this Firestore reference. */
this.type = 'query';
this.firestore = firestore;
}
withConverter(converter) {
return new Query(this.firestore, converter, this._query);
}
}
/**
* A `CollectionReference` object can be used for adding documents, getting
* document references, and querying for documents (using {@link query}).
*/
class CollectionReference extends Query {
/** @hideconstructor */
constructor(firestore, converter, _path) {
super(firestore, converter, newQueryForPath(_path));
this._path = _path;
/** The type of this Firestore reference. */
this.type = 'collection';
}
/** The collection's identifier. */
get id() {
return this._query.path.lastSegment();
}
/**
* A string representing the path of the referenced collection (relative
* to the root of the database).
*/
get path() {
return this._query.path.canonicalString();
}
/**
* A reference to the containing `DocumentReference` if this is a
* subcollection. If this isn't a subcollection, the reference is null.
*/
get parent() {
const parentPath = this._path.popLast();
if (parentPath.isEmpty()) {
return null;
}
else {
return new DocumentReference(this.firestore,
/* converter= */ null, new DocumentKey(parentPath));
}
}
withConverter(converter) {
return new CollectionReference(this.firestore, converter, this._path);
}
}
function collection(parent, path, ...pathSegments) {
parent = util.getModularInstance(parent);
validateNonEmptyArgument('collection', 'path', path);
if (parent instanceof Firestore$1) {
const absolutePath = ResourcePath.fromString(path, ...pathSegments);
validateCollectionPath(absolutePath);
return new CollectionReference(parent, /* converter= */ null, absolutePath);
}
else {
if (!(parent instanceof DocumentReference) &&
!(parent instanceof CollectionReference)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Expected first argument to collection() to be a CollectionReference, ' +
'a DocumentReference or FirebaseFirestore');
}
const absolutePath = parent._path.child(ResourcePath.fromString(path, ...pathSegments));
validateCollectionPath(absolutePath);
return new CollectionReference(parent.firestore,
/* converter= */ null, absolutePath);
}
}
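/*
 * Illustrative usage sketch (not part of the library): the three accepted
 * parent types for collection(). `db` is assumed to come from getFirestore().
 *
 *   const users = collection(db, 'users');                    // root collection
 *   const orders = collection(users, 'alice', 'orders');      // extra segments
 *   const same = collection(db, 'users/alice/orders');        // slash-joined path
 *   const sub = collection(doc(db, 'users/alice'), 'orders'); // via a document
 */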
// TODO(firestorelite): Consider using ErrorFactory -
// https://github.com/firebase/firebase-js-sdk/blob/0131e1f/packages/util/src/errors.ts#L106
/**
* Creates and returns a new `Query` instance that includes all documents in the
* database that are contained in a collection or subcollection with the
* given `collectionId`.
*
* @param firestore - A reference to the root `Firestore` instance.
* @param collectionId - Identifies the collections to query over. Every
* collection or subcollection with this ID as the last segment of its path
* will be included. Cannot contain a slash.
* @returns The created `Query`.
*/
function collectionGroup(firestore, collectionId) {
firestore = cast(firestore, Firestore$1);
validateNonEmptyArgument('collectionGroup', 'collection id', collectionId);
if (collectionId.indexOf('/') >= 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid collection ID '${collectionId}' passed to function ` +
`collectionGroup(). Collection IDs must not contain '/'.`);
}
return new Query(firestore,
/* converter= */ null, newQueryForCollectionGroup(collectionId));
}
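/*
 * Illustrative sketch (assumption: the `query`/`where` helpers exported by
 * this SDK): querying every 'orders' collection, regardless of its parent.
 *
 *   const allOrders = collectionGroup(db, 'orders');
 *   const big = query(allOrders, where('total', '>', 100));
 */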
function doc(parent, path, ...pathSegments) {
parent = util.getModularInstance(parent);
// We allow omission of 'pathString' but explicitly prohibit passing in both
// 'undefined' and 'null'.
if (arguments.length === 1) {
path = AutoId.newId();
}
validateNonEmptyArgument('doc', 'path', path);
if (parent instanceof Firestore$1) {
const absolutePath = ResourcePath.fromString(path, ...pathSegments);
validateDocumentPath(absolutePath);
return new DocumentReference(parent,
/* converter= */ null, new DocumentKey(absolutePath));
}
else {
if (!(parent instanceof DocumentReference) &&
!(parent instanceof CollectionReference)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Expected first argument to doc() to be a CollectionReference, ' +
'a DocumentReference or FirebaseFirestore');
}
const absolutePath = parent._path.child(ResourcePath.fromString(path, ...pathSegments));
validateDocumentPath(absolutePath);
return new DocumentReference(parent.firestore, parent instanceof CollectionReference ? parent.converter : null, new DocumentKey(absolutePath));
}
}
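/*
 * Illustrative sketch (not part of the library): doc() accepts even-length
 * paths and generates an id when none is given.
 *
 *   const alice = doc(db, 'users/alice');       // explicit id
 *   const fresh = doc(collection(db, 'users')); // auto-generated id
 *   console.log(fresh.id, fresh.path, fresh.parent.id);
 */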
/**
* Returns true if the provided references are equal.
*
* @param left - A reference to compare.
* @param right - A reference to compare.
* @returns true if the references point to the same location in the same
* Firestore database.
*/
function refEqual(left, right) {
left = util.getModularInstance(left);
right = util.getModularInstance(right);
if ((left instanceof DocumentReference ||
left instanceof CollectionReference) &&
(right instanceof DocumentReference || right instanceof CollectionReference)) {
return (left.firestore === right.firestore &&
left.path === right.path &&
left.converter === right.converter);
}
return false;
}
/**
* Returns true if the provided queries point to the same collection and apply
* the same constraints.
*
* @param left - A `Query` to compare.
* @param right - A `Query` to compare.
* @returns true if the references point to the same location in the same
* Firestore database.
*/
function queryEqual(left, right) {
left = util.getModularInstance(left);
right = util.getModularInstance(right);
if (left instanceof Query && right instanceof Query) {
return (left.firestore === right.firestore &&
queryEquals(left._query, right._query) &&
left.converter === right.converter);
}
return false;
}
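/*
 * Illustrative sketch: refEqual()/queryEqual() compare location, instance and
 * converter, not document contents.
 *
 *   refEqual(doc(db, 'users/alice'), doc(db, 'users', 'alice'));  // true
 *   queryEqual(collection(db, 'users'), collection(db, 'users')); // true
 *   refEqual(doc(db, 'users/alice'), doc(db, 'users/bob'));       // false
 */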
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const LOG_TAG = 'AsyncQueue';
class AsyncQueueImpl {
constructor() {
// The last promise in the queue.
this.tail = Promise.resolve();
// A list of retryable operations. Retryable operations are run in order and
// retried with backoff.
this.retryableOps = [];
// Is this AsyncQueue being shut down? Once it is set to true, it will not
// be changed again.
this._isShuttingDown = false;
// Operations scheduled to be queued in the future. Operations are
// automatically removed after they are run or canceled.
this.delayedOperations = [];
// Visible for testing.
this.failure = null;
// Flag set while there's an outstanding AsyncQueue operation, used for
// assertion sanity-checks.
this.operationInProgress = false;
// Enabled during shutdown on Safari to prevent future access to IndexedDB.
this.skipNonRestrictedTasks = false;
// List of TimerIds to fast-forward delays for.
this.timerIdsToSkip = [];
// Backoff timer used to schedule retries for retryable operations
this.backoff = new ExponentialBackoff(this, "async_queue_retry" /* TimerId.AsyncQueueRetry */);
// Visibility handler that triggers an immediate retry of all retryable
// operations. Meant to speed up recovery when we regain file system access
// after page comes into foreground.
this.visibilityHandler = () => {
this.backoff.skipBackoff();
};
}
get isShuttingDown() {
return this._isShuttingDown;
}
/**
* Adds a new operation to the queue without waiting for it to complete (i.e.
* we ignore the Promise result).
*/
enqueueAndForget(op) {
// eslint-disable-next-line @typescript-eslint/no-floating-promises
this.enqueue(op);
}
enqueueAndForgetEvenWhileRestricted(op) {
this.verifyNotFailed();
// eslint-disable-next-line @typescript-eslint/no-floating-promises
this.enqueueInternal(op);
}
enterRestrictedMode(purgeExistingTasks) {
if (!this._isShuttingDown) {
this._isShuttingDown = true;
this.skipNonRestrictedTasks = purgeExistingTasks || false;
}
}
enqueue(op) {
this.verifyNotFailed();
if (this._isShuttingDown) {
// Return a Promise which never resolves.
return new Promise(() => { });
}
// Create a deferred Promise that we can return to the caller. This
// allows us to return a "hanging Promise" only to the caller and still
// advance the queue even when the operation is not run.
const task = new Deferred();
return this.enqueueInternal(() => {
if (this._isShuttingDown && this.skipNonRestrictedTasks) {
// We do not resolve 'task'
return Promise.resolve();
}
op().then(task.resolve, task.reject);
return task.promise;
}).then(() => task.promise);
}
enqueueRetryable(op) {
this.enqueueAndForget(() => {
this.retryableOps.push(op);
return this.retryNextOp();
});
}
/**
* Runs the next operation from the retryable queue. If the operation fails,
* reschedules with backoff.
*/
async retryNextOp() {
if (this.retryableOps.length === 0) {
return;
}
try {
await this.retryableOps[0]();
this.retryableOps.shift();
this.backoff.reset();
}
catch (e) {
if (isIndexedDbTransactionError(e)) {
logDebug(LOG_TAG, 'Operation failed with retryable error: ' + e);
}
else {
throw e; // Failure will be handled by AsyncQueue
}
}
if (this.retryableOps.length > 0) {
// If there are additional operations, we re-schedule `retryNextOp()`.
// This is necessary to run retryable operations that failed during
// their initial attempt since we don't know whether they are already
// enqueued. If, for example, `op1`, `op2`, `op3` are enqueued and `op1`
// needs to be re-run, we will run `op1`, `op1`, `op2` using the
// already enqueued calls to `retryNextOp()`. `op3()` will then run in the
// call scheduled here.
// Since `backoffAndRun()` cancels an existing backoff and schedules a
// new backoff on every call, there is only ever a single additional
// operation in the queue.
this.backoff.backoffAndRun(() => this.retryNextOp());
}
}
enqueueInternal(op) {
const newTail = this.tail.then(() => {
this.operationInProgress = true;
return op()
.catch((error) => {
this.failure = error;
this.operationInProgress = false;
const message = getMessageOrStack(error);
logError('INTERNAL UNHANDLED ERROR: ', message);
// Re-throw the error so that this.tail becomes a rejected Promise and
// all further attempts to chain (via .then) will just short-circuit
// and return the rejected Promise.
throw error;
})
.then(result => {
this.operationInProgress = false;
return result;
});
});
this.tail = newTail;
return newTail;
}
enqueueAfterDelay(timerId, delayMs, op) {
this.verifyNotFailed();
// Fast-forward delays for timerIds that have been overridden.
if (this.timerIdsToSkip.indexOf(timerId) > -1) {
delayMs = 0;
}
const delayedOp = DelayedOperation.createAndSchedule(this, timerId, delayMs, op, removedOp => this.removeDelayedOperation(removedOp));
this.delayedOperations.push(delayedOp);
return delayedOp;
}
verifyNotFailed() {
if (this.failure) {
fail();
}
}
verifyOperationInProgress() {
}
/**
* Waits until all currently queued tasks are finished executing. Delayed
* operations are not run.
*/
async drain() {
// Operations in the queue prior to draining may have enqueued additional
// operations. Keep draining the queue until the tail is no longer advanced,
// which indicates that no more new operations were enqueued and that all
// operations were executed.
let currentTail;
do {
currentTail = this.tail;
await currentTail;
} while (currentTail !== this.tail);
}
/**
* For Tests: Determine if a delayed operation with a particular TimerId
* exists.
*/
containsDelayedOperation(timerId) {
for (const op of this.delayedOperations) {
if (op.timerId === timerId) {
return true;
}
}
return false;
}
/**
* For Tests: Runs some or all delayed operations early.
*
* @param lastTimerId - Delayed operations up to and including this TimerId
* will be drained. Pass TimerId.All to run all delayed operations.
* @returns a Promise that resolves once all operations have been run.
*/
runAllDelayedOperationsUntil(lastTimerId) {
// Note that draining may generate more delayed ops, so we do that first.
return this.drain().then(() => {
// Run ops in the same order they'd run if they ran naturally.
this.delayedOperations.sort((a, b) => a.targetTimeMs - b.targetTimeMs);
for (const op of this.delayedOperations) {
op.skipDelay();
if (lastTimerId !== "all" /* TimerId.All */ && op.timerId === lastTimerId) {
break;
}
}
return this.drain();
});
}
/**
* For Tests: Skip all subsequent delays for a timer id.
*/
skipDelaysForTimerId(timerId) {
this.timerIdsToSkip.push(timerId);
}
/** Called once a DelayedOperation is run or canceled. */
removeDelayedOperation(op) {
// NOTE: indexOf / splice are O(n), but delayedOperations is expected to be small.
const index = this.delayedOperations.indexOf(op);
this.delayedOperations.splice(index, 1);
}
}
function newAsyncQueue() {
return new AsyncQueueImpl();
}
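/*
 * Internal usage sketch (illustration only, not public API): operations passed
 * to enqueue() run strictly in FIFO order, each one awaiting the previous tail.
 *
 *   const queue = newAsyncQueue();
 *   queue.enqueueAndForget(async () => console.log('first'));
 *   const result = await queue.enqueue(async () => 'second'); // runs after 'first'
 */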
/**
 * Chrome includes Error.message in Error.stack; other browsers do not.
 * This returns the expected combination of message and stack, when available.
* @param error - Error or FirestoreError
*/
function getMessageOrStack(error) {
let message = error.message || '';
if (error.stack) {
if (error.stack.includes(error.message)) {
message = error.stack;
}
else {
message = error.message + '\n' + error.stack;
}
}
return message;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Represents the task of loading a Firestore bundle. It provides progress
 * updates on bundle loading, as well as task completion and error events.
*
* The API is compatible with `Promise`.
*/
class LoadBundleTask {
constructor() {
this._progressObserver = {};
this._taskCompletionResolver = new Deferred();
this._lastProgress = {
taskState: 'Running',
totalBytes: 0,
totalDocuments: 0,
bytesLoaded: 0,
documentsLoaded: 0
};
}
/**
* Registers functions to listen to bundle loading progress events.
* @param next - Called when there is a progress update from bundle loading. Typically `next` calls occur
* each time a Firestore document is loaded from the bundle.
* @param error - Called when an error occurs during bundle loading. The task aborts after reporting the
* error, and there should be no more updates after this.
* @param complete - Called when the loading task is complete.
*/
onProgress(next, error, complete) {
this._progressObserver = {
next,
error,
complete
};
}
/**
* Implements the `Promise.catch` interface.
*
* @param onRejected - Called when an error occurs during bundle loading.
*/
catch(onRejected) {
return this._taskCompletionResolver.promise.catch(onRejected);
}
/**
* Implements the `Promise.then` interface.
*
* @param onFulfilled - Called on the completion of the loading task with a final `LoadBundleTaskProgress` update.
* The update will always have its `taskState` set to `"Success"`.
* @param onRejected - Called when an error occurs during bundle loading.
*/
then(onFulfilled, onRejected) {
return this._taskCompletionResolver.promise.then(onFulfilled, onRejected);
}
/**
* Notifies all observers that bundle loading has completed, with a provided
* `LoadBundleTaskProgress` object.
*
* @private
*/
_completeWith(progress) {
this._updateProgress(progress);
if (this._progressObserver.complete) {
this._progressObserver.complete();
}
this._taskCompletionResolver.resolve(progress);
}
/**
* Notifies all observers that bundle loading has failed, with a provided
* `Error` as the reason.
*
* @private
*/
_failWith(error) {
this._lastProgress.taskState = 'Error';
if (this._progressObserver.next) {
this._progressObserver.next(this._lastProgress);
}
if (this._progressObserver.error) {
this._progressObserver.error(error);
}
this._taskCompletionResolver.reject(error);
}
/**
* Notifies a progress update of loading a bundle.
* @param progress - The new progress.
*
* @private
*/
_updateProgress(progress) {
this._lastProgress = progress;
if (this._progressObserver.next) {
this._progressObserver.next(progress);
}
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** DOMException error code constants. */
const DOM_EXCEPTION_INVALID_STATE = 11;
const DOM_EXCEPTION_ABORTED = 20;
const DOM_EXCEPTION_QUOTA_EXCEEDED = 22;
/**
* Constant used to indicate the LRU garbage collection should be disabled.
* Set this value as the `cacheSizeBytes` on the settings passed to the
* {@link Firestore} instance.
*/
const CACHE_SIZE_UNLIMITED = LRU_COLLECTION_DISABLED;
/**
* The Cloud Firestore service interface.
*
* Do not call this constructor directly. Instead, use {@link (getFirestore:1)}.
*/
class Firestore extends Firestore$1 {
/** @hideconstructor */
constructor(authCredentialsProvider, appCheckCredentialsProvider, databaseId, app) {
super(authCredentialsProvider, appCheckCredentialsProvider, databaseId, app);
/**
* Whether it's a {@link Firestore} or Firestore Lite instance.
*/
this.type = 'firestore';
this._queue = newAsyncQueue();
this._persistenceKey = (app === null || app === void 0 ? void 0 : app.name) || '[DEFAULT]';
}
_terminate() {
if (!this._firestoreClient) {
// The client must be initialized to ensure that all subsequent API
// usage throws an exception.
configureFirestore(this);
}
return this._firestoreClient.terminate();
}
}
/**
* Initializes a new instance of {@link Firestore} with the provided settings.
* Can only be called before any other function, including
* {@link (getFirestore:1)}. If the custom settings are empty, this function is
* equivalent to calling {@link (getFirestore:1)}.
*
* @param app - The {@link @firebase/app#FirebaseApp} with which the {@link Firestore} instance will
* be associated.
* @param settings - A settings object to configure the {@link Firestore} instance.
 * @param databaseId - The name of the database.
* @returns A newly initialized {@link Firestore} instance.
*/
function initializeFirestore(app$1, settings, databaseId) {
if (!databaseId) {
databaseId = DEFAULT_DATABASE_NAME;
}
const provider = app._getProvider(app$1, 'firestore');
if (provider.isInitialized(databaseId)) {
const existingInstance = provider.getImmediate({
identifier: databaseId
});
const initialSettings = provider.getOptions(databaseId);
if (util.deepEqual(initialSettings, settings)) {
return existingInstance;
}
else {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'initializeFirestore() has already been called with ' +
'different options. To avoid this error, call initializeFirestore() with the ' +
'same options as when it was originally called, or call getFirestore() to return the' +
' already initialized instance.');
}
}
if (settings.cacheSizeBytes !== undefined &&
settings.cacheSizeBytes !== CACHE_SIZE_UNLIMITED &&
settings.cacheSizeBytes < LRU_MINIMUM_CACHE_SIZE_BYTES) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `cacheSizeBytes must be at least ${LRU_MINIMUM_CACHE_SIZE_BYTES}`);
}
return provider.initialize({
options: settings,
instanceIdentifier: databaseId
});
}
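/*
 * Illustrative sketch (not part of the library): opting out of LRU garbage
 * collection at startup. Settings are frozen once the instance is first used.
 *
 *   const db = initializeFirestore(app, {
 *     cacheSizeBytes: CACHE_SIZE_UNLIMITED // disables cache eviction
 *   });
 */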
function getFirestore(appOrDatabaseId, optionalDatabaseId) {
const app$1 = typeof appOrDatabaseId === 'object' ? appOrDatabaseId : app.getApp();
const databaseId = typeof appOrDatabaseId === 'string'
? appOrDatabaseId
: optionalDatabaseId || DEFAULT_DATABASE_NAME;
const db = app._getProvider(app$1, 'firestore').getImmediate({
identifier: databaseId
});
if (!db._initialized) {
const emulator = util.getDefaultEmulatorHostnameAndPort('firestore');
if (emulator) {
connectFirestoreEmulator(db, ...emulator);
}
}
return db;
}
/**
* @internal
*/
function ensureFirestoreConfigured(firestore) {
if (!firestore._firestoreClient) {
configureFirestore(firestore);
}
firestore._firestoreClient.verifyNotTerminated();
return firestore._firestoreClient;
}
function configureFirestore(firestore) {
var _a;
const settings = firestore._freezeSettings();
const databaseInfo = makeDatabaseInfo(firestore._databaseId, ((_a = firestore._app) === null || _a === void 0 ? void 0 : _a.options.appId) || '', firestore._persistenceKey, settings);
firestore._firestoreClient = new FirestoreClient(firestore._authCredentials, firestore._appCheckCredentials, firestore._queue, databaseInfo);
}
/**
* Attempts to enable persistent storage, if possible.
*
* Must be called before any other functions (other than
* {@link initializeFirestore}, {@link (getFirestore:1)} or
 * {@link clearIndexedDbPersistence}).
*
* If this fails, `enableIndexedDbPersistence()` will reject the promise it
* returns. Note that even after this failure, the {@link Firestore} instance will
* remain usable, however offline persistence will be disabled.
*
* There are several reasons why this can fail, which can be identified by
* the `code` on the error.
*
* * failed-precondition: The app is already open in another browser tab.
* * unimplemented: The browser is incompatible with the offline
* persistence implementation.
*
* @param firestore - The {@link Firestore} instance to enable persistence for.
* @param persistenceSettings - Optional settings object to configure
* persistence.
* @returns A `Promise` that represents successfully enabling persistent storage.
*/
function enableIndexedDbPersistence(firestore, persistenceSettings) {
firestore = cast(firestore, Firestore);
verifyNotInitialized(firestore);
const client = ensureFirestoreConfigured(firestore);
const settings = firestore._freezeSettings();
const onlineComponentProvider = new OnlineComponentProvider();
const offlineComponentProvider = new IndexedDbOfflineComponentProvider(onlineComponentProvider, settings.cacheSizeBytes, persistenceSettings === null || persistenceSettings === void 0 ? void 0 : persistenceSettings.forceOwnership);
return setPersistenceProviders(client, onlineComponentProvider, offlineComponentProvider);
}
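/*
 * Illustrative sketch: the failure codes documented above are recoverable, so
 * a typical caller logs them and continues without persistence.
 *
 *   enableIndexedDbPersistence(db).catch(err => {
 *     if (err.code === 'failed-precondition') {
 *       // another tab already owns persistence
 *     } else if (err.code === 'unimplemented') {
 *       // browser lacks IndexedDB support
 *     }
 *   });
 */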
/**
* Attempts to enable multi-tab persistent storage, if possible. If enabled
* across all tabs, all operations share access to local persistence, including
* shared execution of queries and latency-compensated local document updates
* across all connected instances.
*
* If this fails, `enableMultiTabIndexedDbPersistence()` will reject the promise
* it returns. Note that even after this failure, the {@link Firestore} instance will
* remain usable, however offline persistence will be disabled.
*
* There are several reasons why this can fail, which can be identified by
* the `code` on the error.
*
* * failed-precondition: The app is already open in another browser tab and
* multi-tab is not enabled.
* * unimplemented: The browser is incompatible with the offline
* persistence implementation.
*
* @param firestore - The {@link Firestore} instance to enable persistence for.
* @returns A `Promise` that represents successfully enabling persistent
* storage.
*/
function enableMultiTabIndexedDbPersistence(firestore) {
firestore = cast(firestore, Firestore);
verifyNotInitialized(firestore);
const client = ensureFirestoreConfigured(firestore);
const settings = firestore._freezeSettings();
const onlineComponentProvider = new OnlineComponentProvider();
const offlineComponentProvider = new MultiTabOfflineComponentProvider(onlineComponentProvider, settings.cacheSizeBytes);
return setPersistenceProviders(client, onlineComponentProvider, offlineComponentProvider);
}
/**
* Registers both the `OfflineComponentProvider` and `OnlineComponentProvider`.
* If the operation fails with a recoverable error (see
 * `canFallbackFromIndexedDbError()` below), the returned Promise is rejected
* but the client remains usable.
*/
function setPersistenceProviders(client, onlineComponentProvider, offlineComponentProvider) {
const persistenceResult = new Deferred();
return client.asyncQueue
.enqueue(async () => {
try {
await setOfflineComponentProvider(client, offlineComponentProvider);
await setOnlineComponentProvider(client, onlineComponentProvider);
persistenceResult.resolve();
}
catch (e) {
const error = e;
if (!canFallbackFromIndexedDbError(error)) {
throw error;
}
logWarn('Error enabling offline persistence. Falling back to ' +
'persistence disabled: ' +
error);
persistenceResult.reject(error);
}
})
.then(() => persistenceResult.promise);
}
/**
* Decides whether the provided error allows us to gracefully disable
* persistence (as opposed to crashing the client).
*/
function canFallbackFromIndexedDbError(error) {
if (error.name === 'FirebaseError') {
return (error.code === Code.FAILED_PRECONDITION ||
error.code === Code.UNIMPLEMENTED);
}
else if (typeof DOMException !== 'undefined' &&
error instanceof DOMException) {
// There are a few known circumstances where we can open IndexedDb but
// trying to read/write will fail (e.g. quota exceeded). For
// well-understood cases, we attempt to detect these and then gracefully
// fall back to memory persistence.
// NOTE: Rather than continue to add to this list, we could decide to
// always fall back, with the risk that we might accidentally hide errors
// representing actual SDK bugs.
return (
// When the browser is out of quota we could get either quota exceeded
// or an aborted error depending on whether the error happened during
// schema migration.
error.code === DOM_EXCEPTION_QUOTA_EXCEEDED ||
error.code === DOM_EXCEPTION_ABORTED ||
// Firefox Private Browsing mode disables IndexedDb and returns
// INVALID_STATE for any usage.
error.code === DOM_EXCEPTION_INVALID_STATE);
}
return true;
}
/**
* Clears the persistent storage. This includes pending writes and cached
* documents.
*
* Must be called while the {@link Firestore} instance is not started (after the app is
* terminated or when the app is first initialized). On startup, this function
* must be called before other functions (other than {@link
 * initializeFirestore} or {@link (getFirestore:1)}). If the {@link Firestore}
* instance is still running, the promise will be rejected with the error code
* of `failed-precondition`.
*
* Note: `clearIndexedDbPersistence()` is primarily intended to help write
* reliable tests that use Cloud Firestore. It uses an efficient mechanism for
* dropping existing data but does not attempt to securely overwrite or
* otherwise make cached data unrecoverable. For applications that are sensitive
* to the disclosure of cached data in between user sessions, we strongly
* recommend not enabling persistence at all.
*
* @param firestore - The {@link Firestore} instance to clear persistence for.
* @returns A `Promise` that is resolved when the persistent storage is
* cleared. Otherwise, the promise is rejected with an error.
*/
function clearIndexedDbPersistence(firestore) {
if (firestore._initialized && !firestore._terminated) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'Persistence can only be cleared before a Firestore instance is ' +
'initialized or after it is terminated.');
}
const deferred = new Deferred();
firestore._queue.enqueueAndForgetEvenWhileRestricted(async () => {
try {
await indexedDbClearPersistence(indexedDbStoragePrefix(firestore._databaseId, firestore._persistenceKey));
deferred.resolve();
}
catch (e) {
deferred.reject(e);
}
});
return deferred.promise;
}
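/*
 * Illustrative test-teardown sketch: the instance must be terminated (or not
 * yet started) before clearing, per the precondition documented above.
 *
 *   await terminate(db);
 *   await clearIndexedDbPersistence(db);
 */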
/**
* Waits until all currently pending writes for the active user have been
* acknowledged by the backend.
*
* The returned promise resolves immediately if there are no outstanding writes.
* Otherwise, the promise waits for all previously issued writes (including
* those written in a previous app session), but it does not wait for writes
* that were added after the function is called. If you want to wait for
* additional writes, call `waitForPendingWrites()` again.
*
* Any outstanding `waitForPendingWrites()` promises are rejected during user
* changes.
*
* @returns A `Promise` which resolves when all currently pending writes have been
* acknowledged by the backend.
*/
function waitForPendingWrites(firestore) {
firestore = cast(firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
return firestoreClientWaitForPendingWrites(client);
}
/**
* Re-enables use of the network for this {@link Firestore} instance after a prior
* call to {@link disableNetwork}.
*
* @returns A `Promise` that is resolved once the network has been enabled.
*/
function enableNetwork(firestore) {
firestore = cast(firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
return firestoreClientEnableNetwork(client);
}
/**
* Disables network usage for this instance. It can be re-enabled via {@link
* enableNetwork}. While the network is disabled, any snapshot listeners,
* `getDoc()` or `getDocs()` calls will return results from cache, and any write
* operations will be queued until the network is restored.
*
* @returns A `Promise` that is resolved once the network has been disabled.
*/
function disableNetwork(firestore) {
firestore = cast(firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
return firestoreClientDisableNetwork(client);
}
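/*
 * Illustrative sketch (assumption: the `getDoc` helper exported by this SDK):
 * while the network is disabled, reads are served from cache and writes queue.
 *
 *   await disableNetwork(db);
 *   const snap = await getDoc(doc(db, 'users/alice')); // cache-only result
 *   await enableNetwork(db); // queued writes resume sending
 */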
/**
* Terminates the provided {@link Firestore} instance.
*
* After calling `terminate()` only the `clearIndexedDbPersistence()` function
* may be used. Any other function will throw a `FirestoreError`.
*
 * To restart after termination, create a new instance of {@link Firestore} with
 * {@link (getFirestore:1)}.
*
* Termination does not cancel any pending writes, and any promises that are
* awaiting a response from the server will not be resolved. If you have
* persistence enabled, the next time you start this instance, it will resume
* sending these writes to the server.
*
* Note: Under normal circumstances, calling `terminate()` is not required. This
* function is useful only when you want to force this instance to release all
* of its resources or in combination with `clearIndexedDbPersistence()` to
* ensure that all local state is destroyed between test runs.
*
* @returns A `Promise` that is resolved when the instance has been successfully
* terminated.
*/
function terminate(firestore) {
app._removeServiceInstance(firestore.app, 'firestore', firestore._databaseId.database);
return firestore._delete();
}
/**
* Loads a Firestore bundle into the local cache.
*
* @param firestore - The {@link Firestore} instance to load bundles for.
* @param bundleData - An object representing the bundle to be loaded. Valid
* objects are `ArrayBuffer`, `ReadableStream` or `string`.
*
* @returns A `LoadBundleTask` object, which notifies callers with progress
* updates, and completion or error events. It can be used as a
* `Promise`.
*/
function loadBundle(firestore, bundleData) {
firestore = cast(firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
const resultTask = new LoadBundleTask();
firestoreClientLoadBundle(client, firestore._databaseId, bundleData, resultTask);
return resultTask;
}
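/*
 * Illustrative sketch: a LoadBundleTask is both observable and awaitable.
 * `bundleData` is assumed to be a bundle string fetched from your server.
 *
 *   const task = loadBundle(db, bundleData);
 *   task.onProgress(p => console.log(`${p.documentsLoaded}/${p.totalDocuments}`));
 *   const finalProgress = await task; // taskState === 'Success'
 */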
/**
* Reads a Firestore {@link Query} from local cache, identified by the given
* name.
*
* The named queries are packaged into bundles on the server side (along
* with resulting documents), and loaded to local cache using `loadBundle`. Once
* in local cache, use this method to extract a {@link Query} by name.
*
* @param firestore - The {@link Firestore} instance to read the query from.
* @param name - The name of the query.
* @returns A `Promise` that is resolved with the Query or `null`.
*/
function namedQuery(firestore, name) {
firestore = cast(firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
return firestoreClientGetNamedQuery(client, name).then(namedQuery => {
if (!namedQuery) {
return null;
}
return new Query(firestore, null, namedQuery.query);
});
}
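/*
 * Illustrative sketch (assumption: the `getDocsFromCache` helper exported by
 * this SDK): reading a server-defined query out of a previously loaded bundle.
 *
 *   await loadBundle(db, bundleData);
 *   const q = await namedQuery(db, 'latest-stories');
 *   if (q) {
 *     const snapshot = await getDocsFromCache(q); // served from the bundled cache
 *   }
 */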
function verifyNotInitialized(firestore) {
if (firestore._initialized || firestore._terminated) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'Firestore has already been started and persistence can no longer be ' +
'enabled. You can only enable persistence before calling any other ' +
'methods on a Firestore object.');
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function registerFirestore(variant, useFetchStreams = true) {
setSDKVersion(app.SDK_VERSION);
app._registerComponent(new component.Component('firestore', (container, { instanceIdentifier: databaseId, options: settings }) => {
const app = container.getProvider('app').getImmediate();
const firestoreInstance = new Firestore(new FirebaseAuthCredentialsProvider(container.getProvider('auth-internal')), new FirebaseAppCheckTokenProvider(container.getProvider('app-check-internal')), databaseIdFromApp(app, databaseId), app);
settings = Object.assign({ useFetchStreams }, settings);
firestoreInstance._setSettings(settings);
return firestoreInstance;
}, 'PUBLIC').setMultipleInstances(true));
app.registerVersion(name, version$1, variant);
// BUILD_TARGET is replaced with values like esm5, esm2017, cjs5, etc. during compilation ('cjs2017' in this build)
app.registerVersion(name, version$1, 'cjs2017');
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function isPartialObserver(obj) {
return implementsAnyMethods(obj, ['next', 'error', 'complete']);
}
/**
* Returns true if obj is an object and contains at least one of the specified
* methods.
*/
function implementsAnyMethods(obj, methods) {
if (typeof obj !== 'object' || obj === null) {
return false;
}
const object = obj;
for (const method of methods) {
if (method in object && typeof object[method] === 'function') {
return true;
}
}
return false;
}
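/*
 * Internal usage sketch (illustration only): isPartialObserver() lets the API
 * distinguish an observer object from positional callbacks in overloads.
 *
 *   isPartialObserver({ next: snap => console.log(snap) }); // true
 *   isPartialObserver(snap => console.log(snap));           // false (plain function)
 *   isPartialObserver(null);                                // false
 */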
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An immutable object representing an array of bytes.
*/
class Bytes {
/** @hideconstructor */
constructor(byteString) {
this._byteString = byteString;
}
/**
* Creates a new `Bytes` object from the given Base64 string, converting it to
* bytes.
*
* @param base64 - The Base64 string used to create the `Bytes` object.
*/
static fromBase64String(base64) {
try {
return new Bytes(ByteString.fromBase64String(base64));
}
catch (e) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Failed to construct data from Base64 string: ' + e);
}
}
/**
* Creates a new `Bytes` object from the given Uint8Array.
*
* @param array - The Uint8Array used to create the `Bytes` object.
*/
static fromUint8Array(array) {
return new Bytes(ByteString.fromUint8Array(array));
}
/**
* Returns the underlying bytes as a Base64-encoded string.
*
* @returns The Base64-encoded string created from the `Bytes` object.
*/
toBase64() {
return this._byteString.toBase64();
}
/**
* Returns the underlying bytes in a new `Uint8Array`.
*
* @returns The Uint8Array created from the `Bytes` object.
*/
toUint8Array() {
return this._byteString.toUint8Array();
}
/**
* Returns a string representation of the `Bytes` object.
*
* @returns A string representation of the `Bytes` object.
*/
toString() {
return 'Bytes(base64: ' + this.toBase64() + ')';
}
/**
* Returns true if this `Bytes` object is equal to the provided one.
*
* @param other - The `Bytes` object to compare against.
* @returns true if this `Bytes` object is equal to the provided one.
*/
isEqual(other) {
return this._byteString.isEqual(other._byteString);
}
}
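/*
 * Illustrative sketch: round-tripping binary data through Bytes.
 *
 *   const b = Bytes.fromBase64String('SGVsbG8='); // "Hello"
 *   const raw = b.toUint8Array();
 *   Bytes.fromUint8Array(raw).isEqual(b);         // true
 */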
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A `FieldPath` refers to a field in a document. The path may consist of a
* single field name (referring to a top-level field in the document), or a
* list of field names (referring to a nested field in the document).
*
* Create a `FieldPath` by providing field names. If more than one field
* name is provided, the path will point to a nested field in a document.
*/
class FieldPath {
/**
* Creates a `FieldPath` from the provided field names. If more than one field
* name is provided, the path will point to a nested field in a document.
*
* @param fieldNames - A list of field names.
*/
constructor(...fieldNames) {
for (let i = 0; i < fieldNames.length; ++i) {
if (fieldNames[i].length === 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid field name at argument ${i + 1}. ` +
'Field names must not be empty.');
}
}
this._internalPath = new FieldPath$1(fieldNames);
}
/**
* Returns true if this `FieldPath` is equal to the provided one.
*
* @param other - The `FieldPath` to compare against.
* @returns true if this `FieldPath` is equal to the provided one.
*/
isEqual(other) {
return this._internalPath.isEqual(other._internalPath);
}
}
/**
* Returns a special sentinel `FieldPath` to refer to the ID of a document.
* It can be used in queries to sort or filter by the document ID.
*/
function documentId() {
return new FieldPath(DOCUMENT_KEY_NAME);
}
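/*
 * Illustrative sketch (assumption: the `query`/`where` helpers exported by
 * this SDK): addressing a nested field and filtering on the document id.
 *
 *   const city = new FieldPath('address', 'city'); // points at 'address.city'
 *   query(collection(db, 'users'), where(city, '==', 'Tokyo'));
 *   query(collection(db, 'users'), where(documentId(), '>=', 'm'));
 */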
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Sentinel values that can be used when writing document fields with `set()`
* or `update()`.
*/
class FieldValue {
/**
* @param _methodName - The public API endpoint that returns this class.
* @hideconstructor
*/
constructor(_methodName) {
this._methodName = _methodName;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * An immutable object representing a geographic location in Firestore. The
 * location is represented as a latitude/longitude pair.
*
* Latitude values are in the range of [-90, 90].
* Longitude values are in the range of [-180, 180].
*/
class GeoPoint {
/**
* Creates a new immutable `GeoPoint` object with the provided latitude and
* longitude values.
* @param latitude - The latitude as number between -90 and 90.
* @param longitude - The longitude as number between -180 and 180.
*/
constructor(latitude, longitude) {
if (!isFinite(latitude) || latitude < -90 || latitude > 90) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Latitude must be a number between -90 and 90, but was: ' + latitude);
}
if (!isFinite(longitude) || longitude < -180 || longitude > 180) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Longitude must be a number between -180 and 180, but was: ' + longitude);
}
this._lat = latitude;
this._long = longitude;
}
/**
* The latitude of this `GeoPoint` instance.
*/
get latitude() {
return this._lat;
}
/**
* The longitude of this `GeoPoint` instance.
*/
get longitude() {
return this._long;
}
/**
* Returns true if this `GeoPoint` is equal to the provided one.
*
* @param other - The `GeoPoint` to compare against.
* @returns true if this `GeoPoint` is equal to the provided one.
*/
isEqual(other) {
return this._lat === other._lat && this._long === other._long;
}
/** Returns a JSON-serializable representation of this GeoPoint. */
toJSON() {
return { latitude: this._lat, longitude: this._long };
}
/**
* Actually private to JS consumers of our API, so this function is prefixed
* with an underscore.
*/
_compareTo(other) {
return (primitiveComparator(this._lat, other._lat) ||
primitiveComparator(this._long, other._long));
}
}
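/*
 * Illustrative sketch: constructing and comparing GeoPoints. Out-of-range
 * values throw INVALID_ARGUMENT, per the constructor checks above.
 *
 *   const sf = new GeoPoint(37.7749, -122.4194);
 *   sf.isEqual(new GeoPoint(37.7749, -122.4194)); // true
 *   // new GeoPoint(100, 0) would throw: latitude outside [-90, 90]
 */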
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const RESERVED_FIELD_REGEX = /^__.*__$/;
/** The result of parsing document data (e.g. for a setData call). */
class ParsedSetData {
constructor(data, fieldMask, fieldTransforms) {
this.data = data;
this.fieldMask = fieldMask;
this.fieldTransforms = fieldTransforms;
}
toMutation(key, precondition) {
if (this.fieldMask !== null) {
return new PatchMutation(key, this.data, this.fieldMask, precondition, this.fieldTransforms);
}
else {
return new SetMutation(key, this.data, precondition, this.fieldTransforms);
}
}
}
/** The result of parsing "update" data (i.e. for an updateData call). */
class ParsedUpdateData {
constructor(data,
// The fieldMask does not include document transforms.
fieldMask, fieldTransforms) {
this.data = data;
this.fieldMask = fieldMask;
this.fieldTransforms = fieldTransforms;
}
toMutation(key, precondition) {
return new PatchMutation(key, this.data, this.fieldMask, precondition, this.fieldTransforms);
}
}
function isWrite(dataSource) {
switch (dataSource) {
case 0 /* UserDataSource.Set */: // fall through
case 2 /* UserDataSource.MergeSet */: // fall through
case 1 /* UserDataSource.Update */:
return true;
case 3 /* UserDataSource.Argument */:
case 4 /* UserDataSource.ArrayArgument */:
return false;
default:
throw fail();
}
}
/** A "context" object passed around while parsing user data. */
class ParseContextImpl {
/**
* Initializes a ParseContext with the given source and path.
*
* @param settings - The settings for the parser.
* @param databaseId - The database ID of the Firestore instance.
* @param serializer - The serializer to use to generate the Value proto.
* @param ignoreUndefinedProperties - Whether to ignore undefined properties
* rather than throw.
* @param fieldTransforms - A mutable list of field transforms encountered
* while parsing the data.
* @param fieldMask - A mutable list of field paths encountered while parsing
* the data.
*
* TODO(b/34871131): We don't support array paths right now, so path can be
* null to indicate the context represents any location within an array (in
 * which case certain features will not work and error messages will be
 * somewhat compromised).
*/
constructor(settings, databaseId, serializer, ignoreUndefinedProperties, fieldTransforms, fieldMask) {
this.settings = settings;
this.databaseId = databaseId;
this.serializer = serializer;
this.ignoreUndefinedProperties = ignoreUndefinedProperties;
// Minor hack: If fieldTransforms is undefined, we assume this is an
// external call and we need to validate the entire path.
if (fieldTransforms === undefined) {
this.validatePath();
}
this.fieldTransforms = fieldTransforms || [];
this.fieldMask = fieldMask || [];
}
get path() {
return this.settings.path;
}
get dataSource() {
return this.settings.dataSource;
}
/** Returns a new context with the specified settings overwritten. */
contextWith(configuration) {
return new ParseContextImpl(Object.assign(Object.assign({}, this.settings), configuration), this.databaseId, this.serializer, this.ignoreUndefinedProperties, this.fieldTransforms, this.fieldMask);
}
childContextForField(field) {
var _a;
const childPath = (_a = this.path) === null || _a === void 0 ? void 0 : _a.child(field);
const context = this.contextWith({ path: childPath, arrayElement: false });
context.validatePathSegment(field);
return context;
}
childContextForFieldPath(field) {
var _a;
const childPath = (_a = this.path) === null || _a === void 0 ? void 0 : _a.child(field);
const context = this.contextWith({ path: childPath, arrayElement: false });
context.validatePath();
return context;
}
childContextForArray(index) {
// TODO(b/34871131): We don't support array paths right now; so make path
// undefined.
return this.contextWith({ path: undefined, arrayElement: true });
}
createError(reason) {
return createError(reason, this.settings.methodName, this.settings.hasConverter || false, this.path, this.settings.targetDoc);
}
/** Returns 'true' if 'fieldPath' was traversed when creating this context. */
contains(fieldPath) {
return (this.fieldMask.find(field => fieldPath.isPrefixOf(field)) !== undefined ||
this.fieldTransforms.find(transform => fieldPath.isPrefixOf(transform.field)) !== undefined);
}
validatePath() {
// TODO(b/34871131): Remove null check once we have proper paths for fields
// within arrays.
if (!this.path) {
return;
}
for (let i = 0; i < this.path.length; i++) {
this.validatePathSegment(this.path.get(i));
}
}
validatePathSegment(segment) {
if (segment.length === 0) {
throw this.createError('Document fields must not be empty');
}
if (isWrite(this.dataSource) && RESERVED_FIELD_REGEX.test(segment)) {
throw this.createError('Document fields cannot begin and end with "__"');
}
}
}
/**
* Helper for parsing raw user input (provided via the API) into internal model
* classes.
*/
class UserDataReader {
constructor(databaseId, ignoreUndefinedProperties, serializer) {
this.databaseId = databaseId;
this.ignoreUndefinedProperties = ignoreUndefinedProperties;
this.serializer = serializer || newSerializer(databaseId);
}
/** Creates a new top-level parse context. */
createContext(dataSource, methodName, targetDoc, hasConverter = false) {
return new ParseContextImpl({
dataSource,
methodName,
targetDoc,
path: FieldPath$1.emptyPath(),
arrayElement: false,
hasConverter
}, this.databaseId, this.serializer, this.ignoreUndefinedProperties);
}
}
function newUserDataReader(firestore) {
const settings = firestore._freezeSettings();
const serializer = newSerializer(firestore._databaseId);
return new UserDataReader(firestore._databaseId, !!settings.ignoreUndefinedProperties, serializer);
}
/** Parse document data from a set() call. */
function parseSetData(userDataReader, methodName, targetDoc, input, hasConverter, options = {}) {
const context = userDataReader.createContext(options.merge || options.mergeFields
? 2 /* UserDataSource.MergeSet */
: 0 /* UserDataSource.Set */, methodName, targetDoc, hasConverter);
validatePlainObject('Data must be an object, but it was:', context, input);
const updateData = parseObject(input, context);
let fieldMask;
let fieldTransforms;
if (options.merge) {
fieldMask = new FieldMask(context.fieldMask);
fieldTransforms = context.fieldTransforms;
}
else if (options.mergeFields) {
const validatedFieldPaths = [];
for (const stringOrFieldPath of options.mergeFields) {
const fieldPath = fieldPathFromArgument$1(methodName, stringOrFieldPath, targetDoc);
if (!context.contains(fieldPath)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Field '${fieldPath}' is specified in your field mask but missing from your input data.`);
}
if (!fieldMaskContains(validatedFieldPaths, fieldPath)) {
validatedFieldPaths.push(fieldPath);
}
}
fieldMask = new FieldMask(validatedFieldPaths);
fieldTransforms = context.fieldTransforms.filter(transform => fieldMask.covers(transform.field));
}
else {
fieldMask = null;
fieldTransforms = context.fieldTransforms;
}
return new ParsedSetData(new ObjectValue(updateData), fieldMask, fieldTransforms);
}
class DeleteFieldValueImpl extends FieldValue {
_toFieldTransform(context) {
if (context.dataSource === 2 /* UserDataSource.MergeSet */) {
// No transform to add for a delete, but we need to add it to our
// fieldMask so it gets deleted.
context.fieldMask.push(context.path);
}
else if (context.dataSource === 1 /* UserDataSource.Update */) {
throw context.createError(`${this._methodName}() can only appear at the top level ` +
'of your update data');
}
else {
// We shouldn't encounter delete sentinels for queries or non-merge set() calls.
throw context.createError(`${this._methodName}() cannot be used with set() unless you pass ` +
'{merge:true}');
}
return null;
}
isEqual(other) {
return other instanceof DeleteFieldValueImpl;
}
}
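/*
 * Illustrative sketch (assumption: the public `deleteField`, `updateDoc` and
 * `setDoc` helpers that wrap this sentinel): deleting a field is legal in
 * update() and in merge-set(), per the branches above.
 *
 *   const ref = doc(db, 'users/alice');
 *   updateDoc(ref, { nickname: deleteField() });               // allowed
 *   setDoc(ref, { nickname: deleteField() }, { merge: true }); // allowed
 *   setDoc(ref, { nickname: deleteField() });                  // throws
 */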
/**
* Creates a child context for parsing SerializableFieldValues.
*
* This is different than calling `ParseContext.contextWith` because it keeps
* the fieldTransforms and fieldMask separate.
*
* The created context has its `dataSource` set to `UserDataSource.Argument`.
* Although these values are used with writes, any elements in these FieldValues
* are not considered writes since they cannot contain any FieldValue sentinels,
* etc.
*
* @param fieldValue - The sentinel FieldValue for which to create a child
* context.
* @param context - The parent context.
 * @param arrayElement - Whether or not the FieldValue is being parsed as an
 * array element.
*/
function createSentinelChildContext(fieldValue, context, arrayElement) {
return new ParseContextImpl({
dataSource: 3 /* UserDataSource.Argument */,
targetDoc: context.settings.targetDoc,
methodName: fieldValue._methodName,
arrayElement
}, context.databaseId, context.serializer, context.ignoreUndefinedProperties);
}
class ServerTimestampFieldValueImpl extends FieldValue {
_toFieldTransform(context) {
return new FieldTransform(context.path, new ServerTimestampTransform());
}
isEqual(other) {
return other instanceof ServerTimestampFieldValueImpl;
}
}
class ArrayUnionFieldValueImpl extends FieldValue {
constructor(methodName, _elements) {
super(methodName);
this._elements = _elements;
}
_toFieldTransform(context) {
const parseContext = createSentinelChildContext(this, context,
/*array=*/ true);
const parsedElements = this._elements.map(element => parseData(element, parseContext));
const arrayUnion = new ArrayUnionTransformOperation(parsedElements);
return new FieldTransform(context.path, arrayUnion);
}
isEqual(other) {
// TODO(mrschmidt): Implement isEquals
return this === other;
}
}
class ArrayRemoveFieldValueImpl extends FieldValue {
constructor(methodName, _elements) {
super(methodName);
this._elements = _elements;
}
_toFieldTransform(context) {
const parseContext = createSentinelChildContext(this, context,
/*arrayElement=*/ true);
const parsedElements = this._elements.map(element => parseData(element, parseContext));
const arrayRemove = new ArrayRemoveTransformOperation(parsedElements);
return new FieldTransform(context.path, arrayRemove);
}
isEqual(other) {
// TODO(mrschmidt): Implement isEquals
return this === other;
}
}
class NumericIncrementFieldValueImpl extends FieldValue {
constructor(methodName, _operand) {
super(methodName);
this._operand = _operand;
}
_toFieldTransform(context) {
const numericIncrement = new NumericIncrementTransformOperation(context.serializer, toNumber(context.serializer, this._operand));
return new FieldTransform(context.path, numericIncrement);
}
isEqual(other) {
// TODO(mrschmidt): Implement isEquals
return this === other;
}
}
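/*
 * Each sentinel class above maps to one server-side field transform. A usage
 * sketch with the corresponding public helpers from 'firebase/firestore'
 * (run inside an async function):
 *
 *   import {
 *     getFirestore, doc, updateDoc,
 *     serverTimestamp, arrayUnion, arrayRemove, increment
 *   } from 'firebase/firestore';
 *
 *   const ref = doc(getFirestore(), 'games', 'match1');
 *   await updateDoc(ref, {
 *     updatedAt: serverTimestamp(),   // ServerTimestampTransform
 *     players: arrayUnion('alice'),   // ArrayUnionTransformOperation
 *     spectators: arrayRemove('bob'), // ArrayRemoveTransformOperation
 *     score: increment(5)             // NumericIncrementTransformOperation
 *   });
 */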
/** Parse update data from an update() call. */
function parseUpdateData(userDataReader, methodName, targetDoc, input) {
const context = userDataReader.createContext(1 /* UserDataSource.Update */, methodName, targetDoc);
validatePlainObject('Data must be an object, but it was:', context, input);
const fieldMaskPaths = [];
const updateData = ObjectValue.empty();
forEach(input, (key, value) => {
const path = fieldPathFromDotSeparatedString(methodName, key, targetDoc);
// For Compat types, we have to "extract" the underlying types before
// performing validation.
value = util.getModularInstance(value);
const childContext = context.childContextForFieldPath(path);
if (value instanceof DeleteFieldValueImpl) {
// Add it to the field mask, but don't add anything to updateData.
fieldMaskPaths.push(path);
}
else {
const parsedValue = parseData(value, childContext);
if (parsedValue != null) {
fieldMaskPaths.push(path);
updateData.set(path, parsedValue);
}
}
});
const mask = new FieldMask(fieldMaskPaths);
return new ParsedUpdateData(updateData, mask, context.fieldTransforms);
}
/** Parse update data from a list of field/value arguments. */
function parseUpdateVarargs(userDataReader, methodName, targetDoc, field, value, moreFieldsAndValues) {
const context = userDataReader.createContext(1 /* UserDataSource.Update */, methodName, targetDoc);
const keys = [fieldPathFromArgument$1(methodName, field, targetDoc)];
const values = [value];
if (moreFieldsAndValues.length % 2 !== 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${methodName}() needs to be called with an even number ` +
'of arguments that alternate between field names and values.');
}
for (let i = 0; i < moreFieldsAndValues.length; i += 2) {
keys.push(fieldPathFromArgument$1(methodName, moreFieldsAndValues[i]));
values.push(moreFieldsAndValues[i + 1]);
}
const fieldMaskPaths = [];
const updateData = ObjectValue.empty();
// We iterate in reverse order to pick the last value for a field if the
// user specified the field multiple times.
for (let i = keys.length - 1; i >= 0; --i) {
if (!fieldMaskContains(fieldMaskPaths, keys[i])) {
const path = keys[i];
let value = values[i];
// For Compat types, we have to "extract" the underlying types before
// performing validation.
value = util.getModularInstance(value);
const childContext = context.childContextForFieldPath(path);
if (value instanceof DeleteFieldValueImpl) {
// Add it to the field mask, but don't add anything to updateData.
fieldMaskPaths.push(path);
}
else {
const parsedValue = parseData(value, childContext);
if (parsedValue != null) {
fieldMaskPaths.push(path);
updateData.set(path, parsedValue);
}
}
}
}
const mask = new FieldMask(fieldMaskPaths);
return new ParsedUpdateData(updateData, mask, context.fieldTransforms);
}
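/*
 * The varargs form takes alternating field/value pairs: an odd argument count
 * throws INVALID_ARGUMENT, and when a field repeats, the last value wins
 * (hence the reverse iteration above). A sketch (public API assumed; run
 * inside an async function):
 *
 *   import { getFirestore, doc, updateDoc } from 'firebase/firestore';
 *
 *   const ref = doc(getFirestore(), 'users', 'alice');
 *   await updateDoc(ref, 'age', 30, 'profile.bio', 'hello');
 *   // 'age' appears twice: only the later value (31) is written.
 *   await updateDoc(ref, 'age', 30, 'age', 31);
 */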
/**
* Parse a "query value" (e.g. value in a where filter or a value in a cursor
* bound).
*
* @param allowArrays - Whether the query value is an array that may directly
* contain additional arrays (e.g. the operand of an `in` query).
*/
function parseQueryValue(userDataReader, methodName, input, allowArrays = false) {
const context = userDataReader.createContext(allowArrays ? 4 /* UserDataSource.ArrayArgument */ : 3 /* UserDataSource.Argument */, methodName);
const parsed = parseData(input, context);
return parsed;
}
/**
* Parses user data to Protobuf Values.
*
* @param input - Data to be parsed.
* @param context - A context object representing the current path being parsed,
* the source of the data being parsed, etc.
* @returns The parsed value, or null if the value was a FieldValue sentinel
* that should not be included in the resulting parsed data.
*/
function parseData(input, context) {
// Unwrap the API type from the Compat SDK. This will return the API type
// from firestore-exp.
input = util.getModularInstance(input);
if (looksLikeJsonObject(input)) {
validatePlainObject('Unsupported field value:', context, input);
return parseObject(input, context);
}
else if (input instanceof FieldValue) {
// FieldValues usually parse into transforms (except deleteField())
// in which case we do not want to include this field in our parsed data
// (as doing so will overwrite the field directly prior to the transform
// trying to transform it). So we don't add this location to
// context.fieldMask and we return null as our parsing result.
parseSentinelFieldValue(input, context);
return null;
}
else if (input === undefined && context.ignoreUndefinedProperties) {
// If the input is undefined it can never participate in the fieldMask, so
// don't handle this below. If `ignoreUndefinedProperties` is false,
// `parseScalarValue` will reject an undefined value.
return null;
}
else {
// If context.path is null we are inside an array and we don't support
// field mask paths more granular than the top-level array.
if (context.path) {
context.fieldMask.push(context.path);
}
if (input instanceof Array) {
// TODO(b/34871131): Include the path containing the array in the error
// message.
// In the case of IN queries, the parsed data is an array (representing
// the set of values to be included for the IN query) that may directly
// contain additional arrays (each representing an individual field
// value), so we disable this validation.
if (context.settings.arrayElement &&
context.dataSource !== 4 /* UserDataSource.ArrayArgument */) {
throw context.createError('Nested arrays are not supported');
}
return parseArray(input, context);
}
else {
return parseScalarValue(input, context);
}
}
}
function parseObject(obj, context) {
const fields = {};
if (isEmpty(obj)) {
// If we encounter an empty object, we explicitly add it to the update
// mask to ensure that the server creates a map entry.
if (context.path && context.path.length > 0) {
context.fieldMask.push(context.path);
}
}
else {
forEach(obj, (key, val) => {
const parsedValue = parseData(val, context.childContextForField(key));
if (parsedValue != null) {
fields[key] = parsedValue;
}
});
}
return { mapValue: { fields } };
}
function parseArray(array, context) {
const values = [];
let entryIndex = 0;
for (const entry of array) {
let parsedEntry = parseData(entry, context.childContextForArray(entryIndex));
if (parsedEntry == null) {
// Just include nulls in the array for fields being replaced with a
// sentinel.
parsedEntry = { nullValue: 'NULL_VALUE' };
}
values.push(parsedEntry);
entryIndex++;
}
return { arrayValue: { values } };
}
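/*
 * For reference, parseObject() and parseArray() above emit plain
 * google.firestore.v1.Value literals. For example, the input
 * { tags: ['a'], meta: {} } parses to roughly:
 *
 *   {
 *     mapValue: {
 *       fields: {
 *         tags: { arrayValue: { values: [{ stringValue: 'a' }] } },
 *         // The empty map is preserved, and its path is added to the
 *         // field mask (see the isEmpty() branch above).
 *         meta: { mapValue: { fields: {} } }
 *       }
 *     }
 *   }
 */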
/**
* "Parses" the provided FieldValueImpl, adding any necessary transforms to
* context.fieldTransforms.
*/
function parseSentinelFieldValue(value, context) {
// Sentinels are only supported with writes, and not within arrays.
if (!isWrite(context.dataSource)) {
throw context.createError(`${value._methodName}() can only be used with update() and set()`);
}
if (!context.path) {
throw context.createError(`${value._methodName}() is not currently supported inside arrays`);
}
const fieldTransform = value._toFieldTransform(context);
if (fieldTransform) {
context.fieldTransforms.push(fieldTransform);
}
}
/**
* Helper to parse a scalar value (i.e. not an Object, Array, or FieldValue)
*
* @returns The parsed value
*/
function parseScalarValue(value, context) {
value = util.getModularInstance(value);
if (value === null) {
return { nullValue: 'NULL_VALUE' };
}
else if (typeof value === 'number') {
return toNumber(context.serializer, value);
}
else if (typeof value === 'boolean') {
return { booleanValue: value };
}
else if (typeof value === 'string') {
return { stringValue: value };
}
else if (value instanceof Date) {
const timestamp = Timestamp.fromDate(value);
return {
timestampValue: toTimestamp(context.serializer, timestamp)
};
}
else if (value instanceof Timestamp) {
// Firestore backend truncates precision down to microseconds. To ensure
// offline mode works the same with regards to truncation, perform the
// truncation immediately without waiting for the backend to do that.
const timestamp = new Timestamp(value.seconds, Math.floor(value.nanoseconds / 1000) * 1000);
return {
timestampValue: toTimestamp(context.serializer, timestamp)
};
}
else if (value instanceof GeoPoint) {
return {
geoPointValue: {
latitude: value.latitude,
longitude: value.longitude
}
};
}
else if (value instanceof Bytes) {
return { bytesValue: toBytes(context.serializer, value._byteString) };
}
else if (value instanceof DocumentReference) {
const thisDb = context.databaseId;
const otherDb = value.firestore._databaseId;
if (!otherDb.isEqual(thisDb)) {
throw context.createError('Document reference is for database ' +
`${otherDb.projectId}/${otherDb.database} but should be ` +
`for database ${thisDb.projectId}/${thisDb.database}`);
}
return {
referenceValue: toResourceName(value.firestore._databaseId || context.databaseId, value._key.path)
};
}
else {
throw context.createError(`Unsupported field value: ${valueDescription(value)}`);
}
}
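/*
 * A summary sketch of the scalar mappings above (proto JSON as produced by a
 * proto3-JSON serializer; see toTimestamp()):
 *
 *   null               -> { nullValue: 'NULL_VALUE' }
 *   true               -> { booleanValue: true }
 *   'hi'               -> { stringValue: 'hi' }
 *   new Date(0)        -> { timestampValue: '1970-01-01T00:00:00.000000000Z' }
 *   new GeoPoint(1, 2) -> { geoPointValue: { latitude: 1, longitude: 2 } }
 *
 * Timestamps are truncated to whole microseconds first, so
 * new Timestamp(0, 1500) is stored as if its nanoseconds were 1000.
 */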
/**
* Checks whether an object looks like a JSON object that should be converted
* into a struct. Normal class/prototype instances are considered to look like
* JSON objects since they should be converted to a struct value. Arrays, Dates,
* GeoPoints, etc. are not considered to look like JSON objects since they map
* to specific FieldValue types other than ObjectValue.
*/
function looksLikeJsonObject(input) {
return (typeof input === 'object' &&
input !== null &&
!(input instanceof Array) &&
!(input instanceof Date) &&
!(input instanceof Timestamp) &&
!(input instanceof GeoPoint) &&
!(input instanceof Bytes) &&
!(input instanceof DocumentReference) &&
!(input instanceof FieldValue));
}
function validatePlainObject(message, context, input) {
if (!looksLikeJsonObject(input) || !isPlainObject(input)) {
const description = valueDescription(input);
if (description === 'an object') {
// Massage the error if it was an object.
throw context.createError(message + ' a custom object');
}
else {
throw context.createError(message + ' ' + description);
}
}
}
/**
* Helper that calls fromDotSeparatedString() but wraps any error thrown.
*/
function fieldPathFromArgument$1(methodName, path, targetDoc) {
// If required, replace the FieldPath Compat class with the firestore-exp
// FieldPath.
path = util.getModularInstance(path);
if (path instanceof FieldPath) {
return path._internalPath;
}
else if (typeof path === 'string') {
return fieldPathFromDotSeparatedString(methodName, path);
}
else {
const message = 'Field path arguments must be of type string or FieldPath.';
throw createError(message, methodName,
/* hasConverter= */ false,
/* path= */ undefined, targetDoc);
}
}
/**
* Matches any characters in a field path string that are reserved.
*/
const FIELD_PATH_RESERVED = new RegExp('[~\\*/\\[\\]]');
/**
* Parses a dot-separated field path string, wrapping any error thrown with a
* message that names the public method that was called.
* @param methodName - The publicly visible method name
* @param path - The dot-separated string form of a field path which will be
* split on dots.
* @param targetDoc - The document against which the field path will be
* evaluated.
*/
function fieldPathFromDotSeparatedString(methodName, path, targetDoc) {
const found = path.search(FIELD_PATH_RESERVED);
if (found >= 0) {
throw createError(`Invalid field path (${path}). Paths must not contain ` +
`'~', '*', '/', '[', or ']'`, methodName,
/* hasConverter= */ false,
/* path= */ undefined, targetDoc);
}
try {
return new FieldPath(...path.split('.'))._internalPath;
}
catch (e) {
throw createError(`Invalid field path (${path}). Paths must not be empty, ` +
`begin with '.', end with '.', or contain '..'`, methodName,
/* hasConverter= */ false,
/* path= */ undefined, targetDoc);
}
}
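/*
 * Illustrative calls to the internal helper above:
 *
 *   fieldPathFromDotSeparatedString('updateDoc', 'a.b');  // ok: segments ['a', 'b']
 *   fieldPathFromDotSeparatedString('updateDoc', 'a*b');  // throws: reserved character '*'
 *   fieldPathFromDotSeparatedString('updateDoc', 'a..b'); // throws: empty segment
 */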
function createError(reason, methodName, hasConverter, path, targetDoc) {
const hasPath = path && !path.isEmpty();
const hasDocument = targetDoc !== undefined;
let message = `Function ${methodName}() called with invalid data`;
if (hasConverter) {
message += ' (via `toFirestore()`)';
}
message += '. ';
let description = '';
if (hasPath || hasDocument) {
description += ' (found';
if (hasPath) {
description += ` in field ${path}`;
}
if (hasDocument) {
description += ` in document ${targetDoc}`;
}
description += ')';
}
return new FirestoreError(Code.INVALID_ARGUMENT, message + reason + description);
}
/** Checks whether FieldPath `needle` is present in `haystack`. Runs in O(n). */
function fieldMaskContains(haystack, needle) {
return haystack.some(v => v.isEqual(needle));
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A `DocumentSnapshot` contains data read from a document in your Firestore
* database. The data can be extracted with `.data()` or `.get()` to
* get a specific field.
*
* For a `DocumentSnapshot` that points to a non-existing document, any data
* access will return 'undefined'. You can use the `exists()` method to
* explicitly verify a document's existence.
*/
class DocumentSnapshot$1 {
// Note: This class is stripped down version of the DocumentSnapshot in
// the legacy SDK. The changes are:
// - No support for SnapshotMetadata.
// - No support for SnapshotOptions.
/** @hideconstructor protected */
constructor(_firestore, _userDataWriter, _key, _document, _converter) {
this._firestore = _firestore;
this._userDataWriter = _userDataWriter;
this._key = _key;
this._document = _document;
this._converter = _converter;
}
/** Property of the `DocumentSnapshot` that provides the document's ID. */
get id() {
return this._key.path.lastSegment();
}
/**
* The `DocumentReference` for the document included in the `DocumentSnapshot`.
*/
get ref() {
return new DocumentReference(this._firestore, this._converter, this._key);
}
/**
* Signals whether or not the document at the snapshot's location exists.
*
* @returns true if the document exists.
*/
exists() {
return this._document !== null;
}
/**
* Retrieves all fields in the document as an `Object`. Returns `undefined` if
* the document doesn't exist.
*
* @returns An `Object` containing all fields in the document or `undefined`
* if the document doesn't exist.
*/
data() {
if (!this._document) {
return undefined;
}
else if (this._converter) {
// We only want to use the converter and create a new DocumentSnapshot
// if a converter has been provided.
const snapshot = new QueryDocumentSnapshot$1(this._firestore, this._userDataWriter, this._key, this._document,
/* converter= */ null);
return this._converter.fromFirestore(snapshot);
}
else {
return this._userDataWriter.convertValue(this._document.data.value);
}
}
/**
* Retrieves the field specified by `fieldPath`. Returns `undefined` if the
* document or field doesn't exist.
*
* @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
* field.
* @returns The data at the specified field location or undefined if no such
* field exists in the document.
*/
// We are using `any` here to avoid an explicit cast by our users.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
get(fieldPath) {
if (this._document) {
const value = this._document.data.field(fieldPathFromArgument('DocumentSnapshot.get', fieldPath));
if (value !== null) {
return this._userDataWriter.convertValue(value);
}
}
return undefined;
}
}
/**
* A `QueryDocumentSnapshot` contains data read from a document in your
* Firestore database as part of a query. The document is guaranteed to exist
* and its data can be extracted with `.data()` or `.get()` to get a
* specific field.
*
* A `QueryDocumentSnapshot` offers the same API surface as a
* `DocumentSnapshot`. Since query results contain only existing documents, the
* `exists` property will always be true and `data()` will never return
* 'undefined'.
*/
class QueryDocumentSnapshot$1 extends DocumentSnapshot$1 {
/**
* Retrieves all fields in the document as an `Object`.
*
* @override
* @returns An `Object` containing all fields in the document.
*/
data() {
return super.data();
}
}
/**
* Helper that calls `fromDotSeparatedString()` but wraps any error thrown.
*/
function fieldPathFromArgument(methodName, arg) {
if (typeof arg === 'string') {
return fieldPathFromDotSeparatedString(methodName, arg);
}
else if (arg instanceof FieldPath) {
return arg._internalPath;
}
else {
return arg._delegate._internalPath;
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function validateHasExplicitOrderByForLimitToLast(query) {
if (query.limitType === "L" /* LimitType.Last */ &&
query.explicitOrderBy.length === 0) {
throw new FirestoreError(Code.UNIMPLEMENTED, 'limitToLast() queries require specifying at least one orderBy() clause');
}
}
/**
* An `AppliableConstraint` is an abstraction of a constraint that can be applied
* to a Firestore query.
*/
class AppliableConstraint {
}
/**
* A `QueryConstraint` is used to narrow the set of documents returned by a
* Firestore query. `QueryConstraint`s are created by invoking {@link where},
* {@link orderBy}, {@link startAt}, {@link startAfter}, {@link
* endBefore}, {@link endAt}, {@link limit}, {@link limitToLast} and
* can then be passed to {@link query} to create a new query instance that
* also contains this `QueryConstraint`.
*/
class QueryConstraint extends AppliableConstraint {
}
function query(query, queryConstraint, ...additionalQueryConstraints) {
let queryConstraints = [];
if (queryConstraint instanceof AppliableConstraint) {
queryConstraints.push(queryConstraint);
}
queryConstraints = queryConstraints.concat(additionalQueryConstraints);
validateQueryConstraintArray(queryConstraints);
for (const constraint of queryConstraints) {
query = constraint._apply(query);
}
return query;
}
/**
* A `QueryFieldFilterConstraint` is used to narrow the set of documents returned by
* a Firestore query by filtering on one or more document fields.
* `QueryFieldFilterConstraint`s are created by invoking {@link where} and can then
* be passed to {@link query} to create a new query instance that also contains
* this `QueryFieldFilterConstraint`.
*/
class QueryFieldFilterConstraint extends QueryConstraint {
/**
* @internal
*/
constructor(_field, _op, _value) {
super();
this._field = _field;
this._op = _op;
this._value = _value;
/** The type of this query constraint */
this.type = 'where';
}
static _create(_field, _op, _value) {
return new QueryFieldFilterConstraint(_field, _op, _value);
}
_apply(query) {
const filter = this._parse(query);
validateNewFieldFilter(query._query, filter);
return new Query(query.firestore, query.converter, queryWithAddedFilter(query._query, filter));
}
_parse(query) {
const reader = newUserDataReader(query.firestore);
const filter = newQueryFilter(query._query, 'where', reader, query.firestore._databaseId, this._field, this._op, this._value);
return filter;
}
}
/**
* Creates a {@link QueryFieldFilterConstraint} that enforces that documents
* must contain the specified field and that the value should satisfy the
* relation constraint provided.
*
* @param fieldPath - The path to compare
* @param opStr - The operation string (e.g. "<", "<=", "==", ">",
* ">=", "!=").
* @param value - The value for comparison
* @returns The created {@link QueryFieldFilterConstraint}.
*/
function where(fieldPath, opStr, value) {
const op = opStr;
const field = fieldPathFromArgument('where', fieldPath);
return QueryFieldFilterConstraint._create(field, op, value);
}
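/*
 * A sketch of composing the constraint above into a query (public modular
 * API assumed; run inside an async function):
 *
 *   import { getFirestore, collection, query, where, getDocs } from 'firebase/firestore';
 *
 *   const cities = collection(getFirestore(), 'cities');
 *   const q = query(cities, where('population', '>=', 1000000));
 *   const snap = await getDocs(q);
 *   snap.forEach(d => console.log(d.id, d.data()));
 */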
/**
* A `QueryCompositeFilterConstraint` is used to narrow the set of documents
* returned by a Firestore query by performing the logical OR or AND of multiple
* {@link QueryFieldFilterConstraint}s or {@link QueryCompositeFilterConstraint}s.
* `QueryCompositeFilterConstraint`s are created by invoking {@link or} or
* {@link and} and can then be passed to {@link query} to create a new query
* instance that also contains the `QueryCompositeFilterConstraint`.
* @internal TODO remove this internal tag with OR Query support in the server
*/
class QueryCompositeFilterConstraint extends AppliableConstraint {
/**
* @internal
*/
constructor(
/** The type of this query constraint */
type, _queryConstraints) {
super();
this.type = type;
this._queryConstraints = _queryConstraints;
}
static _create(type, _queryConstraints) {
return new QueryCompositeFilterConstraint(type, _queryConstraints);
}
_parse(query) {
const parsedFilters = this._queryConstraints
.map(queryConstraint => {
return queryConstraint._parse(query);
})
.filter(parsedFilter => parsedFilter.getFilters().length > 0);
if (parsedFilters.length === 1) {
return parsedFilters[0];
}
return CompositeFilter.create(parsedFilters, this._getOperator());
}
_apply(query) {
const parsedFilter = this._parse(query);
if (parsedFilter.getFilters().length === 0) {
// Return the existing query if not adding any more filters (e.g. an empty
// composite filter).
return query;
}
validateNewFilter(query._query, parsedFilter);
return new Query(query.firestore, query.converter, queryWithAddedFilter(query._query, parsedFilter));
}
_getQueryConstraints() {
return this._queryConstraints;
}
_getOperator() {
return this.type === 'and' ? "and" /* CompositeOperator.AND */ : "or" /* CompositeOperator.OR */;
}
}
/**
* Creates a new {@link QueryCompositeFilterConstraint} that is a disjunction of
* the given filter constraints. A disjunction filter includes a document if it
* satisfies any of the given filters.
*
* @param queryConstraints - Optional. The list of
* {@link QueryFilterConstraint}s to perform a disjunction for. These must be
* created with calls to {@link where}, {@link or}, or {@link and}.
* @returns The newly created {@link QueryCompositeFilterConstraint}.
* @internal TODO remove this internal tag with OR Query support in the server
*/
function or(...queryConstraints) {
// Only support QueryFilterConstraints
queryConstraints.forEach(queryConstraint => validateQueryFilterConstraint('or', queryConstraint));
return QueryCompositeFilterConstraint._create("or" /* CompositeOperator.OR */, queryConstraints);
}
/**
* Creates a new {@link QueryCompositeFilterConstraint} that is a conjunction of
* the given filter constraints. A conjunction filter includes a document if it
* satisfies all of the given filters.
*
* @param queryConstraints - Optional. The list of
* {@link QueryFilterConstraint}s to perform a conjunction for. These must be
* created with calls to {@link where}, {@link or}, or {@link and}.
* @returns The newly created {@link QueryCompositeFilterConstraint}.
* @internal TODO remove this internal tag with OR Query support in the server
*/
function and(...queryConstraints) {
// Only support QueryFilterConstraints
queryConstraints.forEach(queryConstraint => validateQueryFilterConstraint('and', queryConstraint));
return QueryCompositeFilterConstraint._create("and" /* CompositeOperator.AND */, queryConstraints);
}
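/*
 * A sketch of composite filters (note that in this build or() and and() are
 * still tagged @internal pending server-side OR support). Because a top-level
 * composite filter cannot be combined with other top-level filters (see
 * validateQueryConstraintArray() below), nest everything in a single
 * and()/or():
 *
 *   import { getFirestore, collection, query, where, or, and } from 'firebase/firestore';
 *
 *   const cities = collection(getFirestore(), 'cities');
 *   const q = query(cities, and(
 *     where('state', '==', 'CA'),
 *     or(where('capital', '==', true), where('population', '>=', 1000000))
 *   ));
 */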
/**
* A `QueryOrderByConstraint` is used to sort the set of documents returned by a
* Firestore query. `QueryOrderByConstraint`s are created by invoking
* {@link orderBy} and can then be passed to {@link query} to create a new query
* instance that also contains this `QueryOrderByConstraint`.
*
* Note: Documents that do not contain the orderBy field will not be present in
* the query result.
*/
class QueryOrderByConstraint extends QueryConstraint {
/**
* @internal
*/
constructor(_field, _direction) {
super();
this._field = _field;
this._direction = _direction;
/** The type of this query constraint */
this.type = 'orderBy';
}
static _create(_field, _direction) {
return new QueryOrderByConstraint(_field, _direction);
}
_apply(query) {
const orderBy = newQueryOrderBy(query._query, this._field, this._direction);
return new Query(query.firestore, query.converter, queryWithAddedOrderBy(query._query, orderBy));
}
}
/**
* Creates a {@link QueryOrderByConstraint} that sorts the query result by the
* specified field, optionally in descending order instead of ascending.
*
* Note: Documents that do not contain the specified field will not be present
* in the query result.
*
* @param fieldPath - The field to sort by.
* @param directionStr - Optional direction to sort by ('asc' or 'desc'). If
* not specified, order will be ascending.
* @returns The created {@link QueryOrderByConstraint}.
*/
function orderBy(fieldPath, directionStr = 'asc') {
const direction = directionStr;
const path = fieldPathFromArgument('orderBy', fieldPath);
return QueryOrderByConstraint._create(path, direction);
}
/**
* A `QueryLimitConstraint` is used to limit the number of documents returned by
* a Firestore query.
* `QueryLimitConstraint`s are created by invoking {@link limit} or
* {@link limitToLast} and can then be passed to {@link query} to create a new
* query instance that also contains this `QueryLimitConstraint`.
*/
class QueryLimitConstraint extends QueryConstraint {
/**
* @internal
*/
constructor(
/** The type of this query constraint */
type, _limit, _limitType) {
super();
this.type = type;
this._limit = _limit;
this._limitType = _limitType;
}
static _create(type, _limit, _limitType) {
return new QueryLimitConstraint(type, _limit, _limitType);
}
_apply(query) {
return new Query(query.firestore, query.converter, queryWithLimit(query._query, this._limit, this._limitType));
}
}
/**
* Creates a {@link QueryLimitConstraint} that only returns the first matching
* documents.
*
* @param limit - The maximum number of items to return.
* @returns The created {@link QueryLimitConstraint}.
*/
function limit(limit) {
validatePositiveNumber('limit', limit);
return QueryLimitConstraint._create('limit', limit, "F" /* LimitType.First */);
}
/**
* Creates a {@link QueryLimitConstraint} that only returns the last matching
* documents.
*
* You must specify at least one `orderBy` clause for `limitToLast` queries,
* otherwise an exception will be thrown during execution.
*
* @param limit - The maximum number of items to return.
* @returns The created {@link QueryLimitConstraint}.
*/
function limitToLast(limit) {
validatePositiveNumber('limitToLast', limit);
return QueryLimitConstraint._create('limitToLast', limit, "L" /* LimitType.Last */);
}
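/*
 * limit() keeps the first N matches; limitToLast() keeps the final N and
 * requires an explicit orderBy(), enforced at execution time by
 * validateHasExplicitOrderByForLimitToLast() above. A sketch:
 *
 *   import { getFirestore, collection, query, orderBy, limit, limitToLast } from 'firebase/firestore';
 *
 *   const cities = collection(getFirestore(), 'cities');
 *   const firstTwo = query(cities, orderBy('population'), limit(2));
 *   const lastTwo = query(cities, orderBy('population'), limitToLast(2));
 *   // query(cities, limitToLast(2)) would throw UNIMPLEMENTED when executed.
 */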
/**
* A `QueryStartAtConstraint` is used to exclude documents from the start of a
* result set returned by a Firestore query.
* `QueryStartAtConstraint`s are created by invoking {@link (startAt:1)} or
* {@link (startAfter:1)} and can then be passed to {@link query} to create a
* new query instance that also contains this `QueryStartAtConstraint`.
*/
class QueryStartAtConstraint extends QueryConstraint {
/**
* @internal
*/
constructor(
/** The type of this query constraint */
type, _docOrFields, _inclusive) {
super();
this.type = type;
this._docOrFields = _docOrFields;
this._inclusive = _inclusive;
}
static _create(type, _docOrFields, _inclusive) {
return new QueryStartAtConstraint(type, _docOrFields, _inclusive);
}
_apply(query) {
const bound = newQueryBoundFromDocOrFields(query, this.type, this._docOrFields, this._inclusive);
return new Query(query.firestore, query.converter, queryWithStartAt(query._query, bound));
}
}
function startAt(...docOrFields) {
return QueryStartAtConstraint._create('startAt', docOrFields,
/*inclusive=*/ true);
}
function startAfter(...docOrFields) {
return QueryStartAtConstraint._create('startAfter', docOrFields,
/*inclusive=*/ false);
}
/**
* A `QueryEndAtConstraint` is used to exclude documents from the end of a
* result set returned by a Firestore query.
* `QueryEndAtConstraint`s are created by invoking {@link (endAt:1)} or
* {@link (endBefore:1)} and can then be passed to {@link query} to create a new
* query instance that also contains this `QueryEndAtConstraint`.
*/
class QueryEndAtConstraint extends QueryConstraint {
/**
* @internal
*/
constructor(
/** The type of this query constraint */
type, _docOrFields, _inclusive) {
super();
this.type = type;
this._docOrFields = _docOrFields;
this._inclusive = _inclusive;
}
static _create(type, _docOrFields, _inclusive) {
return new QueryEndAtConstraint(type, _docOrFields, _inclusive);
}
_apply(query) {
const bound = newQueryBoundFromDocOrFields(query, this.type, this._docOrFields, this._inclusive);
return new Query(query.firestore, query.converter, queryWithEndAt(query._query, bound));
}
}
function endBefore(...docOrFields) {
return QueryEndAtConstraint._create('endBefore', docOrFields,
/*inclusive=*/ false);
}
function endAt(...docOrFields) {
return QueryEndAtConstraint._create('endAt', docOrFields,
/*inclusive=*/ true);
}
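/*
 * A pagination sketch using the cursor constraints above; a bound can be
 * built from field values matching the orderBy() clauses or from a
 * DocumentSnapshot (run inside an async function):
 *
 *   import {
 *     getFirestore, collection, query, orderBy, limit, startAfter, getDocs
 *   } from 'firebase/firestore';
 *
 *   const cities = collection(getFirestore(), 'cities');
 *   const page1 = await getDocs(query(cities, orderBy('population'), limit(25)));
 *   const last = page1.docs[page1.docs.length - 1];
 *   // Resumes after the exact document: the implicit key ordering makes the
 *   // position unambiguous (see newQueryBoundFromDocument() below).
 *   const page2 = await getDocs(
 *     query(cities, orderBy('population'), startAfter(last), limit(25)));
 */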
/** Helper function to create a bound from a document or fields */
function newQueryBoundFromDocOrFields(query, methodName, docOrFields, inclusive) {
docOrFields[0] = util.getModularInstance(docOrFields[0]);
if (docOrFields[0] instanceof DocumentSnapshot$1) {
return newQueryBoundFromDocument(query._query, query.firestore._databaseId, methodName, docOrFields[0]._document, inclusive);
}
else {
const reader = newUserDataReader(query.firestore);
return newQueryBoundFromFields(query._query, query.firestore._databaseId, reader, methodName, docOrFields, inclusive);
}
}
function newQueryFilter(query, methodName, dataReader, databaseId, fieldPath, op, value) {
let fieldValue;
if (fieldPath.isKeyField()) {
if (op === "array-contains" /* Operator.ARRAY_CONTAINS */ || op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid Query. You can't perform '${op}' queries on documentId().`);
}
else if (op === "in" /* Operator.IN */ || op === "not-in" /* Operator.NOT_IN */) {
validateDisjunctiveFilterElements(value, op);
const referenceList = [];
for (const arrayValue of value) {
referenceList.push(parseDocumentIdValue(databaseId, query, arrayValue));
}
fieldValue = { arrayValue: { values: referenceList } };
}
else {
fieldValue = parseDocumentIdValue(databaseId, query, value);
}
}
else {
if (op === "in" /* Operator.IN */ ||
op === "not-in" /* Operator.NOT_IN */ ||
op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */) {
validateDisjunctiveFilterElements(value, op);
}
fieldValue = parseQueryValue(dataReader, methodName, value,
/* allowArrays= */ op === "in" /* Operator.IN */ || op === "not-in" /* Operator.NOT_IN */);
}
const filter = FieldFilter.create(fieldPath, op, fieldValue);
return filter;
}
function newQueryOrderBy(query, fieldPath, direction) {
if (query.startAt !== null) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You must not call startAt() or startAfter() before ' +
'calling orderBy().');
}
if (query.endAt !== null) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You must not call endAt() or endBefore() before ' +
'calling orderBy().');
}
const orderBy = new OrderBy(fieldPath, direction);
validateNewOrderBy(query, orderBy);
return orderBy;
}
/**
* Create a `Bound` from a query and a document.
*
* Note that the `Bound` will always include the key of the document
* and so only the provided document will compare equal to the returned
* position.
*
* Will throw if the document does not contain all fields of the order by
* of the query or if any of the fields in the order by are an uncommitted
* server timestamp.
*/
function newQueryBoundFromDocument(query, databaseId, methodName, doc, inclusive) {
if (!doc) {
throw new FirestoreError(Code.NOT_FOUND, `Can't use a DocumentSnapshot that doesn't exist for ` +
`${methodName}().`);
}
const components = [];
// Because people expect to continue/end a query at the exact document
// provided, we need to use the implicit sort order rather than the explicit
// sort order, because it's guaranteed to contain the document key. That way
// the position becomes unambiguous and the query continues/ends exactly at
// the provided document. Without the key (by using the explicit sort
// orders), multiple documents could match the position, yielding duplicate
// results.
for (const orderBy of queryOrderBy(query)) {
if (orderBy.field.isKeyField()) {
components.push(refValue(databaseId, doc.key));
}
else {
const value = doc.data.field(orderBy.field);
if (isServerTimestamp(value)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You are trying to start or end a query using a ' +
'document for which the field "' +
orderBy.field +
'" is an uncommitted server timestamp. (Since the value of ' +
'this field is unknown, you cannot start/end a query with it.)');
}
else if (value !== null) {
components.push(value);
}
else {
const field = orderBy.field.canonicalString();
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. You are trying to start or end a query using a ` +
`document for which the field '${field}' (used as the ` +
`orderBy) does not exist.`);
}
}
}
return new Bound(components, inclusive);
}
/**
* Converts a list of field values to a `Bound` for the given query.
*/
function newQueryBoundFromFields(query, databaseId, dataReader, methodName, values, inclusive) {
// Use explicit order by's because it has to match the query the user made
const orderBy = query.explicitOrderBy;
if (values.length > orderBy.length) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Too many arguments provided to ${methodName}(). ` +
`The number of arguments must be less than or equal to the ` +
`number of orderBy() clauses`);
}
const components = [];
for (let i = 0; i < values.length; i++) {
const rawValue = values[i];
const orderByComponent = orderBy[i];
if (orderByComponent.field.isKeyField()) {
if (typeof rawValue !== 'string') {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. Expected a string for document ID in ` +
`${methodName}(), but got a ${typeof rawValue}`);
}
if (!isCollectionGroupQuery(query) && rawValue.indexOf('/') !== -1) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection and ordering by documentId(), ` +
`the value passed to ${methodName}() must be a plain document ID, but ` +
`'${rawValue}' contains a slash.`);
}
const path = query.path.child(ResourcePath.fromString(rawValue));
if (!DocumentKey.isDocumentKey(path)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection group and ordering by ` +
`documentId(), the value passed to ${methodName}() must result in a ` +
`valid document path, but '${path}' is not because it contains an odd number ` +
`of segments.`);
}
const key = new DocumentKey(path);
components.push(refValue(databaseId, key));
}
else {
const wrapped = parseQueryValue(dataReader, methodName, rawValue);
components.push(wrapped);
}
}
return new Bound(components, inclusive);
}
/**
* Parses the given `documentIdValue` into a `ReferenceValue`, throwing
* appropriate errors if the value is anything other than a `DocumentReference`
* or `string`, or if the string is malformed.
*/
function parseDocumentIdValue(databaseId, query, documentIdValue) {
documentIdValue = util.getModularInstance(documentIdValue);
if (typeof documentIdValue === 'string') {
if (documentIdValue === '') {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. When querying with documentId(), you ' +
'must provide a valid document ID, but it was an empty string.');
}
if (!isCollectionGroupQuery(query) && documentIdValue.indexOf('/') !== -1) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection by ` +
`documentId(), you must provide a plain document ID, but ` +
`'${documentIdValue}' contains a '/' character.`);
}
const path = query.path.child(ResourcePath.fromString(documentIdValue));
if (!DocumentKey.isDocumentKey(path)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection group by ` +
`documentId(), the value provided must result in a valid document path, ` +
`but '${path}' is not because it has an odd number of segments (${path.length}).`);
}
return refValue(databaseId, new DocumentKey(path));
}
else if (documentIdValue instanceof DocumentReference) {
return refValue(databaseId, documentIdValue._key);
}
else {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying with documentId(), you must provide a valid ` +
`string or a DocumentReference, but it was: ` +
`${valueDescription(documentIdValue)}.`);
}
}
/**
* Validates that the value passed into a disjunctive filter satisfies all
* array requirements.
*/
function validateDisjunctiveFilterElements(value, operator) {
if (!Array.isArray(value) || value.length === 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid Query. A non-empty array is required for ' +
`'${operator.toString()}' filters.`);
}
if (value.length > 10) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid Query. '${operator.toString()}' filters support a ` +
'maximum of 10 elements in the value array.');
}
}
/**
* Given an operator, returns the set of operators that cannot be used with it.
*
* Operators in a query must adhere to the following set of rules:
* 1. Only one array operator is allowed.
* 2. Only one disjunctive operator is allowed.
* 3. `NOT_EQUAL` cannot be used with another `NOT_EQUAL` operator.
* 4. `NOT_IN` cannot be used with array, disjunctive, or `NOT_EQUAL` operators.
*
* Array operators: `ARRAY_CONTAINS`, `ARRAY_CONTAINS_ANY`
* Disjunctive operators: `IN`, `ARRAY_CONTAINS_ANY`, `NOT_IN`
*/
function conflictingOps(op) {
switch (op) {
case "!=" /* Operator.NOT_EQUAL */:
return ["!=" /* Operator.NOT_EQUAL */, "not-in" /* Operator.NOT_IN */];
case "array-contains" /* Operator.ARRAY_CONTAINS */:
return [
"array-contains" /* Operator.ARRAY_CONTAINS */,
"array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */,
"not-in" /* Operator.NOT_IN */
];
case "in" /* Operator.IN */:
return ["array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */, "in" /* Operator.IN */, "not-in" /* Operator.NOT_IN */];
case "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */:
return [
"array-contains" /* Operator.ARRAY_CONTAINS */,
"array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */,
"in" /* Operator.IN */,
"not-in" /* Operator.NOT_IN */
];
case "not-in" /* Operator.NOT_IN */:
return [
"array-contains" /* Operator.ARRAY_CONTAINS */,
"array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */,
"in" /* Operator.IN */,
"not-in" /* Operator.NOT_IN */,
"!=" /* Operator.NOT_EQUAL */
];
default:
return [];
}
}
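/*
 * Illustrative combinations, as checked against the table above by
 * validateNewFieldFilter() below (rules as encoded in this build):
 *
 *   where('a', 'array-contains', 1), where('b', 'array-contains', 2) // throws: one array op only
 *   where('a', 'in', [1]), where('b', 'not-in', [2])                 // throws: one disjunctive op only
 *   where('a', '==', 1), where('b', '==', 2)                         // ok: equality composes freely
 */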
function validateNewFieldFilter(query, fieldFilter) {
if (fieldFilter.isInequality()) {
const existingInequality = getInequalityFilterField(query);
const newInequality = fieldFilter.field;
if (existingInequality !== null &&
!existingInequality.isEqual(newInequality)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. All where filters with an inequality' +
' (<, <=, !=, not-in, >, or >=) must be on the same field. But you have' +
` inequality filters on '${existingInequality.toString()}'` +
` and '${newInequality.toString()}'`);
}
const firstOrderByField = getFirstOrderByField(query);
if (firstOrderByField !== null) {
validateOrderByAndInequalityMatch(query, newInequality, firstOrderByField);
}
}
const conflictingOp = findOpInsideFilters(query.filters, conflictingOps(fieldFilter.op));
if (conflictingOp !== null) {
// Special case when it's a duplicate op to give a slightly clearer error message.
if (conflictingOp === fieldFilter.op) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You cannot use more than one ' +
`'${fieldFilter.op.toString()}' filter.`);
}
else {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. You cannot use '${fieldFilter.op.toString()}' filters ` +
`with '${conflictingOp.toString()}' filters.`);
}
}
}
function validateNewFilter(query, filter) {
let testQuery = query;
const subFilters = filter.getFlattenedFilters();
for (const subFilter of subFilters) {
validateNewFieldFilter(testQuery, subFilter);
testQuery = queryWithAddedFilter(testQuery, subFilter);
}
}
// Checks if any of the provided filter operators are included in the given list of filters and
// returns the first one that is, or null if none are.
function findOpInsideFilters(filters, operators) {
for (const filter of filters) {
for (const fieldFilter of filter.getFlattenedFilters()) {
if (operators.indexOf(fieldFilter.op) >= 0) {
return fieldFilter.op;
}
}
}
return null;
}
function validateNewOrderBy(query, orderBy) {
if (getFirstOrderByField(query) === null) {
// This is the first order by. It must match any inequality.
const inequalityField = getInequalityFilterField(query);
if (inequalityField !== null) {
validateOrderByAndInequalityMatch(query, inequalityField, orderBy.field);
}
}
}
function validateOrderByAndInequalityMatch(baseQuery, inequality, orderBy) {
if (!orderBy.isEqual(inequality)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. You have a where filter with an inequality ` +
`(<, <=, !=, not-in, >, or >=) on field '${inequality.toString()}' ` +
`and so you must also use '${inequality.toString()}' ` +
`as your first argument to orderBy(), but your first orderBy() ` +
`is on field '${orderBy.toString()}' instead.`);
}
}
function validateQueryFilterConstraint(functionName, queryConstraint) {
if (!(queryConstraint instanceof QueryFieldFilterConstraint) &&
!(queryConstraint instanceof QueryCompositeFilterConstraint)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${functionName}() requires AppliableConstraints created with a call to 'where(...)', 'or(...)', or 'and(...)'.`);
}
}
function validateQueryConstraintArray(queryConstraint) {
const compositeFilterCount = queryConstraint.filter(filter => filter instanceof QueryCompositeFilterConstraint).length;
const fieldFilterCount = queryConstraint.filter(filter => filter instanceof QueryFieldFilterConstraint).length;
if (compositeFilterCount > 1 ||
(compositeFilterCount > 0 && fieldFilterCount > 0)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. When using composite filters, you cannot use ' +
'more than one filter at the top level. Consider nesting the multiple ' +
'filters within an `and(...)` statement. For example: ' +
'change `query(query, where(...), or(...))` to ' +
'`query(query, and(where(...), or(...)))`.');
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Converts Firestore's internal types to the JavaScript types that we expose
* to the user.
*
* @internal
*/
class AbstractUserDataWriter {
convertValue(value, serverTimestampBehavior = 'none') {
switch (typeOrder(value)) {
case 0 /* TypeOrder.NullValue */:
return null;
case 1 /* TypeOrder.BooleanValue */:
return value.booleanValue;
case 2 /* TypeOrder.NumberValue */:
return normalizeNumber(value.integerValue || value.doubleValue);
case 3 /* TypeOrder.TimestampValue */:
return this.convertTimestamp(value.timestampValue);
case 4 /* TypeOrder.ServerTimestampValue */:
return this.convertServerTimestamp(value, serverTimestampBehavior);
case 5 /* TypeOrder.StringValue */:
return value.stringValue;
case 6 /* TypeOrder.BlobValue */:
return this.convertBytes(normalizeByteString(value.bytesValue));
case 7 /* TypeOrder.RefValue */:
return this.convertReference(value.referenceValue);
case 8 /* TypeOrder.GeoPointValue */:
return this.convertGeoPoint(value.geoPointValue);
case 9 /* TypeOrder.ArrayValue */:
return this.convertArray(value.arrayValue, serverTimestampBehavior);
case 10 /* TypeOrder.ObjectValue */:
return this.convertObject(value.mapValue, serverTimestampBehavior);
default:
throw fail();
}
}
convertObject(mapValue, serverTimestampBehavior) {
const result = {};
forEach(mapValue.fields, (key, value) => {
result[key] = this.convertValue(value, serverTimestampBehavior);
});
return result;
}
convertGeoPoint(value) {
return new GeoPoint(normalizeNumber(value.latitude), normalizeNumber(value.longitude));
}
convertArray(arrayValue, serverTimestampBehavior) {
return (arrayValue.values || []).map(value => this.convertValue(value, serverTimestampBehavior));
}
convertServerTimestamp(value, serverTimestampBehavior) {
switch (serverTimestampBehavior) {
case 'previous':
const previousValue = getPreviousValue(value);
if (previousValue == null) {
return null;
}
return this.convertValue(previousValue, serverTimestampBehavior);
case 'estimate':
return this.convertTimestamp(getLocalWriteTime(value));
default:
return null;
}
}
convertTimestamp(value) {
const normalizedValue = normalizeTimestamp(value);
return new Timestamp(normalizedValue.seconds, normalizedValue.nanos);
}
convertDocumentKey(name, expectedDatabaseId) {
const resourcePath = ResourcePath.fromString(name);
hardAssert(isValidResourceName(resourcePath));
const databaseId = new DatabaseId(resourcePath.get(1), resourcePath.get(3));
const key = new DocumentKey(resourcePath.popFirst(5));
if (!databaseId.isEqual(expectedDatabaseId)) {
// TODO(b/64130202): Somehow support foreign references.
logError(`Document ${key} contains a document ` +
`reference within a different database (` +
`${databaseId.projectId}/${databaseId.database}) which is not ` +
`supported. It will be treated as a reference in the current ` +
`database (${expectedDatabaseId.projectId}/${expectedDatabaseId.database}) ` +
`instead.`);
}
return key;
}
}
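/*
 * The serverTimestampBehavior parameter above surfaces publicly through
 * SnapshotOptions. A sketch of the three behaviors for a pending
 * serverTimestamp() write (public API assumed):
 *
 *   import { getFirestore, doc, onSnapshot, setDoc, serverTimestamp } from 'firebase/firestore';
 *
 *   const ref = doc(getFirestore(), 'cities', 'SF');
 *   onSnapshot(ref, snap => {
 *     snap.data();                                 // pending timestamp -> null
 *     snap.data({ serverTimestamps: 'estimate' }); // local write time
 *     snap.data({ serverTimestamps: 'previous' }); // prior value, or null
 *   });
 *   setDoc(ref, { updatedAt: serverTimestamp() }, { merge: true });
 */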
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Converts a custom model object of type T into `DocumentData` by applying the
* converter if it exists.
*
* This function is used when converting user objects to `DocumentData`
* because we want to provide the user with a more specific error message if
* their `set()` call fails due to invalid data originating from a `toFirestore()`
* call.
*/
function applyFirestoreDataConverter(converter, value, options) {
let convertedValue;
if (converter) {
if (options && (options.merge || options.mergeFields)) {
// Cast to `any` in order to satisfy the union type constraint on
// toFirestore().
// eslint-disable-next-line @typescript-eslint/no-explicit-any
convertedValue = converter.toFirestore(value, options);
}
else {
convertedValue = converter.toFirestore(value);
}
}
else {
convertedValue = value;
}
return convertedValue;
}
class LiteUserDataWriter extends AbstractUserDataWriter {
constructor(firestore) {
super();
this.firestore = firestore;
}
convertBytes(bytes) {
return new Bytes(bytes);
}
convertReference(name) {
const key = this.convertDocumentKey(name, this.firestore._databaseId);
return new DocumentReference(this.firestore, /* converter= */ null, key);
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Metadata about a snapshot, describing the state of the snapshot.
*/
class SnapshotMetadata {
/** @hideconstructor */
constructor(hasPendingWrites, fromCache) {
this.hasPendingWrites = hasPendingWrites;
this.fromCache = fromCache;
}
/**
* Returns true if this `SnapshotMetadata` is equal to the provided one.
*
* @param other - The `SnapshotMetadata` to compare against.
* @returns true if this `SnapshotMetadata` is equal to the provided one.
*/
isEqual(other) {
return (this.hasPendingWrites === other.hasPendingWrites &&
this.fromCache === other.fromCache);
}
}
/**
* A `DocumentSnapshot` contains data read from a document in your Firestore
* database. The data can be extracted with `.data()` or `.get()` to
* get a specific field.
*
* For a `DocumentSnapshot` that points to a non-existing document, any data
* access will return 'undefined'. You can use the `exists()` method to
* explicitly verify a document's existence.
*/
class DocumentSnapshot extends DocumentSnapshot$1 {
/** @hideconstructor protected */
constructor(_firestore, userDataWriter, key, document, metadata, converter) {
super(_firestore, userDataWriter, key, document, converter);
this._firestore = _firestore;
this._firestoreImpl = _firestore;
this.metadata = metadata;
}
/**
* Returns whether or not the data exists. True if the document exists.
*/
exists() {
return super.exists();
}
/**
* Retrieves all fields in the document as an `Object`. Returns `undefined` if
* the document doesn't exist.
*
* By default, `serverTimestamp()` values that have not yet been
* set to their final value will be returned as `null`. You can override
* this by passing an options object.
*
* @param options - An options object to configure how data is retrieved from
* the snapshot (for example the desired behavior for server timestamps that
* have not yet been set to their final value).
* @returns An `Object` containing all fields in the document or `undefined` if
* the document doesn't exist.
*/
data(options = {}) {
if (!this._document) {
return undefined;
}
else if (this._converter) {
// We only want to use the converter and create a new DocumentSnapshot
// if a converter has been provided.
const snapshot = new QueryDocumentSnapshot(this._firestore, this._userDataWriter, this._key, this._document, this.metadata,
/* converter= */ null);
return this._converter.fromFirestore(snapshot, options);
}
else {
return this._userDataWriter.convertValue(this._document.data.value, options.serverTimestamps);
}
}
/**
* Retrieves the field specified by `fieldPath`. Returns `undefined` if the
* document or field doesn't exist.
*
* By default, a `serverTimestamp()` that has not yet been set to
* its final value will be returned as `null`. You can override this by
* passing an options object.
*
* @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
* field.
* @param options - An options object to configure how the field is retrieved
* from the snapshot (for example the desired behavior for server timestamps
* that have not yet been set to their final value).
* @returns The data at the specified field location or undefined if no such
* field exists in the document.
*/
// We are using `any` here to avoid an explicit cast by our users.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
get(fieldPath, options = {}) {
if (this._document) {
const value = this._document.data.field(fieldPathFromArgument('DocumentSnapshot.get', fieldPath));
if (value !== null) {
return this._userDataWriter.convertValue(value, options.serverTimestamps);
}
}
return undefined;
}
}
/**
* A `QueryDocumentSnapshot` contains data read from a document in your
* Firestore database as part of a query. The document is guaranteed to exist
* and its data can be extracted with `.data()` or `.get()` to get a
* specific field.
*
* A `QueryDocumentSnapshot` offers the same API surface as a
* `DocumentSnapshot`. Since query results contain only existing documents, the
* `exists` property will always be true and `data()` will never return
* 'undefined'.
*/
class QueryDocumentSnapshot extends DocumentSnapshot {
/**
* Retrieves all fields in the document as an `Object`.
*
* By default, `serverTimestamp()` values that have not yet been
* set to their final value will be returned as `null`. You can override
* this by passing an options object.
*
* @override
* @param options - An options object to configure how data is retrieved from
* the snapshot (for example the desired behavior for server timestamps that
* have not yet been set to their final value).
* @returns An `Object` containing all fields in the document.
*/
data(options = {}) {
return super.data(options);
}
}
/**
* A `QuerySnapshot` contains zero or more `DocumentSnapshot` objects
* representing the results of a query. The documents can be accessed as an
* array via the `docs` property or enumerated using the `forEach` method. The
* number of documents can be determined via the `empty` and `size`
* properties.
*/
class QuerySnapshot {
/** @hideconstructor */
constructor(_firestore, _userDataWriter, query, _snapshot) {
this._firestore = _firestore;
this._userDataWriter = _userDataWriter;
this._snapshot = _snapshot;
this.metadata = new SnapshotMetadata(_snapshot.hasPendingWrites, _snapshot.fromCache);
this.query = query;
}
/** An array of all the documents in the `QuerySnapshot`. */
get docs() {
const result = [];
this.forEach(doc => result.push(doc));
return result;
}
/** The number of documents in the `QuerySnapshot`. */
get size() {
return this._snapshot.docs.size;
}
/** True if there are no documents in the `QuerySnapshot`. */
get empty() {
return this.size === 0;
}
/**
* Enumerates all of the documents in the `QuerySnapshot`.
*
* @param callback - A callback to be called with a `QueryDocumentSnapshot` for
* each document in the snapshot.
* @param thisArg - The `this` binding for the callback.
*/
forEach(callback, thisArg) {
this._snapshot.docs.forEach(doc => {
callback.call(thisArg, new QueryDocumentSnapshot(this._firestore, this._userDataWriter, doc.key, doc, new SnapshotMetadata(this._snapshot.mutatedKeys.has(doc.key), this._snapshot.fromCache), this.query.converter));
});
}
/**
* Returns an array of the document changes since the last snapshot. If this
* is the first snapshot, all documents will be in the list as 'added'
* changes.
*
* @param options - `SnapshotListenOptions` that control whether metadata-only
* changes (i.e. only `DocumentSnapshot.metadata` changed) should trigger
* snapshot events.
*/
docChanges(options = {}) {
const includeMetadataChanges = !!options.includeMetadataChanges;
if (includeMetadataChanges && this._snapshot.excludesMetadataChanges) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'To include metadata changes with your document changes, you must ' +
'also pass { includeMetadataChanges:true } to onSnapshot().');
}
if (!this._cachedChanges ||
this._cachedChangesIncludeMetadataChanges !== includeMetadataChanges) {
this._cachedChanges = changesFromSnapshot(this, includeMetadataChanges);
this._cachedChangesIncludeMetadataChanges = includeMetadataChanges;
}
return this._cachedChanges;
}
}
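// Usage sketch (editor's addition): consuming a QuerySnapshot delivered by
// getDocs() or onSnapshot(); the logged fields are illustrative assumptions.
//
//   console.log(`${snapshot.size} result(s), fromCache=${snapshot.metadata.fromCache}`);
//   snapshot.forEach(docSnap => console.log(docSnap.id, docSnap.data()));
//   for (const change of snapshot.docChanges()) {
//     console.log(change.type, change.doc.id, change.oldIndex, change.newIndex);
//   }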
/** Calculates the array of `DocumentChange`s for a given `ViewSnapshot`. */
function changesFromSnapshot(querySnapshot, includeMetadataChanges) {
if (querySnapshot._snapshot.oldDocs.isEmpty()) {
let index = 0;
return querySnapshot._snapshot.docChanges.map(change => {
const doc = new QueryDocumentSnapshot(querySnapshot._firestore, querySnapshot._userDataWriter, change.doc.key, change.doc, new SnapshotMetadata(querySnapshot._snapshot.mutatedKeys.has(change.doc.key), querySnapshot._snapshot.fromCache), querySnapshot.query.converter);
return {
type: 'added',
doc,
oldIndex: -1,
newIndex: index++
};
});
}
else {
        // A `DocumentSet` that is updated incrementally as changes are applied,
        // used to look up the index of a document.
let indexTracker = querySnapshot._snapshot.oldDocs;
return querySnapshot._snapshot.docChanges
.filter(change => includeMetadataChanges || change.type !== 3 /* ChangeType.Metadata */)
.map(change => {
const doc = new QueryDocumentSnapshot(querySnapshot._firestore, querySnapshot._userDataWriter, change.doc.key, change.doc, new SnapshotMetadata(querySnapshot._snapshot.mutatedKeys.has(change.doc.key), querySnapshot._snapshot.fromCache), querySnapshot.query.converter);
let oldIndex = -1;
let newIndex = -1;
if (change.type !== 0 /* ChangeType.Added */) {
oldIndex = indexTracker.indexOf(change.doc.key);
indexTracker = indexTracker.delete(change.doc.key);
}
if (change.type !== 1 /* ChangeType.Removed */) {
indexTracker = indexTracker.add(change.doc);
newIndex = indexTracker.indexOf(change.doc.key);
}
return {
type: resultChangeType(change.type),
doc,
oldIndex,
newIndex
};
});
}
}
function resultChangeType(type) {
switch (type) {
case 0 /* ChangeType.Added */:
return 'added';
case 2 /* ChangeType.Modified */:
case 3 /* ChangeType.Metadata */:
return 'modified';
case 1 /* ChangeType.Removed */:
return 'removed';
default:
return fail();
}
}
// TODO(firestoreexp): Add tests for snapshotEqual with different snapshot
// metadata
/**
* Returns true if the provided snapshots are equal.
*
* @param left - A snapshot to compare.
* @param right - A snapshot to compare.
* @returns true if the snapshots are equal.
*/
function snapshotEqual(left, right) {
if (left instanceof DocumentSnapshot && right instanceof DocumentSnapshot) {
return (left._firestore === right._firestore &&
left._key.isEqual(right._key) &&
(left._document === null
? right._document === null
: left._document.isEqual(right._document)) &&
left._converter === right._converter);
}
else if (left instanceof QuerySnapshot && right instanceof QuerySnapshot) {
return (left._firestore === right._firestore &&
queryEqual(left.query, right.query) &&
left.metadata.isEqual(right.metadata) &&
left._snapshot.isEqual(right._snapshot));
}
return false;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Reads the document referred to by this `DocumentReference`.
*
* Note: `getDoc()` attempts to provide up-to-date data when possible by waiting
* for data from the server, but it may return cached data or fail if you are
* offline and the server cannot be reached. To specify this behavior, invoke
* {@link getDocFromCache} or {@link getDocFromServer}.
*
* @param reference - The reference of the document to fetch.
* @returns A Promise resolved with a `DocumentSnapshot` containing the
* current document contents.
*/
function getDoc(reference) {
reference = cast(reference, DocumentReference);
const firestore = cast(reference.firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
return firestoreClientGetDocumentViaSnapshotListener(client, reference._key).then(snapshot => convertToDocSnapshot(firestore, reference, snapshot));
}
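// Usage sketch (editor's addition, not part of the SDK): fetching a single
// document with getDoc() inside an async function. The `db` instance and the
// 'cities/SF' path are illustrative assumptions.
//
//   const { getFirestore, doc, getDoc } = require('firebase/firestore');
//   const db = getFirestore();
//   const snap = await getDoc(doc(db, 'cities', 'SF'));
//   if (snap.exists()) {
//     console.log('Data:', snap.data());
//   } else {
//     console.log('No such document');
//   }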
class ExpUserDataWriter extends AbstractUserDataWriter {
constructor(firestore) {
super();
this.firestore = firestore;
}
convertBytes(bytes) {
return new Bytes(bytes);
}
convertReference(name) {
const key = this.convertDocumentKey(name, this.firestore._databaseId);
return new DocumentReference(this.firestore, /* converter= */ null, key);
}
}
/**
 * Reads the document referred to by this `DocumentReference` from cache.
 * Returns an error if the document is not currently cached.
 *
 * @param reference - The reference of the document to fetch.
 * @returns A `Promise` resolved with a `DocumentSnapshot` containing the
 * current document contents.
 */
function getDocFromCache(reference) {
reference = cast(reference, DocumentReference);
const firestore = cast(reference.firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
const userDataWriter = new ExpUserDataWriter(firestore);
return firestoreClientGetDocumentFromLocalCache(client, reference._key).then(doc => new DocumentSnapshot(firestore, userDataWriter, reference._key, doc, new SnapshotMetadata(doc !== null && doc.hasLocalMutations,
/* fromCache= */ true), reference.converter));
}
/**
 * Reads the document referred to by this `DocumentReference` from the server.
 * Returns an error if the network is not available.
 *
 * @param reference - The reference of the document to fetch.
 * @returns A `Promise` resolved with a `DocumentSnapshot` containing the
 * current document contents.
 */
function getDocFromServer(reference) {
reference = cast(reference, DocumentReference);
const firestore = cast(reference.firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
return firestoreClientGetDocumentViaSnapshotListener(client, reference._key, {
source: 'server'
}).then(snapshot => convertToDocSnapshot(firestore, reference, snapshot));
}
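// Usage sketch (editor's addition): pinning the read source. getDocFromCache()
// rejects if the document is not cached, and getDocFromServer() rejects while
// offline; `ref` is assumed to be a DocumentReference from the sketch above.
//
//   try {
//     const cached = await getDocFromCache(ref);
//     console.log('from cache:', cached.data());
//   } catch (e) {
//     const fresh = await getDocFromServer(ref);
//     console.log('from server:', fresh.data());
//   }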
/**
 * Executes the query and returns the results as a `QuerySnapshot`.
 *
 * Note: `getDocs()` attempts to provide up-to-date data when possible by
 * waiting for data from the server, but it may return cached data or fail if
 * you are offline and the server cannot be reached. To specify this behavior,
 * invoke {@link getDocsFromCache} or {@link getDocsFromServer}.
 *
 * @param query - The `Query` to execute.
 * @returns A `Promise` that will be resolved with the results of the query.
 */
function getDocs(query) {
query = cast(query, Query);
const firestore = cast(query.firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
const userDataWriter = new ExpUserDataWriter(firestore);
validateHasExplicitOrderByForLimitToLast(query._query);
return firestoreClientGetDocumentsViaSnapshotListener(client, query._query).then(snapshot => new QuerySnapshot(firestore, userDataWriter, query, snapshot));
}
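// Usage sketch (editor's addition): executing a query once with getDocs().
// The collection name and filter are illustrative assumptions.
//
//   const { collection, query, where, getDocs } = require('firebase/firestore');
//   const q = query(collection(db, 'cities'), where('capital', '==', true));
//   const snapshot = await getDocs(q);
//   snapshot.forEach(d => console.log(d.id, '=>', d.data()));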
/**
 * Executes the query and returns the results as a `QuerySnapshot` from cache.
 * Returns an empty result set if no documents matching the query are currently
 * cached.
 *
 * @param query - The `Query` to execute.
 * @returns A `Promise` that will be resolved with the results of the query.
 */
function getDocsFromCache(query) {
query = cast(query, Query);
const firestore = cast(query.firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
const userDataWriter = new ExpUserDataWriter(firestore);
return firestoreClientGetDocumentsFromLocalCache(client, query._query).then(snapshot => new QuerySnapshot(firestore, userDataWriter, query, snapshot));
}
/**
 * Executes the query and returns the results as a `QuerySnapshot` from the
 * server. Returns an error if the network is not available.
 *
 * @param query - The `Query` to execute.
 * @returns A `Promise` that will be resolved with the results of the query.
 */
function getDocsFromServer(query) {
query = cast(query, Query);
const firestore = cast(query.firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
const userDataWriter = new ExpUserDataWriter(firestore);
return firestoreClientGetDocumentsViaSnapshotListener(client, query._query, {
source: 'server'
}).then(snapshot => new QuerySnapshot(firestore, userDataWriter, query, snapshot));
}
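/**
 * Writes to the document referred to by the specified `DocumentReference`. If
 * the document does not yet exist, it will be created. If you provide `merge`
 * or `mergeFields` in `options`, the provided data can be merged into an
 * existing document.
 *
 * @param reference - A reference to the document to write.
 * @param data - A map of the fields and values for the document.
 * @param options - An object to configure the set behavior.
 * @returns A `Promise` resolved once the data has been successfully written
 * to the backend (note that it won't resolve while you're offline).
 */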
function setDoc(reference, data, options) {
reference = cast(reference, DocumentReference);
const firestore = cast(reference.firestore, Firestore);
const convertedValue = applyFirestoreDataConverter(reference.converter, data, options);
const dataReader = newUserDataReader(firestore);
const parsed = parseSetData(dataReader, 'setDoc', reference._key, convertedValue, reference.converter !== null, options);
const mutation = parsed.toMutation(reference._key, Precondition.none());
return executeWrite(firestore, [mutation]);
}
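// Usage sketch (editor's addition): creating/overwriting vs. merging with
// setDoc(). The path and fields are illustrative assumptions.
//
//   await setDoc(doc(db, 'cities', 'LA'), { name: 'Los Angeles' });
//   await setDoc(doc(db, 'cities', 'LA'), { capital: false }, { merge: true });
/**
 * Updates fields in the document referred to by the specified
 * `DocumentReference`. The update will fail if applied to a document that does
 * not exist.
 *
 * Nested fields can be updated by providing dot-separated field path strings
 * or by providing `FieldPath` objects.
 *
 * @param reference - A reference to the document to update.
 * @param fieldOrUpdateData - Either an object containing the fields and values
 * to update, or the first field to update (as a string or `FieldPath`).
 * @param value - The first value (when a field path is provided).
 * @param moreFieldsAndValues - Additional key/value pairs.
 * @returns A `Promise` resolved once the data has been successfully written
 * to the backend (note that it won't resolve while you're offline).
 */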
function updateDoc(reference, fieldOrUpdateData, value, ...moreFieldsAndValues) {
reference = cast(reference, DocumentReference);
const firestore = cast(reference.firestore, Firestore);
const dataReader = newUserDataReader(firestore);
// For Compat types, we have to "extract" the underlying types before
// performing validation.
fieldOrUpdateData = util.getModularInstance(fieldOrUpdateData);
let parsed;
if (typeof fieldOrUpdateData === 'string' ||
fieldOrUpdateData instanceof FieldPath) {
parsed = parseUpdateVarargs(dataReader, 'updateDoc', reference._key, fieldOrUpdateData, value, moreFieldsAndValues);
}
else {
parsed = parseUpdateData(dataReader, 'updateDoc', reference._key, fieldOrUpdateData);
}
const mutation = parsed.toMutation(reference._key, Precondition.exists(true));
return executeWrite(firestore, [mutation]);
}
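// Usage sketch (editor's addition): the two updateDoc() call forms. The field
// names are illustrative assumptions.
//
//   await updateDoc(ref, { population: 4000000 });      // object form
//   await updateDoc(ref, 'stats.population', 4000000,   // varargs form
//                   'meta.updatedAt', serverTimestamp());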
/**
* Deletes the document referred to by the specified `DocumentReference`.
*
* @param reference - A reference to the document to delete.
* @returns A Promise resolved once the document has been successfully
* deleted from the backend (note that it won't resolve while you're offline).
*/
function deleteDoc(reference) {
const firestore = cast(reference.firestore, Firestore);
const mutations = [new DeleteMutation(reference._key, Precondition.none())];
return executeWrite(firestore, mutations);
}
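// Usage sketch (editor's addition): deleting a document; the path is an
// illustrative assumption.
//
//   await deleteDoc(doc(db, 'cities', 'LA'));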
/**
 * Adds a new document to the specified `CollectionReference` with the given
 * data, assigning it a document ID automatically.
 *
 * @param reference - A reference to the collection to add this document to.
 * @param data - An Object containing the data for the new document.
 * @returns A `Promise` resolved with a `DocumentReference` pointing to the
 * newly created document after it has been written to the backend (note that
 * it won't resolve while you're offline).
 */
function addDoc(reference, data) {
const firestore = cast(reference.firestore, Firestore);
const docRef = doc(reference);
const convertedValue = applyFirestoreDataConverter(reference.converter, data);
const dataReader = newUserDataReader(reference.firestore);
const parsed = parseSetData(dataReader, 'addDoc', docRef._key, convertedValue, reference.converter !== null, {});
const mutation = parsed.toMutation(docRef._key, Precondition.exists(false));
return executeWrite(firestore, [mutation]).then(() => docRef);
}
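// Usage sketch (editor's addition): adding an auto-ID document with addDoc().
// The collection name is an illustrative assumption.
//
//   const newRef = await addDoc(collection(db, 'cities'), { name: 'Tokyo' });
//   console.log('Created', newRef.id);
/**
 * Attaches a listener for snapshot events on a `DocumentReference` or a
 * `Query`. Accepts either an observer object or separate `next`/`error`/
 * `complete` callbacks, optionally preceded by `SnapshotListenOptions`.
 *
 * @returns An unsubscribe function that can be called to cancel the listener.
 */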
function onSnapshot(reference, ...args) {
var _a, _b, _c;
reference = util.getModularInstance(reference);
let options = {
includeMetadataChanges: false
};
let currArg = 0;
if (typeof args[currArg] === 'object' && !isPartialObserver(args[currArg])) {
options = args[currArg];
currArg++;
}
const internalOptions = {
includeMetadataChanges: options.includeMetadataChanges
};
if (isPartialObserver(args[currArg])) {
const userObserver = args[currArg];
args[currArg] = (_a = userObserver.next) === null || _a === void 0 ? void 0 : _a.bind(userObserver);
args[currArg + 1] = (_b = userObserver.error) === null || _b === void 0 ? void 0 : _b.bind(userObserver);
args[currArg + 2] = (_c = userObserver.complete) === null || _c === void 0 ? void 0 : _c.bind(userObserver);
}
let observer;
let firestore;
let internalQuery;
if (reference instanceof DocumentReference) {
firestore = cast(reference.firestore, Firestore);
internalQuery = newQueryForPath(reference._key.path);
observer = {
next: snapshot => {
if (args[currArg]) {
args[currArg](convertToDocSnapshot(firestore, reference, snapshot));
}
},
error: args[currArg + 1],
complete: args[currArg + 2]
};
}
else {
const query = cast(reference, Query);
firestore = cast(query.firestore, Firestore);
internalQuery = query._query;
const userDataWriter = new ExpUserDataWriter(firestore);
observer = {
next: snapshot => {
if (args[currArg]) {
args[currArg](new QuerySnapshot(firestore, userDataWriter, query, snapshot));
}
},
error: args[currArg + 1],
complete: args[currArg + 2]
};
validateHasExplicitOrderByForLimitToLast(reference._query);
}
const client = ensureFirestoreConfigured(firestore);
return firestoreClientListen(client, internalQuery, internalOptions, observer);
}
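// Usage sketch (editor's addition): listening to a query and later detaching.
// The collection name is an illustrative assumption.
//
//   const unsubscribe = onSnapshot(
//     collection(db, 'cities'),
//     { includeMetadataChanges: true },
//     snapshot => console.log('docs:', snapshot.size),
//     error => console.error('listen failed', error)
//   );
//   // ... later:
//   unsubscribe();
/**
 * Attaches a listener that fires every time all snapshot listeners are in sync
 * with each other, i.e. after all listeners affected by a given change have
 * fired. Accepts a callback or an observer object.
 *
 * @returns An unsubscribe function that can be called to cancel the listener.
 */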
function onSnapshotsInSync(firestore, arg) {
firestore = cast(firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
const observer = isPartialObserver(arg)
? arg
: {
next: arg
};
return firestoreClientAddSnapshotsInSyncListener(client, observer);
}
/**
* Locally writes `mutations` on the async queue.
* @internal
*/
function executeWrite(firestore, mutations) {
const client = ensureFirestoreConfigured(firestore);
return firestoreClientWrite(client, mutations);
}
/**
* Converts a {@link ViewSnapshot} that contains the single document specified by `ref`
* to a {@link DocumentSnapshot}.
*/
function convertToDocSnapshot(firestore, ref, snapshot) {
const doc = snapshot.docs.get(ref._key);
const userDataWriter = new ExpUserDataWriter(firestore);
return new DocumentSnapshot(firestore, userDataWriter, ref._key, doc, new SnapshotMetadata(snapshot.hasPendingWrites, snapshot.fromCache), ref.converter);
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Compares two `AggregateQuerySnapshot` instances for equality.
*
* Two `AggregateQuerySnapshot` instances are considered "equal" if they have
* underlying queries that compare equal, and the same data.
*
* @param left - The first `AggregateQuerySnapshot` to compare.
* @param right - The second `AggregateQuerySnapshot` to compare.
*
* @returns `true` if the objects are "equal", as defined above, or `false`
* otherwise.
*/
function aggregateQuerySnapshotEqual(left, right) {
return (queryEqual(left.query, right.query) && util.deepEqual(left.data(), right.data()));
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Calculates the number of documents in the result set of the given query,
* without actually downloading the documents.
*
* Using this function to count the documents is efficient because only the
* final count, not the documents' data, is downloaded. This function can even
* count the documents if the result set would be prohibitively large to
* download entirely (e.g. thousands of documents).
*
 * The result received from the server is presented, unaltered, without
 * considering any local state. That is, documents in the local cache are not
 * taken into consideration, nor are local modifications that have not yet
 * been synchronized with the server. Previously-downloaded results, if any,
 * are not used: every request using this source necessarily involves a round
 * trip to the server.
*
* @param query - The query whose result set size to calculate.
* @returns A Promise that will be resolved with the count; the count can be
* retrieved from `snapshot.data().count`, where `snapshot` is the
* `AggregateQuerySnapshot` to which the returned Promise resolves.
*/
function getCountFromServer(query) {
const firestore = cast(query.firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
const userDataWriter = new ExpUserDataWriter(firestore);
return firestoreClientRunCountQuery(client, query, userDataWriter);
}
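// Usage sketch (editor's addition): counting matching documents without
// downloading them; the query is an illustrative assumption.
//
//   const agg = await getCountFromServer(collection(db, 'cities'));
//   console.log('count:', agg.data().count);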
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const DEFAULT_TRANSACTION_OPTIONS = {
maxAttempts: 5
};
function validateTransactionOptions(options) {
if (options.maxAttempts < 1) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Max attempts must be at least 1');
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A write batch, used to perform multiple writes as a single atomic unit.
*
* A `WriteBatch` object can be acquired by calling {@link writeBatch}. It
* provides methods for adding writes to the write batch. None of the writes
* will be committed (or visible locally) until {@link WriteBatch.commit} is
* called.
*/
class WriteBatch {
/** @hideconstructor */
constructor(_firestore, _commitHandler) {
this._firestore = _firestore;
this._commitHandler = _commitHandler;
this._mutations = [];
this._committed = false;
this._dataReader = newUserDataReader(_firestore);
}
set(documentRef, data, options) {
this._verifyNotCommitted();
const ref = validateReference(documentRef, this._firestore);
const convertedValue = applyFirestoreDataConverter(ref.converter, data, options);
const parsed = parseSetData(this._dataReader, 'WriteBatch.set', ref._key, convertedValue, ref.converter !== null, options);
this._mutations.push(parsed.toMutation(ref._key, Precondition.none()));
return this;
}
update(documentRef, fieldOrUpdateData, value, ...moreFieldsAndValues) {
this._verifyNotCommitted();
const ref = validateReference(documentRef, this._firestore);
// For Compat types, we have to "extract" the underlying types before
// performing validation.
fieldOrUpdateData = util.getModularInstance(fieldOrUpdateData);
let parsed;
if (typeof fieldOrUpdateData === 'string' ||
fieldOrUpdateData instanceof FieldPath) {
parsed = parseUpdateVarargs(this._dataReader, 'WriteBatch.update', ref._key, fieldOrUpdateData, value, moreFieldsAndValues);
}
else {
parsed = parseUpdateData(this._dataReader, 'WriteBatch.update', ref._key, fieldOrUpdateData);
}
this._mutations.push(parsed.toMutation(ref._key, Precondition.exists(true)));
return this;
}
/**
* Deletes the document referred to by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be deleted.
* @returns This `WriteBatch` instance. Used for chaining method calls.
*/
delete(documentRef) {
this._verifyNotCommitted();
const ref = validateReference(documentRef, this._firestore);
this._mutations = this._mutations.concat(new DeleteMutation(ref._key, Precondition.none()));
return this;
}
/**
* Commits all of the writes in this write batch as a single atomic unit.
*
* The result of these writes will only be reflected in document reads that
* occur after the returned promise resolves. If the client is offline, the
* write fails. If you would like to see local modifications or buffer writes
* until the client is online, use the full Firestore SDK.
*
* @returns A `Promise` resolved once all of the writes in the batch have been
* successfully written to the backend as an atomic unit (note that it won't
* resolve while you're offline).
*/
commit() {
this._verifyNotCommitted();
this._committed = true;
if (this._mutations.length > 0) {
return this._commitHandler(this._mutations);
}
return Promise.resolve();
}
_verifyNotCommitted() {
if (this._committed) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'A write batch can no longer be used after commit() ' +
'has been called.');
}
}
}
function validateReference(documentRef, firestore) {
documentRef = util.getModularInstance(documentRef);
if (documentRef.firestore !== firestore) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Provided document reference is from a different Firestore instance.');
}
else {
return documentRef;
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// TODO(mrschmidt) Consider using `BaseTransaction` as the base class in the
// legacy SDK.
/**
* A reference to a transaction.
*
* The `Transaction` object passed to a transaction's `updateFunction` provides
* the methods to read and write data within the transaction context. See
* {@link runTransaction}.
*/
class Transaction$1 {
/** @hideconstructor */
constructor(_firestore, _transaction) {
this._firestore = _firestore;
this._transaction = _transaction;
this._dataReader = newUserDataReader(_firestore);
}
/**
* Reads the document referenced by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be read.
* @returns A `DocumentSnapshot` with the read data.
*/
get(documentRef) {
const ref = validateReference(documentRef, this._firestore);
const userDataWriter = new LiteUserDataWriter(this._firestore);
return this._transaction.lookup([ref._key]).then(docs => {
if (!docs || docs.length !== 1) {
return fail();
}
const doc = docs[0];
if (doc.isFoundDocument()) {
return new DocumentSnapshot$1(this._firestore, userDataWriter, doc.key, doc, ref.converter);
}
else if (doc.isNoDocument()) {
return new DocumentSnapshot$1(this._firestore, userDataWriter, ref._key, null, ref.converter);
}
else {
throw fail();
}
});
}
set(documentRef, value, options) {
const ref = validateReference(documentRef, this._firestore);
const convertedValue = applyFirestoreDataConverter(ref.converter, value, options);
const parsed = parseSetData(this._dataReader, 'Transaction.set', ref._key, convertedValue, ref.converter !== null, options);
this._transaction.set(ref._key, parsed);
return this;
}
update(documentRef, fieldOrUpdateData, value, ...moreFieldsAndValues) {
const ref = validateReference(documentRef, this._firestore);
// For Compat types, we have to "extract" the underlying types before
// performing validation.
fieldOrUpdateData = util.getModularInstance(fieldOrUpdateData);
let parsed;
if (typeof fieldOrUpdateData === 'string' ||
fieldOrUpdateData instanceof FieldPath) {
parsed = parseUpdateVarargs(this._dataReader, 'Transaction.update', ref._key, fieldOrUpdateData, value, moreFieldsAndValues);
}
else {
parsed = parseUpdateData(this._dataReader, 'Transaction.update', ref._key, fieldOrUpdateData);
}
this._transaction.update(ref._key, parsed);
return this;
}
/**
* Deletes the document referred to by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be deleted.
* @returns This `Transaction` instance. Used for chaining method calls.
*/
delete(documentRef) {
const ref = validateReference(documentRef, this._firestore);
this._transaction.delete(ref._key);
return this;
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A reference to a transaction.
*
* The `Transaction` object passed to a transaction's `updateFunction` provides
* the methods to read and write data within the transaction context. See
* {@link runTransaction}.
*/
class Transaction extends Transaction$1 {
// This class implements the same logic as the Transaction API in the Lite SDK
// but is subclassed in order to return its own DocumentSnapshot types.
/** @hideconstructor */
constructor(_firestore, _transaction) {
super(_firestore, _transaction);
this._firestore = _firestore;
}
/**
* Reads the document referenced by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be read.
* @returns A `DocumentSnapshot` with the read data.
*/
get(documentRef) {
const ref = validateReference(documentRef, this._firestore);
const userDataWriter = new ExpUserDataWriter(this._firestore);
return super
.get(documentRef)
.then(liteDocumentSnapshot => new DocumentSnapshot(this._firestore, userDataWriter, ref._key, liteDocumentSnapshot._document, new SnapshotMetadata(
/* hasPendingWrites= */ false,
/* fromCache= */ false), ref.converter));
}
}
/**
 * Executes the given `updateFunction` and then attempts to commit the changes
 * applied within the transaction. If any document read within the transaction
 * has changed, Cloud Firestore retries the `updateFunction`. If it fails to
 * commit after the maximum number of attempts (5 by default, configurable via
 * `options`), the transaction fails.
*
* The maximum number of writes allowed in a single transaction is 500.
*
* @param firestore - A reference to the Firestore database to run this
* transaction against.
* @param updateFunction - The function to execute within the transaction
* context.
* @param options - An options object to configure maximum number of attempts to
* commit.
* @returns If the transaction completed successfully or was explicitly aborted
* (the `updateFunction` returned a failed promise), the promise returned by the
 * `updateFunction` is returned here. Otherwise, if the transaction failed, a
* rejected promise with the corresponding failure error is returned.
*/
function runTransaction(firestore, updateFunction, options) {
firestore = cast(firestore, Firestore);
const optionsWithDefaults = Object.assign(Object.assign({}, DEFAULT_TRANSACTION_OPTIONS), options);
validateTransactionOptions(optionsWithDefaults);
const client = ensureFirestoreConfigured(firestore);
return firestoreClientTransaction(client, internalTransaction => updateFunction(new Transaction(firestore, internalTransaction)), optionsWithDefaults);
}
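// Usage sketch (editor's addition): a read-modify-write transaction with a
// custom retry limit. The path and field are illustrative assumptions.
//
//   const sfRef = doc(db, 'cities', 'SF');
//   await runTransaction(db, async transaction => {
//     const sfDoc = await transaction.get(sfRef);
//     if (!sfDoc.exists()) {
//       throw new Error('Document does not exist');
//     }
//     transaction.update(sfRef, { population: sfDoc.data().population + 1 });
//   }, { maxAttempts: 3 });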
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Returns a sentinel for use with {@link @firebase/firestore/lite#(updateDoc:1)} or
* {@link @firebase/firestore/lite#(setDoc:1)} with `{merge: true}` to mark a field for deletion.
*/
function deleteField() {
return new DeleteFieldValueImpl('deleteField');
}
/**
* Returns a sentinel used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link @firebase/firestore/lite#(updateDoc:1)} to
* include a server-generated timestamp in the written data.
*/
function serverTimestamp() {
return new ServerTimestampFieldValueImpl('serverTimestamp');
}
/**
* Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
* @firebase/firestore/lite#(updateDoc:1)} that tells the server to union the given elements with any array
* value that already exists on the server. Each specified element that doesn't
* already exist in the array will be added to the end. If the field being
* modified is not already an array it will be overwritten with an array
* containing exactly the specified elements.
*
* @param elements - The elements to union into the array.
* @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
* `updateDoc()`.
*/
function arrayUnion(...elements) {
// NOTE: We don't actually parse the data until it's used in set() or
// update() since we'd need the Firestore instance to do this.
return new ArrayUnionFieldValueImpl('arrayUnion', elements);
}
/**
 * Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
 * @firebase/firestore/lite#(updateDoc:1)} that tells the server to remove the given elements from any
 * array value that already exists on the server. All instances of each element
 * specified will be removed from the array. If the field being modified is not
 * already an array, it will be overwritten with an empty array.
 *
 * @param elements - The elements to remove from the array.
 * @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
 * `updateDoc()`.
 */
function arrayRemove(...elements) {
// NOTE: We don't actually parse the data until it's used in set() or
// update() since we'd need the Firestore instance to do this.
return new ArrayRemoveFieldValueImpl('arrayRemove', elements);
}
/**
* Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
* @firebase/firestore/lite#(updateDoc:1)} that tells the server to increment the field's current value by
* the given value.
*
* If either the operand or the current field value uses floating point
* precision, all arithmetic follows IEEE 754 semantics. If both values are
* integers, values outside of JavaScript's safe number range
* (`Number.MIN_SAFE_INTEGER` to `Number.MAX_SAFE_INTEGER`) are also subject to
* precision loss. Furthermore, once processed by the Firestore backend, all
* integer operations are capped between -2^63 and 2^63-1.
*
* If the current field value is not of type `number`, or if the field does not
* yet exist, the transformation sets the field to the given value.
*
* @param n - The value to increment by.
* @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
 * `updateDoc()`.
*/
function increment(n) {
return new NumericIncrementFieldValueImpl('increment', n);
}
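// Usage sketch (editor's addition): combining the field-transform sentinels in
// a single updateDoc() call. The field names are illustrative assumptions.
//
//   await updateDoc(ref, {
//     lastSeen: serverTimestamp(),
//     tags: arrayUnion('verified'),
//     legacyField: deleteField(),
//     visits: increment(1)
//   });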
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Creates a write batch, used for performing multiple writes as a single
* atomic operation. The maximum number of writes allowed in a single {@link WriteBatch}
* is 500.
*
* Unlike transactions, write batches are persisted offline and therefore are
* preferable when you don't need to condition your writes on read data.
*
* @returns A {@link WriteBatch} that can be used to atomically execute multiple
* writes.
*/
function writeBatch(firestore) {
firestore = cast(firestore, Firestore);
ensureFirestoreConfigured(firestore);
return new WriteBatch(firestore, mutations => executeWrite(firestore, mutations));
}
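// Usage sketch (editor's addition): batching several writes into one atomic
// commit (up to 500 writes). Paths and fields are illustrative assumptions.
//
//   const batch = writeBatch(db);
//   batch.set(doc(db, 'cities', 'NYC'), { name: 'New York City' });
//   batch.update(doc(db, 'cities', 'SF'), { population: 873965 });
//   batch.delete(doc(db, 'cities', 'LA'));
//   await batch.commit();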
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
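/**
 * Configures indexing for local query execution. Any previous index
 * configuration is overridden. Accepts either a JSON string or a parsed
 * configuration object in the same format the Firebase CLI uses for index
 * definitions. If IndexedDb persistence is not enabled, the call is a no-op
 * and a warning is logged.
 *
 * @returns A `Promise` that resolves once the index configuration has been
 * persisted.
 */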
function setIndexConfiguration(firestore, jsonOrConfiguration) {
var _a;
firestore = cast(firestore, Firestore);
const client = ensureFirestoreConfigured(firestore);
// PORTING NOTE: We don't return an error if the user has not enabled
    // persistence since `enableIndexedDbPersistence()` can fail on the Web.
if (!((_a = client.offlineComponents) === null || _a === void 0 ? void 0 : _a.indexBackfillerScheduler)) {
logWarn('Cannot enable indexes when persistence is disabled');
return Promise.resolve();
}
const parsedIndexes = parseIndexes(jsonOrConfiguration);
return getLocalStore(client).then(localStore => localStoreConfigureFieldIndexes(localStore, parsedIndexes));
}
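// Usage sketch (editor's addition): an index definition in the JSON shape
// parseIndexes() accepts below; the collection and fields are illustrative
// assumptions.
//
//   await setIndexConfiguration(db, {
//     indexes: [{
//       collectionGroup: 'cities',
//       fields: [
//         { fieldPath: 'state', order: 'ASCENDING' },
//         { fieldPath: 'population', order: 'DESCENDING' }
//       ]
//     }]
//   });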
function parseIndexes(jsonOrConfiguration) {
const indexConfiguration = typeof jsonOrConfiguration === 'string'
? tryParseJson(jsonOrConfiguration)
: jsonOrConfiguration;
const parsedIndexes = [];
if (Array.isArray(indexConfiguration.indexes)) {
for (const index of indexConfiguration.indexes) {
const collectionGroup = tryGetString(index, 'collectionGroup');
const segments = [];
if (Array.isArray(index.fields)) {
for (const field of index.fields) {
const fieldPathString = tryGetString(field, 'fieldPath');
const fieldPath = fieldPathFromDotSeparatedString('setIndexConfiguration', fieldPathString);
if (field.arrayConfig === 'CONTAINS') {
segments.push(new IndexSegment(fieldPath, 2 /* IndexKind.CONTAINS */));
}
else if (field.order === 'ASCENDING') {
segments.push(new IndexSegment(fieldPath, 0 /* IndexKind.ASCENDING */));
}
else if (field.order === 'DESCENDING') {
segments.push(new IndexSegment(fieldPath, 1 /* IndexKind.DESCENDING */));
}
}
}
parsedIndexes.push(new FieldIndex(FieldIndex.UNKNOWN_ID, collectionGroup, segments, IndexState.empty()));
}
}
return parsedIndexes;
}
function tryParseJson(json) {
try {
return JSON.parse(json);
}
catch (e) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Failed to parse JSON: ' + (e === null || e === void 0 ? void 0 : e.message));
}
}
function tryGetString(data, property) {
if (typeof data[property] !== 'string') {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Missing string value for: ' + property);
}
return data[property];
}
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
registerFirestore('node');
exports.AbstractUserDataWriter = AbstractUserDataWriter;
exports.AggregateField = AggregateField;
exports.AggregateQuerySnapshot = AggregateQuerySnapshot;
exports.Bytes = Bytes;
exports.CACHE_SIZE_UNLIMITED = CACHE_SIZE_UNLIMITED;
exports.CollectionReference = CollectionReference;
exports.DocumentReference = DocumentReference;
exports.DocumentSnapshot = DocumentSnapshot;
exports.FieldPath = FieldPath;
exports.FieldValue = FieldValue;
exports.Firestore = Firestore;
exports.FirestoreError = FirestoreError;
exports.GeoPoint = GeoPoint;
exports.LoadBundleTask = LoadBundleTask;
exports.Query = Query;
exports.QueryCompositeFilterConstraint = QueryCompositeFilterConstraint;
exports.QueryConstraint = QueryConstraint;
exports.QueryDocumentSnapshot = QueryDocumentSnapshot;
exports.QueryEndAtConstraint = QueryEndAtConstraint;
exports.QueryFieldFilterConstraint = QueryFieldFilterConstraint;
exports.QueryLimitConstraint = QueryLimitConstraint;
exports.QueryOrderByConstraint = QueryOrderByConstraint;
exports.QuerySnapshot = QuerySnapshot;
exports.QueryStartAtConstraint = QueryStartAtConstraint;
exports.SnapshotMetadata = SnapshotMetadata;
exports.Timestamp = Timestamp;
exports.Transaction = Transaction;
exports.WriteBatch = WriteBatch;
exports._DatabaseId = DatabaseId;
exports._DocumentKey = DocumentKey;
exports._EmptyAppCheckTokenProvider = EmptyAppCheckTokenProvider;
exports._EmptyAuthCredentialsProvider = EmptyAuthCredentialsProvider;
exports._FieldPath = FieldPath$1;
exports._cast = cast;
exports._debugAssert = debugAssert;
exports._isBase64Available = isBase64Available;
exports._logWarn = logWarn;
exports._validateIsNotUsedTogether = validateIsNotUsedTogether;
exports.addDoc = addDoc;
exports.aggregateQuerySnapshotEqual = aggregateQuerySnapshotEqual;
exports.and = and;
exports.arrayRemove = arrayRemove;
exports.arrayUnion = arrayUnion;
exports.clearIndexedDbPersistence = clearIndexedDbPersistence;
exports.collection = collection;
exports.collectionGroup = collectionGroup;
exports.connectFirestoreEmulator = connectFirestoreEmulator;
exports.deleteDoc = deleteDoc;
exports.deleteField = deleteField;
exports.disableNetwork = disableNetwork;
exports.doc = doc;
exports.documentId = documentId;
exports.enableIndexedDbPersistence = enableIndexedDbPersistence;
exports.enableMultiTabIndexedDbPersistence = enableMultiTabIndexedDbPersistence;
exports.enableNetwork = enableNetwork;
exports.endAt = endAt;
exports.endBefore = endBefore;
exports.ensureFirestoreConfigured = ensureFirestoreConfigured;
exports.executeWrite = executeWrite;
exports.getCountFromServer = getCountFromServer;
exports.getDoc = getDoc;
exports.getDocFromCache = getDocFromCache;
exports.getDocFromServer = getDocFromServer;
exports.getDocs = getDocs;
exports.getDocsFromCache = getDocsFromCache;
exports.getDocsFromServer = getDocsFromServer;
exports.getFirestore = getFirestore;
exports.increment = increment;
exports.initializeFirestore = initializeFirestore;
exports.limit = limit;
exports.limitToLast = limitToLast;
exports.loadBundle = loadBundle;
exports.namedQuery = namedQuery;
exports.onSnapshot = onSnapshot;
exports.onSnapshotsInSync = onSnapshotsInSync;
exports.or = or;
exports.orderBy = orderBy;
exports.query = query;
exports.queryEqual = queryEqual;
exports.refEqual = refEqual;
exports.runTransaction = runTransaction;
exports.serverTimestamp = serverTimestamp;
exports.setDoc = setDoc;
exports.setIndexConfiguration = setIndexConfiguration;
exports.setLogLevel = setLogLevel;
exports.snapshotEqual = snapshotEqual;
exports.startAfter = startAfter;
exports.startAt = startAt;
exports.terminate = terminate;
exports.updateDoc = updateDoc;
exports.waitForPendingWrites = waitForPendingWrites;
exports.where = where;
exports.writeBatch = writeBatch;
//# sourceMappingURL=index.node.cjs.js.map