} struct.
*/ (o);
n = new Cs(s, i, r, u || null);
} else if ("documentChange" in e) {
e.documentChange;
const s = e.documentChange;
s.document, s.document.name, s.document.updateTime;
const i = Ws(t, s.document.name), r = Ks(s.document.updateTime), o = s.document.createTime ? Ks(s.document.createTime) : it.min(), u = new Ze({
mapValue: {
fields: s.document.fields
}
}), c = en.newFoundDocument(i, r, o, u), a = s.targetIds || [], h = s.removedTargetIds || [];
n = new Ss(a, h, c.key, c);
} else if ("documentDelete" in e) {
e.documentDelete;
const s = e.documentDelete;
s.document;
const i = Ws(t, s.document), r = s.readTime ? Ks(s.readTime) : it.min(), o = en.newNoDocument(i, r), u = s.removedTargetIds || [];
n = new Ss([], u, o.key, o);
} else if ("documentRemove" in e) {
e.documentRemove;
const s = e.documentRemove;
s.document;
const i = Ws(t, s.document), r = s.removedTargetIds || [];
n = new Ss([], r, i, null);
} else {
if (!("filter" in e)) return M();
{
e.filter;
const t = e.filter;
t.targetId;
const s = t.count || 0, i = new cs(s), r = t.targetId;
n = new Ds(r, i);
}
}
return n;
}
function ni(t, e) {
let n;
if (e instanceof es) n = {
update: Xs(t, e.key, e.value)
}; else if (e instanceof os) n = {
delete: js(t, e.key)
}; else if (e instanceof ns) n = {
update: Xs(t, e.key, e.data),
updateMask: mi(e.fieldMask)
}; else {
if (!(e instanceof us)) return M();
n = {
verify: js(t, e.key)
};
}
return e.fieldTransforms.length > 0 && (n.updateTransforms = e.fieldTransforms.map((t => function(t, e) {
const n = e.transform;
if (n instanceof Mn) return {
fieldPath: e.field.canonicalString(),
setToServerValue: "REQUEST_TIME"
};
if (n instanceof Fn) return {
fieldPath: e.field.canonicalString(),
appendMissingElements: {
values: n.elements
}
};
if (n instanceof Bn) return {
fieldPath: e.field.canonicalString(),
removeAllFromArray: {
values: n.elements
}
};
if (n instanceof qn) return {
fieldPath: e.field.canonicalString(),
increment: n.gt
};
throw M();
}(0, t)))), e.precondition.isNone || (n.currentDocument = function(t, e) {
return void 0 !== e.updateTime ? {
updateTime: Us(t, e.updateTime)
} : void 0 !== e.exists ? {
exists: e.exists
} : M();
}(t, e.precondition)), n;
}
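// Illustrative sketch (editorial, not part of the bundle): for a set mutation
// carrying one server-timestamp transform and an exists precondition, ni
// produces a google.firestore.v1.Write-shaped literal roughly like:
//
//   {
//     update: { name: "projects/p/databases/d/documents/users/alice", fields: { /* ... */ } },
//     updateTransforms: [ { fieldPath: "lastSeen", setToServerValue: "REQUEST_TIME" } ],
//     currentDocument: { exists: true }
//   }
//
// The document path and field name here are hypothetical; si below performs
// the inverse conversion.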
function si(t, e) {
const n = e.currentDocument ? function(t) {
return void 0 !== t.updateTime ? Wn.updateTime(Ks(t.updateTime)) : void 0 !== t.exists ? Wn.exists(t.exists) : Wn.none();
}(e.currentDocument) : Wn.none(), s = e.updateTransforms ? e.updateTransforms.map((e => function(t, e) {
let n = null;
if ("setToServerValue" in e) F("REQUEST_TIME" === e.setToServerValue), n = new Mn; else if ("appendMissingElements" in e) {
const t = e.appendMissingElements.values || [];
n = new Fn(t);
} else if ("removeAllFromArray" in e) {
const t = e.removeAllFromArray.values || [];
n = new Bn(t);
} else "increment" in e ? n = new qn(t, e.increment) : M();
const s = ct.fromServerFormat(e.fieldPath);
return new Gn(s, n);
}(t, e))) : [];
if (e.update) {
e.update.name;
const i = Ws(t, e.update.name), r = new Ze({
mapValue: {
fields: e.update.fields
}
});
if (e.updateMask) {
const t = function(t) {
const e = t.fieldPaths || [];
return new Xe(e.map((t => ct.fromServerFormat(t))));
}(e.updateMask);
return new ns(i, r, t, n, s);
}
return new es(i, r, n, s);
}
if (e.delete) {
const s = Ws(t, e.delete);
return new os(s, n);
}
if (e.verify) {
const s = Ws(t, e.verify);
return new us(s, n);
}
return M();
}
function ii(t, e) {
return t && t.length > 0 ? (F(void 0 !== e), t.map((t => function(t, e) {
// NOTE: Deletes don't have an updateTime.
let n = t.updateTime ? Ks(t.updateTime) : Ks(e);
return n.isEqual(it.min()) && (
// The Firestore Emulator currently returns an update time of 0 for
// deletes of non-existing documents (rather than null). This breaks the
// test "get deleted doc while offline with source=cache" as NoDocuments
// with version 0 are filtered by IndexedDb's RemoteDocumentCache.
// TODO(#2149): Remove this when Emulator is fixed
n = Ks(e)), new jn(n, t.transformResults || []);
}(t, e)))) : [];
}
function ri(t, e) {
return {
documents: [ zs(t, e.path) ]
};
}
function oi(t, e) {
// Dissect the path into parent, collectionId, and optional key filter.
const n = {
structuredQuery: {}
}, s = e.path;
null !== e.collectionGroup ? (n.parent = zs(t, s), n.structuredQuery.from = [ {
collectionId: e.collectionGroup,
allDescendants: !0
} ]) : (n.parent = zs(t, s.popLast()), n.structuredQuery.from = [ {
collectionId: s.lastSegment()
} ]);
const i = function(t) {
if (0 === t.length) return;
return wi(ve.create(t, "and" /* CompositeOperator.AND */));
}(e.filters);
i && (n.structuredQuery.where = i);
const r = function(t) {
if (0 === t.length) return;
return t.map((t =>
// visible for testing
function(t) {
return {
field: di(t.field),
direction: hi(t.dir)
};
}(t)));
}(e.orderBy);
r && (n.structuredQuery.orderBy = r);
const o = function(t, e) {
return t.wt || Ut(e) ? e : {
value: e
};
}
/**
* Returns a value suitable for a google.protobuf.Int32Value proto: null and
* undefined pass through unchanged, proto3 JSON mode uses the raw number, and
* otherwise the number is wrapped as a { value: <number> } struct.
*/ (t, e.limit);
var u;
return null !== o && (n.structuredQuery.limit = o), e.startAt && (n.structuredQuery.startAt = {
before: (u = e.startAt).inclusive,
values: u.position
}), e.endAt && (n.structuredQuery.endAt = function(t) {
return {
before: !t.inclusive,
values: t.position
};
}(e.endAt)), n;
}
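// Illustrative sketch (editorial, not part of the bundle): for a query over a
// "users" collection with a limit of 10, oi produces a RunQuery-style target
// roughly like:
//
//   {
//     parent: "projects/p/databases/d/documents",
//     structuredQuery: {
//       from: [ { collectionId: "users" } ],
//       limit: { value: 10 } // or the bare number 10 in proto3 JSON mode
//     }
//   }
//
// The project and database IDs are hypothetical; ui below is the inverse.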
function ui(t) {
let e = Hs(t.parent);
const n = t.structuredQuery, s = n.from ? n.from.length : 0;
let i = null;
if (s > 0) {
F(1 === s);
const t = n.from[0];
t.allDescendants ? i = t.collectionId : e = e.child(t.collectionId);
}
let r = [];
n.where && (r = function(t) {
const e = ai(t);
if (e instanceof ve && De(e)) return e.getFilters();
return [ e ];
}(n.where));
let o = [];
n.orderBy && (o = n.orderBy.map((t => function(t) {
return new Ge(_i(t.field),
// visible for testing
function(t) {
switch (t) {
case "ASCENDING":
return "asc" /* Direction.ASCENDING */;
case "DESCENDING":
return "desc" /* Direction.DESCENDING */;
default:
return;
}
}
// visible for testing
(t.direction));
}
// visible for testing
(t))));
let u = null;
n.limit && (u = function(t) {
let e;
return e = "object" == typeof t ? t.value : t, Ut(e) ? null : e;
}(n.limit));
let c = null;
n.startAt && (c = function(t) {
const e = !!t.before, n = t.values || [];
return new Ee(n, e);
}(n.startAt));
let a = null;
return n.endAt && (a = function(t) {
const e = !t.before, n = t.values || [];
return new Ee(n, e);
}
// visible for testing
(n.endAt)), fn(e, i, o, r, u, "F" /* LimitType.First */ , c, a);
}
function ci(t, e) {
const n = function(t, e) {
switch (e) {
case 0 /* TargetPurpose.Listen */ :
return null;
case 1 /* TargetPurpose.ExistenceFilterMismatch */ :
return "existence-filter-mismatch";
case 2 /* TargetPurpose.LimboResolution */ :
return "limbo-document";
default:
return M();
}
}(0, e.purpose);
return null == n ? null : {
"goog-listen-tags": n
};
}
function ai(t) {
return void 0 !== t.unaryFilter ? function(t) {
switch (t.unaryFilter.op) {
case "IS_NAN":
const e = _i(t.unaryFilter.field);
return Pe.create(e, "==" /* Operator.EQUAL */ , {
doubleValue: NaN
});
case "IS_NULL":
const n = _i(t.unaryFilter.field);
return Pe.create(n, "==" /* Operator.EQUAL */ , {
nullValue: "NULL_VALUE"
});
case "IS_NOT_NAN":
const s = _i(t.unaryFilter.field);
return Pe.create(s, "!=" /* Operator.NOT_EQUAL */ , {
doubleValue: NaN
});
case "IS_NOT_NULL":
const i = _i(t.unaryFilter.field);
return Pe.create(i, "!=" /* Operator.NOT_EQUAL */ , {
nullValue: "NULL_VALUE"
});
default:
return M();
}
}(t) : void 0 !== t.fieldFilter ? function(t) {
return Pe.create(_i(t.fieldFilter.field), function(t) {
switch (t) {
case "EQUAL":
return "==" /* Operator.EQUAL */;
case "NOT_EQUAL":
return "!=" /* Operator.NOT_EQUAL */;
case "GREATER_THAN":
return ">" /* Operator.GREATER_THAN */;
case "GREATER_THAN_OR_EQUAL":
return ">=" /* Operator.GREATER_THAN_OR_EQUAL */;
case "LESS_THAN":
return "<" /* Operator.LESS_THAN */;
case "LESS_THAN_OR_EQUAL":
return "<=" /* Operator.LESS_THAN_OR_EQUAL */;
case "ARRAY_CONTAINS":
return "array-contains" /* Operator.ARRAY_CONTAINS */;
case "IN":
return "in" /* Operator.IN */;
case "NOT_IN":
return "not-in" /* Operator.NOT_IN */;
case "ARRAY_CONTAINS_ANY":
return "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */;
default:
return M();
}
}(t.fieldFilter.op), t.fieldFilter.value);
}(t) : void 0 !== t.compositeFilter ? function(t) {
return ve.create(t.compositeFilter.filters.map((t => ai(t))), function(t) {
switch (t) {
case "AND":
return "and" /* CompositeOperator.AND */;
case "OR":
return "or" /* CompositeOperator.OR */;
default:
return M();
}
}(t.compositeFilter.op));
}(t) : M();
}
function hi(t) {
return Ms[t];
}
function li(t) {
return Fs[t];
}
function fi(t) {
return $s[t];
}
function di(t) {
return {
fieldPath: t.canonicalString()
};
}
function _i(t) {
return ct.fromServerFormat(t.fieldPath);
}
function wi(t) {
return t instanceof Pe ? function(t) {
if ("==" /* Operator.EQUAL */ === t.op) {
if (_e(t.value)) return {
unaryFilter: {
field: di(t.field),
op: "IS_NAN"
}
};
if (de(t.value)) return {
unaryFilter: {
field: di(t.field),
op: "IS_NULL"
}
};
} else if ("!=" /* Operator.NOT_EQUAL */ === t.op) {
if (_e(t.value)) return {
unaryFilter: {
field: di(t.field),
op: "IS_NOT_NAN"
}
};
if (de(t.value)) return {
unaryFilter: {
field: di(t.field),
op: "IS_NOT_NULL"
}
};
}
return {
fieldFilter: {
field: di(t.field),
op: li(t.op),
value: t.value
}
};
}(t) : t instanceof ve ? function(t) {
const e = t.getFilters().map((t => wi(t)));
if (1 === e.length) return e[0];
return {
compositeFilter: {
op: fi(t.op),
filters: e
}
};
}(t) : M();
}
function mi(t) {
const e = [];
return t.fields.forEach((t => e.push(t.canonicalString()))), {
fieldPaths: e
};
}
function gi(t) {
// Resource names have at least 4 components (project ID, database ID)
return t.length >= 4 && "projects" === t.get(0) && "databases" === t.get(2);
}
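// Illustrative sketch: gi accepts any path whose segments start with the
// prefix shape [ "projects", <projectId>, "databases", <databaseId>, ... ],
// e.g. the segments of "projects/my-project/databases/(default)/documents".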
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Encodes a resource path into a IndexedDb-compatible string form.
*/
function yi(t) {
let e = "";
for (let n = 0; n < t.length; n++) e.length > 0 && (e = Ii(e)), e = pi(t.get(n), e);
return Ii(e);
}
/** Encodes a single segment of a resource path into the given result */ function pi(t, e) {
let n = e;
const s = t.length;
for (let e = 0; e < s; e++) {
const s = t.charAt(e);
switch (s) {
case "\0":
n += "";
break;
case "":
n += "";
break;
default:
n += s;
}
}
return n;
}
/** Encodes a path separator into the given result */ function Ii(t) {
return t + "";
}
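// Illustrative sketch (editorial): with the scheme above ("\u0001" as the
// escape character, "\u0001\u0001" as the segment separator, "\u0001\u0010"
// for NUL, and "\u0001\u0011" for a literal escape), the path
// [ "users", "alice" ] encodes as "users\u0001\u0001alice\u0001\u0001", and
// the empty path encodes as the bare separator "\u0001\u0001".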
/**
* Decodes the given IndexedDb-compatible string form of a resource path into
* a ResourcePath instance. Note that this method is not suitable for use with
* decoding resource names from the server; those are One Platform format
* strings.
*/ function Ti(t) {
// Even the empty path must encode as a path of at least length 2. A path
// with exactly 2 must be the empty path.
const e = t.length;
if (F(e >= 2), 2 === e) return F("\u0001" === t.charAt(0) && "\u0001" === t.charAt(1)), ot.emptyPath();
// Escape characters cannot exist past the second-to-last position in the
// source value.
const n = e - 2, s = [];
let i = "";
for (let r = 0; r < e; ) {
// The last two characters of a valid encoded path must be a separator, so
// there must be an end to this segment.
const e = t.indexOf("", r);
(e < 0 || e > n) && M();
switch (t.charAt(e + 1)) {
case "":
const n = t.substring(r, e);
let o;
0 === i.length ?
// Avoid copying for the common case of a segment that excludes \0
// and \001
o = n : (i += n, o = i, i = ""), s.push(o);
break;
case "":
i += t.substring(r, e), i += "\0";
break;
case "":
// The escape character can be used in the output to encode itself.
i += t.substring(r, e + 1);
break;
default:
M();
}
r = e + 2;
}
return new ot(s);
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ const Ei = [ "userId", "batchId" ];
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Name of the IndexedDb object store.
*
* Note that the name 'owner' is chosen to ensure backwards compatibility with
* older clients that only supported single locked access to the persistence
* layer.
*/
/**
* Creates a [userId, encodedPath] key for use in the DbDocumentMutations
* index to iterate over all at document mutations for a given path or lower.
*/
function Ai(t, e) {
return [ t, yi(e) ];
}
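// Illustrative sketch: Ai("user1", path of [ "users", "alice" ]) yields
// [ "user1", "users\u0001\u0001alice\u0001\u0001" ]; the user ID and path
// are hypothetical.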
/**
* Creates a full index key of [userId, encodedPath, batchId] for inserting
* and deleting into the DbDocumentMutations index.
*/ function Ri(t, e, n) {
return [ t, yi(e), n ];
}
/**
* Because we store all the useful information for this store in the key,
* there is no useful information to store as the value. The raw (unencoded)
* path cannot be stored because IndexedDb doesn't store prototype
* information.
*/ const bi = {}, Pi = [ "prefixPath", "collectionGroup", "readTime", "documentId" ], vi = [ "prefixPath", "collectionGroup", "documentId" ], Vi = [ "collectionGroup", "readTime", "prefixPath", "documentId" ], Si = [ "canonicalId", "targetId" ], Di = [ "targetId", "path" ], Ci = [ "path", "targetId" ], xi = [ "collectionId", "parent" ], Ni = [ "indexId", "uid" ], ki = [ "uid", "sequenceNumber" ], Oi = [ "indexId", "uid", "arrayValue", "directionalValue", "orderedDocumentKey", "documentKey" ], Mi = [ "indexId", "uid", "orderedDocumentKey" ], Fi = [ "userId", "collectionPath", "documentId" ], $i = [ "userId", "collectionPath", "largestBatchId" ], Bi = [ "userId", "collectionGroup", "largestBatchId" ], Li = [ ...[ ...[ ...[ ...[ "mutationQueues", "mutations", "documentMutations", "remoteDocuments", "targets", "owner", "targetGlobal", "targetDocuments" ], "clientMetadata" ], "remoteDocumentGlobal" ], "collectionParents" ], "bundles", "namedQueries" ], qi = [ ...Li, "documentOverlays" ], Ui = [ "mutationQueues", "mutations", "documentMutations", "remoteDocumentsV14", "targets", "owner", "targetGlobal", "targetDocuments", "clientMetadata", "remoteDocumentGlobal", "collectionParents", "bundles", "namedQueries", "documentOverlays" ], Ki = Ui, Gi = [ ...Ki, "indexConfiguration", "indexState", "indexEntries" ];
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class Qi extends Et {
constructor(t, e) {
super(), this.se = t, this.currentSequenceNumber = e;
}
}
function ji(t, e) {
const n = B(t);
return Pt.M(n.se, e);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A batch of mutations that will be sent as one unit to the backend.
*/ class Wi {
/**
* @param batchId - The unique ID of this mutation batch.
* @param localWriteTime - The original write time of this mutation.
* @param baseMutations - Mutations that are used to populate the base
* values when this mutation is applied locally. This can be used to locally
* overwrite values that are persisted in the remote document cache. Base
* mutations are never sent to the backend.
* @param mutations - The user-provided mutations in this mutation batch.
* User-provided mutations are applied both locally and remotely on the
* backend.
*/
constructor(t, e, n, s) {
this.batchId = t, this.localWriteTime = e, this.baseMutations = n, this.mutations = s;
}
/**
* Applies all the mutations in this MutationBatch to the specified document
* to compute the state of the remote document
*
* @param document - The document to apply mutations to.
* @param batchResult - The result of applying the MutationBatch to the
* backend.
*/ applyToRemoteDocument(t, e) {
const n = e.mutationResults;
for (let e = 0; e < this.mutations.length; e++) {
const s = this.mutations[e];
if (s.key.isEqual(t.key)) {
Yn(s, t, n[e]);
}
}
}
/**
* Computes the local view of a document given all the mutations in this
* batch.
*
* @param document - The document to apply mutations to.
* @param mutatedFields - Fields that have been updated before applying this mutation batch.
* @returns A `FieldMask` representing all the fields that are mutated.
*/ applyToLocalView(t, e) {
// First, apply the base state. This allows us to apply non-idempotent
// transform against a consistent set of values.
for (const n of this.baseMutations) n.key.isEqual(t.key) && (e = Xn(n, t, e, this.localWriteTime));
// Second, apply all user-provided mutations.
for (const n of this.mutations) n.key.isEqual(t.key) && (e = Xn(n, t, e, this.localWriteTime));
return e;
}
/**
* Computes the local view for all provided documents given the mutations in
* this batch. Returns a `DocumentKey` to `Mutation` map which can be used to
* replace all the mutation applications.
*/ applyToLocalDocumentSet(t, e) {
// TODO(mrschmidt): This implementation is O(n^2). If we apply the mutations
// directly (as done in `applyToLocalView()`), we can reduce the complexity
// to O(n).
const n = Is();
return this.mutations.forEach((s => {
const i = t.get(s.key), r = i.overlayedDocument;
// TODO(mutabledocuments): This method should take a MutableDocumentMap
// and we should remove this cast.
let o = this.applyToLocalView(r, i.mutatedFields);
// Set mutatedFields to null if the document is only from local mutations.
// This creates a Set or Delete mutation, instead of trying to create a
// patch mutation as the overlay.
o = e.has(s.key) ? null : o;
const u = Jn(r, o);
null !== u && n.set(s.key, u), r.isValidDocument() || r.convertToNoDocument(it.min());
})), n;
}
keys() {
return this.mutations.reduce(((t, e) => t.add(e.key)), Rs());
}
isEqual(t) {
return this.batchId === t.batchId && et(this.mutations, t.mutations, ((t, e) => ts(t, e))) && et(this.baseMutations, t.baseMutations, ((t, e) => ts(t, e)));
}
}
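// Illustrative sketch (editorial): if a batch's base mutations capture
// { count: 1 } for a document and its user mutations increment "count", then
// applyToLocalView first restores count to 1 and then applies the increment,
// so replaying the batch locally yields a consistent result even for the
// non-idempotent transform.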
/** The result of applying a mutation batch to the backend. */ class zi {
constructor(t, e, n,
/**
* A pre-computed mapping from each mutated document to the resulting
* version.
*/
s) {
this.batch = t, this.commitVersion = e, this.mutationResults = n, this.docVersions = s;
}
/**
* Creates a new MutationBatchResult for the given batch and results. There
* must be one result for each mutation in the batch. This static factory
* caches a document=>version mapping (docVersions).
*/ static from(t, e, n) {
F(t.mutations.length === n.length);
let s = Es;
const i = t.mutations;
for (let t = 0; t < i.length; t++) s = s.insert(i[t].key, n[t].version);
return new zi(t, e, n, s);
}
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Representation of an overlay computed by Firestore.
*
* Holds information about a mutation and the largest batch id in Firestore when
* the mutation was created.
*/ class Hi {
constructor(t, e) {
this.largestBatchId = t, this.mutation = e;
}
getKey() {
return this.mutation.key;
}
isEqual(t) {
return null !== t && this.mutation === t.mutation;
}
toString() {
return `Overlay{\n largestBatchId: ${this.largestBatchId},\n mutation: ${this.mutation.toString()}\n }`;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An immutable set of metadata that the local store tracks for each target.
*/ class Ji {
constructor(
/** The target being listened to. */
t,
/**
* The target ID to which the target corresponds; Assigned by the
* LocalStore for user listens and by the SyncEngine for limbo watches.
*/
e,
/** The purpose of the target. */
n,
/**
* The sequence number of the last transaction during which this target data
* was modified.
*/
s,
/** The latest snapshot version seen for this target. */
i = it.min()
/**
* The maximum snapshot version at which the associated view
* contained no limbo documents.
*/ , r = it.min()
/**
* An opaque, server-assigned token that allows watching a target to be
* resumed after disconnecting without retransmitting all the data that
* matches the target. The resume token essentially identifies a point in
* time from which the server should resume sending results.
*/ , o = Wt.EMPTY_BYTE_STRING) {
this.target = t, this.targetId = e, this.purpose = n, this.sequenceNumber = s, this.snapshotVersion = i,
this.lastLimboFreeSnapshotVersion = r, this.resumeToken = o;
}
/** Creates a new target data instance with an updated sequence number. */ withSequenceNumber(t) {
return new Ji(this.target, this.targetId, this.purpose, t, this.snapshotVersion, this.lastLimboFreeSnapshotVersion, this.resumeToken);
}
/**
* Creates a new target data instance with an updated resume token and
* snapshot version.
*/ withResumeToken(t, e) {
return new Ji(this.target, this.targetId, this.purpose, this.sequenceNumber, e, this.lastLimboFreeSnapshotVersion, t);
}
/**
* Creates a new target data instance with an updated last limbo free
* snapshot version number.
*/ withLastLimboFreeSnapshotVersion(t) {
return new Ji(this.target, this.targetId, this.purpose, this.sequenceNumber, this.snapshotVersion, t, this.resumeToken);
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Serializer for values stored in the LocalStore. */ class Yi {
constructor(t) {
this.ie = t;
}
}
/** Decodes a remote document from storage locally to a Document. */ function Xi(t, e) {
let n;
if (e.document) n = Zs(t.ie, e.document, !!e.hasCommittedMutations); else if (e.noDocument) {
const t = at.fromSegments(e.noDocument.path), s = nr(e.noDocument.readTime);
n = en.newNoDocument(t, s), e.hasCommittedMutations && n.setHasCommittedMutations();
} else {
if (!e.unknownDocument) return M();
{
const t = at.fromSegments(e.unknownDocument.path), s = nr(e.unknownDocument.version);
n = en.newUnknownDocument(t, s);
}
}
return e.readTime && n.setReadTime(function(t) {
const e = new st(t[0], t[1]);
return it.fromTimestamp(e);
}(e.readTime)), n;
}
/** Encodes a document for storage locally. */ function Zi(t, e) {
const n = e.key, s = {
prefixPath: n.getCollectionPath().popLast().toArray(),
collectionGroup: n.collectionGroup,
documentId: n.path.lastSegment(),
readTime: tr(e.readTime),
hasCommittedMutations: e.hasCommittedMutations
};
if (e.isFoundDocument()) s.document = function(t, e) {
return {
name: js(t, e.key),
fields: e.data.value.mapValue.fields,
updateTime: Ls(t, e.version.toTimestamp()),
createTime: Ls(t, e.createTime.toTimestamp())
};
}(t.ie, e); else if (e.isNoDocument()) s.noDocument = {
path: n.path.toArray(),
readTime: er(e.version)
}; else {
if (!e.isUnknownDocument()) return M();
s.unknownDocument = {
path: n.path.toArray(),
version: er(e.version)
};
}
return s;
}
function tr(t) {
const e = t.toTimestamp();
return [ e.seconds, e.nanoseconds ];
}
function er(t) {
const e = t.toTimestamp();
return {
seconds: e.seconds,
nanoseconds: e.nanoseconds
};
}
function nr(t) {
const e = new st(t.seconds, t.nanoseconds);
return it.fromTimestamp(e);
}
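// Editorial note: tr and er encode the same timestamp in two shapes: tr as a
// [seconds, nanoseconds] array (used for the stored document's readTime above,
// where the array form is comparable as an IndexedDB index key) and er as a
// { seconds, nanoseconds } object (used in noDocument/unknownDocument entries
// and targets); nr decodes the object form back to a snapshot version.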
/** Encodes a batch of mutations into a DbMutationBatch for local storage. */
/** Decodes a DbMutationBatch into a MutationBatch */
function sr(t, e) {
const n = (e.baseMutations || []).map((e => si(t.ie, e)));
// Squash old transform mutations into existing patch or set mutations.
// The replacement of representing `transforms` with `update_transforms`
// on the SDK means that old `transform` mutations stored in IndexedDB need
// to be updated to `update_transforms`.
// TODO(b/174608374): Remove this code once we perform a schema migration.
for (let t = 0; t < e.mutations.length - 1; ++t) {
const n = e.mutations[t];
if (t + 1 < e.mutations.length && void 0 !== e.mutations[t + 1].transform) {
const s = e.mutations[t + 1];
n.updateTransforms = s.transform.fieldTransforms, e.mutations.splice(t + 1, 1),
++t;
}
}
const s = e.mutations.map((e => si(t.ie, e))), i = st.fromMillis(e.localWriteTimeMs);
return new Wi(e.batchId, i, n, s);
}
/** Decodes a DbTarget into TargetData */ function ir(t) {
const e = nr(t.readTime), n = void 0 !== t.lastLimboFreeSnapshotVersion ? nr(t.lastLimboFreeSnapshotVersion) : it.min();
let s;
var i;
return void 0 !== t.query.documents ? (F(1 === (i = t.query).documents.length),
s = pn(dn(Hs(i.documents[0])))) : s = function(t) {
return pn(ui(t));
}(t.query), new Ji(s, t.targetId, 0 /* TargetPurpose.Listen */ , t.lastListenSequenceNumber, e, n, Wt.fromBase64String(t.resumeToken));
}
/** Encodes TargetData into a DbTarget for storage locally. */ function rr(t, e) {
const n = er(e.snapshotVersion), s = er(e.lastLimboFreeSnapshotVersion);
let i;
i = un(e.target) ? ri(t.ie, e.target) : oi(t.ie, e.target);
// We can't store the resumeToken as a ByteString in IndexedDb, so we
// convert it to a base64 string for storage.
const r = e.resumeToken.toBase64();
// lastListenSequenceNumber is always 0 until we do real GC.
return {
targetId: e.targetId,
canonicalId: rn(e.target),
readTime: n,
resumeToken: r,
lastListenSequenceNumber: e.sequenceNumber,
lastLimboFreeSnapshotVersion: s,
query: i
};
}
/**
* A helper function for figuring out what kind of query has been stored.
*/
/**
* Decodes a `BundledQuery` from its bundle proto form into a Query object.
*
* This reconstructs the original query used to build the bundle being loaded,
* including features that exist only in SDKs (for example: limit-to-last).
*/
function or(t) {
const e = ui({
parent: t.parent,
structuredQuery: t.structuredQuery
});
return "LAST" === t.limitType ? Tn(e, e.limit, "L" /* LimitType.Last */) : e;
}
/** Decodes a NamedQuery proto object to a NamedQuery model object. */
/** Decodes a DbDocumentOverlay object to an Overlay model object. */
function ur(t, e) {
return new Hi(e.largestBatchId, si(t.ie, e.overlayMutation));
}
/** Encodes an Overlay model object into a DbDocumentOverlay object. */
/**
* Returns the DbDocumentOverlayKey corresponding to the given user and
* document key.
*/
function cr(t, e) {
const n = e.path.lastSegment();
return [ t, yi(e.path.popLast()), n ];
}
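// Illustrative sketch: for user "user1" and a document key at "users/alice",
// cr returns [ "user1", "users\u0001\u0001", "alice" ]; the identifiers are
// hypothetical.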
function ar(t, e, n, s) {
return {
indexId: t,
uid: e.uid || "",
sequenceNumber: n,
readTime: er(s.readTime),
documentKey: yi(s.documentKey.path),
largestBatchId: s.largestBatchId
};
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ class hr {
getBundleMetadata(t, e) {
return lr(t).get(e).next((t => {
if (t) return {
id: (e = t).bundleId,
createTime: nr(e.createTime),
version: e.version
};
/** Decodes a DbBundle to a BundleMetadata object. */
var e;
/** Encodes a BundleMetadata to a DbBundle. */ }));
}
saveBundleMetadata(t, e) {
return lr(t).put({
bundleId: (n = e).id,
createTime: er(Ks(n.createTime)),
version: n.version
});
var n;
/** Decodes a DbNamedQuery to a NamedQuery. */ }
getNamedQuery(t, e) {
return fr(t).get(e).next((t => {
if (t) return {
name: (e = t).name,
query: or(e.bundledQuery),
readTime: nr(e.readTime)
};
var e;
/** Encodes a NamedQuery from a bundle proto to a DbNamedQuery. */ }));
}
saveNamedQuery(t, e) {
return fr(t).put(function(t) {
return {
name: t.name,
readTime: er(Ks(t.readTime)),
bundledQuery: t.bundledQuery
};
}(e));
}
}
/**
* Helper to get a typed SimpleDbStore for the bundles object store.
*/ function lr(t) {
return ji(t, "bundles");
}
/**
* Helper to get a typed SimpleDbStore for the namedQueries object store.
*/ function fr(t) {
return ji(t, "namedQueries");
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Implementation of DocumentOverlayCache using IndexedDb.
*/ class dr {
/**
* @param serializer - The document serializer.
* @param userId - The userId for which we are accessing overlays.
*/
constructor(t, e) {
this.yt = t, this.userId = e;
}
static re(t, e) {
const n = e.uid || "";
return new dr(t, n);
}
getOverlay(t, e) {
return _r(t).get(cr(this.userId, e)).next((t => t ? ur(this.yt, t) : null));
}
getOverlays(t, e) {
const n = ps();
return Rt.forEach(e, (e => this.getOverlay(t, e).next((t => {
null !== t && n.set(e, t);
})))).next((() => n));
}
saveOverlays(t, e, n) {
const s = [];
return n.forEach(((n, i) => {
const r = new Hi(e, i);
s.push(this.oe(t, r));
})), Rt.waitFor(s);
}
removeOverlaysForBatchId(t, e, n) {
const s = new Set;
// Get the set of unique collection paths.
e.forEach((t => s.add(yi(t.getCollectionPath()))));
const i = [];
return s.forEach((e => {
const s = IDBKeyRange.bound([ this.userId, e, n ], [ this.userId, e, n + 1 ],
/*lowerOpen=*/ !1,
/*upperOpen=*/ !0);
i.push(_r(t).Y("collectionPathOverlayIndex", s));
})), Rt.waitFor(i);
}
getOverlaysForCollection(t, e, n) {
const s = ps(), i = yi(e), r = IDBKeyRange.bound([ this.userId, i, n ], [ this.userId, i, Number.POSITIVE_INFINITY ],
/*lowerOpen=*/ !0);
return _r(t).W("collectionPathOverlayIndex", r).next((t => {
for (const e of t) {
const t = ur(this.yt, e);
s.set(t.getKey(), t);
}
return s;
}));
}
getOverlaysForCollectionGroup(t, e, n, s) {
const i = ps();
let r;
// We want batch IDs larger than `sinceBatchId`, and so the lower bound
// is not inclusive.
const o = IDBKeyRange.bound([ this.userId, e, n ], [ this.userId, e, Number.POSITIVE_INFINITY ],
/*lowerOpen=*/ !0);
return _r(t).Z({
index: "collectionGroupOverlayIndex",
range: o
}, ((t, e, n) => {
// We do not want to return partial batch overlays, even if the size
// of the result set exceeds the given `count` argument. Therefore, we
// continue to aggregate results even after the result size exceeds
// `count` if there are more overlays from the `currentBatchId`.
const o = ur(this.yt, e);
i.size() < s || o.largestBatchId === r ? (i.set(o.getKey(), o), r = o.largestBatchId) : n.done();
})).next((() => i));
}
oe(t, e) {
return _r(t).put(function(t, e, n) {
const [s, i, r] = cr(e, n.mutation.key);
return {
userId: e,
collectionPath: i,
documentId: r,
collectionGroup: n.mutation.key.getCollectionGroup(),
largestBatchId: n.largestBatchId,
overlayMutation: ni(t.ie, n.mutation)
};
}(this.yt, this.userId, e));
}
}
/**
* Helper to get a typed SimpleDbStore for the document overlay object store.
*/ function _r(t) {
return ji(t, "documentOverlays");
}
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Note: This code is copied from the backend. Code that is not used by
// Firestore was removed.
/** Firestore index value writer. */
class wr {
constructor() {}
// The write methods below short-circuit writing terminators for values
// containing a (terminating) truncated value.
// As an example, consider the resulting encoding for:
// ["bar", [2, "foo"]] -> (STRING, "bar", TERM, ARRAY, NUMBER, 2, STRING, "foo", TERM, TERM, TERM)
// ["bar", [2, truncated("foo")]] -> (STRING, "bar", TERM, ARRAY, NUMBER, 2, STRING, "foo", TRUNC)
// ["bar", truncated(["foo"])] -> (STRING, "bar", TERM, ARRAY. STRING, "foo", TERM, TRUNC)
/** Writes an index value. */
ue(t, e) {
this.ce(t, e),
// Write separator to split index values
// (see go/firestore-storage-format#encodings).
e.ae();
}
ce(t, e) {
if ("nullValue" in t) this.he(e, 5); else if ("booleanValue" in t) this.he(e, 10),
e.le(t.booleanValue ? 1 : 0); else if ("integerValue" in t) this.he(e, 15), e.le(Jt(t.integerValue)); else if ("doubleValue" in t) {
const n = Jt(t.doubleValue);
isNaN(n) ? this.he(e, 13) : (this.he(e, 15), Kt(n) ?
// -0.0, 0 and 0.0 are all considered the same
e.le(0) : e.le(n));
} else if ("timestampValue" in t) {
const n = t.timestampValue;
this.he(e, 20), "string" == typeof n ? e.fe(n) : (e.fe(`${n.seconds || ""}`), e.le(n.nanos || 0));
} else if ("stringValue" in t) this.de(t.stringValue, e), this._e(e); else if ("bytesValue" in t) this.he(e, 30),
e.we(Yt(t.bytesValue)), this._e(e); else if ("referenceValue" in t) this.me(t.referenceValue, e); else if ("geoPointValue" in t) {
const n = t.geoPointValue;
this.he(e, 45), e.le(n.latitude || 0), e.le(n.longitude || 0);
} else "mapValue" in t ? ge(t) ? this.he(e, Number.MAX_SAFE_INTEGER) : (this.ge(t.mapValue, e),
this._e(e)) : "arrayValue" in t ? (this.ye(t.arrayValue, e), this._e(e)) : M();
}
de(t, e) {
this.he(e, 25), this.pe(t, e);
}
pe(t, e) {
e.fe(t);
}
ge(t, e) {
const n = t.fields || {};
this.he(e, 55);
for (const t of Object.keys(n)) this.de(t, e), this.ce(n[t], e);
}
ye(t, e) {
const n = t.values || [];
this.he(e, 50);
for (const t of n) this.ce(t, e);
}
me(t, e) {
this.he(e, 37);
at.fromName(t).path.forEach((t => {
this.he(e, 60), this.pe(t, e);
}));
}
he(t, e) {
t.le(e);
}
_e(t) {
// While the SDK does not implement truncation, the truncation marker is
// used to terminate all variable length values (which are strings, bytes,
// references, arrays and maps).
t.le(2);
}
}
wr.Ie = new wr;
/**
* Counts the number of leading zeros in a byte.
*
* Visible for testing.
*/
function mr(t) {
if (0 === t) return 8;
let e = 0;
return t >> 4 == 0 && (
// Test if the first four bits are zero.
e += 4, t <<= 4), t >> 6 == 0 && (
// Test if the first two (or next two) bits are zero.
e += 2, t <<= 2), t >> 7 == 0 && (
// Test if the remaining bit is zero.
e += 1), e;
}
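// Illustrative sketch: mr(0b00001111) returns 4 (four leading zero bits),
// mr(255) returns 0, and mr(0) returns 8.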
/** Counts the number of leading zeros in the given byte array. */
/**
* Returns the number of bytes required to store "value". Leading zero bytes
* are skipped.
*/
function gr(t) {
// This is just the number of bytes for the unsigned representation of the number.
const e = 64 - function(t) {
let e = 0;
for (let n = 0; n < 8; ++n) {
const s = mr(255 & t[n]);
if (e += s, 8 !== s) break;
}
return e;
}(t);
return Math.ceil(e / 8);
}
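// Illustrative sketch: for the big-endian bytes [0, 0, 0, 0, 0, 0, 1, 2]
// there are 55 leading zero bits, so gr returns Math.ceil((64 - 55) / 8) === 2.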
/**
* OrderedCodeWriter is a minimal-allocation implementation of the writing
* behavior defined by the backend.
*
* The code is ported from its Java counterpart.
*/ class yr {
constructor() {
this.buffer = new Uint8Array(1024), this.position = 0;
}
Te(t) {
const e = t[Symbol.iterator]();
let n = e.next();
for (;!n.done; ) this.Ee(n.value), n = e.next();
this.Ae();
}
Re(t) {
const e = t[Symbol.iterator]();
let n = e.next();
for (;!n.done; ) this.be(n.value), n = e.next();
this.Pe();
}
/** Writes utf8 bytes into this byte sequence, ascending. */ ve(t) {
for (const e of t) {
const t = e.charCodeAt(0);
if (t < 128) this.Ee(t); else if (t < 2048) this.Ee(960 | t >>> 6), this.Ee(128 | 63 & t); else if (e < "\ud800" || "\udbff" < e) this.Ee(480 | t >>> 12),
this.Ee(128 | 63 & t >>> 6), this.Ee(128 | 63 & t); else {
const t = e.codePointAt(0);
this.Ee(240 | t >>> 18), this.Ee(128 | 63 & t >>> 12), this.Ee(128 | 63 & t >>> 6),
this.Ee(128 | 63 & t);
}
}
this.Ae();
}
/** Writes utf8 bytes into this byte sequence, descending */ Ve(t) {
for (const e of t) {
const t = e.charCodeAt(0);
if (t < 128) this.be(t); else if (t < 2048) this.be(960 | t >>> 6), this.be(128 | 63 & t); else if (e < "\ud800" || "\udbff" < e) this.be(480 | t >>> 12),
this.be(128 | 63 & t >>> 6), this.be(128 | 63 & t); else {
const t = e.codePointAt(0);
this.be(240 | t >>> 18), this.be(128 | 63 & t >>> 12), this.be(128 | 63 & t >>> 6),
this.be(128 | 63 & t);
}
}
this.Pe();
}
Se(t) {
// Values are encoded with a single byte length prefix, followed by the
// actual value in big-endian format with leading 0 bytes dropped.
const e = this.De(t), n = gr(e);
this.Ce(1 + n), this.buffer[this.position++] = 255 & n;
// Write the length
for (let t = e.length - n; t < e.length; ++t) this.buffer[this.position++] = 255 & e[t];
}
xe(t) {
// Values are encoded with a single byte length prefix, followed by the
// inverted value in big-endian format with leading 0 bytes dropped.
const e = this.De(t), n = gr(e);
this.Ce(1 + n), this.buffer[this.position++] = ~(255 & n);
// Write the length
for (let t = e.length - n; t < e.length; ++t) this.buffer[this.position++] = ~(255 & e[t]);
}
/**
* Writes the "infinity" byte sequence that sorts after all other byte
* sequences written in ascending order.
*/ Ne() {
this.ke(255), this.ke(255);
}
/**
* Writes the "infinity" byte sequence that sorts before all other byte
* sequences written in descending order.
*/ Oe() {
this.Me(255), this.Me(255);
}
/**
* Resets the buffer such that it is the same as when it was newly
* constructed.
*/ reset() {
this.position = 0;
}
seed(t) {
this.Ce(t.length), this.buffer.set(t, this.position), this.position += t.length;
}
/** Makes a copy of the encoded bytes in this buffer. */ Fe() {
return this.buffer.slice(0, this.position);
}
/**
* Encodes `val` into an encoding so that the order matches the IEEE 754
* floating-point comparison results with the following exceptions:
* -0.0 < 0.0
* all non-NaN < NaN
* NaN = NaN
*/ De(t) {
const e =
/** Converts a JavaScript number to a byte array (using big endian encoding). */
function(t) {
const e = new DataView(new ArrayBuffer(8));
return e.setFloat64(0, t, /* littleEndian= */ !1), new Uint8Array(e.buffer);
}(t), n = 0 != (128 & e[0]);
// Check if the first bit is set. We use a bit mask since value[0] is
// encoded as a number from 0 to 255.
// Revert the two complement to get natural ordering
e[0] ^= n ? 255 : 128;
for (let t = 1; t < e.length; ++t) e[t] ^= n ? 255 : 0;
return e;
}
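// Illustrative sketch: De flips only the sign bit for non-negative inputs and
// inverts every bit for negative inputs, so an unsigned lexicographic compare
// of the resulting bytes matches numeric order (with the -0.0 and NaN
// exceptions listed above); e.g. -1.0 sorts below 0.0, which sorts below 1.0.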
/** Writes a single byte ascending to the buffer. */ Ee(t) {
const e = 255 & t;
0 === e ? (this.ke(0), this.ke(255)) : 255 === e ? (this.ke(255), this.ke(0)) : this.ke(e);
}
/** Writes a single byte descending to the buffer. */ be(t) {
const e = 255 & t;
0 === e ? (this.Me(0), this.Me(255)) : 255 === e ? (this.Me(255), this.Me(0)) : this.Me(t);
}
Ae() {
this.ke(0), this.ke(1);
}
Pe() {
this.Me(0), this.Me(1);
}
ke(t) {
this.Ce(1), this.buffer[this.position++] = t;
}
Me(t) {
this.Ce(1), this.buffer[this.position++] = ~t;
}
Ce(t) {
const e = t + this.position;
if (e <= this.buffer.length) return;
// Try doubling.
let n = 2 * this.buffer.length;
// Still not big enough? Just allocate the right size.
n < e && (n = e);
// Create the new buffer.
const s = new Uint8Array(n);
s.set(this.buffer), // copy old data
this.buffer = s;
}
}
class pr {
constructor(t) {
this.$e = t;
}
we(t) {
this.$e.Te(t);
}
fe(t) {
this.$e.ve(t);
}
le(t) {
this.$e.Se(t);
}
ae() {
this.$e.Ne();
}
}
class Ir {
constructor(t) {
this.$e = t;
}
we(t) {
this.$e.Re(t);
}
fe(t) {
this.$e.Ve(t);
}
le(t) {
this.$e.xe(t);
}
ae() {
this.$e.Oe();
}
}
/**
* Implements `DirectionalIndexByteEncoder` using `OrderedCodeWriter` for the
* actual encoding.
*/ class Tr {
constructor() {
this.$e = new yr, this.Be = new pr(this.$e), this.Le = new Ir(this.$e);
}
seed(t) {
this.$e.seed(t);
}
qe(t) {
return 0 /* IndexKind.ASCENDING */ === t ? this.Be : this.Le;
}
Fe() {
return this.$e.Fe();
}
reset() {
this.$e.reset();
}
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Represents an index entry saved by the SDK in persisted storage. */ class Er {
constructor(t, e, n, s) {
this.indexId = t, this.documentKey = e, this.arrayValue = n, this.directionalValue = s;
}
/**
* Returns an IndexEntry entry that sorts immediately after the current
* directional value.
*/ Ue() {
const t = this.directionalValue.length, e = 0 === t || 255 === this.directionalValue[t - 1] ? t + 1 : t, n = new Uint8Array(e);
return n.set(this.directionalValue, 0), e !== t ? n.set([ 0 ], this.directionalValue.length) : ++n[n.length - 1],
new Er(this.indexId, this.documentKey, this.arrayValue, n);
}
}
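// Illustrative sketch: Ue yields the immediate successor of an entry's
// directional value: [0, 1, 2] becomes [0, 1, 3], while [0, 1, 255] (or an
// empty value) grows by a trailing 0 byte to [0, 1, 255, 0].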
function Ar(t, e) {
let n = t.indexId - e.indexId;
return 0 !== n ? n : (n = Rr(t.arrayValue, e.arrayValue), 0 !== n ? n : (n = Rr(t.directionalValue, e.directionalValue),
0 !== n ? n : at.comparator(t.documentKey, e.documentKey)));
}
function Rr(t, e) {
for (let n = 0; n < t.length && n < e.length; ++n) {
const s = t[n] - e[n];
if (0 !== s) return s;
}
return t.length - e.length;
}
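// Illustrative sketch: Rr compares byte-wise and falls back to length, so
// Rr([1, 2], [1, 3]) < 0, Rr([1, 2], [1, 2, 0]) < 0, and Rr([9], [9]) === 0.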
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A light query planner for Firestore.
*
* This class matches a `FieldIndex` against a Firestore Query `Target`. It
* determines whether a given index can be used to serve the specified target.
*
* The following table showcases some possible index configurations:
*
* Query | Index
* -----------------------------------------------------------------------------
* where('a', '==', 'a').where('b', '==', 'b') | a ASC, b DESC
* where('a', '==', 'a').where('b', '==', 'b') | a ASC
* where('a', '==', 'a').where('b', '==', 'b') | b DESC
* where('a', '>=', 'a').orderBy('a') | a ASC
* where('a', '>=', 'a').orderBy('a', 'desc') | a DESC
* where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC, b ASC
* where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC
* where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS, b ASCENDING
* where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS
*/ class br {
constructor(t) {
this.collectionId = null != t.collectionGroup ? t.collectionGroup : t.path.lastSegment(),
this.Ke = t.orderBy, this.Ge = [];
for (const e of t.filters) {
const t = e;
t.isInequality() ? this.Qe = t : this.Ge.push(t);
}
}
/**
* Returns whether the index can be used to serve the TargetIndexMatcher's
* target.
*
* An index is considered capable of serving the target when:
* - The target uses all index segments for its filters and orderBy clauses.
* The target can have additional filter and orderBy clauses, but not
* fewer.
* - If an ArrayContains/ArrayContainsAny filter is used, the index must also
* have a corresponding `CONTAINS` segment.
* - All directional index segments can be mapped to the target as a series of
* equality filters, a single inequality filter and a series of orderBy
* clauses.
* - The segments that represent the equality filters may appear out of order.
* - The optional segment for the inequality filter must appear after all
* equality segments.
* - The segments that represent the orderBy clauses of the target must appear
* in order after all equality and inequality segments. Single orderBy
* clauses cannot be skipped, but a continuous orderBy suffix may be
* omitted.
*/ je(t) {
F(t.collectionGroup === this.collectionId);
// If there is an array element, find a matching filter.
const e = lt(t);
if (void 0 !== e && !this.We(e)) return !1;
const n = ft(t);
let s = 0, i = 0;
// Process all equalities first. Equalities can appear out of order.
for (;s < n.length && this.We(n[s]); ++s) ;
// If we already have processed all segments, all segments are used to serve
// the equality filters and we do not need to map any segments to the
// target's inequality and orderBy clauses.
if (s === n.length) return !0;
// If there is an inequality filter, the next segment must match both the
// filter and the first orderBy clause.
if (void 0 !== this.Qe) {
const t = n[s];
if (!this.ze(this.Qe, t) || !this.He(this.Ke[i++], t)) return !1;
++s;
}
// All remaining segments need to represent the prefix of the target's
// orderBy.
for (;s < n.length; ++s) {
const t = n[s];
if (i >= this.Ke.length || !this.He(this.Ke[i++], t)) return !1;
}
return !0;
}
We(t) {
for (const e of this.Ge) if (this.ze(e, t)) return !0;
return !1;
}
ze(t, e) {
if (void 0 === t || !t.field.isEqual(e.fieldPath)) return !1;
const n = "array-contains" /* Operator.ARRAY_CONTAINS */ === t.op || "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */ === t.op;
return 2 /* IndexKind.CONTAINS */ === e.kind === n;
}
He(t, e) {
return !!t.field.isEqual(e.fieldPath) && (0 /* IndexKind.ASCENDING */ === e.kind && "asc" /* Direction.ASCENDING */ === t.dir || 1 /* IndexKind.DESCENDING */ === e.kind && "desc" /* Direction.DESCENDING */ === t.dir);
}
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides utility functions that help with boolean logic transformations needed for handling
* complex filters used in queries.
*/
/**
* The `in` filter is only a syntactic sugar over a disjunction of equalities. For instance: `a in
* [1,2,3]` is in fact `a==1 || a==2 || a==3`. This method expands any `in` filter in the given
* input into a disjunction of equality filters and returns the expanded filter.
*/ function Pr(t) {
var e, n;
if (F(t instanceof Pe || t instanceof ve), t instanceof Pe) {
if (t instanceof qe) {
const s = (null === (n = null === (e = t.value.arrayValue) || void 0 === e ? void 0 : e.values) || void 0 === n ? void 0 : n.map((e => Pe.create(t.field, "==" /* Operator.EQUAL */ , e)))) || [];
return ve.create(s, "or" /* CompositeOperator.OR */);
}
// We have reached other kinds of field filters.
return t;
}
// We have a composite filter.
const s = t.filters.map((t => Pr(t)));
return ve.create(s, t.op);
}
/**
* Given a composite filter, returns the list of terms in its disjunctive normal form.
*
* Each element in the return value is one term of the resulting DNF. For instance: For the
* input: (A || B) && C, the DNF form is: (A && C) || (B && C), and the return value is a list
* with two elements: a composite filter that performs (A && C), and a composite filter that
* performs (B && C).
*
* @param filter the composite filter to calculate DNF transform for.
* @return the terms in the DNF transform.
*/ function vr(t) {
if (0 === t.getFilters().length) return [];
const e = Cr(Pr(t));
return F(Dr(e)), Vr(e) || Sr(e) ? [ e ] : e.getFilters();
}
/** Returns true if the given filter is a single field filter. e.g. (a == 10). */ function Vr(t) {
return t instanceof Pe;
}
/**
* Returns true if the given filter is the conjunction of one or more field filters. e.g. (a == 10
* && b == 20)
*/ function Sr(t) {
return t instanceof ve && De(t);
}
/**
* Returns whether or not the given filter is in disjunctive normal form (DNF).
*
* In boolean logic, a disjunctive normal form (DNF) is a canonical normal form of a logical
* formula consisting of a disjunction of conjunctions; it can also be described as an OR of ANDs.
*
* For more info, visit: https://en.wikipedia.org/wiki/Disjunctive_normal_form
*/ function Dr(t) {
return Vr(t) || Sr(t) ||
/**
* Returns true if the given filter is the disjunction of one or more "flat conjunctions" and
* field filters. e.g. (a == 10) || (b==20 && c==30)
*/
function(t) {
if (t instanceof ve && Se(t)) {
for (const e of t.getFilters()) if (!Vr(e) && !Sr(e)) return !1;
return !0;
}
return !1;
}(t);
}
function Cr(t) {
if (F(t instanceof Pe || t instanceof ve), t instanceof Pe) return t;
if (1 === t.filters.length) return Cr(t.filters[0]);
// Compute DNF for each of the subfilters first
const e = t.filters.map((t => Cr(t)));
let n = ve.create(e, t.op);
return n = kr(n), Dr(n) ? n : (F(n instanceof ve), F(Ve(n)), F(n.filters.length > 1),
n.filters.reduce(((t, e) => xr(t, e))));
}
function xr(t, e) {
let n;
return F(t instanceof Pe || t instanceof ve), F(e instanceof Pe || e instanceof ve),
// FieldFilter FieldFilter
n = t instanceof Pe ? e instanceof Pe ? function(t, e) {
// Conjunction distribution for two field filters is the conjunction of them.
return ve.create([ t, e ], "and" /* CompositeOperator.AND */);
}(t, e) : Nr(t, e) : e instanceof Pe ? Nr(e, t) : function(t, e) {
// There are four cases:
// (A & B) & (C & D) --> (A & B & C & D)
// (A & B) & (C | D) --> (A & B & C) | (A & B & D)
// (A | B) & (C & D) --> (C & D & A) | (C & D & B)
// (A | B) & (C | D) --> (A & C) | (A & D) | (B & C) | (B & D)
// Case 1 is a merge.
if (F(t.filters.length > 0 && e.filters.length > 0), Ve(t) && Ve(e)) return ke(t, e.getFilters());
// Case 2,3,4 all have at least one side (lhs or rhs) that is a disjunction. In all three cases
// we should take each element of the disjunction and distribute it over the other side, and
// return the disjunction of the distribution results.
const n = Se(t) ? t : e, s = Se(t) ? e : t, i = n.filters.map((t => xr(t, s)));
return ve.create(i, "or" /* CompositeOperator.OR */);
}(t, e), kr(n);
}
function Nr(t, e) {
// There are two cases:
// A & (B & C) --> (A & B & C)
// A & (B | C) --> (A & B) | (A & C)
if (Ve(e))
// Case 1
return ke(e, t.getFilters());
{
// Case 2
const n = e.filters.map((e => xr(t, e)));
return ve.create(n, "or" /* CompositeOperator.OR */);
}
}
/**
* Applies the associativity property to the given filter and returns the resulting filter.
*
*
* - A | (B | C) == (A | B) | C == (A | B | C)
* - A & (B & C) == (A & B) & C == (A & B & C)
*
*/ function kr(t) {
if (F(t instanceof Pe || t instanceof ve), t instanceof Pe) return t;
const e = t.getFilters();
// If the composite filter only contains 1 filter, apply associativity to it.
if (1 === e.length) return kr(e[0]);
// Associativity applied to a flat composite filter results is itself.
if (Ce(t)) return t;
// First apply associativity to all subfilters. This will in turn recursively apply
// associativity to all nested composite filters and field filters.
const n = e.map((t => kr(t))), s = [];
// For composite subfilters that perform the same kind of logical operation as `compositeFilter`
// take out their filters and add them to `compositeFilter`. For example:
// compositeFilter = (A | (B | C | D))
// compositeSubfilter = (B | C | D)
// Result: (A | B | C | D)
// Note that the `compositeSubfilter` has been eliminated, and its filters (B, C, D) have been
// added to the top-level "compositeFilter".
return n.forEach((e => {
e instanceof Pe ? s.push(e) : e instanceof ve && (e.op === t.op ?
// compositeFilter: (A | (B | C))
// compositeSubfilter: (B | C)
// Result: (A | B | C)
s.push(...e.filters) :
// compositeFilter: (A | (B & C))
// compositeSubfilter: (B & C)
// Result: (A | (B & C))
s.push(e));
})), 1 === s.length ? s[0] : ve.create(s, t.op);
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An in-memory implementation of IndexManager.
*/ class Or {
constructor() {
this.Je = new Mr;
}
addToCollectionParentIndex(t, e) {
return this.Je.add(e), Rt.resolve();
}
getCollectionParents(t, e) {
return Rt.resolve(this.Je.getEntries(e));
}
addFieldIndex(t, e) {
// Field indices are not supported with memory persistence.
return Rt.resolve();
}
deleteFieldIndex(t, e) {
// Field indices are not supported with memory persistence.
return Rt.resolve();
}
getDocumentsMatchingTarget(t, e) {
// Field indices are not supported with memory persistence.
return Rt.resolve(null);
}
getIndexType(t, e) {
// Field indices are not supported with memory persistence.
return Rt.resolve(0 /* IndexType.NONE */);
}
getFieldIndexes(t, e) {
// Field indices are not supported with memory persistence.
return Rt.resolve([]);
}
getNextCollectionGroupToUpdate(t) {
// Field indices are not supported with memory persistence.
return Rt.resolve(null);
}
getMinOffset(t, e) {
return Rt.resolve(pt.min());
}
getMinOffsetFromCollectionGroup(t, e) {
return Rt.resolve(pt.min());
}
updateCollectionGroup(t, e, n) {
// Field indices are not supported with memory persistence.
return Rt.resolve();
}
updateIndexEntries(t, e) {
// Field indices are not supported with memory persistence.
return Rt.resolve();
}
}
/**
* Internal implementation of the collection-parent index exposed by MemoryIndexManager.
* Also used for in-memory caching by IndexedDbIndexManager and initial index population
* in indexeddb_schema.ts
*/ class Mr {
constructor() {
this.index = {};
}
// Returns false if the entry already existed.
add(t) {
const e = t.lastSegment(), n = t.popLast(), s = this.index[e] || new He(ot.comparator), i = !s.has(n);
return this.index[e] = s.add(n), i;
}
has(t) {
const e = t.lastSegment(), n = t.popLast(), s = this.index[e];
return s && s.has(n);
}
getEntries(t) {
return (this.index[t] || new He(ot.comparator)).toArray();
}
}
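/*
 * Illustrative sketch (not part of the SDK): the shape of the
 * collection-parent index above, using plain strings for paths. The real
 * implementation keys by the last path segment (the collection ID) and keeps
 * the parent paths in an immutable sorted set; this hypothetical version uses
 * a Map of Sets.
 */
class CollectionParentIndex {
    constructor() {
        this.index = new Map(); // collectionId -> Set of parent path strings
    }
    // Returns false if the entry already existed.
    add(collectionPath) {
        const segments = collectionPath.split("/");
        const collectionId = segments.pop();
        const parent = segments.join("/");
        let parents = this.index.get(collectionId);
        if (!parents) this.index.set(collectionId, (parents = new Set()));
        const added = !parents.has(parent);
        parents.add(parent);
        return added;
    }
    getEntries(collectionId) {
        return [ ...(this.index.get(collectionId) || []) ].sort();
    }
}
// new CollectionParentIndex().add("rooms/abc/messages") indexes the parent
// "rooms/abc" under the collection ID "messages".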
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ const Fr = new Uint8Array(0);
/**
* A persisted implementation of IndexManager.
*
* PORTING NOTE: Unlike iOS and Android, the Web SDK does not memoize index
* data as it supports multi-tab access.
*/
class $r {
constructor(t, e) {
this.user = t, this.databaseId = e,
/**
* An in-memory copy of the index entries we've already written since the SDK
* launched. Used to avoid re-writing the same entry repeatedly.
*
* This is *NOT* a complete cache of what's in persistence and so can never be
* used to satisfy reads.
*/
this.Ye = new Mr,
/**
* Maps from a target to its equivalent list of sub-targets. Each sub-target
* contains only one term from the target's disjunctive normal form (DNF).
*/
this.Xe = new ds((t => rn(t)), ((t, e) => on(t, e))), this.uid = t.uid || "";
}
/**
* Adds a new entry to the collection parent index.
*
* Repeated calls for the same collectionPath should be avoided within a
* transaction as IndexedDbIndexManager only caches writes once a transaction
* has been committed.
*/ addToCollectionParentIndex(t, e) {
if (!this.Ye.has(e)) {
const n = e.lastSegment(), s = e.popLast();
t.addOnCommittedListener((() => {
// Add the collection to the in memory cache only if the transaction was
// successfully committed.
this.Ye.add(e);
}));
const i = {
collectionId: n,
parent: yi(s)
};
return Br(t).put(i);
}
return Rt.resolve();
}
getCollectionParents(t, e) {
const n = [], s = IDBKeyRange.bound([ e, "" ], [ nt(e), "" ],
/*lowerOpen=*/ !1,
/*upperOpen=*/ !0);
return Br(t).W(s).next((t => {
for (const s of t) {
// This collectionId guard shouldn't be necessary (and isn't as long
// as we're running in a real browser), but there's a bug in
// indexeddbshim that breaks our range in our tests running in node:
// https://github.com/axemclion/IndexedDBShim/issues/334
if (s.collectionId !== e) break;
n.push(Ti(s.parent));
}
return n;
}));
}
addFieldIndex(t, e) {
// TODO(indexing): Verify that the auto-incrementing index ID works in
// Safari & Firefox.
const n = qr(t), s = function(t) {
return {
indexId: t.indexId,
collectionGroup: t.collectionGroup,
fields: t.fields.map((t => [ t.fieldPath.canonicalString(), t.kind ]))
};
}(e);
delete s.indexId;
// `indexId` is auto-populated by IndexedDb
const i = n.add(s);
if (e.indexState) {
const n = Ur(t);
return i.next((t => {
n.put(ar(t, this.user, e.indexState.sequenceNumber, e.indexState.offset));
}));
}
return i.next();
}
deleteFieldIndex(t, e) {
const n = qr(t), s = Ur(t), i = Lr(t);
return n.delete(e.indexId).next((() => s.delete(IDBKeyRange.bound([ e.indexId ], [ e.indexId + 1 ],
/*lowerOpen=*/ !1,
/*upperOpen=*/ !0)))).next((() => i.delete(IDBKeyRange.bound([ e.indexId ], [ e.indexId + 1 ],
/*lowerOpen=*/ !1,
/*upperOpen=*/ !0))));
}
getDocumentsMatchingTarget(t, e) {
const n = Lr(t);
let s = !0;
const i = new Map;
return Rt.forEach(this.Ze(e), (e => this.tn(t, e).next((t => {
s && (s = !!t), i.set(e, t);
})))).next((() => {
if (s) {
let t = Rs();
const s = [];
return Rt.forEach(i, ((i, r) => {
var o;
x("IndexedDbIndexManager", `Using index ${o = i, `id=${o.indexId}|cg=${o.collectionGroup}|f=${o.fields.map((t => `${t.fieldPath}:${t.kind}`)).join(",")}`} to execute ${rn(e)}`);
const u = function(t, e) {
const n = lt(e);
if (void 0 === n) return null;
for (const e of cn(t, n.fieldPath)) switch (e.op) {
case "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */ :
return e.value.arrayValue.values || [];
case "array-contains" /* Operator.ARRAY_CONTAINS */ :
return [ e.value ];
// Remaining filters are not array filters.
}
return null;
}
/**
* Returns the list of values that are used in != or NOT_IN filters. Returns
* `null` if there are no such filters.
*/ (r, i), c = function(t, e) {
const n = new Map;
for (const s of ft(e)) for (const e of cn(t, s.fieldPath)) switch (e.op) {
case "==" /* Operator.EQUAL */ :
case "in" /* Operator.IN */ :
// Encode equality prefix, which is encoded in the index value before
// the inequality (e.g. `a == 'a' && b != 'b'` is encoded to
// `value != 'ab'`).
n.set(s.fieldPath.canonicalString(), e.value);
break;
case "not-in" /* Operator.NOT_IN */ :
case "!=" /* Operator.NOT_EQUAL */ :
// NotIn/NotEqual is always a suffix. There cannot be any remaining
// segments and hence we can return early here.
return n.set(s.fieldPath.canonicalString(), e.value), Array.from(n.values());
// Remaining filters cannot be used as notIn bounds.
}
return null;
}
/**
* Returns a lower bound of field values that can be used as a starting point to
* scan the index defined by `fieldIndex`. Returns `MIN_VALUE` if no lower bound
* exists.
*/ (r, i), a = function(t, e) {
const n = [];
let s = !0;
// For each segment, retrieve a lower bound if there is a suitable filter or
// startAt.
for (const i of ft(e)) {
const e = 0 /* IndexKind.ASCENDING */ === i.kind ? an(t, i.fieldPath, t.startAt) : hn(t, i.fieldPath, t.startAt);
n.push(e.value), s && (s = e.inclusive);
}
return new Ee(n, s);
}
/**
* Returns an upper bound of field values that can be used as an ending point
* when scanning the index defined by `fieldIndex`. Returns `MAX_VALUE` if no
* upper bound exists.
*/ (r, i), h = function(t, e) {
const n = [];
let s = !0;
// For each segment, retrieve an upper bound if there is a suitable filter or
// endAt.
for (const i of ft(e)) {
const e = 0 /* IndexKind.ASCENDING */ === i.kind ? hn(t, i.fieldPath, t.endAt) : an(t, i.fieldPath, t.endAt);
n.push(e.value), s && (s = e.inclusive);
}
return new Ee(n, s);
}(r, i), l = this.en(i, r, a), f = this.en(i, r, h), d = this.nn(i, r, c), _ = this.sn(i.indexId, u, l, a.inclusive, f, h.inclusive, d);
return Rt.forEach(_, (i => n.J(i, e.limit).next((e => {
e.forEach((e => {
const n = at.fromSegments(e.documentKey);
t.has(n) || (t = t.add(n), s.push(n));
}));
}))));
})).next((() => s));
}
return Rt.resolve(null);
}));
}
Ze(t) {
let e = this.Xe.get(t);
if (e) return e;
if (0 === t.filters.length) e = [ t ]; else {
e = vr(ve.create(t.filters, "and" /* CompositeOperator.AND */)).map((e => sn(t.path, t.collectionGroup, t.orderBy, e.getFilters(), t.limit, t.startAt, t.endAt)));
}
return this.Xe.set(t, e), e;
}
/**
* Constructs a key range query on `DbIndexEntryStore` that unions all
* bounds.
*/ sn(t, e, n, s, i, r, o) {
// The number of total index scans we union together. This is similar to a
// disjunctive normal form, but adapted for array values. We create a single
// index range per value in an ARRAY_CONTAINS or ARRAY_CONTAINS_ANY filter
// combined with the values from the query bounds.
const u = (null != e ? e.length : 1) * Math.max(n.length, i.length), c = u / (null != e ? e.length : 1), a = [];
for (let h = 0; h < u; ++h) {
const u = e ? this.rn(e[h / c]) : Fr, l = this.on(t, u, n[h % c], s), f = this.un(t, u, i[h % c], r), d = o.map((e => this.on(t, u, e,
/* inclusive= */ !0)));
a.push(...this.createRange(l, f, d));
}
return a;
}
/** Generates the lower bound for `arrayValue` and `directionalValue`. */ on(t, e, n, s) {
const i = new Er(t, at.empty(), e, n);
return s ? i : i.Ue();
}
/** Generates the upper bound for `arrayValue` and `directionalValue`. */ un(t, e, n, s) {
const i = new Er(t, at.empty(), e, n);
return s ? i.Ue() : i;
}
tn(t, e) {
const n = new br(e), s = null != e.collectionGroup ? e.collectionGroup : e.path.lastSegment();
return this.getFieldIndexes(t, s).next((t => {
// Return the index with the highest number of segments.
let e = null;
for (const s of t) {
n.je(s) && (!e || s.fields.length > e.fields.length) && (e = s);
}
return e;
}));
}
getIndexType(t, e) {
let n = 2 /* IndexType.FULL */;
const s = this.Ze(e);
return Rt.forEach(s, (e => this.tn(t, e).next((t => {
t ? 0 /* IndexType.NONE */ !== n && t.fields.length < function(t) {
let e = new He(ct.comparator), n = !1;
for (const s of t.filters) for (const t of s.getFlattenedFilters())
// __name__ is not an explicit segment of any index, so we don't need to
// count it.
t.field.isKeyField() || (
// ARRAY_CONTAINS or ARRAY_CONTAINS_ANY filters must be counted separately.
// For instance, it is possible to have an index for "a ARRAY a ASC". Even
// though these are on the same field, they should be counted as two
// separate segments in an index.
"array-contains" /* Operator.ARRAY_CONTAINS */ === t.op || "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */ === t.op ? n = !0 : e = e.add(t.field));
for (const n of t.orderBy)
// __name__ is not an explicit segment of any index, so we don't need to
// count it.
n.field.isKeyField() || (e = e.add(n.field));
return e.size + (n ? 1 : 0);
}(e) && (n = 1 /* IndexType.PARTIAL */) : n = 0 /* IndexType.NONE */;
})))).next((() =>
// OR queries have more than one sub-target (one sub-target per DNF term). We currently consider
// OR queries that have a `limit` to have a partial index. For such queries we perform sorting
// and apply the limit in memory as a post-processing step.
function(t) {
return null !== t.limit;
}(e) && s.length > 1 && 2 /* IndexType.FULL */ === n ? 1 /* IndexType.PARTIAL */ : n));
}
/**
* Returns the byte encoded form of the directional values in the field index.
* Returns `null` if the document does not have all fields specified in the
* index.
*/ cn(t, e) {
const n = new Tr;
for (const s of ft(t)) {
const t = e.data.field(s.fieldPath);
if (null == t) return null;
const i = n.qe(s.kind);
wr.Ie.ue(t, i);
}
return n.Fe();
}
/** Encodes a single value to the ascending index format. */ rn(t) {
const e = new Tr;
return wr.Ie.ue(t, e.qe(0 /* IndexKind.ASCENDING */)), e.Fe();
}
/**
* Returns an encoded form of the document key that sorts based on the key
* ordering of the field index.
*/ an(t, e) {
const n = new Tr;
return wr.Ie.ue(he(this.databaseId, e), n.qe(function(t) {
const e = ft(t);
return 0 === e.length ? 0 /* IndexKind.ASCENDING */ : e[e.length - 1].kind;
}(t))), n.Fe();
}
/**
* Encodes the given field values according to the specification in `target`.
* For IN queries, a list of possible values is returned.
*/ nn(t, e, n) {
if (null === n) return [];
let s = [];
s.push(new Tr);
let i = 0;
for (const r of ft(t)) {
const t = n[i++];
for (const n of s) if (this.hn(e, r.fieldPath) && fe(t)) s = this.ln(s, r, t); else {
const e = n.qe(r.kind);
wr.Ie.ue(t, e);
}
}
return this.fn(s);
}
/**
* Encodes the given bounds according to the specification in `target`. For IN
* queries, a list of possible values is returned.
*/ en(t, e, n) {
return this.nn(t, e, n.position);
}
/** Returns the byte representation for the provided encoders. */ fn(t) {
const e = [];
for (let n = 0; n < t.length; ++n) e[n] = t[n].Fe();
return e;
}
/**
* Creates a separate encoder for each element of an array.
*
* The method appends each value to all existing encoders (e.g. filter("a",
* "==", "a1").filter("b", "in", ["b1", "b2"]) becomes ["a1,b1", "a1,b2"]). A
* list of new encoders is returned.
*/ ln(t, e, n) {
const s = [ ...t ], i = [];
for (const t of n.arrayValue.values || []) for (const n of s) {
const s = new Tr;
s.seed(n.Fe()), wr.Ie.ue(t, s.qe(e.kind)), i.push(s);
}
return i;
}
hn(t, e) {
return !!t.filters.find((t => t instanceof Pe && t.field.isEqual(e) && ("in" /* Operator.IN */ === t.op || "not-in" /* Operator.NOT_IN */ === t.op)));
}
getFieldIndexes(t, e) {
const n = qr(t), s = Ur(t);
return (e ? n.W("collectionGroupIndex", IDBKeyRange.bound(e, e)) : n.W()).next((t => {
const e = [];
return Rt.forEach(t, (t => s.get([ t.indexId, this.uid ]).next((n => {
e.push(function(t, e) {
const n = e ? new mt(e.sequenceNumber, new pt(nr(e.readTime), new at(Ti(e.documentKey)), e.largestBatchId)) : mt.empty(), s = t.fields.map((([t, e]) => new _t(ct.fromServerFormat(t), e)));
return new ht(t.indexId, t.collectionGroup, s, n);
}(t, n));
})))).next((() => e));
}));
}
getNextCollectionGroupToUpdate(t) {
return this.getFieldIndexes(t).next((t => 0 === t.length ? null : (t.sort(((t, e) => {
const n = t.indexState.sequenceNumber - e.indexState.sequenceNumber;
return 0 !== n ? n : tt(t.collectionGroup, e.collectionGroup);
})), t[0].collectionGroup)));
}
updateCollectionGroup(t, e, n) {
const s = qr(t), i = Ur(t);
return this.dn(t).next((t => s.W("collectionGroupIndex", IDBKeyRange.bound(e, e)).next((e => Rt.forEach(e, (e => i.put(ar(e.indexId, this.user, t, n))))))));
}
updateIndexEntries(t, e) {
// Porting Note: `getFieldIndexes()` on Web does not cache index lookups as
// it could be used across different IndexedDB transactions. As any cached
// data might be invalidated by other multi-tab clients, we can only trust
// data within a single IndexedDB transaction. We therefore add a cache
// here.
const n = new Map;
return Rt.forEach(e, ((e, s) => {
const i = n.get(e.collectionGroup);
return (i ? Rt.resolve(i) : this.getFieldIndexes(t, e.collectionGroup)).next((i => (n.set(e.collectionGroup, i),
Rt.forEach(i, (n => this._n(t, e, n).next((e => {
const i = this.wn(s, n);
return e.isEqual(i) ? Rt.resolve() : this.mn(t, s, n, e, i);
})))))));
}));
}
gn(t, e, n, s) {
return Lr(t).put({
indexId: s.indexId,
uid: this.uid,
arrayValue: s.arrayValue,
directionalValue: s.directionalValue,
orderedDocumentKey: this.an(n, e.key),
documentKey: e.key.path.toArray()
});
}
yn(t, e, n, s) {
return Lr(t).delete([ s.indexId, this.uid, s.arrayValue, s.directionalValue, this.an(n, e.key), e.key.path.toArray() ]);
}
_n(t, e, n) {
const s = Lr(t);
let i = new He(Ar);
return s.Z({
index: "documentKeyIndex",
range: IDBKeyRange.only([ n.indexId, this.uid, this.an(n, e) ])
}, ((t, s) => {
i = i.add(new Er(n.indexId, e, s.arrayValue, s.directionalValue));
})).next((() => i));
}
/** Creates the index entries for the given document. */ wn(t, e) {
let n = new He(Ar);
const s = this.cn(e, t);
if (null == s) return n;
const i = lt(e);
if (null != i) {
const r = t.data.field(i.fieldPath);
if (fe(r)) for (const i of r.arrayValue.values || []) n = n.add(new Er(e.indexId, t.key, this.rn(i), s));
} else n = n.add(new Er(e.indexId, t.key, Fr, s));
return n;
}
/**
* Updates the index entries for the provided document by deleting entries
* that are no longer referenced in `newEntries` and adding all newly added
* entries.
*/ mn(t, e, n, s, i) {
x("IndexedDbIndexManager", "Updating index entries for document '%s'", e.key);
const r = [];
return function(t, e, n, s, i) {
const r = t.getIterator(), o = e.getIterator();
let u = Ye(r), c = Ye(o);
// Walk through the two sets at the same time, using the ordering defined by
// `comparator`.
for (;u || c; ) {
let t = !1, e = !1;
if (u && c) {
const s = n(u, c);
s < 0 ?
// The element was removed if the next element in our ordered
// walkthrough is only in `before`.
e = !0 : s > 0 && (
// The element was added if the next element in our ordered walkthrough
// is only in `after`.
t = !0);
} else null != u ? e = !0 : t = !0;
t ? (s(c), c = Ye(o)) : e ? (i(u), u = Ye(r)) : (u = Ye(r), c = Ye(o));
}
}(s, i, Ar, (
/* onAdd= */ s => {
r.push(this.gn(t, e, n, s));
}), (
/* onRemove= */ s => {
r.push(this.yn(t, e, n, s));
})), Rt.waitFor(r);
}
dn(t) {
let e = 1;
return Ur(t).Z({
index: "sequenceNumberIndex",
reverse: !0,
range: IDBKeyRange.upperBound([ this.uid, Number.MAX_SAFE_INTEGER ])
}, ((t, n, s) => {
s.done(), e = n.sequenceNumber + 1;
})).next((() => e));
}
/**
* Returns a new set of IDB ranges that splits the existing range and excludes
* any values that match the `notInValue` from these ranges. As an example,
* `[foo > 2 && foo != 3]` becomes `[foo > 2 && foo < 3, foo > 3]`. (A
* standalone sketch of this splitting follows this class.)
*/ createRange(t, e, n) {
// The notIn values need to be sorted and unique so that we can return a
// sorted set of non-overlapping ranges.
n = n.sort(((t, e) => Ar(t, e))).filter(((t, e, n) => !e || 0 !== Ar(t, n[e - 1])));
const s = [];
s.push(t);
for (const i of n) {
const n = Ar(i, t), r = Ar(i, e);
if (0 === n)
// `notInValue` is the lower bound. We therefore need to raise the bound
// to the next value.
s[0] = t.Ue(); else if (n > 0 && r < 0)
// `notInValue` is in the middle of the range
s.push(i), s.push(i.Ue()); else if (r > 0)
// `notInValue` (and all following values) are out of the range
break;
}
s.push(e);
const i = [];
for (let t = 0; t < s.length; t += 2) {
// If we encounter two bounds that will create an unmatchable key range,
// then we return an empty set of key ranges.
if (this.pn(s[t], s[t + 1])) return [];
const e = [ s[t].indexId, this.uid, s[t].arrayValue, s[t].directionalValue, Fr, [] ], n = [ s[t + 1].indexId, this.uid, s[t + 1].arrayValue, s[t + 1].directionalValue, Fr, [] ];
i.push(IDBKeyRange.bound(e, n));
}
return i;
}
pn(t, e) {
// If lower bound is greater than the upper bound, then the key
// range can never be matched.
return Ar(t, e) > 0;
}
getMinOffsetFromCollectionGroup(t, e) {
return this.getFieldIndexes(t, e).next(Kr);
}
getMinOffset(t, e) {
return Rt.mapArray(this.Ze(e), (e => this.tn(t, e).next((t => t || M())))).next(Kr);
}
}
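/*
 * Illustrative sketch (not part of the SDK): the range splitting performed by
 * `createRange` above, shown on plain numbers instead of byte-encoded index
 * entries. Given an inclusive [lower, upper] range and a list of values to
 * exclude, it returns non-overlapping sub-ranges that skip those values.
 * `EPSILON` stands in for the real code's "advance to the successor value"
 * step on encoded bounds.
 */
function splitRange(lower, upper, notInValues, EPSILON = 1) {
    // The notIn values need to be sorted and unique so that we can return a
    // sorted set of non-overlapping ranges.
    const excluded = [ ...new Set(notInValues) ].sort((a, b) => a - b);
    const bounds = [ lower ];
    for (const value of excluded) {
        if (value === lower) {
            bounds[0] = lower + EPSILON; // raise the lower bound past the value
        } else if (value > lower && value < upper) {
            bounds.push(value - EPSILON, value + EPSILON); // split around it
        } else if (value > upper) {
            break; // this and all following values are out of range
        }
    }
    bounds.push(upper);
    const ranges = [];
    for (let i = 0; i < bounds.length; i += 2) {
        if (bounds[i] > bounds[i + 1]) return []; // unmatchable range
        ranges.push([ bounds[i], bounds[i + 1] ]);
    }
    return ranges;
}
// splitRange(2, 10, [ 3 ]) --> [[2, 2], [4, 10]]  (i.e. foo > 2 && foo != 3)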
/**
* Helper to get a typed SimpleDbStore for the collectionParents
* document store.
*/ function Br(t) {
return ji(t, "collectionParents");
}
/**
* Helper to get a typed SimpleDbStore for the index entry object store.
*/ function Lr(t) {
return ji(t, "indexEntries");
}
/**
* Helper to get a typed SimpleDbStore for the index configuration object store.
*/ function qr(t) {
return ji(t, "indexConfiguration");
}
/**
* Helper to get a typed SimpleDbStore for the index state object store.
*/ function Ur(t) {
return ji(t, "indexState");
}
function Kr(t) {
F(0 !== t.length);
let e = t[0].indexState.offset, n = e.largestBatchId;
for (let s = 1; s < t.length; s++) {
const i = t[s].indexState.offset;
It(i, e) < 0 && (e = i), n < i.largestBatchId && (n = i.largestBatchId);
}
return new pt(e.readTime, e.documentKey, n);
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ const Gr = {
didRun: !1,
sequenceNumbersCollected: 0,
targetsRemoved: 0,
documentsRemoved: 0
};
class Qr {
constructor(
// When we attempt to collect, we will only do so if the cache size is greater than this
// threshold. Passing `COLLECTION_DISABLED` here will cause collection to always be skipped.
t,
// The percentage of sequence numbers that we will attempt to collect
e,
// A cap on the total number of sequence numbers that will be collected. This prevents
// us from collecting a huge number of sequence numbers if the cache has grown very large.
n) {
this.cacheSizeCollectionThreshold = t, this.percentileToCollect = e, this.maximumSequenceNumbersToCollect = n;
}
static withCacheSize(t) {
return new Qr(t, Qr.DEFAULT_COLLECTION_PERCENTILE, Qr.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT);
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Delete a mutation batch and the associated document mutations.
* @returns A PersistencePromise of the document mutations that were removed.
*/
function jr(t, e, n) {
const s = t.store("mutations"), i = t.store("documentMutations"), r = [], o = IDBKeyRange.only(n.batchId);
let u = 0;
const c = s.Z({
range: o
}, ((t, e, n) => (u++, n.delete())));
r.push(c.next((() => {
F(1 === u);
})));
const a = [];
for (const t of n.mutations) {
const s = Ri(e, t.key.path, n.batchId);
r.push(i.delete(s)), a.push(t.key);
}
return Rt.waitFor(r).next((() => a));
}
/**
* Returns an approximate size for the given document.
*/ function Wr(t) {
if (!t) return 0;
let e;
if (t.document) e = t.document; else if (t.unknownDocument) e = t.unknownDocument; else {
if (!t.noDocument) throw M();
e = t.noDocument;
}
return JSON.stringify(e).length;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** A mutation queue for a specific user, backed by IndexedDB. */ Qr.DEFAULT_COLLECTION_PERCENTILE = 10,
Qr.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT = 1e3, Qr.DEFAULT = new Qr(41943040, Qr.DEFAULT_COLLECTION_PERCENTILE, Qr.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT),
Qr.DISABLED = new Qr(-1, 0, 0);
class zr {
constructor(
/**
* The normalized userId (e.g. null UID => "" userId) used to store /
* retrieve mutations.
*/
t, e, n, s) {
this.userId = t, this.yt = e, this.indexManager = n, this.referenceDelegate = s,
/**
* Caches the document keys for pending mutation batches. If the mutation
* has been removed from IndexedDb, the cached value may continue to
* be used to retrieve the batch's document keys. To remove a cached value
* locally, `removeCachedMutationKeys()` should be invoked either directly
* or through `removeMutationBatches()`.
*
* With multi-tab, when the primary client acknowledges or rejects a mutation,
* this cache is used by secondary clients to invalidate the local
* view of the documents that were previously affected by the mutation.
*/
// PORTING NOTE: Multi-tab only.
this.In = {};
}
/**
* Creates a new mutation queue for the given user.
* @param user - The user for which to create a mutation queue.
* @param serializer - The serializer to use when persisting to IndexedDb.
*/ static re(t, e, n, s) {
// TODO(mcg): Figure out what constraints there are on userIDs
// In particular, are there any reserved characters? are empty ids allowed?
// For the moment store these together in the same mutations table assuming
// that empty userIDs aren't allowed.
F("" !== t.uid);
const i = t.isAuthenticated() ? t.uid : "";
return new zr(i, e, n, s);
}
checkEmpty(t) {
let e = !0;
const n = IDBKeyRange.bound([ this.userId, Number.NEGATIVE_INFINITY ], [ this.userId, Number.POSITIVE_INFINITY ]);
return Jr(t).Z({
index: "userMutationsIndex",
range: n
}, ((t, n, s) => {
e = !1, s.done();
})).next((() => e));
}
addMutationBatch(t, e, n, s) {
const i = Yr(t), r = Jr(t);
// The IndexedDb implementation in Chrome (and Firefox) does not handle
// compound indices that include auto-generated keys correctly. To ensure
// that the index entry is added correctly in all browsers, we perform two
// writes: The first write is used to retrieve the next auto-generated Batch
// ID, and the second write populates the index and stores the actual
// mutation batch.
// See: https://bugs.chromium.org/p/chromium/issues/detail?id=701972
// We write an empty object first to obtain the auto-generated key. (A
// standalone sketch of this two-write pattern follows this class.)
// eslint-disable-next-line @typescript-eslint/no-explicit-any
return r.add({}).next((o => {
F("number" == typeof o);
const u = new Wi(o, e, n, s), c = function(t, e, n) {
const s = n.baseMutations.map((e => ni(t.ie, e))), i = n.mutations.map((e => ni(t.ie, e)));
return {
userId: e,
batchId: n.batchId,
localWriteTimeMs: n.localWriteTime.toMillis(),
baseMutations: s,
mutations: i
};
}(this.yt, this.userId, u), a = [];
let h = new He(((t, e) => tt(t.canonicalString(), e.canonicalString())));
for (const t of s) {
const e = Ri(this.userId, t.key.path, o);
h = h.add(t.key.path.popLast()), a.push(r.put(c)), a.push(i.put(e, bi));
}
return h.forEach((e => {
a.push(this.indexManager.addToCollectionParentIndex(t, e));
})), t.addOnCommittedListener((() => {
this.In[o] = u.keys();
})), Rt.waitFor(a).next((() => u));
}));
}
lookupMutationBatch(t, e) {
return Jr(t).get(e).next((t => t ? (F(t.userId === this.userId), sr(this.yt, t)) : null));
}
/**
* Returns the document keys for the mutation batch with the given batchId.
* For primary clients, this method returns `null` after
* `removeMutationBatches()` has been called. Secondary clients return a
* cached result until `removeCachedMutationKeys()` is invoked.
*/
// PORTING NOTE: Multi-tab only.
Tn(t, e) {
return this.In[e] ? Rt.resolve(this.In[e]) : this.lookupMutationBatch(t, e).next((t => {
if (t) {
const n = t.keys();
return this.In[e] = n, n;
}
return null;
}));
}
getNextMutationBatchAfterBatchId(t, e) {
const n = e + 1, s = IDBKeyRange.lowerBound([ this.userId, n ]);
let i = null;
return Jr(t).Z({
index: "userMutationsIndex",
range: s
}, ((t, e, s) => {
e.userId === this.userId && (F(e.batchId >= n), i = sr(this.yt, e)), s.done();
})).next((() => i));
}
getHighestUnacknowledgedBatchId(t) {
const e = IDBKeyRange.upperBound([ this.userId, Number.POSITIVE_INFINITY ]);
let n = -1;
return Jr(t).Z({
index: "userMutationsIndex",
range: e,
reverse: !0
}, ((t, e, s) => {
n = e.batchId, s.done();
})).next((() => n));
}
getAllMutationBatches(t) {
const e = IDBKeyRange.bound([ this.userId, -1 ], [ this.userId, Number.POSITIVE_INFINITY ]);
return Jr(t).W("userMutationsIndex", e).next((t => t.map((t => sr(this.yt, t)))));
}
getAllMutationBatchesAffectingDocumentKey(t, e) {
// Scan the document-mutation index using a prefix that starts with the
// given documentKey.
const n = Ai(this.userId, e.path), s = IDBKeyRange.lowerBound(n), i = [];
return Yr(t).Z({
range: s
}, ((n, s, r) => {
const [o, u, c] = n, a = Ti(u);
// Only consider rows matching exactly the specific key of
// interest. Note that because we order by path first, and we
// order terminators before path separators, we'll encounter all
// the index rows for documentKey contiguously. In particular, all
// the rows for documentKey will occur before any rows for
// documents nested in a subcollection beneath documentKey so we
// can stop as soon as we hit any such row.
if (o === this.userId && e.path.isEqual(a))
// Look up the mutation batch in the store.
return Jr(t).get(c).next((t => {
if (!t) throw M();
F(t.userId === this.userId), i.push(sr(this.yt, t));
}));
r.done();
})).next((() => i));
}
getAllMutationBatchesAffectingDocumentKeys(t, e) {
let n = new He(tt);
const s = [];
return e.forEach((e => {
const i = Ai(this.userId, e.path), r = IDBKeyRange.lowerBound(i), o = Yr(t).Z({
range: r
}, ((t, s, i) => {
const [r, o, u] = t, c = Ti(o);
// Only consider rows matching exactly the specific key of
// interest. Note that because we order by path first, and we
// order terminators before path separators, we'll encounter all
// the index rows for documentKey contiguously. In particular, all
// the rows for documentKey will occur before any rows for
// documents nested in a subcollection beneath documentKey so we
// can stop as soon as we hit any such row.
r === this.userId && e.path.isEqual(c) ? n = n.add(u) : i.done();
}));
s.push(o);
})), Rt.waitFor(s).next((() => this.En(t, n)));
}
getAllMutationBatchesAffectingQuery(t, e) {
const n = e.path, s = n.length + 1, i = Ai(this.userId, n), r = IDBKeyRange.lowerBound(i);
// Collect up unique batchIDs encountered during a scan of the index. Use a
// SortedSet to accumulate batch IDs so they can be traversed in order in a
// scan of the main table.
let o = new He(tt);
return Yr(t).Z({
range: r
}, ((t, e, i) => {
const [r, u, c] = t, a = Ti(u);
r === this.userId && n.isPrefixOf(a) ?
// Rows with document keys more than one segment longer than the
// query path can't be matches. For example, a query on 'rooms'
// can't match the document /rooms/abc/messages/xyx.
// TODO(mcg): we'll need a different scanner when we implement
// ancestor queries.
a.length === s && (o = o.add(c)) : i.done();
})).next((() => this.En(t, o)));
}
En(t, e) {
const n = [], s = [];
// TODO(rockwood): Implement this using iterate.
return e.forEach((e => {
s.push(Jr(t).get(e).next((t => {
if (null === t) throw M();
F(t.userId === this.userId), n.push(sr(this.yt, t));
})));
})), Rt.waitFor(s).next((() => n));
}
removeMutationBatch(t, e) {
return jr(t.se, this.userId, e).next((n => (t.addOnCommittedListener((() => {
this.An(e.batchId);
})), Rt.forEach(n, (e => this.referenceDelegate.markPotentiallyOrphaned(t, e))))));
}
/**
* Clears the cached keys for a mutation batch. This method should be
* called by secondary clients after they process mutation updates.
*
* Note that this method does not have to be called from primary clients as
* the corresponding cache entries are cleared when an acknowledged or
* rejected batch is removed from the mutation queue.
*/
// PORTING NOTE: Multi-tab only
An(t) {
delete this.In[t];
}
performConsistencyCheck(t) {
return this.checkEmpty(t).next((e => {
if (!e) return Rt.resolve();
// Verify that there are no entries in the documentMutations index if
// the queue is empty.
const n = IDBKeyRange.lowerBound([ this.userId ]);
const s = [];
return Yr(t).Z({
range: n
}, ((t, e, n) => {
if (t[0] === this.userId) {
const e = Ti(t[1]);
s.push(e);
} else n.done();
})).next((() => {
F(0 === s.length);
}));
}));
}
containsKey(t, e) {
return Hr(t, this.userId, e);
}
// PORTING NOTE: Multi-tab only (state is held in memory in other clients).
/** Returns the mutation queue's metadata from IndexedDb. */
Rn(t) {
return Xr(t).get(this.userId).next((t => t || {
userId: this.userId,
lastAcknowledgedBatchId: -1,
lastStreamToken: ""
}));
}
}
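/*
 * Illustrative sketch (not part of the SDK): the two-write pattern described
 * in `addMutationBatch` above, using the raw IndexedDB API (browser-only).
 * Assumes a hypothetical store created elsewhere with
 * `db.createObjectStore("batches", { keyPath: "batchId", autoIncrement: true })`.
 */
function addBatchWithTwoWrites(db, batch) {
    return new Promise((resolve, reject) => {
        const txn = db.transaction("batches", "readwrite");
        const store = txn.objectStore("batches");
        // First write: an empty object whose only purpose is to have
        // IndexedDB assign the next auto-generated key.
        const addRequest = store.add({});
        addRequest.onsuccess = () => {
            const batchId = addRequest.result;
            // Second write: store the fully populated record under the same
            // key so that compound indices see every field.
            store.put({ ...batch, batchId });
        };
        txn.oncomplete = () => resolve();
        txn.onerror = () => reject(txn.error);
    });
}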
/**
* @returns true if the mutation queue for the given user contains a pending
* mutation for the given key.
*/ function Hr(t, e, n) {
const s = Ai(e, n.path), i = s[1], r = IDBKeyRange.lowerBound(s);
let o = !1;
return Yr(t).Z({
range: r,
X: !0
}, ((t, n, s) => {
const [r, u, /*batchID*/ c] = t;
r === e && u === i && (o = !0), s.done();
})).next((() => o));
}
/** Returns true if any mutation queue contains the given document. */
/**
* Helper to get a typed SimpleDbStore for the mutations object store.
*/
function Jr(t) {
return ji(t, "mutations");
}
/**
* Helper to get a typed SimpleDbStore for the documentMutations object store.
*/ function Yr(t) {
return ji(t, "documentMutations");
}
/**
* Helper to get a typed SimpleDbStore for the mutationQueues object store.
*/ function Xr(t) {
return ji(t, "mutationQueues");
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Offset to ensure non-overlapping target ids. */
/**
* Generates monotonically increasing target IDs for sending targets to the
* watch stream.
*
* The client constructs two generators, one for the target cache, and one
* for the sync engine (to generate limbo document targets). These
* generators produce non-overlapping IDs (by using even and odd IDs
* respectively).
*
* By separating the target ID space, the query cache can generate target IDs
* that persist across client restarts, while the sync engine can independently
* generate in-memory target IDs that are transient and can be reused after a
* restart.
*/
class Zr {
constructor(t) {
this.bn = t;
}
next() {
return this.bn += 2, this.bn;
}
static Pn() {
// The target cache generator must return '2' in its first call to `next()`
// as there is no differentiation in the protocol layer between an unset
// number and the number '0'. If we were to send a target with target ID
// '0', the backend would consider it unset and replace it with its own ID.
return new Zr(0);
}
static vn() {
// Sync engine assigns target IDs for limbo document detection.
return new Zr(-1);
}
}
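/*
 * Illustrative sketch (not part of the SDK): the even/odd split described in
 * the class comment above. Starting the two generators at 0 and -1 and
 * stepping by 2 yields 2, 4, 6, ... for the target cache and 1, 3, 5, ...
 * for the sync engine, so the two ID spaces can never collide.
 */
class IdGenerator {
    constructor(seed) {
        this.last = seed;
    }
    next() {
        return (this.last += 2);
    }
}
// const forTargetCache = new IdGenerator(0);  // next(): 2, 4, 6, ...
// const forSyncEngine = new IdGenerator(-1);  // next(): 1, 3, 5, ...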
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ class to {
constructor(t, e) {
this.referenceDelegate = t, this.yt = e;
}
// PORTING NOTE: We don't cache global metadata for the target cache, since
// some of it (in particular `highestTargetId`) can be modified by secondary
// tabs. We could perhaps be more granular (and e.g. still cache
// `lastRemoteSnapshotVersion` in memory) but for simplicity we currently go
// to IndexedDb whenever we need to read metadata. We can revisit if it turns
// out to have a meaningful performance impact.
allocateTargetId(t) {
return this.Vn(t).next((e => {
const n = new Zr(e.highestTargetId);
return e.highestTargetId = n.next(), this.Sn(t, e).next((() => e.highestTargetId));
}));
}
getLastRemoteSnapshotVersion(t) {
return this.Vn(t).next((t => it.fromTimestamp(new st(t.lastRemoteSnapshotVersion.seconds, t.lastRemoteSnapshotVersion.nanoseconds))));
}
getHighestSequenceNumber(t) {
return this.Vn(t).next((t => t.highestListenSequenceNumber));
}
setTargetsMetadata(t, e, n) {
return this.Vn(t).next((s => (s.highestListenSequenceNumber = e, n && (s.lastRemoteSnapshotVersion = n.toTimestamp()),
e > s.highestListenSequenceNumber && (s.highestListenSequenceNumber = e), this.Sn(t, s))));
}
addTargetData(t, e) {
return this.Dn(t, e).next((() => this.Vn(t).next((n => (n.targetCount += 1, this.Cn(e, n),
this.Sn(t, n))))));
}
updateTargetData(t, e) {
return this.Dn(t, e);
}
removeTargetData(t, e) {
return this.removeMatchingKeysForTargetId(t, e.targetId).next((() => eo(t).delete(e.targetId))).next((() => this.Vn(t))).next((e => (F(e.targetCount > 0),
e.targetCount -= 1, this.Sn(t, e))));
}
/**
* Drops any targets with sequence number less than or equal to the upper bound, excepting those
* present in `activeTargetIds`. Document associations for the removed targets are also removed.
* Returns the number of targets removed.
*/ removeTargets(t, e, n) {
let s = 0;
const i = [];
return eo(t).Z(((r, o) => {
const u = ir(o);
u.sequenceNumber <= e && null === n.get(u.targetId) && (s++, i.push(this.removeTargetData(t, u)));
})).next((() => Rt.waitFor(i))).next((() => s));
}
/**
* Call provided function with each `TargetData` that we have cached.
*/ forEachTarget(t, e) {
return eo(t).Z(((t, n) => {
const s = ir(n);
e(s);
}));
}
Vn(t) {
return no(t).get("targetGlobalKey").next((t => (F(null !== t), t)));
}
Sn(t, e) {
return no(t).put("targetGlobalKey", e);
}
Dn(t, e) {
return eo(t).put(rr(this.yt, e));
}
/**
* In-place updates the provided metadata to account for values in the given
* TargetData. Saving is done separately. Returns true if there were any
* changes to the metadata.
*/ Cn(t, e) {
let n = !1;
return t.targetId > e.highestTargetId && (e.highestTargetId = t.targetId, n = !0),
t.sequenceNumber > e.highestListenSequenceNumber && (e.highestListenSequenceNumber = t.sequenceNumber,
n = !0), n;
}
getTargetCount(t) {
return this.Vn(t).next((t => t.targetCount));
}
getTargetData(t, e) {
// Iterating by the canonicalId may yield more than one result because
// canonicalId values are not required to be unique per target. This query
// depends on the queryTargets index to be efficient. (A standalone sketch
// of this narrow-then-verify lookup follows this class.)
const n = rn(e), s = IDBKeyRange.bound([ n, Number.NEGATIVE_INFINITY ], [ n, Number.POSITIVE_INFINITY ]);
let i = null;
return eo(t).Z({
range: s,
index: "queryTargetsIndex"
}, ((t, n, s) => {
const r = ir(n);
// After finding a potential match, check that the target is
// actually equal to the requested target.
on(e, r.target) && (i = r, s.done());
})).next((() => i));
}
addMatchingKeys(t, e, n) {
// PORTING NOTE: The reverse index (documentsTargets) is maintained by
// IndexedDb.
const s = [], i = so(t);
return e.forEach((e => {
const r = yi(e.path);
s.push(i.put({
targetId: n,
path: r
})), s.push(this.referenceDelegate.addReference(t, n, e));
})), Rt.waitFor(s);
}
removeMatchingKeys(t, e, n) {
// PORTING NOTE: The reverse index (documentsTargets) is maintained by
// IndexedDb.
const s = so(t);
return Rt.forEach(e, (e => {
const i = yi(e.path);
return Rt.waitFor([ s.delete([ n, i ]), this.referenceDelegate.removeReference(t, n, e) ]);
}));
}
removeMatchingKeysForTargetId(t, e) {
const n = so(t), s = IDBKeyRange.bound([ e ], [ e + 1 ],
/*lowerOpen=*/ !1,
/*upperOpen=*/ !0);
return n.delete(s);
}
getMatchingKeysForTargetId(t, e) {
const n = IDBKeyRange.bound([ e ], [ e + 1 ],
/*lowerOpen=*/ !1,
/*upperOpen=*/ !0), s = so(t);
let i = Rs();
return s.Z({
range: n,
X: !0
}, ((t, e, n) => {
const s = Ti(t[1]), r = new at(s);
i = i.add(r);
})).next((() => i));
}
containsKey(t, e) {
const n = yi(e.path), s = IDBKeyRange.bound([ n ], [ nt(n) ],
/*lowerOpen=*/ !1,
/*upperOpen=*/ !0);
let i = 0;
return so(t).Z({
index: "documentTargetsIndex",
X: !0,
range: s
}, (([t, e], n, s) => {
// Having a sentinel row for a document does not count as containing that document.
// For the target cache, containing the document means the document is part of some
// target.
0 !== t && (i++, s.done());
})).next((() => i > 0));
}
/**
* Looks up a TargetData entry by target ID.
*
* @param targetId - The target ID of the TargetData entry to look up.
* @returns The cached TargetData entry, or null if the cache has no entry for
* the target.
*/
// PORTING NOTE: Multi-tab only.
ne(t, e) {
return eo(t).get(e).next((t => t ? ir(t) : null));
}
}
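/*
 * Illustrative sketch (not part of the SDK): the lookup pattern used by
 * `getTargetData` above. Because a canonical ID is not guaranteed to be
 * unique per target, the index is only used to narrow down candidates; each
 * candidate is then checked for true equality. All names here are
 * hypothetical.
 */
function lookupTarget(byCanonicalId, target, targetsEqual, canonicalId) {
    const candidates = byCanonicalId.get(canonicalId(target)) || [];
    // After finding potential matches, verify actual equality against the
    // requested target.
    return candidates.find((candidate) => targetsEqual(candidate.target, target)) || null;
}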
/**
* Helper to get a typed SimpleDbStore for the targets object store.
*/ function eo(t) {
return ji(t, "targets");
}
/**
* Helper to get a typed SimpleDbStore for the target globals object store.
*/ function no(t) {
return ji(t, "targetGlobal");
}
/**
* Helper to get a typed SimpleDbStore for the document target object store.
*/ function so(t) {
return ji(t, "targetDocuments");
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ function io([t, e], [n, s]) {
const i = tt(t, n);
return 0 === i ? tt(e, s) : i;
}
/**
* Used to calculate the nth sequence number. Keeps a rolling buffer of the
* lowest n values passed to `addElement`, and finally reports the largest of
* them in `maxValue`.
*/ class ro {
constructor(t) {
this.xn = t, this.buffer = new He(io), this.Nn = 0;
}
kn() {
return ++this.Nn;
}
On(t) {
const e = [ t, this.kn() ];
if (this.buffer.size < this.xn) this.buffer = this.buffer.add(e); else {
const t = this.buffer.last();
io(e, t) < 0 && (this.buffer = this.buffer.delete(t).add(e));
}
}
get maxValue() {
// Guaranteed to be non-empty. If we decide we are not collecting any
// sequence numbers, nthSequenceNumber below short-circuits. If we have
// decided that we are collecting n sequence numbers, it's because n is some
// percentage of the existing sequence numbers. That means we should never
// be in a situation where we are collecting sequence numbers but don't
// actually have any.
return this.buffer.last()[0];
}
}
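/*
 * Illustrative sketch (not part of the SDK): the rolling buffer above,
 * specialized to plain numbers. Keeping only the `n` smallest values seen so
 * far means the largest buffered value is the n-th smallest overall, without
 * ever sorting the full input.
 */
class NthSmallest {
    constructor(n) {
        this.n = n;
        this.buffer = [];
    }
    add(value) {
        if (this.buffer.length < this.n) {
            this.buffer.push(value);
            this.buffer.sort((a, b) => a - b);
        } else if (value < this.buffer[this.buffer.length - 1]) {
            // Evict the current largest buffered value to make room.
            this.buffer[this.buffer.length - 1] = value;
            this.buffer.sort((a, b) => a - b);
        }
    }
    get maxValue() {
        return this.buffer[this.buffer.length - 1];
    }
}
// const b = new NthSmallest(3); [ 9, 1, 7, 3, 5 ].forEach(v => b.add(v));
// b.maxValue === 5 (the 3rd-smallest of the inputs)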
/**
* This class is responsible for the scheduling of LRU garbage collection. It handles checking
* whether or not GC is enabled, as well as which delay to use before the next run.
*/ class oo {
constructor(t, e, n) {
this.garbageCollector = t, this.asyncQueue = e, this.localStore = n, this.Mn = null;
}
start() {
-1 !== this.garbageCollector.params.cacheSizeCollectionThreshold && this.Fn(6e4);
}
stop() {
this.Mn && (this.Mn.cancel(), this.Mn = null);
}
get started() {
return null !== this.Mn;
}
Fn(t) {
x("LruGarbageCollector", `Garbage collection scheduled in ${t}ms`), this.Mn = this.asyncQueue.enqueueAfterDelay("lru_garbage_collection" /* TimerId.LruGarbageCollection */ , t, (async () => {
this.Mn = null;
try {
await this.localStore.collectGarbage(this.garbageCollector);
} catch (t) {
St(t) ? x("LruGarbageCollector", "Ignoring IndexedDB error during garbage collection: ", t) : await At(t);
}
await this.Fn(3e5);
}));
}
}
/** Implements the steps for LRU garbage collection. */ class uo {
constructor(t, e) {
this.$n = t, this.params = e;
}
calculateTargetCount(t, e) {
return this.$n.Bn(t).next((t => Math.floor(e / 100 * t)));
}
nthSequenceNumber(t, e) {
if (0 === e) return Rt.resolve(Mt.at);
const n = new ro(e);
return this.$n.forEachTarget(t, (t => n.On(t.sequenceNumber))).next((() => this.$n.Ln(t, (t => n.On(t))))).next((() => n.maxValue));
}
removeTargets(t, e, n) {
return this.$n.removeTargets(t, e, n);
}
removeOrphanedDocuments(t, e) {
return this.$n.removeOrphanedDocuments(t, e);
}
collect(t, e) {
return -1 === this.params.cacheSizeCollectionThreshold ? (x("LruGarbageCollector", "Garbage collection skipped; disabled"),
Rt.resolve(Gr)) : this.getCacheSize(t).next((n => n < this.params.cacheSizeCollectionThreshold ? (x("LruGarbageCollector", `Garbage collection skipped; Cache size ${n} is lower than threshold ${this.params.cacheSizeCollectionThreshold}`),
Gr) : this.qn(t, e)));
}
getCacheSize(t) {
return this.$n.getCacheSize(t);
}
qn(t, e) {
let n, s, i, r, o, c, a;
const h = Date.now();
return this.calculateTargetCount(t, this.params.percentileToCollect).next((e => (
// Cap at the configured max
e > this.params.maximumSequenceNumbersToCollect ? (x("LruGarbageCollector", `Capping sequence numbers to collect down to the maximum of ${this.params.maximumSequenceNumbersToCollect} from ${e}`),
s = this.params.maximumSequenceNumbersToCollect) : s = e, r = Date.now(), this.nthSequenceNumber(t, s)))).next((s => (n = s,
o = Date.now(), this.removeTargets(t, n, e)))).next((e => (i = e, c = Date.now(),
this.removeOrphanedDocuments(t, n)))).next((t => {
if (a = Date.now(), D() <= u.DEBUG) {
x("LruGarbageCollector", `LRU Garbage Collection\n\tCounted targets in ${r - h}ms\n\tDetermined least recently used ${s} in ` + (o - r) + "ms\n" + `\tRemoved ${i} targets in ` + (c - o) + "ms\n" + `\tRemoved ${t} documents in ` + (a - c) + "ms\n" + `Total Duration: ${a - h}ms`);
}
return Rt.resolve({
didRun: !0,
sequenceNumbersCollected: s,
targetsRemoved: i,
documentsRemoved: t
});
}));
}
}
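/*
 * Illustrative sketch (not part of the SDK): the sequence-number budget
 * computed by the collector above. The budget is a percentile of the number
 * of live sequence numbers, capped at the configured maximum.
 */
function sequenceNumbersToCollect(sequenceNumberCount, percentileToCollect, maximumToCollect) {
    const byPercentile = Math.floor((percentileToCollect / 100) * sequenceNumberCount);
    return Math.min(byPercentile, maximumToCollect);
}
// With the defaults (10th percentile, cap of 1000):
// sequenceNumbersToCollect(500, 10, 1000)   --> 50
// sequenceNumbersToCollect(50000, 10, 1000) --> 1000 (capped)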
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Provides LRU functionality for IndexedDB persistence. */
class co {
constructor(t, e) {
this.db = t, this.garbageCollector = function(t, e) {
return new uo(t, e);
}(this, e);
}
Bn(t) {
const e = this.Un(t);
return this.db.getTargetCache().getTargetCount(t).next((t => e.next((e => t + e))));
}
Un(t) {
let e = 0;
return this.Ln(t, (t => {
e++;
})).next((() => e));
}
forEachTarget(t, e) {
return this.db.getTargetCache().forEachTarget(t, e);
}
Ln(t, e) {
return this.Kn(t, ((t, n) => e(n)));
}
addReference(t, e, n) {
return ao(t, n);
}
removeReference(t, e, n) {
return ao(t, n);
}
removeTargets(t, e, n) {
return this.db.getTargetCache().removeTargets(t, e, n);
}
markPotentiallyOrphaned(t, e) {
return ao(t, e);
}
/**
* Returns true if anything would prevent this document from being garbage
* collected, given that the document in question is not present in any
* targets and has a sequence number less than or equal to the upper bound for
* the collection run.
*/ Gn(t, e) {
return function(t, e) {
let n = !1;
return Xr(t).tt((s => Hr(t, s, e).next((t => (t && (n = !0), Rt.resolve(!t)))))).next((() => n));
}(t, e);
}
removeOrphanedDocuments(t, e) {
const n = this.db.getRemoteDocumentCache().newChangeBuffer(), s = [];
let i = 0;
return this.Kn(t, ((r, o) => {
if (o <= e) {
const e = this.Gn(t, r).next((e => {
if (!e)
// Our size accounting requires us to read all documents before
// removing them.
return i++, n.getEntry(t, r).next((() => (n.removeEntry(r, it.min()), so(t).delete([ 0, yi(r.path) ]))));
}));
s.push(e);
}
})).next((() => Rt.waitFor(s))).next((() => n.apply(t))).next((() => i));
}
removeTarget(t, e) {
const n = e.withSequenceNumber(t.currentSequenceNumber);
return this.db.getTargetCache().updateTargetData(t, n);
}
updateLimboDocument(t, e) {
return ao(t, e);
}
/**
* Call provided function for each document in the cache that is 'orphaned'. Orphaned
* means not a part of any target, so the only entry in the target-document index for
* that document will be the sentinel row (targetId 0), which will also have the sequence
* number for the last time the document was accessed.
*/ Kn(t, e) {
const n = so(t);
let s, i = Mt.at;
return n.Z({
index: "documentTargetsIndex"
}, (([t, n], {path: r, sequenceNumber: o}) => {
0 === t ? (
// if nextToReport is valid, report it, this is a new key so the
// last one must not be a member of any targets.
i !== Mt.at && e(new at(Ti(s)), i),
// set nextToReport to be this sequence number. It's the next one we
// might report, if we don't find any targets for this document.
// Note that the sequence number must be defined when the targetId
// is 0.
i = o, s = r) :
// set nextToReport to be invalid, we know we don't need to report
// this one since we found a target for it.
i = Mt.at;
})).next((() => {
// Since we report sequence numbers after getting to the next key, we
// need to check if the last key we iterated over was an orphaned
// document and report it.
i !== Mt.at && e(new at(Ti(s)), i);
}));
}
getCacheSize(t) {
return this.db.getRemoteDocumentCache().getSize(t);
}
}
function ao(t, e) {
return so(t).put(
/**
* @returns A value suitable for writing a sentinel row in the target-document
* store.
*/
function(t, e) {
return {
targetId: 0,
path: yi(t.path),
sequenceNumber: e
};
}(e, t.currentSequenceNumber));
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An in-memory buffer of entries to be written to a RemoteDocumentCache.
* It can be used to batch up a set of changes to be written to the cache, but
* additionally supports reading entries back with the `getEntry()` method,
* falling back to the underlying RemoteDocumentCache if no entry is
* buffered.
*
* Entries added to the cache *must* be read first. This is to facilitate
* calculating the size delta of the pending changes.
*
* PORTING NOTE: This class was implemented then removed from other platforms.
* If byte-counting ends up being needed on the other platforms, consider
* porting this class as part of that implementation work.
*/ class ho {
constructor() {
// A mapping of document key to the new cache entry that should be written.
this.changes = new ds((t => t.toString()), ((t, e) => t.isEqual(e))), this.changesApplied = !1;
}
/**
* Buffers a `RemoteDocumentCache.addEntry()` call.
*
* You can only modify documents that have already been retrieved via
* `getEntry()/getEntries()` (enforced via IndexedDb's `apply()`).
*/ addEntry(t) {
this.assertNotApplied(), this.changes.set(t.key, t);
}
/**
* Buffers a `RemoteDocumentCache.removeEntry()` call.
*
* You can only remove documents that have already been retrieved via
* `getEntry()/getEntries()` (enforced via IndexedDb's `apply()`).
*/ removeEntry(t, e) {
this.assertNotApplied(), this.changes.set(t, en.newInvalidDocument(t).setReadTime(e));
}
/**
* Looks up an entry in the cache. The buffered changes will first be checked,
* and if no buffered change applies, this will forward to
* `RemoteDocumentCache.getEntry()`.
*
* @param transaction - The transaction in which to perform any persistence
* operations.
* @param documentKey - The key of the entry to look up.
* @returns The cached document or an invalid document if we have nothing
* cached.
*/ getEntry(t, e) {
this.assertNotApplied();
const n = this.changes.get(e);
return void 0 !== n ? Rt.resolve(n) : this.getFromCache(t, e);
}
/**
* Looks up several entries in the cache, forwarding to
* `RemoteDocumentCache.getEntry()`.
*
* @param transaction - The transaction in which to perform any persistence
* operations.
* @param documentKeys - The keys of the entries to look up.
* @returns A map of cached documents, indexed by key. If an entry cannot be
* found, the corresponding key will be mapped to an invalid document.
*/ getEntries(t, e) {
return this.getAllFromCache(t, e);
}
/**
* Applies buffered changes to the underlying RemoteDocumentCache, using
* the provided transaction.
*/ apply(t) {
return this.assertNotApplied(), this.changesApplied = !0, this.applyChanges(t);
}
/** Helper to assert this.changes is not null */ assertNotApplied() {}
}
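/*
 * Illustrative sketch (not part of the SDK): the read-through overlay
 * behavior of the change buffer above, using a plain Map over a hypothetical
 * async `cache.get()`. Buffered writes win over the underlying cache until
 * the buffer is applied.
 */
class ChangeBuffer {
    constructor(cache) {
        this.cache = cache;
        this.changes = new Map(); // key -> buffered entry
        this.applied = false;
    }
    set(key, entry) {
        if (this.applied) throw new Error("buffer already applied");
        this.changes.set(key, entry);
    }
    async get(key) {
        // Buffered changes are checked first; fall back to the cache.
        return this.changes.has(key) ? this.changes.get(key) : this.cache.get(key);
    }
}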
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The RemoteDocumentCache for IndexedDb. To construct, invoke
* `newIndexedDbRemoteDocumentCache()`.
*/ class lo {
constructor(t) {
this.yt = t;
}
setIndexManager(t) {
this.indexManager = t;
}
/**
* Adds the supplied entries to the cache.
*
* All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()` to ensure proper accounting of metadata.
*/ addEntry(t, e, n) {
return mo(t).put(n);
}
/**
* Removes a document from the cache.
*
* All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()` to ensure proper accounting of metadata.
*/ removeEntry(t, e, n) {
return mo(t).delete(
/**
* Returns a key that can be used for document lookups via the primary key of
* the DbRemoteDocument object store.
*/
function(t, e) {
const n = t.path.toArray();
return [
/* prefix path */ n.slice(0, n.length - 2),
/* collection id */ n[n.length - 2], tr(e),
/* document id */ n[n.length - 1] ];
}
/**
* Returns a key that can be used for document lookups on the
* `DbRemoteDocumentDocumentCollectionGroupIndex` index.
*/ (e, n));
}
/**
* Updates the current cache size.
*
* Callers to `addEntry()` and `removeEntry()` *must* call this afterwards to update the
* cache's metadata.
*/ updateMetadata(t, e) {
return this.getMetadata(t).next((n => (n.byteSize += e, this.Qn(t, n))));
}
getEntry(t, e) {
let n = en.newInvalidDocument(e);
return mo(t).Z({
index: "documentKeyIndex",
range: IDBKeyRange.only(go(e))
}, ((t, s) => {
n = this.jn(e, s);
})).next((() => n));
}
/**
* Looks up an entry in the cache.
*
* @param documentKey - The key of the entry to look up.
* @returns The cached document entry and its size.
*/ Wn(t, e) {
let n = {
size: 0,
document: en.newInvalidDocument(e)
};
return mo(t).Z({
index: "documentKeyIndex",
range: IDBKeyRange.only(go(e))
}, ((t, s) => {
n = {
document: this.jn(e, s),
size: Wr(s)
};
})).next((() => n));
}
getEntries(t, e) {
let n = ws();
return this.zn(t, e, ((t, e) => {
const s = this.jn(t, e);
n = n.insert(t, s);
})).next((() => n));
}
/**
* Looks up several entries in the cache.
*
* @param documentKeys - The keys of the entries to look up.
* @returns A map of documents indexed by key and a map of sizes indexed by
* key (zero if the document does not exist).
*/ Hn(t, e) {
let n = ws(), s = new je(at.comparator);
return this.zn(t, e, ((t, e) => {
const i = this.jn(t, e);
n = n.insert(t, i), s = s.insert(t, Wr(e));
})).next((() => ({
documents: n,
Jn: s
})));
}
zn(t, e, n) {
if (e.isEmpty()) return Rt.resolve();
let s = new He(po);
e.forEach((t => s = s.add(t)));
const i = IDBKeyRange.bound(go(s.first()), go(s.last())), r = s.getIterator();
let o = r.getNext();
return mo(t).Z({
index: "documentKeyIndex",
range: i
}, ((t, e, s) => {
const i = at.fromSegments([ ...e.prefixPath, e.collectionGroup, e.documentId ]);
// Go through keys not found in cache.
for (;o && po(o, i) < 0; ) n(o, null), o = r.getNext();
o && o.isEqual(i) && (
// Key found in cache.
n(o, e), o = r.hasNext() ? r.getNext() : null),
// Skip to the next key (if there is one).
o ? s.j(go(o)) : s.done();
})).next((() => {
// The rest of the keys are not in the cache. One case where `iterate`
// above won't go through them is when the cache is empty.
for (;o; ) n(o, null), o = r.hasNext() ? r.getNext() : null;
}));
}
getAllFromCollection(t, e, n) {
const s = [ e.popLast().toArray(), e.lastSegment(), tr(n.readTime), n.documentKey.path.isEmpty() ? "" : n.documentKey.path.lastSegment() ], i = [ e.popLast().toArray(), e.lastSegment(), [ Number.MAX_SAFE_INTEGER, Number.MAX_SAFE_INTEGER ], "" ];
return mo(t).W(IDBKeyRange.bound(s, i, !0)).next((t => {
let e = ws();
for (const n of t) {
const t = this.jn(at.fromSegments(n.prefixPath.concat(n.collectionGroup, n.documentId)), n);
e = e.insert(t.key, t);
}
return e;
}));
}
getAllFromCollectionGroup(t, e, n, s) {
let i = ws();
const r = yo(e, n), o = yo(e, pt.max());
return mo(t).Z({
index: "collectionGroupIndex",
range: IDBKeyRange.bound(r, o, !0)
}, ((t, e, n) => {
const r = this.jn(at.fromSegments(e.prefixPath.concat(e.collectionGroup, e.documentId)), e);
i = i.insert(r.key, r), i.size === s && n.done();
})).next((() => i));
}
newChangeBuffer(t) {
return new _o(this, !!t && t.trackRemovals);
}
getSize(t) {
return this.getMetadata(t).next((t => t.byteSize));
}
getMetadata(t) {
return wo(t).get("remoteDocumentGlobalKey").next((t => (F(!!t), t)));
}
Qn(t, e) {
return wo(t).put("remoteDocumentGlobalKey", e);
}
/**
* Decodes `dbRemoteDoc` and returns the document (or an invalid document if
* the document corresponds to the format used for sentinel deletes).
*/ jn(t, e) {
if (e) {
const t = Xi(this.yt, e);
        // A sentinel removal (a NoDocument at version zero) is only meaningful
        // to `getNewDocumentChanges()` and is treated as invalid here.
if (!(t.isNoDocument() && t.version.isEqual(it.min()))) return t;
}
return en.newInvalidDocument(t);
}
}
/** Creates a new IndexedDbRemoteDocumentCache. */ function fo(t) {
return new lo(t);
}
/**
* Handles the details of adding and updating documents in the IndexedDbRemoteDocumentCache.
*
* Unlike the MemoryRemoteDocumentChangeBuffer, the IndexedDb implementation computes the size
* delta for all submitted changes. This avoids having to re-read all documents from IndexedDb
* when we apply the changes.
*/ class _o extends ho {
/**
* @param documentCache - The IndexedDbRemoteDocumentCache to apply the changes to.
* @param trackRemovals - Whether to create sentinel deletes that can be tracked by
* `getNewDocumentChanges()`.
*/
constructor(t, e) {
super(), this.Yn = t, this.trackRemovals = e,
// A map of document sizes and read times prior to applying the changes in
// this buffer.
this.Xn = new ds((t => t.toString()), ((t, e) => t.isEqual(e)));
}
applyChanges(t) {
const e = [];
let n = 0, s = new He(((t, e) => tt(t.canonicalString(), e.canonicalString())));
return this.changes.forEach(((i, r) => {
const o = this.Xn.get(i);
if (e.push(this.Yn.removeEntry(t, i, o.readTime)), r.isValidDocument()) {
const u = Zi(this.Yn.yt, r);
s = s.add(i.path.popLast());
const c = Wr(u);
n += c - o.size, e.push(this.Yn.addEntry(t, i, u));
} else if (n -= o.size, this.trackRemovals) {
// In order to track removals, we store a "sentinel delete" in the
// RemoteDocumentCache. This entry is represented by a NoDocument
// with a version of 0 and ignored by `maybeDecodeDocument()` but
// preserved in `getNewDocumentChanges()`.
const n = Zi(this.Yn.yt, r.convertToNoDocument(it.min()));
e.push(this.Yn.addEntry(t, i, n));
}
})), s.forEach((n => {
e.push(this.Yn.indexManager.addToCollectionParentIndex(t, n));
})), e.push(this.Yn.updateMetadata(t, n)), Rt.waitFor(e);
}
getFromCache(t, e) {
// Record the size of everything we load from the cache so we can compute a delta later.
return this.Yn.Wn(t, e).next((t => (this.Xn.set(e, {
size: t.size,
readTime: t.document.readTime
}), t.document)));
}
getAllFromCache(t, e) {
// Record the size of everything we load from the cache so we can compute
// a delta later.
return this.Yn.Hn(t, e).next((({documents: t, Jn: e}) => (
// Note: `getAllFromCache` returns two maps instead of a single map from
// keys to `DocumentSizeEntry`s. This is to allow returning the
// `MutableDocumentMap` directly, without a conversion.
e.forEach(((e, n) => {
this.Xn.set(e, {
size: n,
readTime: t.get(e).readTime
});
})), t)));
}
}
function wo(t) {
return ji(t, "remoteDocumentGlobal");
}
/**
* Helper to get a typed SimpleDbStore for the remoteDocuments object store.
*/ function mo(t) {
return ji(t, "remoteDocumentsV14");
}
/**
* Returns a key that can be used for document lookups on the
* `DbRemoteDocumentDocumentKeyIndex` index.
*/ function go(t) {
const e = t.path.toArray();
return [
/* prefix path */ e.slice(0, e.length - 2),
/* collection id */ e[e.length - 2],
/* document id */ e[e.length - 1] ];
}
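// Illustrative sketch (not part of the SDK): `go` above flattens a document
// path such as ["rooms", "abc", "messages", "xyz"] into the three-part array
// key used by the document key index, i.e. [["rooms", "abc"], "messages",
// "xyz"]. A hypothetical standalone version over a plain segment array:
function exampleDocumentKeyIndexKey(pathSegments) {
    return [
    /* prefix path */ pathSegments.slice(0, pathSegments.length - 2),
    /* collection id */ pathSegments[pathSegments.length - 2],
    /* document id */ pathSegments[pathSegments.length - 1] ];
}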
/**
 * Returns a key that can be used for document lookups on the
 * `DbRemoteDocumentDocumentCollectionGroupIndex` index.
 */ function yo(t, e) {
const n = e.documentKey.path.toArray();
return [
/* collection id */ t, tr(e.readTime),
/* prefix path */ n.slice(0, n.length - 2),
/* document id */ n.length > 0 ? n[n.length - 1] : "" ];
}
/**
* Comparator that compares document keys according to the primary key sorting
* used by the `DbRemoteDocumentDocument` store (by prefix path, collection id
* and then document ID).
*
* Visible for testing.
*/ function po(t, e) {
const n = t.path.toArray(), s = e.path.toArray();
// The ordering is based on https://chromium.googlesource.com/chromium/blink/+/fe5c21fef94dae71c1c3344775b8d8a7f7e6d9ec/Source/modules/indexeddb/IDBKey.cpp#74
let i = 0;
for (let t = 0; t < n.length - 2 && t < s.length - 2; ++t) if (i = tt(n[t], s[t]),
i) return i;
return i = tt(n.length, s.length), i || (i = tt(n[n.length - 2], s[s.length - 2]),
i || tt(n[n.length - 1], s[s.length - 1]));
}
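// Illustrative sketch (not part of the SDK): `po` above mirrors IndexedDB's
// array-key ordering: prefix-path segments are compared pairwise, then path
// lengths, then the collection id, then the document id. Assuming a
// hypothetical wrapper around a slash-delimited path:
//
//   const k = s => ({ path: { toArray: () => s.split("/") } });
//   po(k("rooms/abc/messages/m1"), k("rooms/abc/messages/m2")); // < 0
//   po(k("rooms/abc/messages/m1"), k("rooms/xyz/messages/m1")); // < 0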
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Schema Version for the Web client:
* 1. Initial version including Mutation Queue, Query Cache, and Remote
* Document Cache
* 2. Used to ensure a targetGlobal object exists and add targetCount to it. No
* longer required because migration 3 unconditionally clears it.
* 3. Dropped and re-created Query Cache to deal with cache corruption related
* to limbo resolution. Addresses
* https://github.com/firebase/firebase-ios-sdk/issues/1548
* 4. Multi-Tab Support.
* 5. Removal of held write acks.
* 6. Create document global for tracking document cache size.
* 7. Ensure every cached document has a sentinel row with a sequence number.
* 8. Add collection-parent index for Collection Group queries.
* 9. Change RemoteDocumentChanges store to be keyed by readTime rather than
* an auto-incrementing ID. This is required for Index-Free queries.
* 10. Rewrite the canonical IDs to the explicit Protobuf-based format.
* 11. Add bundles and named_queries for bundle support.
* 12. Add document overlays.
* 13. Rewrite the keys of the remote document cache to allow for efficient
* document lookup via `getAll()`.
* 14. Add overlays.
* 15. Add indexing support.
*/
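/*
 * Illustrative sketch (not part of the SDK): migrations like the ones listed
 * above are applied from an IndexedDB `onupgradeneeded` handler by gating
 * each step on the old and new schema versions, e.g.:
 *
 *   function exampleUpgrade(db, fromVersion, toVersion) {
 *     if (fromVersion < 1 && toVersion >= 1) {
 *       db.createObjectStore("mutationQueues", { keyPath: "userId" });
 *     }
 *     if (fromVersion < 11 && toVersion >= 11) {
 *       db.createObjectStore("bundles", { keyPath: "bundleId" });
 *     }
 *     // ...one guarded block per schema version...
 *   }
 *
 * The actual upgrade logic lives in the schema converter class further down
 * in this file.
 */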
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents a local view (overlay) of a document, and the fields that are
* locally mutated.
*/
class Io {
constructor(t,
/**
* The fields that are locally mutated by patch mutations.
*
* If the overlayed document is from set or delete mutations, this is `null`.
* If there is no overlay (mutation) for the document, this is an empty `FieldMask`.
*/
e) {
this.overlayedDocument = t, this.mutatedFields = e;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A readonly view of the local state of all documents we're tracking (i.e. we
* have a cached version in remoteDocumentCache or local mutations for the
* document). The view is computed by applying the mutations in the
* MutationQueue to the RemoteDocumentCache.
*/ class To {
constructor(t, e, n, s) {
this.remoteDocumentCache = t, this.mutationQueue = e, this.documentOverlayCache = n,
this.indexManager = s;
}
/**
* Get the local view of the document identified by `key`.
*
* @returns Local view of the document or null if we don't have any cached
* state for it.
*/ getDocument(t, e) {
let n = null;
return this.documentOverlayCache.getOverlay(t, e).next((s => (n = s, this.remoteDocumentCache.getEntry(t, e)))).next((t => (null !== n && Xn(n.mutation, t, Xe.empty(), st.now()),
t)));
}
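    /*
     * Illustrative sketch (not part of the SDK): `getDocument` above reads the
     * base document from the remote document cache and, if a pending overlay
     * exists, applies the overlay mutation in place before returning. Roughly,
     * with hypothetical names:
     *
     *   const overlay = await overlayCache.getOverlay(txn, key);
     *   const doc = await remoteDocumentCache.getEntry(txn, key);
     *   if (overlay !== null) applyMutationToLocalView(overlay.mutation, doc);
     *   return doc;
     */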
/**
* Gets the local view of the documents identified by `keys`.
*
* If we don't have cached state for a document in `keys`, a NoDocument will
* be stored for that key in the resulting set.
*/ getDocuments(t, e) {
return this.remoteDocumentCache.getEntries(t, e).next((e => this.getLocalViewOfDocuments(t, e, Rs()).next((() => e))));
}
/**
* Similar to `getDocuments`, but creates the local view from the given
* `baseDocs` without retrieving documents from the local store.
*
* @param transaction - The transaction this operation is scoped to.
* @param docs - The documents to apply local mutations to get the local views.
* @param existenceStateChanged - The set of document keys whose existence state
     * is changed. This is useful to determine if some document's overlay needs
     * to be recalculated.
*/ getLocalViewOfDocuments(t, e, n = Rs()) {
const s = ps();
return this.populateOverlays(t, s, e).next((() => this.computeViews(t, e, s, n).next((t => {
let e = gs();
return t.forEach(((t, n) => {
e = e.insert(t, n.overlayedDocument);
})), e;
}))));
}
/**
* Gets the overlayed documents for the given document map, which will include
* the local view of those documents and a `FieldMask` indicating which fields
     * are mutated locally, or `null` if the overlay is a Set or Delete mutation.
*/ getOverlayedDocuments(t, e) {
const n = ps();
return this.populateOverlays(t, n, e).next((() => this.computeViews(t, e, n, Rs())));
}
/**
* Fetches the overlays for {@code docs} and adds them to provided overlay map
* if the map does not already contain an entry for the given document key.
*/ populateOverlays(t, e, n) {
const s = [];
return n.forEach((t => {
e.has(t) || s.push(t);
})), this.documentOverlayCache.getOverlays(t, s).next((t => {
t.forEach(((t, n) => {
e.set(t, n);
}));
}));
}
/**
* Computes the local view for the given documents.
*
* @param docs - The documents to compute views for. It also has the base
* version of the documents.
* @param overlays - The overlays that need to be applied to the given base
* version of the documents.
* @param existenceStateChanged - A set of documents whose existence states
* might have changed. This is used to determine if we need to re-calculate
* overlays from mutation queues.
     * @returns A map representing the local documents view.
*/ computeViews(t, e, n, s) {
let i = ws();
const r = Ts(), o = Ts();
return e.forEach(((t, e) => {
const o = n.get(e.key);
// Recalculate an overlay if the document's existence state changed due to
// a remote event *and* the overlay is a PatchMutation. This is because
// document existence state can change if some patch mutation's
// preconditions are met.
// NOTE: we recalculate when `overlay` is undefined as well, because there
// might be a patch mutation whose precondition does not match before the
// change (hence overlay is undefined), but would now match.
s.has(e.key) && (void 0 === o || o.mutation instanceof ns) ? i = i.insert(e.key, e) : void 0 !== o ? (r.set(e.key, o.mutation.getFieldMask()),
Xn(o.mutation, e, o.mutation.getFieldMask(), st.now())) :
// no overlay exists
// Using EMPTY to indicate there is no overlay for the document.
r.set(e.key, Xe.empty());
})), this.recalculateAndSaveOverlays(t, i).next((t => (t.forEach(((t, e) => r.set(t, e))),
e.forEach(((t, e) => {
var n;
return o.set(t, new Io(e, null !== (n = r.get(t)) && void 0 !== n ? n : null));
})), o)));
}
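    /*
     * Illustrative sketch (not part of the SDK): the branching in `computeViews`
     * above can be summarized as follows, with hypothetical names:
     *
     *   for (const doc of docs) {
     *     const overlay = overlays.get(doc.key);
     *     if (existenceStateChanged.has(doc.key) &&
     *         (overlay === undefined || isPatchMutation(overlay.mutation))) {
     *       recalculateFromMutationQueue(doc); // a precondition may now match
     *     } else if (overlay !== undefined) {
     *       applyOverlay(overlay, doc);        // cheap path: reuse saved overlay
     *     } else {
     *       markNoOverlay(doc);                // empty FieldMask: nothing mutated
     *     }
     *   }
     */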
recalculateAndSaveOverlays(t, e) {
const n = Ts();
// A reverse lookup map from batch id to the documents within that batch.
let s = new je(((t, e) => t - e)), i = Rs();
return this.mutationQueue.getAllMutationBatchesAffectingDocumentKeys(t, e).next((t => {
for (const i of t) i.keys().forEach((t => {
const r = e.get(t);
if (null === r) return;
let o = n.get(t) || Xe.empty();
o = i.applyToLocalView(r, o), n.set(t, o);
const u = (s.get(i.batchId) || Rs()).add(t);
s = s.insert(i.batchId, u);
}));
})).next((() => {
const r = [], o = s.getReverseIterator();
// Iterate in descending order of batch IDs, and skip documents that are
// already saved.
for (;o.hasNext(); ) {
const s = o.getNext(), u = s.key, c = s.value, a = Is();
c.forEach((t => {
if (!i.has(t)) {
const s = Jn(e.get(t), n.get(t));
null !== s && a.set(t, s), i = i.add(t);
}
})), r.push(this.documentOverlayCache.saveOverlays(t, u, a));
}
return Rt.waitFor(r);
})).next((() => n));
}
/**
* Recalculates overlays by reading the documents from remote document cache
* first, and saves them after they are calculated.
*/ recalculateAndSaveOverlaysForDocumentKeys(t, e) {
return this.remoteDocumentCache.getEntries(t, e).next((e => this.recalculateAndSaveOverlays(t, e)));
}
/**
* Performs a query against the local view of all documents.
*
* @param transaction - The persistence transaction.
* @param query - The query to match documents against.
* @param offset - Read time and key to start scanning by (exclusive).
*/ getDocumentsMatchingQuery(t, e, n) {
/**
* Returns whether the query matches a single document by path (rather than a
* collection).
*/
return function(t) {
return at.isDocumentKey(t.path) && null === t.collectionGroup && 0 === t.filters.length;
}(e) ? this.getDocumentsMatchingDocumentQuery(t, e.path) : gn(e) ? this.getDocumentsMatchingCollectionGroupQuery(t, e, n) : this.getDocumentsMatchingCollectionQuery(t, e, n);
}
/**
* Given a collection group, returns the next documents that follow the provided offset, along
* with an updated batch ID.
     *
     * The documents returned by this method are ordered by remote version from the provided
     * offset. If there are no more remote documents after the provided offset, documents with
     * mutations in order of batch id from the offset are returned. Since all documents in a batch are
     * returned together, the total number of documents returned can exceed {@code count}.
     *
     * @param transaction - The persistence transaction.
     * @param collectionGroup - The collection group for the documents.
     * @param offset - The offset to index into.
     * @param count - The number of documents to return.
     * @returns A LocalWriteResult with the documents that follow the provided offset and the last processed batch id.
*/ getNextDocuments(t, e, n, s) {
return this.remoteDocumentCache.getAllFromCollectionGroup(t, e, n, s).next((i => {
const r = s - i.size > 0 ? this.documentOverlayCache.getOverlaysForCollectionGroup(t, e, n.largestBatchId, s - i.size) : Rt.resolve(ps());
// The callsite will use the largest batch ID together with the latest read time to create
// a new index offset. Since we only process batch IDs if all remote documents have been read,
// no overlay will increase the overall read time. This is why we only need to special case
// the batch id.
let o = -1, u = i;
return r.next((e => Rt.forEach(e, ((e, n) => (o < n.largestBatchId && (o = n.largestBatchId),
i.get(e) ? Rt.resolve() : this.remoteDocumentCache.getEntry(t, e).next((t => {
u = u.insert(e, t);
}))))).next((() => this.populateOverlays(t, e, i))).next((() => this.computeViews(t, u, e, Rs()))).next((t => ({
batchId: o,
changes: ys(t)
})))));
}));
}
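    /*
     * Illustrative sketch (not part of the SDK): a caller paginating a
     * collection group with `getNextDocuments` would feed the returned batch id,
     * together with the latest read time it has seen, back in as the next
     * offset. In hypothetical promise-style pseudocode:
     *
     *   let offset = initialOffset;
     *   for (;;) {
     *     const { batchId, changes } = await getNextDocuments(txn, group, offset, 100);
     *     if (changes.length === 0) break;
     *     offset = offsetFrom(changes, batchId); // latest read time + batch id
     *   }
     */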
getDocumentsMatchingDocumentQuery(t, e) {
// Just do a simple document lookup.
return this.getDocument(t, new at(e)).next((t => {
let e = gs();
return t.isFoundDocument() && (e = e.insert(t.key, t)), e;
}));
}
getDocumentsMatchingCollectionGroupQuery(t, e, n) {
const s = e.collectionGroup;
let i = gs();
return this.indexManager.getCollectionParents(t, s).next((r => Rt.forEach(r, (r => {
const o = function(t, e) {
return new ln(e,
/*collectionGroup=*/ null, t.explicitOrderBy.slice(), t.filters.slice(), t.limit, t.limitType, t.startAt, t.endAt);
}(e, r.child(s));
return this.getDocumentsMatchingCollectionQuery(t, o, n).next((t => {
t.forEach(((t, e) => {
i = i.insert(t, e);
}));
}));
})).next((() => i))));
}
getDocumentsMatchingCollectionQuery(t, e, n) {
// Query the remote documents and overlay mutations.
let s;
return this.remoteDocumentCache.getAllFromCollection(t, e.path, n).next((i => (s = i,
this.documentOverlayCache.getOverlaysForCollection(t, e.path, n.largestBatchId)))).next((t => {
// As documents might match the query because of their overlay we need to
// include documents for all overlays in the initial document set.
t.forEach(((t, e) => {
const n = e.getKey();
null === s.get(n) && (s = s.insert(n, en.newInvalidDocument(n)));
}));
// Apply the overlays and match against the query.
let n = gs();
return s.forEach(((s, i) => {
const r = t.get(s);
void 0 !== r && Xn(r.mutation, i, Xe.empty(), st.now()),
// Finally, insert the documents that still match the query
bn(e, i) && (n = n.insert(s, i));
})), n;
}));
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ class Eo {
constructor(t) {
this.yt = t, this.Zn = new Map, this.ts = new Map;
}
getBundleMetadata(t, e) {
return Rt.resolve(this.Zn.get(e));
}
saveBundleMetadata(t, e) {
/** Decodes a BundleMetadata proto into a BundleMetadata object. */
var n;
return this.Zn.set(e.id, {
id: (n = e).id,
version: n.version,
createTime: Ks(n.createTime)
}), Rt.resolve();
}
getNamedQuery(t, e) {
return Rt.resolve(this.ts.get(e));
}
saveNamedQuery(t, e) {
return this.ts.set(e.name, function(t) {
return {
name: t.name,
query: or(t.bundledQuery),
readTime: Ks(t.readTime)
};
}(e)), Rt.resolve();
}
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An in-memory implementation of DocumentOverlayCache.
*/ class Ao {
constructor() {
// A map sorted by DocumentKey, whose value is a pair of the largest batch id
// for the overlay and the overlay itself.
this.overlays = new je(at.comparator), this.es = new Map;
}
getOverlay(t, e) {
return Rt.resolve(this.overlays.get(e));
}
getOverlays(t, e) {
const n = ps();
return Rt.forEach(e, (e => this.getOverlay(t, e).next((t => {
null !== t && n.set(e, t);
})))).next((() => n));
}
saveOverlays(t, e, n) {
return n.forEach(((n, s) => {
this.oe(t, e, s);
})), Rt.resolve();
}
removeOverlaysForBatchId(t, e, n) {
const s = this.es.get(n);
return void 0 !== s && (s.forEach((t => this.overlays = this.overlays.remove(t))),
this.es.delete(n)), Rt.resolve();
}
getOverlaysForCollection(t, e, n) {
const s = ps(), i = e.length + 1, r = new at(e.child("")), o = this.overlays.getIteratorFrom(r);
for (;o.hasNext(); ) {
const t = o.getNext().value, r = t.getKey();
if (!e.isPrefixOf(r.path)) break;
// Documents from sub-collections
r.path.length === i && (t.largestBatchId > n && s.set(t.getKey(), t));
}
return Rt.resolve(s);
}
getOverlaysForCollectionGroup(t, e, n, s) {
let i = new je(((t, e) => t - e));
const r = this.overlays.getIterator();
for (;r.hasNext(); ) {
const t = r.getNext().value;
if (t.getKey().getCollectionGroup() === e && t.largestBatchId > n) {
let e = i.get(t.largestBatchId);
null === e && (e = ps(), i = i.insert(t.largestBatchId, e)), e.set(t.getKey(), t);
}
}
const o = ps(), u = i.getIterator();
for (;u.hasNext(); ) {
if (u.getNext().value.forEach(((t, e) => o.set(t, e))), o.size() >= s) break;
}
return Rt.resolve(o);
}
oe(t, e, n) {
// Remove the association of the overlay to its batch id.
const s = this.overlays.get(n.key);
if (null !== s) {
const t = this.es.get(s.largestBatchId).delete(n.key);
this.es.set(s.largestBatchId, t);
}
this.overlays = this.overlays.insert(n.key, new Hi(e, n));
// Create the association of this overlay to the given largestBatchId.
let i = this.es.get(e);
void 0 === i && (i = Rs(), this.es.set(e, i)), this.es.set(e, i.add(n.key));
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A collection of references to a document from some kind of numbered entity
* (either a target ID or batch ID). As references are added to or removed from
 * the set, corresponding events are emitted to a registered garbage collector.
*
* Each reference is represented by a DocumentReference object. Each of them
* contains enough information to uniquely identify the reference. They are all
* stored primarily in a set sorted by key. A document is considered garbage if
 * there are no references in that set (this can be efficiently checked thanks to
* sorting by key).
*
* ReferenceSet also keeps a secondary set that contains references sorted by
* IDs. This one is used to efficiently implement removal of all references by
* some target ID.
*/ class Ro {
constructor() {
// A set of outstanding references to a document sorted by key.
this.ns = new He(bo.ss),
// A set of outstanding references to a document sorted by target id.
this.rs = new He(bo.os);
}
/** Returns true if the reference set contains no references. */ isEmpty() {
return this.ns.isEmpty();
}
/** Adds a reference to the given document key for the given ID. */ addReference(t, e) {
const n = new bo(t, e);
this.ns = this.ns.add(n), this.rs = this.rs.add(n);
}
/** Add references to the given document keys for the given ID. */ us(t, e) {
t.forEach((t => this.addReference(t, e)));
}
/**
* Removes a reference to the given document key for the given
* ID.
*/ removeReference(t, e) {
this.cs(new bo(t, e));
}
hs(t, e) {
t.forEach((t => this.removeReference(t, e)));
}
/**
* Clears all references with a given ID. Calls removeRef() for each key
* removed.
*/ ls(t) {
const e = new at(new ot([])), n = new bo(e, t), s = new bo(e, t + 1), i = [];
return this.rs.forEachInRange([ n, s ], (t => {
this.cs(t), i.push(t.key);
})), i;
}
fs() {
this.ns.forEach((t => this.cs(t)));
}
cs(t) {
this.ns = this.ns.delete(t), this.rs = this.rs.delete(t);
}
ds(t) {
const e = new at(new ot([])), n = new bo(e, t), s = new bo(e, t + 1);
let i = Rs();
return this.rs.forEachInRange([ n, s ], (t => {
i = i.add(t.key);
})), i;
}
containsKey(t) {
const e = new bo(t, 0), n = this.ns.firstAfterOrEqual(e);
return null !== n && t.isEqual(n.key);
}
}
class bo {
constructor(t, e) {
this.key = t, this._s = e;
}
/** Compare by key then by ID */ static ss(t, e) {
return at.comparator(t.key, e.key) || tt(t._s, e._s);
}
/** Compare by ID then by key */ static os(t, e) {
return tt(t._s, e._s) || at.comparator(t.key, e.key);
}
}
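// Illustrative sketch (not part of the SDK): the two comparators above give
// ReferenceSet its two views over the same references. Sorting by (key, id)
// answers "is this document referenced at all?", while sorting by (id, key)
// answers "which documents does target N reference?". A range scan over the
// id-ordered set, as in `ls` above, visits exactly the references of one id:
//
//   const lo = new bo(smallestDocumentKey, targetId);
//   const hi = new bo(smallestDocumentKey, targetId + 1);
//   idOrderedSet.forEachInRange([lo, hi], ref => { ... });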
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ class Po {
constructor(t, e) {
this.indexManager = t, this.referenceDelegate = e,
/**
* The set of all mutations that have been sent but not yet been applied to
* the backend.
*/
this.mutationQueue = [],
/** Next value to use when assigning sequential IDs to each mutation batch. */
this.ws = 1,
/** An ordered mapping between documents and the mutations batch IDs. */
this.gs = new He(bo.ss);
}
checkEmpty(t) {
return Rt.resolve(0 === this.mutationQueue.length);
}
addMutationBatch(t, e, n, s) {
const i = this.ws;
this.ws++, this.mutationQueue.length > 0 && this.mutationQueue[this.mutationQueue.length - 1];
const r = new Wi(i, e, n, s);
this.mutationQueue.push(r);
// Track references by document key and index collection parents.
for (const e of s) this.gs = this.gs.add(new bo(e.key, i)), this.indexManager.addToCollectionParentIndex(t, e.key.path.popLast());
return Rt.resolve(r);
}
lookupMutationBatch(t, e) {
return Rt.resolve(this.ys(e));
}
getNextMutationBatchAfterBatchId(t, e) {
const n = e + 1, s = this.ps(n), i = s < 0 ? 0 : s;
// The requested batchId may still be out of range so normalize it to the
// start of the queue.
return Rt.resolve(this.mutationQueue.length > i ? this.mutationQueue[i] : null);
}
getHighestUnacknowledgedBatchId() {
return Rt.resolve(0 === this.mutationQueue.length ? -1 : this.ws - 1);
}
getAllMutationBatches(t) {
return Rt.resolve(this.mutationQueue.slice());
}
getAllMutationBatchesAffectingDocumentKey(t, e) {
const n = new bo(e, 0), s = new bo(e, Number.POSITIVE_INFINITY), i = [];
return this.gs.forEachInRange([ n, s ], (t => {
const e = this.ys(t._s);
i.push(e);
})), Rt.resolve(i);
}
getAllMutationBatchesAffectingDocumentKeys(t, e) {
let n = new He(tt);
return e.forEach((t => {
const e = new bo(t, 0), s = new bo(t, Number.POSITIVE_INFINITY);
this.gs.forEachInRange([ e, s ], (t => {
n = n.add(t._s);
}));
})), Rt.resolve(this.Is(n));
}
getAllMutationBatchesAffectingQuery(t, e) {
// Use the query path as a prefix for testing if a document matches the
// query.
const n = e.path, s = n.length + 1;
// Construct a document reference for actually scanning the index. Unlike
// the prefix the document key in this reference must have an even number of
        // segments. The empty segment can be used as a suffix of the query path
// because it precedes all other segments in an ordered traversal.
let i = n;
at.isDocumentKey(i) || (i = i.child(""));
const r = new bo(new at(i), 0);
// Find unique batchIDs referenced by all documents potentially matching the
// query.
let o = new He(tt);
return this.gs.forEachWhile((t => {
const e = t.key.path;
return !!n.isPrefixOf(e) && (
// Rows with document keys more than one segment longer than the query
// path can't be matches. For example, a query on 'rooms' can't match
// the document /rooms/abc/messages/xyx.
// TODO(mcg): we'll need a different scanner when we implement
// ancestor queries.
e.length === s && (o = o.add(t._s)), !0);
}), r), Rt.resolve(this.Is(o));
}
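    /*
     * Illustrative sketch (not part of the SDK): the scan above keeps only rows
     * whose key path is exactly one segment longer than the query path, because
     * deeper rows belong to sub-collection documents. For a query on "rooms":
     *
     *   "rooms/abc"              -> 2 segments === 1 + 1, candidate match
     *   "rooms/abc/messages/xyz" -> 4 segments, skipped (sub-collection)
     */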
Is(t) {
// Construct an array of matching batches, sorted by batchID to ensure that
// multiple mutations affecting the same document key are applied in order.
const e = [];
return t.forEach((t => {
const n = this.ys(t);
null !== n && e.push(n);
})), e;
}
removeMutationBatch(t, e) {
F(0 === this.Ts(e.batchId, "removed")), this.mutationQueue.shift();
let n = this.gs;
return Rt.forEach(e.mutations, (s => {
const i = new bo(s.key, e.batchId);
return n = n.delete(i), this.referenceDelegate.markPotentiallyOrphaned(t, s.key);
})).next((() => {
this.gs = n;
}));
}
An(t) {
// No-op since the memory mutation queue does not maintain a separate cache.
}
containsKey(t, e) {
const n = new bo(e, 0), s = this.gs.firstAfterOrEqual(n);
return Rt.resolve(e.isEqual(s && s.key));
}
performConsistencyCheck(t) {
return this.mutationQueue.length, Rt.resolve();
}
/**
* Finds the index of the given batchId in the mutation queue and asserts that
* the resulting index is within the bounds of the queue.
*
* @param batchId - The batchId to search for
* @param action - A description of what the caller is doing, phrased in passive
* form (e.g. "acknowledged" in a routine that acknowledges batches).
*/ Ts(t, e) {
return this.ps(t);
}
/**
* Finds the index of the given batchId in the mutation queue. This operation
* is O(1).
*
* @returns The computed index of the batch with the given batchId, based on
     * the state of the queue. Note this index can be negative if the requested
     * batchId has already been removed from the queue, or past the end of the
     * queue if the batchId is larger than the last added batch.
*/ ps(t) {
if (0 === this.mutationQueue.length)
// As an index this is past the end of the queue
return 0;
// Examine the front of the queue to figure out the difference between the
// batchId and indexes in the array. Note that since the queue is ordered
// by batchId, if the first batch has a larger batchId then the requested
// batchId doesn't exist in the queue.
return t - this.mutationQueue[0].batchId;
}
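    /*
     * Illustrative sketch (not part of the SDK): because batch ids are assigned
     * sequentially and the queue only ever shrinks from the front, locating a
     * batch is a constant-time subtraction. If the queue holds batches with ids
     * [7, 8, 9]:
     *
     *   this.ps(8);  // 8 - 7 === 1, found at index 1
     *   this.ps(5);  // 5 - 7 === -2, negative: already removed
     *   this.ps(12); // 12 - 7 === 5, past the end: not added yet
     */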
/**
     * A version of lookupMutationBatch that doesn't return a promise; this makes
     * other functions that use this code easier to read and more efficient.
*/ ys(t) {
const e = this.ps(t);
if (e < 0 || e >= this.mutationQueue.length) return null;
return this.mutationQueue[e];
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The memory-only RemoteDocumentCache for IndexedDb. To construct, invoke
* `newMemoryRemoteDocumentCache()`.
*/
class vo {
/**
* @param sizer - Used to assess the size of a document. For eager GC, this is
* expected to just return 0 to avoid unnecessarily doing the work of
* calculating the size.
*/
constructor(t) {
this.Es = t,
/** Underlying cache of documents and their read times. */
this.docs = new je(at.comparator),
/** Size of all cached documents. */
this.size = 0;
}
setIndexManager(t) {
this.indexManager = t;
}
/**
* Adds the supplied entry to the cache and updates the cache size as appropriate.
*
* All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()`.
*/ addEntry(t, e) {
const n = e.key, s = this.docs.get(n), i = s ? s.size : 0, r = this.Es(e);
return this.docs = this.docs.insert(n, {
document: e.mutableCopy(),
size: r
}), this.size += r - i, this.indexManager.addToCollectionParentIndex(t, n.path.popLast());
}
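    /*
     * Illustrative sketch (not part of the SDK): `addEntry` above keeps the
     * aggregate cache size consistent by applying a delta instead of recomputing
     * it. If a 120-byte document replaces an 80-byte version of itself:
     *
     *   this.size += newSize - previousSize; // size += 120 - 80
     *
     * Inserts (previousSize of 0), updates and, in `removeEntry`, deletions all
     * reduce to a single addition against the running total.
     */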
/**
* Removes the specified entry from the cache and updates the cache size as appropriate.
*
* All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()`.
*/ removeEntry(t) {
const e = this.docs.get(t);
e && (this.docs = this.docs.remove(t), this.size -= e.size);
}
getEntry(t, e) {
const n = this.docs.get(e);
return Rt.resolve(n ? n.document.mutableCopy() : en.newInvalidDocument(e));
}
getEntries(t, e) {
let n = ws();
return e.forEach((t => {
const e = this.docs.get(t);
n = n.insert(t, e ? e.document.mutableCopy() : en.newInvalidDocument(t));
})), Rt.resolve(n);
}
getAllFromCollection(t, e, n) {
let s = ws();
// Documents are ordered by key, so we can use a prefix scan to narrow down
// the documents we need to match the query against.
const i = new at(e.child("")), r = this.docs.getIteratorFrom(i);
for (;r.hasNext(); ) {
const {key: t, value: {document: i}} = r.getNext();
if (!e.isPrefixOf(t.path)) break;
t.path.length > e.length + 1 || (It(yt(i), n) <= 0 || (s = s.insert(i.key, i.mutableCopy())));
}
return Rt.resolve(s);
}
getAllFromCollectionGroup(t, e, n, s) {
// This method should only be called from the IndexBackfiller if persistence
// is enabled.
M();
}
As(t, e) {
return Rt.forEach(this.docs, (t => e(t)));
}
newChangeBuffer(t) {
        // `trackRemovals` is ignored since the MemoryRemoteDocumentCache keeps
// a separate changelog and does not need special handling for removals.
return new Vo(this);
}
getSize(t) {
return Rt.resolve(this.size);
}
}
/**
* Creates a new memory-only RemoteDocumentCache.
*
* @param sizer - Used to assess the size of a document. For eager GC, this is
* expected to just return 0 to avoid unnecessarily doing the work of
* calculating the size.
*/
/**
* Handles the details of adding and updating documents in the MemoryRemoteDocumentCache.
*/
class Vo extends ho {
constructor(t) {
super(), this.Yn = t;
}
applyChanges(t) {
const e = [];
return this.changes.forEach(((n, s) => {
s.isValidDocument() ? e.push(this.Yn.addEntry(t, s)) : this.Yn.removeEntry(n);
})), Rt.waitFor(e);
}
getFromCache(t, e) {
return this.Yn.getEntry(t, e);
}
getAllFromCache(t, e) {
return this.Yn.getEntries(t, e);
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ class So {
constructor(t) {
this.persistence = t,
/**
* Maps a target to the data about that target
*/
this.Rs = new ds((t => rn(t)), on),
/** The last received snapshot version. */
this.lastRemoteSnapshotVersion = it.min(),
/** The highest numbered target ID encountered. */
this.highestTargetId = 0,
/** The highest sequence number encountered. */
this.bs = 0,
/**
        * An ordered bidirectional mapping between documents and the remote target
* IDs.
*/
this.Ps = new Ro, this.targetCount = 0, this.vs = Zr.Pn();
}
forEachTarget(t, e) {
return this.Rs.forEach(((t, n) => e(n))), Rt.resolve();
}
getLastRemoteSnapshotVersion(t) {
return Rt.resolve(this.lastRemoteSnapshotVersion);
}
getHighestSequenceNumber(t) {
return Rt.resolve(this.bs);
}
allocateTargetId(t) {
return this.highestTargetId = this.vs.next(), Rt.resolve(this.highestTargetId);
}
setTargetsMetadata(t, e, n) {
return n && (this.lastRemoteSnapshotVersion = n), e > this.bs && (this.bs = e),
Rt.resolve();
}
Dn(t) {
this.Rs.set(t.target, t);
const e = t.targetId;
e > this.highestTargetId && (this.vs = new Zr(e), this.highestTargetId = e), t.sequenceNumber > this.bs && (this.bs = t.sequenceNumber);
}
addTargetData(t, e) {
return this.Dn(e), this.targetCount += 1, Rt.resolve();
}
updateTargetData(t, e) {
return this.Dn(e), Rt.resolve();
}
removeTargetData(t, e) {
return this.Rs.delete(e.target), this.Ps.ls(e.targetId), this.targetCount -= 1,
Rt.resolve();
}
removeTargets(t, e, n) {
let s = 0;
const i = [];
return this.Rs.forEach(((r, o) => {
o.sequenceNumber <= e && null === n.get(o.targetId) && (this.Rs.delete(r), i.push(this.removeMatchingKeysForTargetId(t, o.targetId)),
s++);
})), Rt.waitFor(i).next((() => s));
}
getTargetCount(t) {
return Rt.resolve(this.targetCount);
}
getTargetData(t, e) {
const n = this.Rs.get(e) || null;
return Rt.resolve(n);
}
addMatchingKeys(t, e, n) {
return this.Ps.us(e, n), Rt.resolve();
}
removeMatchingKeys(t, e, n) {
this.Ps.hs(e, n);
const s = this.persistence.referenceDelegate, i = [];
return s && e.forEach((e => {
i.push(s.markPotentiallyOrphaned(t, e));
})), Rt.waitFor(i);
}
removeMatchingKeysForTargetId(t, e) {
return this.Ps.ls(e), Rt.resolve();
}
getMatchingKeysForTargetId(t, e) {
const n = this.Ps.ds(e);
return Rt.resolve(n);
}
containsKey(t, e) {
return Rt.resolve(this.Ps.containsKey(e));
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A memory-backed instance of Persistence. Data is stored only in RAM and
* not persisted across sessions.
*/
class Do {
/**
* The constructor accepts a factory for creating a reference delegate. This
* allows both the delegate and this instance to have strong references to
* each other without having nullable fields that would then need to be
* checked or asserted on every access.
*/
constructor(t, e) {
this.Vs = {}, this.overlays = {}, this.Ss = new Mt(0), this.Ds = !1, this.Ds = !0,
this.referenceDelegate = t(this), this.Cs = new So(this);
this.indexManager = new Or, this.remoteDocumentCache = function(t) {
return new vo(t);
}((t => this.referenceDelegate.xs(t))), this.yt = new Yi(e), this.Ns = new Eo(this.yt);
}
start() {
return Promise.resolve();
}
shutdown() {
// No durable state to ensure is closed on shutdown.
return this.Ds = !1, Promise.resolve();
}
get started() {
return this.Ds;
}
setDatabaseDeletedListener() {
// No op.
}
setNetworkEnabled() {
// No op.
}
getIndexManager(t) {
// We do not currently support indices for memory persistence, so we can
// return the same shared instance of the memory index manager.
return this.indexManager;
}
getDocumentOverlayCache(t) {
let e = this.overlays[t.toKey()];
return e || (e = new Ao, this.overlays[t.toKey()] = e), e;
}
getMutationQueue(t, e) {
let n = this.Vs[t.toKey()];
return n || (n = new Po(e, this.referenceDelegate), this.Vs[t.toKey()] = n), n;
}
getTargetCache() {
return this.Cs;
}
getRemoteDocumentCache() {
return this.remoteDocumentCache;
}
getBundleCache() {
return this.Ns;
}
runTransaction(t, e, n) {
x("MemoryPersistence", "Starting transaction:", t);
const s = new Co(this.Ss.next());
return this.referenceDelegate.ks(), n(s).next((t => this.referenceDelegate.Os(s).next((() => t)))).toPromise().then((t => (s.raiseOnCommittedEvent(),
t)));
}
Ms(t, e) {
return Rt.or(Object.values(this.Vs).map((n => () => n.containsKey(t, e))));
}
}
/**
* Memory persistence is not actually transactional, but future implementations
* may have transaction-scoped state.
*/ class Co extends Et {
constructor(t) {
super(), this.currentSequenceNumber = t;
}
}
class xo {
constructor(t) {
this.persistence = t,
/** Tracks all documents that are active in Query views. */
this.Fs = new Ro,
/** The list of documents that are potentially GCed after each transaction. */
this.$s = null;
}
static Bs(t) {
return new xo(t);
}
get Ls() {
if (this.$s) return this.$s;
throw M();
}
addReference(t, e, n) {
return this.Fs.addReference(n, e), this.Ls.delete(n.toString()), Rt.resolve();
}
removeReference(t, e, n) {
return this.Fs.removeReference(n, e), this.Ls.add(n.toString()), Rt.resolve();
}
markPotentiallyOrphaned(t, e) {
return this.Ls.add(e.toString()), Rt.resolve();
}
removeTarget(t, e) {
this.Fs.ls(e.targetId).forEach((t => this.Ls.add(t.toString())));
const n = this.persistence.getTargetCache();
return n.getMatchingKeysForTargetId(t, e.targetId).next((t => {
t.forEach((t => this.Ls.add(t.toString())));
})).next((() => n.removeTargetData(t, e)));
}
ks() {
this.$s = new Set;
}
Os(t) {
// Remove newly orphaned documents.
const e = this.persistence.getRemoteDocumentCache().newChangeBuffer();
return Rt.forEach(this.Ls, (n => {
const s = at.fromPath(n);
return this.qs(t, s).next((t => {
t || e.removeEntry(s, it.min());
}));
})).next((() => (this.$s = null, e.apply(t))));
}
updateLimboDocument(t, e) {
return this.qs(t, e).next((t => {
t ? this.Ls.delete(e.toString()) : this.Ls.add(e.toString());
}));
}
xs(t) {
// For eager GC, we don't care about the document size, there are no size thresholds.
return 0;
}
qs(t, e) {
return Rt.or([ () => Rt.resolve(this.Fs.containsKey(e)), () => this.persistence.getTargetCache().containsKey(t, e), () => this.persistence.Ms(t, e) ]);
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Performs database creation and schema upgrades. */ class No {
constructor(t) {
this.yt = t;
}
/**
* Performs database creation and schema upgrades.
*
* Note that in production, this method is only ever used to upgrade the schema
* to SCHEMA_VERSION. Different values of toVersion are only used for testing
* and local feature development.
*/ $(t, e, n, s) {
const i = new bt("createOrUpgrade", e);
n < 1 && s >= 1 && (function(t) {
t.createObjectStore("owner");
}(t), function(t) {
t.createObjectStore("mutationQueues", {
keyPath: "userId"
});
t.createObjectStore("mutations", {
keyPath: "batchId",
autoIncrement: !0
}).createIndex("userMutationsIndex", Ei, {
unique: !0
}), t.createObjectStore("documentMutations");
}
/**
* Upgrade function to migrate the 'mutations' store from V1 to V3. Loads
* and rewrites all data.
*/ (t), ko(t), function(t) {
t.createObjectStore("remoteDocuments");
}(t));
// Migration 2 to populate the targetGlobal object no longer needed since
// migration 3 unconditionally clears it.
let r = Rt.resolve();
return n < 3 && s >= 3 && (
// Brand new clients don't need to drop and recreate--only clients that
// potentially have corrupt data.
0 !== n && (!function(t) {
t.deleteObjectStore("targetDocuments"), t.deleteObjectStore("targets"), t.deleteObjectStore("targetGlobal");
}(t), ko(t)), r = r.next((() =>
/**
* Creates the target global singleton row.
*
* @param txn - The version upgrade transaction for indexeddb
*/
function(t) {
const e = t.store("targetGlobal"), n = {
highestTargetId: 0,
highestListenSequenceNumber: 0,
lastRemoteSnapshotVersion: it.min().toTimestamp(),
targetCount: 0
};
return e.put("targetGlobalKey", n);
}(i)))), n < 4 && s >= 4 && (0 !== n && (
// Schema version 3 uses auto-generated keys to generate globally unique
// mutation batch IDs (this was previously ensured internally by the
// client). To migrate to the new schema, we have to read all mutations
// and write them back out. We preserve the existing batch IDs to guarantee
// consistency with other object stores. Any further mutation batch IDs will
// be auto-generated.
r = r.next((() => function(t, e) {
return e.store("mutations").W().next((n => {
t.deleteObjectStore("mutations");
t.createObjectStore("mutations", {
keyPath: "batchId",
autoIncrement: !0
}).createIndex("userMutationsIndex", Ei, {
unique: !0
});
const s = e.store("mutations"), i = n.map((t => s.put(t)));
return Rt.waitFor(i);
}));
}(t, i)))), r = r.next((() => {
!function(t) {
t.createObjectStore("clientMetadata", {
keyPath: "clientId"
});
}(t);
}))), n < 5 && s >= 5 && (r = r.next((() => this.Us(i)))), n < 6 && s >= 6 && (r = r.next((() => (function(t) {
t.createObjectStore("remoteDocumentGlobal");
}(t), this.Ks(i))))), n < 7 && s >= 7 && (r = r.next((() => this.Gs(i)))), n < 8 && s >= 8 && (r = r.next((() => this.Qs(t, i)))),
n < 9 && s >= 9 && (r = r.next((() => {
// Multi-Tab used to manage its own changelog, but this has been moved
// to the DbRemoteDocument object store itself. Since the previous change
// log only contained transient data, we can drop its object store.
!function(t) {
t.objectStoreNames.contains("remoteDocumentChanges") && t.deleteObjectStore("remoteDocumentChanges");
}(t);
// Note: Schema version 9 used to create a read time index for the
// RemoteDocumentCache. This is now done with schema version 13.
}))), n < 10 && s >= 10 && (r = r.next((() => this.js(i)))), n < 11 && s >= 11 && (r = r.next((() => {
!function(t) {
t.createObjectStore("bundles", {
keyPath: "bundleId"
});
}(t), function(t) {
t.createObjectStore("namedQueries", {
keyPath: "name"
});
}(t);
}))), n < 12 && s >= 12 && (r = r.next((() => {
!function(t) {
const e = t.createObjectStore("documentOverlays", {
keyPath: Fi
});
e.createIndex("collectionPathOverlayIndex", $i, {
unique: !1
}), e.createIndex("collectionGroupOverlayIndex", Bi, {
unique: !1
});
}(t);
}))), n < 13 && s >= 13 && (r = r.next((() => function(t) {
const e = t.createObjectStore("remoteDocumentsV14", {
keyPath: Pi
});
e.createIndex("documentKeyIndex", vi), e.createIndex("collectionGroupIndex", Vi);
}(t))).next((() => this.Ws(t, i))).next((() => t.deleteObjectStore("remoteDocuments")))),
n < 14 && s >= 14 && (r = r.next((() => this.zs(t, i)))), n < 15 && s >= 15 && (r = r.next((() => function(t) {
t.createObjectStore("indexConfiguration", {
keyPath: "indexId",
autoIncrement: !0
}).createIndex("collectionGroupIndex", "collectionGroup", {
unique: !1
});
t.createObjectStore("indexState", {
keyPath: Ni
}).createIndex("sequenceNumberIndex", ki, {
unique: !1
});
t.createObjectStore("indexEntries", {
keyPath: Oi
}).createIndex("documentKeyIndex", Mi, {
unique: !1
});
}(t)))), r;
}
Ks(t) {
let e = 0;
return t.store("remoteDocuments").Z(((t, n) => {
e += Wr(n);
})).next((() => {
const n = {
byteSize: e
};
return t.store("remoteDocumentGlobal").put("remoteDocumentGlobalKey", n);
}));
}
Us(t) {
const e = t.store("mutationQueues"), n = t.store("mutations");
return e.W().next((e => Rt.forEach(e, (e => {
const s = IDBKeyRange.bound([ e.userId, -1 ], [ e.userId, e.lastAcknowledgedBatchId ]);
return n.W("userMutationsIndex", s).next((n => Rt.forEach(n, (n => {
F(n.userId === e.userId);
const s = sr(this.yt, n);
return jr(t, e.userId, s).next((() => {}));
}))));
}))));
}
/**
* Ensures that every document in the remote document cache has a corresponding sentinel row
* with a sequence number. Missing rows are given the most recently used sequence number.
*/ Gs(t) {
const e = t.store("targetDocuments"), n = t.store("remoteDocuments");
return t.store("targetGlobal").get("targetGlobalKey").next((t => {
const s = [];
return n.Z(((n, i) => {
const r = new ot(n), o = function(t) {
return [ 0, yi(t) ];
}(r);
s.push(e.get(o).next((n => n ? Rt.resolve() : (n => e.put({
targetId: 0,
path: yi(n),
sequenceNumber: t.highestListenSequenceNumber
}))(r))));
})).next((() => Rt.waitFor(s)));
}));
}
Qs(t, e) {
// Create the index.
t.createObjectStore("collectionParents", {
keyPath: xi
});
const n = e.store("collectionParents"), s = new Mr, i = t => {
if (s.add(t)) {
const e = t.lastSegment(), s = t.popLast();
return n.put({
collectionId: e,
parent: yi(s)
});
}
};
// Helper to add an index entry iff we haven't already written it.
// Index existing remote documents.
return e.store("remoteDocuments").Z({
X: !0
}, ((t, e) => {
const n = new ot(t);
return i(n.popLast());
})).next((() => e.store("documentMutations").Z({
X: !0
}, (([t, e, n], s) => {
const r = Ti(e);
return i(r.popLast());
}))));
}
js(t) {
const e = t.store("targets");
return e.Z(((t, n) => {
const s = ir(n), i = rr(this.yt, s);
return e.put(i);
}));
}
Ws(t, e) {
const n = e.store("remoteDocuments"), s = [];
return n.Z(((t, n) => {
const i = e.store("remoteDocumentsV14"), r = (o = n, o.document ? new at(ot.fromString(o.document.name).popFirst(5)) : o.noDocument ? at.fromSegments(o.noDocument.path) : o.unknownDocument ? at.fromSegments(o.unknownDocument.path) : M()).path.toArray();
var o;
            const u = {
prefixPath: r.slice(0, r.length - 2),
collectionGroup: r[r.length - 2],
documentId: r[r.length - 1],
readTime: n.readTime || [ 0, 0 ],
unknownDocument: n.unknownDocument,
noDocument: n.noDocument,
document: n.document,
hasCommittedMutations: !!n.hasCommittedMutations
};
s.push(i.put(u));
})).next((() => Rt.waitFor(s)));
}
zs(t, e) {
const n = e.store("mutations"), s = fo(this.yt), i = new Do(xo.Bs, this.yt.ie);
return n.W().next((t => {
const n = new Map;
return t.forEach((t => {
var e;
let s = null !== (e = n.get(t.userId)) && void 0 !== e ? e : Rs();
sr(this.yt, t).keys().forEach((t => s = s.add(t))), n.set(t.userId, s);
})), Rt.forEach(n, ((t, n) => {
const r = new v(n), o = dr.re(this.yt, r), u = i.getIndexManager(r), c = zr.re(r, this.yt, u, i.referenceDelegate);
return new To(s, c, o, u).recalculateAndSaveOverlaysForDocumentKeys(new Qi(e, Mt.at), t).next();
}));
}));
}
}
function ko(t) {
t.createObjectStore("targetDocuments", {
keyPath: Di
}).createIndex("documentTargetsIndex", Ci, {
unique: !0
});
// NOTE: This is unique only because the TargetId is the suffix.
t.createObjectStore("targets", {
keyPath: "targetId"
}).createIndex("queryTargetsIndex", Si, {
unique: !0
}), t.createObjectStore("targetGlobal");
}
const Oo = "Failed to obtain exclusive access to the persistence layer. To allow shared access, multi-tab synchronization has to be enabled in all tabs. If you are using `experimentalForceOwningTab:true`, make sure that only one tab has persistence enabled at any given time.";
/**
* Oldest acceptable age in milliseconds for client metadata before the client
* is considered inactive and its associated data is garbage collected.
*/
/**
* An IndexedDB-backed instance of Persistence. Data is stored persistently
* across sessions.
*
 * On Web only, the Firestore SDK supports shared access to its persistence
 * layer. This allows multiple browser tabs to read and write to IndexedDb and
* to synchronize state even without network connectivity. Shared access is
* currently optional and not enabled unless all clients invoke
* `enablePersistence()` with `{synchronizeTabs:true}`.
*
* In multi-tab mode, if multiple clients are active at the same time, the SDK
* will designate one client as the “primary client”. An effort is made to pick
* a visible, network-connected and active client, and this client is
* responsible for letting other clients know about its presence. The primary
* client writes a unique client-generated identifier (the client ID) to
* IndexedDb’s “owner” store every 4 seconds. If the primary client fails to
* update this entry, another client can acquire the lease and take over as
* primary.
*
* Some persistence operations in the SDK are designated as primary-client only
* operations. This includes the acknowledgment of mutations and all updates of
* remote documents. The effects of these operations are written to persistence
* and then broadcast to other tabs via LocalStorage (see
* `WebStorageSharedClientState`), which then refresh their state from
* persistence.
*
* Similarly, the primary client listens to notifications sent by secondary
* clients to discover persistence changes written by secondary clients, such as
* the addition of new mutations and query targets.
*
* If multi-tab is not enabled and another tab already obtained the primary
* lease, IndexedDbPersistence enters a failed state and all subsequent
* operations will automatically fail.
*
* Additionally, there is an optimization so that when a tab is closed, the
* primary lease is released immediately (this is especially important to make
* sure that a refreshed tab is able to immediately re-acquire the primary
* lease). Unfortunately, IndexedDB cannot be reliably used in window.unload
* since it is an asynchronous API. So in addition to attempting to give up the
* lease, the leaseholder writes its client ID to a "zombiedClient" entry in
* LocalStorage which acts as an indicator that another tab should go ahead and
* take the primary lease immediately regardless of the current lease timestamp.
*
* TODO(b/114226234): Remove `synchronizeTabs` section when multi-tab is no
* longer optional.
*/
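// Illustrative sketch (not part of the SDK): the primary-lease protocol
// described above amounts to a heartbeat loop. Each tab periodically writes
// its client metadata and checks whether the current lease is stale. A
// hypothetical step, simplified to a single "owner" record:
function examplePrimaryLeaseStep(ownerRecord, myClientId, nowMs, leaseMs) {
    // The lease is up for grabs if there is no owner, or if the owner's last
    // heartbeat is older than the lease interval.
    const expired = !ownerRecord || nowMs - ownerRecord.leaseTimestampMs > leaseMs;
    if (expired || ownerRecord.ownerId === myClientId) {
        // Acquire or extend the lease by writing a fresh heartbeat.
        return { ownerId: myClientId, leaseTimestampMs: nowMs };
    }
    // Someone else holds a fresh lease; stay secondary.
    return ownerRecord;
}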
class Mo {
constructor(
/**
* Whether to synchronize the in-memory state of multiple tabs and share
* access to local persistence.
*/
t, e, n, s, i, r, o, u, c,
/**
* If set to true, forcefully obtains database access. Existing tabs will
* no longer be able to access IndexedDB.
*/
a, h = 15) {
if (this.allowTabSynchronization = t, this.persistenceKey = e, this.clientId = n,
this.Hs = i, this.window = r, this.document = o, this.Js = c, this.Ys = a, this.Xs = h,
this.Ss = null, this.Ds = !1, this.isPrimary = !1, this.networkEnabled = !0,
/** Our window.unload handler, if registered. */
this.Zs = null, this.inForeground = !1,
/** Our 'visibilitychange' listener if registered. */
this.ti = null,
/** The client metadata refresh task. */
this.ei = null,
/** The last time we garbage collected the client metadata object store. */
this.ni = Number.NEGATIVE_INFINITY,
/** A listener to notify on primary state changes. */
this.si = t => Promise.resolve(), !Mo.C()) throw new q(L.UNIMPLEMENTED, "This platform is either missing IndexedDB or is known to have an incomplete implementation. Offline persistence has been disabled.");
this.referenceDelegate = new co(this, s), this.ii = e + "main", this.yt = new Yi(u),
this.ri = new Pt(this.ii, this.Xs, new No(this.yt)), this.Cs = new to(this.referenceDelegate, this.yt),
this.remoteDocumentCache = fo(this.yt), this.Ns = new hr, this.window && this.window.localStorage ? this.oi = this.window.localStorage : (this.oi = null,
!1 === a && N("IndexedDbPersistence", "LocalStorage is unavailable. As a result, persistence may not work reliably. In particular enablePersistence() could fail immediately after refreshing the page."));
}
/**
* Attempt to start IndexedDb persistence.
*
* @returns Whether persistence was enabled.
*/ start() {
// NOTE: This is expected to fail sometimes (in the case of another tab
// already having the persistence lock), so it's the first thing we should
// do.
return this.ui().then((() => {
if (!this.isPrimary && !this.allowTabSynchronization)
// Fail `start()` if `synchronizeTabs` is disabled and we cannot
// obtain the primary lease.
throw new q(L.FAILED_PRECONDITION, Oo);
return this.ci(), this.ai(), this.hi(), this.runTransaction("getHighestListenSequenceNumber", "readonly", (t => this.Cs.getHighestSequenceNumber(t)));
})).then((t => {
this.Ss = new Mt(t, this.Js);
})).then((() => {
this.Ds = !0;
})).catch((t => (this.ri && this.ri.close(), Promise.reject(t))));
}
/**
* Registers a listener that gets called when the primary state of the
* instance changes. Upon registering, this listener is invoked immediately
* with the current primary state.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/ li(t) {
return this.si = async e => {
if (this.started) return t(e);
}, t(this.isPrimary);
}
/**
* Registers a listener that gets called when the database receives a
     * version change event indicating that it has been deleted.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/ setDatabaseDeletedListener(t) {
this.ri.L((async e => {
// Check if an attempt is made to delete IndexedDB.
null === e.newVersion && await t();
}));
}
/**
* Adjusts the current network state in the client's metadata, potentially
* affecting the primary lease.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/ setNetworkEnabled(t) {
this.networkEnabled !== t && (this.networkEnabled = t,
// Schedule a primary lease refresh for immediate execution. The eventual
// lease update will be propagated via `primaryStateListener`.
this.Hs.enqueueAndForget((async () => {
this.started && await this.ui();
})));
}
/**
* Updates the client metadata in IndexedDb and attempts to either obtain or
* extend the primary lease for the local client. Asynchronously notifies the
* primary state listener if the client either newly obtained or released its
* primary lease.
*/ ui() {
return this.runTransaction("updateClientMetadataAndTryBecomePrimary", "readwrite", (t => $o(t).put({
clientId: this.clientId,
updateTimeMs: Date.now(),
networkEnabled: this.networkEnabled,
inForeground: this.inForeground
}).next((() => {
if (this.isPrimary) return this.fi(t).next((t => {
t || (this.isPrimary = !1, this.Hs.enqueueRetryable((() => this.si(!1))));
}));
})).next((() => this.di(t))).next((e => this.isPrimary && !e ? this._i(t).next((() => !1)) : !!e && this.wi(t).next((() => !0)))))).catch((t => {
if (St(t))
// Proceed with the existing state. Any subsequent access to
// IndexedDB will verify the lease.
return x("IndexedDbPersistence", "Failed to extend owner lease: ", t), this.isPrimary;
if (!this.allowTabSynchronization) throw t;
return x("IndexedDbPersistence", "Releasing owner lease after error during lease refresh", t),
/* isPrimary= */ !1;
})).then((t => {
this.isPrimary !== t && this.Hs.enqueueRetryable((() => this.si(t))), this.isPrimary = t;
}));
}
fi(t) {
return Fo(t).get("owner").next((t => Rt.resolve(this.mi(t))));
}
gi(t) {
return $o(t).delete(this.clientId);
}
/**
* If the garbage collection threshold has passed, prunes the
* RemoteDocumentChanges and the ClientMetadata store based on the last update
* time of all clients.
*/ async yi() {
if (this.isPrimary && !this.pi(this.ni, 18e5)) {
this.ni = Date.now();
const t = await this.runTransaction("maybeGarbageCollectMultiClientState", "readwrite-primary", (t => {
const e = ji(t, "clientMetadata");
return e.W().next((t => {
const n = this.Ii(t, 18e5), s = t.filter((t => -1 === n.indexOf(t)));
// Delete metadata for clients that are no longer considered active.
return Rt.forEach(s, (t => e.delete(t.clientId))).next((() => s));
}));
})).catch((() => []));
// Delete potential leftover entries that may continue to mark the
// inactive clients as zombied in LocalStorage.
// Ideally we'd delete the IndexedDb and LocalStorage zombie entries for
// the client atomically, but we can't. So we opt to delete the IndexedDb
// entries first to avoid potentially reviving a zombied client.
if (this.oi) for (const e of t) this.oi.removeItem(this.Ti(e.clientId));
}
}
/**
* Schedules a recurring timer to update the client metadata and to either
* extend or acquire the primary lease if the client is eligible.
*/ hi() {
this.ei = this.Hs.enqueueAfterDelay("client_metadata_refresh" /* TimerId.ClientMetadataRefresh */ , 4e3, (() => this.ui().then((() => this.yi())).then((() => this.hi()))));
}
/** Checks whether `client` is the local client. */ mi(t) {
return !!t && t.ownerId === this.clientId;
}
/**
 * Evaluates the state of all active clients and determines whether the local
* client is or can act as the holder of the primary lease. Returns whether
* the client is eligible for the lease, but does not actually acquire it.
* May return 'false' even if there is no active leaseholder and another
* (foreground) client should become leaseholder instead.
*/ di(t) {
if (this.Ys) return Rt.resolve(!0);
return Fo(t).get("owner").next((e => {
// A client is eligible for the primary lease if:
// - its network is enabled and the client's tab is in the foreground.
// - its network is enabled and no other client's tab is in the
// foreground.
        // - every client's network is disabled and the client's tab is in the
        //   foreground.
        // - every client's network is disabled and no other client's tab is in
        //   the foreground.
// - the `forceOwningTab` setting was passed in.
if (null !== e && this.pi(e.leaseTimestampMs, 5e3) && !this.Ei(e.ownerId)) {
if (this.mi(e) && this.networkEnabled) return !0;
if (!this.mi(e)) {
if (!e.allowTabSynchronization)
// Fail the `canActAsPrimary` check if the current leaseholder has
// not opted into multi-tab synchronization. If this happens at
// client startup, we reject the Promise returned by
// `enablePersistence()` and the user can continue to use Firestore
// with in-memory persistence.
// If this fails during a lease refresh, we will instead block the
// AsyncQueue from executing further operations. Note that this is
// acceptable since mixing & matching different `synchronizeTabs`
// settings is not supported.
// TODO(b/114226234): Remove this check when `synchronizeTabs` can
// no longer be turned off.
throw new q(L.FAILED_PRECONDITION, Oo);
return !1;
}
}
return !(!this.networkEnabled || !this.inForeground) || $o(t).W().next((t => void 0 === this.Ii(t, 5e3).find((t => {
if (this.clientId !== t.clientId) {
const e = !this.networkEnabled && t.networkEnabled, n = !this.inForeground && t.inForeground, s = this.networkEnabled === t.networkEnabled;
if (e || n && s) return !0;
}
return !1;
}))));
})).next((t => (this.isPrimary !== t && x("IndexedDbPersistence", `Client ${t ? "is" : "is not"} eligible for a primary lease.`),
t)));
}
async shutdown() {
// The shutdown() operations are idempotent and can be called even when
// start() aborted (e.g. because it couldn't acquire the persistence lease).
this.Ds = !1, this.Ai(), this.ei && (this.ei.cancel(), this.ei = null), this.Ri(),
this.bi(),
// Use `SimpleDb.runTransaction` directly to avoid failing if another tab
// has obtained the primary lease.
await this.ri.runTransaction("shutdown", "readwrite", [ "owner", "clientMetadata" ], (t => {
const e = new Qi(t, Mt.at);
return this._i(e).next((() => this.gi(e)));
})), this.ri.close(),
// Remove the entry marking the client as zombied from LocalStorage since
// we successfully deleted its metadata from IndexedDb.
this.Pi();
}
/**
* Returns clients that are not zombied and have an updateTime within the
* provided threshold.
*/ Ii(t, e) {
return t.filter((t => this.pi(t.updateTimeMs, e) && !this.Ei(t.clientId)));
}
/**
* Returns the IDs of the clients that are currently active. If multi-tab
* is not supported, returns an array that only contains the local client's
* ID.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/ vi() {
return this.runTransaction("getActiveClients", "readonly", (t => $o(t).W().next((t => this.Ii(t, 18e5).map((t => t.clientId))))));
}
get started() {
return this.Ds;
}
getMutationQueue(t, e) {
return zr.re(t, this.yt, e, this.referenceDelegate);
}
getTargetCache() {
return this.Cs;
}
getRemoteDocumentCache() {
return this.remoteDocumentCache;
}
getIndexManager(t) {
return new $r(t, this.yt.ie.databaseId);
}
getDocumentOverlayCache(t) {
return dr.re(this.yt, t);
}
getBundleCache() {
return this.Ns;
}
runTransaction(t, e, n) {
x("IndexedDbPersistence", "Starting transaction:", t);
const s = "readonly" === e ? "readonly" : "readwrite", i = 15 === (r = this.Xs) ? Gi : 14 === r ? Ki : 13 === r ? Ui : 12 === r ? qi : 11 === r ? Li : void M();
/** Returns the object stores for the provided schema. */
var r;
let o;
// Do all transactions as readwrite against all object stores, since we
// are the only reader/writer.
return this.ri.runTransaction(t, s, i, (s => (o = new Qi(s, this.Ss ? this.Ss.next() : Mt.at),
"readwrite-primary" === e ? this.fi(o).next((t => !!t || this.di(o))).next((e => {
if (!e) throw N(`Failed to obtain primary lease for action '${t}'.`), this.isPrimary = !1,
this.Hs.enqueueRetryable((() => this.si(!1))), new q(L.FAILED_PRECONDITION, Tt);
return n(o);
})).next((t => this.wi(o).next((() => t)))) : this.Vi(o).next((() => n(o)))))).then((t => (o.raiseOnCommittedEvent(),
t)));
}
/**
* Verifies that the current tab is the primary leaseholder or alternatively
* that the leaseholder has opted into multi-tab synchronization.
*/
// TODO(b/114226234): Remove this check when `synchronizeTabs` can no longer
// be turned off.
Vi(t) {
return Fo(t).get("owner").next((t => {
if (null !== t && this.pi(t.leaseTimestampMs, 5e3) && !this.Ei(t.ownerId) && !this.mi(t) && !(this.Ys || this.allowTabSynchronization && t.allowTabSynchronization)) throw new q(L.FAILED_PRECONDITION, Oo);
}));
}
/**
 * Obtains or extends the primary lease for the local client. This
* method does not verify that the client is eligible for this lease.
*/ wi(t) {
const e = {
ownerId: this.clientId,
allowTabSynchronization: this.allowTabSynchronization,
leaseTimestampMs: Date.now()
};
return Fo(t).put("owner", e);
}
static C() {
return Pt.C();
}
/** Checks the primary lease and removes it if we are the current primary. */ _i(t) {
const e = Fo(t);
return e.get("owner").next((t => this.mi(t) ? (x("IndexedDbPersistence", "Releasing primary lease."),
e.delete("owner")) : Rt.resolve()));
}
/** Verifies that `updateTimeMs` is within `maxAgeMs`. */ pi(t, e) {
const n = Date.now();
return !(t < n - e) && (!(t > n) || (N(`Detected an update time that is in the future: ${t} > ${n}`),
!1));
}
ci() {
null !== this.document && "function" == typeof this.document.addEventListener && (this.ti = () => {
this.Hs.enqueueAndForget((() => (this.inForeground = "visible" === this.document.visibilityState,
this.ui())));
}, this.document.addEventListener("visibilitychange", this.ti), this.inForeground = "visible" === this.document.visibilityState);
}
Ri() {
this.ti && (this.document.removeEventListener("visibilitychange", this.ti), this.ti = null);
}
/**
* Attaches a window.unload handler that will synchronously write our
* clientId to a "zombie client id" location in LocalStorage. This can be used
* by tabs trying to acquire the primary lease to determine that the lease
* is no longer valid even if the timestamp is recent. This is particularly
* important for the refresh case (so the tab correctly re-acquires the
* primary lease). LocalStorage is used for this rather than IndexedDb because
* it is a synchronous API and so can be used reliably from an unload
* handler.
*/ ai() {
var t;
"function" == typeof (null === (t = this.window) || void 0 === t ? void 0 : t.addEventListener) && (this.Zs = () => {
// Note: In theory, this should be scheduled on the AsyncQueue since it
// accesses internal state. We execute this code directly during shutdown
// to make sure it gets a chance to run.
this.Ai(), f() && navigator.appVersion.match(/Version\/1[45]/) &&
// On Safari 14 and 15, we do not run any cleanup actions as it might
// trigger a bug that prevents Safari from re-opening IndexedDB during
// the next page load.
// See https://bugs.webkit.org/show_bug.cgi?id=226547
this.Hs.enterRestrictedMode(/* purgeExistingTasks= */ !0), this.Hs.enqueueAndForget((() => this.shutdown()));
}, this.window.addEventListener("pagehide", this.Zs));
}
bi() {
this.Zs && (this.window.removeEventListener("pagehide", this.Zs), this.Zs = null);
}
/**
* Returns whether a client is "zombied" based on its LocalStorage entry.
* Clients become zombied when their tab closes without running all of the
* cleanup logic in `shutdown()`.
*/ Ei(t) {
var e;
try {
const n = null !== (null === (e = this.oi) || void 0 === e ? void 0 : e.getItem(this.Ti(t)));
return x("IndexedDbPersistence", `Client '${t}' ${n ? "is" : "is not"} zombied in LocalStorage`),
n;
} catch (t) {
// Gracefully handle if LocalStorage isn't working.
return N("IndexedDbPersistence", "Failed to get zombied client id.", t), !1;
}
}
/**
* Record client as zombied (a client that had its tab closed). Zombied
* clients are ignored during primary tab selection.
*/ Ai() {
if (this.oi) try {
this.oi.setItem(this.Ti(this.clientId), String(Date.now()));
} catch (t) {
// Gracefully handle if LocalStorage isn't available / working.
N("Failed to set zombie client id.", t);
}
}
/** Removes the zombied client entry if it exists. */ Pi() {
if (this.oi) try {
this.oi.removeItem(this.Ti(this.clientId));
} catch (t) {
// Ignore
}
}
Ti(t) {
return `firestore_zombie_${this.persistenceKey}_${t}`;
}
}
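/*
 * Illustrative sketch (not part of the SDK): how a tab can mark itself as a
 * zombied client in LocalStorage and how another tab can check for that
 * marker before honoring a recent lease timestamp. The key format mirrors
 * `Ti()` above; `markClientZombiedExample` and `isClientZombiedExample` are
 * hypothetical names.
 */
function markClientZombiedExample(persistenceKey, clientId) {
    try {
        // Written synchronously (e.g. from a "pagehide" handler), since
        // LocalStorage, unlike IndexedDB, is usable during unload.
        window.localStorage.setItem(`firestore_zombie_${persistenceKey}_${clientId}`, String(Date.now()));
    } catch (t) {
        // Gracefully handle if LocalStorage isn't available / working.
    }
}
function isClientZombiedExample(persistenceKey, clientId) {
    try {
        return null !== window.localStorage.getItem(`firestore_zombie_${persistenceKey}_${clientId}`);
    } catch (t) {
        return false;
    }
}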
/**
* Helper to get a typed SimpleDbStore for the primary client object store.
*/ function Fo(t) {
return ji(t, "owner");
}
/**
* Helper to get a typed SimpleDbStore for the client metadata object store.
*/ function $o(t) {
return ji(t, "clientMetadata");
}
/**
* Generates a string used as a prefix when storing data in IndexedDB and
* LocalStorage.
*/ function Bo(t, e) {
// Use two different prefix formats:
// * firestore / persistenceKey / projectID . databaseID / ...
// * firestore / persistenceKey / projectID / ...
// projectIDs are DNS-compatible names and cannot contain dots
// so there's no danger of collisions.
let n = t.projectId;
return t.isDefaultDatabase || (n += "." + t.database), "firestore/" + e + "/" + n + "/";
}
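/*
 * Example outputs (illustrative values) of the prefix generator above:
 *   Bo({ projectId: "my-project", isDefaultDatabase: true }, "key")
 *     => "firestore/key/my-project/"
 *   Bo({ projectId: "my-project", database: "other", isDefaultDatabase: false }, "key")
 *     => "firestore/key/my-project.other/"
 */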
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A set of changes to what documents are currently in view and out of view for
* a given query. These changes are sent to the LocalStore by the View (via
* the SyncEngine) and are used to pin / unpin documents as appropriate.
*/
class Lo {
constructor(t, e, n, s) {
this.targetId = t, this.fromCache = e, this.Si = n, this.Di = s;
}
static Ci(t, e) {
let n = Rs(), s = Rs();
for (const t of e.docChanges) switch (t.type) {
case 0 /* ChangeType.Added */ :
n = n.add(t.doc.key);
break;
case 1 /* ChangeType.Removed */ :
s = s.add(t.doc.key);
        break;
        // Other change types (e.g. modified) require no pin/unpin bookkeeping.
}
return new Lo(t, e.fromCache, n, s);
}
}
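/*
 * Sketch (illustrative names, plain Sets instead of the SDK's immutable
 * sets) of deriving the added/removed key sets from a view snapshot's
 * document changes, as `Ci` above does when pinning/unpinning documents.
 */
function keysFromDocChangesExample(docChanges) {
    const added = new Set, removed = new Set;
    for (const change of docChanges) {
        if (0 /* ChangeType.Added */ === change.type) added.add(change.doc.key);
        else if (1 /* ChangeType.Removed */ === change.type) removed.add(change.doc.key);
    }
    return { added, removed };
}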
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The Firestore query engine.
*
* Firestore queries can be executed in three modes. The Query Engine determines
* what mode to use based on what data is persisted. The mode only determines
* the runtime complexity of the query - the result set is equivalent across all
* implementations.
*
 * The Query engine will use index-based execution if a user has configured
 * any index that can be used to execute the query (via `setIndexConfiguration()`).
* Otherwise, the engine will try to optimize the query by re-using a previously
* persisted query result. If that is not possible, the query will be executed
* via a full collection scan.
*
* Index-based execution is the default when available. The query engine
* supports partial indexed execution and merges the result from the index
* lookup with documents that have not yet been indexed. The index evaluation
* matches the backend's format and as such, the SDK can use indexing for all
* queries that the backend supports.
*
* If no index exists, the query engine tries to take advantage of the target
 * document mapping in the TargetCache. These mappings exist for all queries
* that have been synced with the backend at least once and allow the query
* engine to only read documents that previously matched a query plus any
* documents that were edited after the query was last listened to.
*
* There are some cases when this optimization is not guaranteed to produce
* the same results as full collection scans. In these cases, query
* processing falls back to full scans. These cases are:
*
* - Limit queries where a document that matched the query previously no longer
* matches the query.
*
* - Limit queries where a document edit may cause the document to sort below
* another document that is in the local cache.
*
* - Queries that have never been CURRENT or free of limbo documents.
*/ class qo {
constructor() {
this.xi = !1;
}
/** Sets the document view to query against. */ initialize(t, e) {
this.Ni = t, this.indexManager = e, this.xi = !0;
}
/** Returns all local documents matching the specified query. */ getDocumentsMatchingQuery(t, e, n, s) {
return this.ki(t, e).next((i => i || this.Oi(t, e, s, n))).next((n => n || this.Mi(t, e)));
}
/**
* Performs an indexed query that evaluates the query based on a collection's
* persisted index values. Returns `null` if an index is not available.
*/ ki(t, e) {
if (_n(e))
// Queries that match all documents don't benefit from using
// key-based lookups. It is more efficient to scan all documents in a
// collection, rather than to perform individual lookups.
return Rt.resolve(null);
let n = pn(e);
return this.indexManager.getIndexType(t, n).next((s => 0 /* IndexType.NONE */ === s ? null : (null !== e.limit && 1 /* IndexType.PARTIAL */ === s && (
// We cannot apply a limit for targets that are served using a partial
// index. If a partial index will be used to serve the target, the
// query may return a superset of documents that match the target
// (e.g. if the index doesn't include all the target's filters), or
// may return the correct set of documents in the wrong order (e.g. if
// the index doesn't include a segment for one of the orderBys).
// Therefore, a limit should not be applied in such cases.
e = Tn(e, null, "F" /* LimitType.First */), n = pn(e)), this.indexManager.getDocumentsMatchingTarget(t, n).next((s => {
const i = Rs(...s);
return this.Ni.getDocuments(t, i).next((s => this.indexManager.getMinOffset(t, n).next((n => {
const r = this.Fi(e, s);
return this.$i(e, r, i, n.readTime) ? this.ki(t, Tn(e, null, "F" /* LimitType.First */)) : this.Bi(t, r, e, n);
}))));
})))));
}
/**
* Performs a query based on the target's persisted query mapping. Returns
* `null` if the mapping is not available or cannot be used.
*/ Oi(t, e, n, s) {
return _n(e) || s.isEqual(it.min()) ? this.Mi(t, e) : this.Ni.getDocuments(t, n).next((i => {
const r = this.Fi(e, i);
return this.$i(e, r, n, s) ? this.Mi(t, e) : (D() <= u.DEBUG && x("QueryEngine", "Re-using previous result from %s to execute query: %s", s.toString(), Rn(e)),
this.Bi(t, r, e, gt(s, -1)));
}));
        // Queries that have never seen a limbo-free snapshot should also be
        // run as a full collection scan.
}
/** Applies the query filter and sorting to the provided documents. */ Fi(t, e) {
// Sort the documents and re-apply the query filter since previously
// matching documents do not necessarily still match the query.
let n = new He(vn(t));
return e.forEach(((e, s) => {
bn(t, s) && (n = n.add(s));
})), n;
}
/**
* Determines if a limit query needs to be refilled from cache, making it
* ineligible for index-free execution.
*
* @param query - The query.
* @param sortedPreviousResults - The documents that matched the query when it
* was last synchronized, sorted by the query's comparator.
* @param remoteKeys - The document keys that matched the query at the last
* snapshot.
* @param limboFreeSnapshotVersion - The version of the snapshot when the
* query was last synchronized.
*/ $i(t, e, n, s) {
if (null === t.limit)
// Queries without limits do not need to be refilled.
return !1;
if (n.size !== e.size)
// The query needs to be refilled if a previously matching document no
// longer matches.
return !0;
// Limit queries are not eligible for index-free query execution if there is
// a potential that an older document from cache now sorts before a document
// that was previously part of the limit. This, however, can only happen if
// the document at the edge of the limit goes out of limit.
// If a document that is not the limit boundary sorts differently,
// the boundary of the limit itself did not change and documents from cache
// will continue to be "rejected" by this boundary. Therefore, we can ignore
// any modifications that don't affect the last document.
const i = "F" /* LimitType.First */ === t.limitType ? e.last() : e.first();
return !!i && (i.hasPendingWrites || i.version.compareTo(s) > 0);
}
Mi(t, e) {
return D() <= u.DEBUG && x("QueryEngine", "Using full collection scan to execute query:", Rn(e)),
this.Ni.getDocumentsMatchingQuery(t, e, pt.min());
}
/**
* Combines the results from an indexed execution with the remaining documents
* that have not yet been indexed.
*/ Bi(t, e, n, s) {
// Retrieve all results for documents that were updated since the offset.
return this.Ni.getDocumentsMatchingQuery(t, n, s).next((t => (
// Merge with existing results
e.forEach((e => {
t = t.insert(e.key, e);
})), t)));
}
}
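/*
 * Minimal sketch of the three-tier execution strategy documented above.
 * `tryIndexed`, `tryPreviousResults` and `fullScan` are hypothetical
 * stand-ins for `ki`, `Oi` and `Mi`; each of the first two resolves to null
 * when its strategy is not applicable.
 */
async function executeQueryExample(tryIndexed, tryPreviousResults, fullScan) {
    const fromIndex = await tryIndexed();
    if (null !== fromIndex) return fromIndex;
    const fromPreviousResults = await tryPreviousResults();
    if (null !== fromPreviousResults) return fromPreviousResults;
    return fullScan(); // always applicable, but the most expensive
}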
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Implements `LocalStore` interface.
*
 * Note: some fields defined in this class might have a public access level,
 * but the class is not exported, so they are only accessible from this module.
* This is useful to implement optional features (like bundles) in free
* functions, such that they are tree-shakeable.
*/
class Uo {
constructor(
/** Manages our in-memory or durable persistence. */
t, e, n, s) {
this.persistence = t, this.Li = e, this.yt = s,
/**
* Maps a targetID to data about its target.
*
* PORTING NOTE: We are using an immutable data structure on Web to make re-runs
* of `applyRemoteEvent()` idempotent.
*/
this.qi = new je(tt),
/** Maps a target to its targetID. */
// TODO(wuandy): Evaluate if TargetId can be part of Target.
this.Ui = new ds((t => rn(t)), on),
/**
     * A per-collection-group index of the last read time processed by
* `getNewDocumentChanges()`.
*
* PORTING NOTE: This is only used for multi-tab synchronization.
*/
this.Ki = new Map, this.Gi = t.getRemoteDocumentCache(), this.Cs = t.getTargetCache(),
this.Ns = t.getBundleCache(), this.Qi(n);
}
Qi(t) {
// TODO(indexing): Add spec tests that test these components change after a
// user change
this.documentOverlayCache = this.persistence.getDocumentOverlayCache(t), this.indexManager = this.persistence.getIndexManager(t),
this.mutationQueue = this.persistence.getMutationQueue(t, this.indexManager), this.localDocuments = new To(this.Gi, this.mutationQueue, this.documentOverlayCache, this.indexManager),
this.Gi.setIndexManager(this.indexManager), this.Li.initialize(this.localDocuments, this.indexManager);
}
collectGarbage(t) {
return this.persistence.runTransaction("Collect garbage", "readwrite-primary", (e => t.collect(e, this.qi)));
}
}
function Ko(
/** Manages our in-memory or durable persistence. */
t, e, n, s) {
return new Uo(t, e, n, s);
}
/**
* Tells the LocalStore that the currently authenticated user has changed.
*
* In response the local store switches the mutation queue to the new user and
* returns any resulting document changes.
*/
// PORTING NOTE: Android and iOS only return the documents affected by the
// change.
async function Go(t, e) {
const n = B(t);
return await n.persistence.runTransaction("Handle user change", "readonly", (t => {
// Swap out the mutation queue, grabbing the pending mutation batches
// before and after.
let s;
return n.mutationQueue.getAllMutationBatches(t).next((i => (s = i, n.Qi(e), n.mutationQueue.getAllMutationBatches(t)))).next((e => {
const i = [], r = [];
// Union the old/new changed keys.
let o = Rs();
for (const t of s) {
i.push(t.batchId);
for (const e of t.mutations) o = o.add(e.key);
}
for (const t of e) {
r.push(t.batchId);
for (const e of t.mutations) o = o.add(e.key);
}
// Return the set of all (potentially) changed documents and the list
            // of mutation batch IDs that were affected by the change.
return n.localDocuments.getDocuments(t, o).next((t => ({
ji: t,
removedBatchIds: i,
addedBatchIds: r
})));
}));
}));
}
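/*
 * Sketch (illustrative, plain Set) of the key-union step above: collect
 * every document key touched by mutation batches of both the old and the
 * new user so their local views can be recalculated.
 */
function unionBatchKeysExample(oldBatches, newBatches) {
    const keys = new Set;
    for (const batch of oldBatches.concat(newBatches))
        for (const mutation of batch.mutations) keys.add(mutation.key);
    return keys;
}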
/* Accepts locally generated Mutations and commits them to storage. */
/**
* Acknowledges the given batch.
*
* On the happy path when a batch is acknowledged, the local store will
*
* + remove the batch from the mutation queue;
* + apply the changes to the remote document cache;
* + recalculate the latency compensated view implied by those changes (there
* may be mutations in the queue that affect the documents but haven't been
* acknowledged yet); and
 * + give the changed documents back to the sync engine
*
* @returns The resulting (modified) documents.
*/
function Qo(t, e) {
const n = B(t);
return n.persistence.runTransaction("Acknowledge batch", "readwrite-primary", (t => {
const s = e.batch.keys(), i = n.Gi.newChangeBuffer({
trackRemovals: !0
});
return function(t, e, n, s) {
const i = n.batch, r = i.keys();
let o = Rt.resolve();
return r.forEach((t => {
o = o.next((() => s.getEntry(e, t))).next((e => {
const r = n.docVersions.get(t);
F(null !== r), e.version.compareTo(r) < 0 && (i.applyToRemoteDocument(e, n), e.isValidDocument() && (
// We use the commitVersion as the readTime rather than the
// document's updateTime since the updateTime is not advanced
// for updates that do not modify the underlying document.
e.setReadTime(n.commitVersion), s.addEntry(e)));
}));
})), o.next((() => t.mutationQueue.removeMutationBatch(e, i)));
}
/** Returns the local view of the documents affected by a mutation batch. */
// PORTING NOTE: Multi-Tab only.
(n, t, e, i).next((() => i.apply(t))).next((() => n.mutationQueue.performConsistencyCheck(t))).next((() => n.documentOverlayCache.removeOverlaysForBatchId(t, s, e.batch.batchId))).next((() => n.localDocuments.recalculateAndSaveOverlaysForDocumentKeys(t, function(t) {
let e = Rs();
for (let n = 0; n < t.mutationResults.length; ++n) {
t.mutationResults[n].transformResults.length > 0 && (e = e.add(t.batch.mutations[n].key));
}
return e;
}
/**
* Removes mutations from the MutationQueue for the specified batch;
* LocalDocuments will be recalculated.
*
* @returns The resulting modified documents.
*/ (e)))).next((() => n.localDocuments.getDocuments(t, s)));
}));
}
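/*
 * Illustrative outline (hypothetical helper names, not the SDK API) of the
 * happy-path steps listed in the comment above: remove the batch from the
 * queue, apply it to the remote document cache, recalculate the overlays,
 * and hand the changed documents back.
 */
async function acknowledgeBatchExample(store, batchResult) {
    const keys = batchResult.batch.keys();
    await store.mutationQueue.removeMutationBatch(batchResult.batch);
    await store.remoteDocuments.applyCommittedWrites(batchResult);
    await store.localDocuments.recalculateOverlays(keys);
    return store.localDocuments.getDocuments(keys);
}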
/**
* Returns the last consistent snapshot processed (used by the RemoteStore to
* determine whether to buffer incoming snapshots from the backend).
*/
function jo(t) {
const e = B(t);
return e.persistence.runTransaction("Get last remote snapshot version", "readonly", (t => e.Cs.getLastRemoteSnapshotVersion(t)));
}
/**
* Updates the "ground-state" (remote) documents. We assume that the remote
* event reflects any write batches that have been acknowledged or rejected
* (i.e. we do not re-apply local mutations to updates from this event).
*
* LocalDocuments are re-calculated if there are remaining mutations in the
* queue.
*/ function Wo(t, e) {
const n = B(t), s = e.snapshotVersion;
let i = n.qi;
return n.persistence.runTransaction("Apply remote event", "readwrite-primary", (t => {
const r = n.Gi.newChangeBuffer({
trackRemovals: !0
});
// Reset newTargetDataByTargetMap in case this transaction gets re-run.
i = n.qi;
const o = [];
e.targetChanges.forEach(((r, u) => {
const c = i.get(u);
if (!c) return;
// Only update the remote keys if the target is still active. This
// ensures that we can persist the updated target data along with
// the updated assignment.
o.push(n.Cs.removeMatchingKeys(t, r.removedDocuments, u).next((() => n.Cs.addMatchingKeys(t, r.addedDocuments, u))));
let a = c.withSequenceNumber(t.currentSequenceNumber);
e.targetMismatches.has(u) ? a = a.withResumeToken(Wt.EMPTY_BYTE_STRING, it.min()).withLastLimboFreeSnapshotVersion(it.min()) : r.resumeToken.approximateByteSize() > 0 && (a = a.withResumeToken(r.resumeToken, s)),
i = i.insert(u, a),
// Update the target data if there are target changes (or if
// sufficient time has passed since the last update).
/**
* Returns true if the newTargetData should be persisted during an update of
* an active target. TargetData should always be persisted when a target is
 * being released, in which case this function should not be called.
*
* While the target is active, TargetData updates can be omitted when nothing
* about the target has changed except metadata like the resume token or
* snapshot version. Occasionally it's worth the extra write to prevent these
* values from getting too stale after a crash, but this doesn't have to be
* too frequent.
*/
function(t, e, n) {
// Always persist target data if we don't already have a resume token.
if (0 === t.resumeToken.approximateByteSize()) return !0;
// Don't allow resume token changes to be buffered indefinitely. This
// allows us to be reasonably up-to-date after a crash and avoids needing
// to loop over all active queries on shutdown. Especially in the browser
// we may not get time to do anything interesting while the current tab is
// closing.
if (e.snapshotVersion.toMicroseconds() - t.snapshotVersion.toMicroseconds() >= 3e8) return !0;
// Otherwise if the only thing that has changed about a target is its resume
// token it's not worth persisting. Note that the RemoteStore keeps an
// in-memory view of the currently active targets which includes the current
// resume token, so stream failure or user changes will still use an
// up-to-date resume token regardless of what we do here.
return n.addedDocuments.size + n.modifiedDocuments.size + n.removedDocuments.size > 0;
}
/**
* Notifies local store of the changed views to locally pin documents.
*/ (c, a, r) && o.push(n.Cs.updateTargetData(t, a));
}));
let u = ws(), c = Rs();
// HACK: The only reason we allow a null snapshot version is so that we
// can synthesize remote events when we get permission denied errors while
// trying to resolve the state of a locally cached document that is in
// limbo.
if (e.documentUpdates.forEach((s => {
e.resolvedLimboDocuments.has(s) && o.push(n.persistence.referenceDelegate.updateLimboDocument(t, s));
})),
// Each loop iteration only affects its "own" doc, so it's safe to get all
// the remote documents in advance in a single call.
o.push(zo(t, r, e.documentUpdates).next((t => {
u = t.Wi, c = t.zi;
}))), !s.isEqual(it.min())) {
const e = n.Cs.getLastRemoteSnapshotVersion(t).next((e => n.Cs.setTargetsMetadata(t, t.currentSequenceNumber, s)));
o.push(e);
}
return Rt.waitFor(o).next((() => r.apply(t))).next((() => n.localDocuments.getLocalViewOfDocuments(t, u, c))).next((() => u));
})).then((t => (n.qi = i, t)));
}
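/*
 * Sketch of the persistence heuristic embedded in the function above (names
 * are illustrative): persist target data when there is no resume token yet,
 * when the token is older than five minutes (3e8 microseconds), or when the
 * change actually added, modified or removed documents.
 */
function shouldPersistTargetDataExample(oldTargetData, newTargetData, change) {
    if (0 === oldTargetData.resumeToken.approximateByteSize()) return true;
    if (newTargetData.snapshotVersion.toMicroseconds() -
        oldTargetData.snapshotVersion.toMicroseconds() >= 3e8) return true;
    return change.addedDocuments.size + change.modifiedDocuments.size +
        change.removedDocuments.size > 0;
}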
/**
* Populates document change buffer with documents from backend or a bundle.
* Returns the document changes resulting from applying those documents, and
 * also a set of documents whose existence state is changed as a result.
*
* @param txn - Transaction to use to read existing documents from storage.
 * @param documentBuffer - Document buffer to collect the resulting changes to be
* applied to storage.
* @param documents - Documents to be applied.
*/ function zo(t, e, n) {
let s = Rs(), i = Rs();
return n.forEach((t => s = s.add(t))), e.getEntries(t, s).next((t => {
let s = ws();
return n.forEach(((n, r) => {
const o = t.get(n);
        // Check whether there is an existence state change for this document.
r.isFoundDocument() !== o.isFoundDocument() && (i = i.add(n)),
// Note: The order of the steps below is important, since we want
// to ensure that rejected limbo resolutions (which fabricate
// NoDocuments with SnapshotVersion.min()) never add documents to
// cache.
r.isNoDocument() && r.version.isEqual(it.min()) ? (
// NoDocuments with SnapshotVersion.min() are used in manufactured
// events. We remove these documents from cache since we lost
// access.
e.removeEntry(n, r.readTime), s = s.insert(n, r)) : !o.isValidDocument() || r.version.compareTo(o.version) > 0 || 0 === r.version.compareTo(o.version) && o.hasPendingWrites ? (e.addEntry(r),
s = s.insert(n, r)) : x("LocalStore", "Ignoring outdated watch update for ", n, ". Current version:", o.version, " Watch version:", r.version);
})), {
Wi: s,
zi: i
};
}));
}
/**
 * Gets the mutation batch after the passed-in batchId in the mutation queue
* or null if empty.
* @param afterBatchId - If provided, the batch to search after.
* @returns The next mutation or null if there wasn't one.
*/
function Ho(t, e) {
const n = B(t);
return n.persistence.runTransaction("Get next mutation batch", "readonly", (t => (void 0 === e && (e = -1),
n.mutationQueue.getNextMutationBatchAfterBatchId(t, e))));
}
/**
* Reads the current value of a Document with a given key or null if not
* found - used for testing.
*/
/**
* Assigns the given target an internal ID so that its results can be pinned so
* they don't get GC'd. A target must be allocated in the local store before
* the store can be used to manage its view.
*
* Allocating an already allocated `Target` will return the existing `TargetData`
* for that `Target`.
*/
function Jo(t, e) {
const n = B(t);
return n.persistence.runTransaction("Allocate target", "readwrite", (t => {
let s;
return n.Cs.getTargetData(t, e).next((i => i ? (
// This target has been listened to previously, so reuse the
// previous targetID.
// TODO(mcg): freshen last accessed date?
s = i, Rt.resolve(s)) : n.Cs.allocateTargetId(t).next((i => (s = new Ji(e, i, 0 /* TargetPurpose.Listen */ , t.currentSequenceNumber),
n.Cs.addTargetData(t, s).next((() => s)))))));
})).then((t => {
// If Multi-Tab is enabled, the existing target data may be newer than
// the in-memory data
const s = n.qi.get(t.targetId);
return (null === s || t.snapshotVersion.compareTo(s.snapshotVersion) > 0) && (n.qi = n.qi.insert(t.targetId, t),
n.Ui.set(e, t.targetId)), t;
}));
}
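/*
 * Sketch (hypothetical helpers) of the allocate-or-reuse behavior documented
 * above: reuse the existing TargetData when the target was listened to
 * before, otherwise allocate a fresh target ID and persist new TargetData.
 */
async function allocateTargetExample(targetCache, txn, target) {
    const existing = await targetCache.getTargetData(txn, target);
    if (existing) return existing; // reuse the previous target ID
    const targetId = await targetCache.allocateTargetId(txn);
    const fresh = { target, targetId, sequenceNumber: txn.currentSequenceNumber };
    await targetCache.addTargetData(txn, fresh);
    return fresh;
}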
/**
* Returns the TargetData as seen by the LocalStore, including updates that may
* have not yet been persisted to the TargetCache.
*/
// Visible for testing.
/**
* Unpins all the documents associated with the given target. If
 * `keepPersistedTargetData` is set to false and Eager GC is enabled, the method
* directly removes the associated target data from the target cache.
*
* Releasing a non-existing `Target` is a no-op.
*/
// PORTING NOTE: `keepPersistedTargetData` is multi-tab only.
async function Yo(t, e, n) {
const s = B(t), i = s.qi.get(e), r = n ? "readwrite" : "readwrite-primary";
try {
n || await s.persistence.runTransaction("Release target", r, (t => s.persistence.referenceDelegate.removeTarget(t, i)));
} catch (t) {
if (!St(t)) throw t;
// All `releaseTarget` does is record the final metadata state for the
// target, but we've been recording this periodically during target
        // activity. If we lose this write, it could cause a very slight
// difference in the order of target deletion during GC, but we
// don't define exact LRU semantics so this is acceptable.
x("LocalStore", `Failed to update sequence numbers for target ${e}: ${t}`);
}
s.qi = s.qi.remove(e), s.Ui.delete(i.target);
}
/**
* Runs the specified query against the local store and returns the results,
* potentially taking advantage of query data from previous executions (such
* as the set of remote keys).
*
* @param usePreviousResults - Whether results from previous executions can
* be used to optimize this query execution.
*/ function Xo(t, e, n) {
const s = B(t);
let i = it.min(), r = Rs();
return s.persistence.runTransaction("Execute query", "readonly", (t => function(t, e, n) {
const s = B(t), i = s.Ui.get(n);
return void 0 !== i ? Rt.resolve(s.qi.get(i)) : s.Cs.getTargetData(e, n);
}(s, t, pn(e)).next((e => {
if (e) return i = e.lastLimboFreeSnapshotVersion, s.Cs.getMatchingKeysForTargetId(t, e.targetId).next((t => {
r = t;
}));
})).next((() => s.Li.getDocumentsMatchingQuery(t, e, n ? i : it.min(), n ? r : Rs()))).next((t => (eu(s, Pn(e), t),
{
documents: t,
Hi: r
})))));
}
// PORTING NOTE: Multi-Tab only.
function Zo(t, e) {
const n = B(t), s = B(n.Cs), i = n.qi.get(e);
return i ? Promise.resolve(i.target) : n.persistence.runTransaction("Get target data", "readonly", (t => s.ne(t, e).next((t => t ? t.target : null))));
}
/**
* Returns the set of documents that have been updated since the last call.
* If this is the first call, returns the set of changes since client
 * initialization. Further invocations will return documents that have changed
* since the prior call.
*/
// PORTING NOTE: Multi-Tab only.
function tu(t, e) {
const n = B(t), s = n.Ki.get(e) || it.min();
// Get the current maximum read time for the collection. This should always
    // exist, but to reduce the chance of regressions we default to
// SnapshotVersion.Min()
// TODO(indexing): Consider removing the default value.
return n.persistence.runTransaction("Get new document changes", "readonly", (t => n.Gi.getAllFromCollectionGroup(t, e, gt(s, -1),
/* limit= */ Number.MAX_SAFE_INTEGER))).then((t => (eu(n, e, t), t)));
}
/** Sets the collection group's maximum read time from the given documents. */
// PORTING NOTE: Multi-Tab only.
function eu(t, e, n) {
let s = t.Ki.get(e) || it.min();
n.forEach(((t, e) => {
e.readTime.compareTo(s) > 0 && (s = e.readTime);
})), t.Ki.set(e, s);
}
/**
* Creates a new target using the given bundle name, which will be used to
* hold the keys of all documents from the bundle in query-document mappings.
* This ensures that the loaded documents do not get garbage collected
* right away.
*/
/**
* Applies the documents from a bundle to the "ground-state" (remote)
* documents.
*
* LocalDocuments are re-calculated if there are remaining mutations in the
* queue.
*/
async function nu(t, e, n, s) {
const i = B(t);
let r = Rs(), o = ws();
for (const t of n) {
const n = e.Ji(t.metadata.name);
t.document && (r = r.add(n));
const s = e.Yi(t);
s.setReadTime(e.Xi(t.metadata.readTime)), o = o.insert(n, s);
}
const u = i.Gi.newChangeBuffer({
trackRemovals: !0
}), c = await Jo(i, function(t) {
// It is OK that the path used for the query is not valid, because this will
        // not be read or queried.
return pn(dn(ot.fromString(`__bundle__/docs/${t}`)));
}(s));
// Allocates a target to hold all document keys from the bundle, such that
// they will not get garbage collected right away.
return i.persistence.runTransaction("Apply bundle documents", "readwrite", (t => zo(t, u, o).next((e => (u.apply(t),
e))).next((e => i.Cs.removeMatchingKeysForTargetId(t, c.targetId).next((() => i.Cs.addMatchingKeys(t, r, c.targetId))).next((() => i.localDocuments.getLocalViewOfDocuments(t, e.Wi, e.zi))).next((() => e.Wi))))));
}
/**
 * Returns a promise of a boolean indicating whether the given bundle has
 * already been loaded and its create time is newer than that of the bundle
 * currently being loaded.
*/
/**
* Saves the given `NamedQuery` to local persistence.
*/
async function su(t, e, n = Rs()) {
// Allocate a target for the named query such that it can be resumed
// from associated read time if users use it to listen.
// NOTE: this also means if no corresponding target exists, the new target
// will remain active and will not get collected, unless users happen to
// unlisten the query somehow.
const s = await Jo(t, pn(or(e.bundledQuery))), i = B(t);
return i.persistence.runTransaction("Save named query", "readwrite", (t => {
const r = Ks(e.readTime);
// Simply save the query itself if it is older than what the SDK already
// has.
if (s.snapshotVersion.compareTo(r) >= 0) return i.Ns.saveNamedQuery(t, e);
// Update existing target data because the query from the bundle is newer.
const o = s.withResumeToken(Wt.EMPTY_BYTE_STRING, r);
return i.qi = i.qi.insert(o.targetId, o), i.Cs.updateTargetData(t, o).next((() => i.Cs.removeMatchingKeysForTargetId(t, s.targetId))).next((() => i.Cs.addMatchingKeys(t, n, s.targetId))).next((() => i.Ns.saveNamedQuery(t, e)));
}));
}
/** Assembles the key for a client state in WebStorage */
function iu(t, e) {
return `firestore_clients_${t}_${e}`;
}
// The format of the WebStorage key that stores the mutation state is:
//     firestore_mutations_<persistence_prefix>_<batch_id>
//     (for unauthenticated users)
// or: firestore_mutations_<persistence_prefix>_<batch_id>_<user_uid>
// 'user_uid' is last to avoid needing to escape '_' characters that it might
// contain.
/** Assembles the key for a mutation batch in WebStorage */
function ru(t, e, n) {
let s = `firestore_mutations_${t}_${n}`;
return e.isAuthenticated() && (s += `_${e.uid}`), s;
}
// The format of the WebStorage key that stores a query target's metadata is:
//     firestore_targets_<persistence_prefix>_<target_id>
/** Assembles the key for a query state in WebStorage */
function ou(t, e) {
return `firestore_targets_${t}_${e}`;
}
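/*
 * Example keys (illustrative values) produced by the assemblers above for
 * persistenceKey "dev", client "clientA", batch 42 and target 7:
 *   iu("dev", "clientA")  => "firestore_clients_dev_clientA"
 *   ru("dev", user, 42)   => "firestore_mutations_dev_42" (+ "_<uid>" when
 *                            the user is authenticated)
 *   ou("dev", 7)          => "firestore_targets_dev_7"
 */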
// The WebStorage prefix that stores the primary tab's online state. The
// format of the key is:
//     firestore_online_state_<persistence_prefix>
/**
* Holds the state of a mutation batch, including its user ID, batch ID and
* whether the batch is 'pending', 'acknowledged' or 'rejected'.
*/
// Visible for testing
class uu {
constructor(t, e, n, s) {
this.user = t, this.batchId = e, this.state = n, this.error = s;
}
/**
* Parses a MutationMetadata from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/ static Zi(t, e, n) {
const s = JSON.parse(n);
let i, r = "object" == typeof s && -1 !== [ "pending", "acknowledged", "rejected" ].indexOf(s.state) && (void 0 === s.error || "object" == typeof s.error);
return r && s.error && (r = "string" == typeof s.error.message && "string" == typeof s.error.code,
r && (i = new q(s.error.code, s.error.message))), r ? new uu(t, e, s.state, i) : (N("SharedClientState", `Failed to parse mutation state for ID '${e}': ${n}`),
null);
}
tr() {
const t = {
state: this.state,
updateTimeMs: Date.now()
};
return this.error && (t.error = {
code: this.error.code,
message: this.error.message
}), JSON.stringify(t);
}
}
/**
* Holds the state of a query target, including its target ID and whether the
* target is 'not-current', 'current' or 'rejected'.
*/
// Visible for testing
class cu {
constructor(t, e, n) {
this.targetId = t, this.state = e, this.error = n;
}
/**
* Parses a QueryTargetMetadata from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/ static Zi(t, e) {
const n = JSON.parse(e);
let s, i = "object" == typeof n && -1 !== [ "not-current", "current", "rejected" ].indexOf(n.state) && (void 0 === n.error || "object" == typeof n.error);
return i && n.error && (i = "string" == typeof n.error.message && "string" == typeof n.error.code,
i && (s = new q(n.error.code, n.error.message))), i ? new cu(t, n.state, s) : (N("SharedClientState", `Failed to parse target state for ID '${t}': ${e}`),
null);
}
tr() {
const t = {
state: this.state,
updateTimeMs: Date.now()
};
return this.error && (t.error = {
code: this.error.code,
message: this.error.message
}), JSON.stringify(t);
}
}
/**
* This class represents the immutable ClientState for a client read from
* WebStorage, containing the list of active query targets.
*/ class au {
constructor(t, e) {
this.clientId = t, this.activeTargetIds = e;
}
/**
* Parses a RemoteClientState from the JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/ static Zi(t, e) {
const n = JSON.parse(e);
let s = "object" == typeof n && n.activeTargetIds instanceof Array, i = Ps();
for (let t = 0; s && t < n.activeTargetIds.length; ++t) s = Gt(n.activeTargetIds[t]),
i = i.add(n.activeTargetIds[t]);
return s ? new au(t, i) : (N("SharedClientState", `Failed to parse client data for instance '${t}': ${e}`),
null);
}
}
/**
* This class represents the online state for all clients participating in
* multi-tab. The online state is only written to by the primary client, and
* used in secondary clients to update their query views.
*/ class hu {
constructor(t, e) {
this.clientId = t, this.onlineState = e;
}
/**
* Parses a SharedOnlineState from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/ static Zi(t) {
const e = JSON.parse(t);
return "object" == typeof e && -1 !== [ "Unknown", "Online", "Offline" ].indexOf(e.onlineState) && "string" == typeof e.clientId ? new hu(e.clientId, e.onlineState) : (N("SharedClientState", `Failed to parse online state: ${t}`),
null);
}
}
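/*
 * Sketch of the parse-or-null pattern shared by the metadata classes above
 * (illustrative, mirroring `hu.Zi`): validate the JSON shape, return a plain
 * object on success, and return null for malformed input instead of throwing.
 */
function parseOnlineStateExample(json) {
    try {
        const parsed = JSON.parse(json);
        return "object" == typeof parsed &&
            -1 !== [ "Unknown", "Online", "Offline" ].indexOf(parsed.onlineState) &&
            "string" == typeof parsed.clientId ?
            { clientId: parsed.clientId, onlineState: parsed.onlineState } : null;
    } catch (t) {
        return null; // malformed JSON is treated like an invalid shape
    }
}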
/**
* Metadata state of the local client. Unlike `RemoteClientState`, this class is
* mutable and keeps track of all pending mutations, which allows us to
* update the range of pending mutation batch IDs as new mutations are added or
* removed.
*
* The data in `LocalClientState` is not read from WebStorage and instead
* updated via its instance methods. The updated state can be serialized via
* `toWebStorageJSON()`.
*/
// Visible for testing.
class lu {
constructor() {
this.activeTargetIds = Ps();
}
er(t) {
this.activeTargetIds = this.activeTargetIds.add(t);
}
nr(t) {
this.activeTargetIds = this.activeTargetIds.delete(t);
}
/**
* Converts this entry into a JSON-encoded format we can use for WebStorage.
* Does not encode `clientId` as it is part of the key in WebStorage.
*/ tr() {
const t = {
activeTargetIds: this.activeTargetIds.toArray(),
updateTimeMs: Date.now()
};
return JSON.stringify(t);
}
}
/**
* `WebStorageSharedClientState` uses WebStorage (window.localStorage) as the
* backing store for the SharedClientState. It keeps track of all active
* clients and supports modifications of the local client's data.
*/ class fu {
constructor(t, e, n, s, i) {
this.window = t, this.Hs = e, this.persistenceKey = n, this.sr = s, this.syncEngine = null,
this.onlineStateHandler = null, this.sequenceNumberHandler = null, this.ir = this.rr.bind(this),
this.ur = new je(tt), this.started = !1,
/**
* Captures WebStorage events that occur before `start()` is called. These
* events are replayed once `WebStorageSharedClientState` is started.
*/
this.cr = [];
// Escape the special characters mentioned here:
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
const r = n.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
this.storage = this.window.localStorage, this.currentUser = i, this.ar = iu(this.persistenceKey, this.sr),
this.hr =
/** Assembles the key for the current sequence number. */
function(t) {
return `firestore_sequence_number_${t}`;
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ (this.persistenceKey), this.ur = this.ur.insert(this.sr, new lu), this.lr = new RegExp(`^firestore_clients_${r}_([^_]*)$`),
this.dr = new RegExp(`^firestore_mutations_${r}_(\\d+)(?:_(.*))?$`), this._r = new RegExp(`^firestore_targets_${r}_(\\d+)$`),
this.wr =
/** Assembles the key for the online state of the primary tab. */
function(t) {
return `firestore_online_state_${t}`;
}
    // The WebStorage key prefix that serves as an event to indicate that the
    // remote documents might have changed due to some secondary tabs loading a
    // bundle. The format of the key is:
    //     firestore_bundle_loaded_v2_<persistence_prefix>
    // The version ending with "v2" stores the list of modified collection groups.
(this.persistenceKey), this.mr = function(t) {
return `firestore_bundle_loaded_v2_${t}`;
}
    // The WebStorage key prefix for the key that stores the last sequence number
    // allocated. The key looks like 'firestore_sequence_number_<persistence_prefix>'.
(this.persistenceKey),
// Rather than adding the storage observer during start(), we add the
// storage observer during initialization. This ensures that we collect
// events before other components populate their initial state (during their
// respective start() calls). Otherwise, we might for example miss a
// mutation that is added after LocalStore's start() processed the existing
// mutations but before we observe WebStorage events.
this.window.addEventListener("storage", this.ir);
}
/** Returns 'true' if WebStorage is available in the current environment. */ static C(t) {
return !(!t || !t.localStorage);
}
async start() {
// Retrieve the list of existing clients to backfill the data in
// SharedClientState.
const t = await this.syncEngine.vi();
for (const e of t) {
if (e === this.sr) continue;
const t = this.getItem(iu(this.persistenceKey, e));
if (t) {
const n = au.Zi(e, t);
n && (this.ur = this.ur.insert(n.clientId, n));
}
}
this.gr();
// Check if there is an existing online state and call the callback handler
// if applicable.
const e = this.storage.getItem(this.wr);
if (e) {
const t = this.yr(e);
t && this.pr(t);
}
for (const t of this.cr) this.rr(t);
this.cr = [],
// Register a window unload hook to remove the client metadata entry from
// WebStorage even if `shutdown()` was not called.
this.window.addEventListener("pagehide", (() => this.shutdown())), this.started = !0;
}
writeSequenceNumber(t) {
this.setItem(this.hr, JSON.stringify(t));
}
getAllActiveQueryTargets() {
return this.Ir(this.ur);
}
isActiveQueryTarget(t) {
let e = !1;
return this.ur.forEach(((n, s) => {
s.activeTargetIds.has(t) && (e = !0);
})), e;
}
addPendingMutation(t) {
this.Tr(t, "pending");
}
updateMutationState(t, e, n) {
this.Tr(t, e, n),
// Once a final mutation result is observed by other clients, they no longer
// access the mutation's metadata entry. Since WebStorage replays events
// in order, it is safe to delete the entry right after updating it.
this.Er(t);
}
addLocalQueryTarget(t) {
let e = "not-current";
// Lookup an existing query state if the target ID was already registered
// by another tab
if (this.isActiveQueryTarget(t)) {
const n = this.storage.getItem(ou(this.persistenceKey, t));
if (n) {
const s = cu.Zi(t, n);
s && (e = s.state);
}
}
return this.Ar.er(t), this.gr(), e;
}
removeLocalQueryTarget(t) {
this.Ar.nr(t), this.gr();
}
isLocalQueryTarget(t) {
return this.Ar.activeTargetIds.has(t);
}
clearQueryState(t) {
this.removeItem(ou(this.persistenceKey, t));
}
updateQueryState(t, e, n) {
this.Rr(t, e, n);
}
handleUserChange(t, e, n) {
e.forEach((t => {
this.Er(t);
})), this.currentUser = t, n.forEach((t => {
this.addPendingMutation(t);
}));
}
setOnlineState(t) {
this.br(t);
}
notifyBundleLoaded(t) {
this.Pr(t);
}
shutdown() {
this.started && (this.window.removeEventListener("storage", this.ir), this.removeItem(this.ar),
this.started = !1);
}
getItem(t) {
const e = this.storage.getItem(t);
return x("SharedClientState", "READ", t, e), e;
}
setItem(t, e) {
x("SharedClientState", "SET", t, e), this.storage.setItem(t, e);
}
removeItem(t) {
x("SharedClientState", "REMOVE", t), this.storage.removeItem(t);
}
rr(t) {
// Note: The function is typed to take Event to be interface-compatible with
// `Window.addEventListener`.
const e = t;
if (e.storageArea === this.storage) {
if (x("SharedClientState", "EVENT", e.key, e.newValue), e.key === this.ar) return void N("Received WebStorage notification for local change. Another client might have garbage-collected our state");
this.Hs.enqueueRetryable((async () => {
if (this.started) {
if (null !== e.key) if (this.lr.test(e.key)) {
if (null == e.newValue) {
const t = this.vr(e.key);
return this.Vr(t, null);
}
{
const t = this.Sr(e.key, e.newValue);
if (t) return this.Vr(t.clientId, t);
}
} else if (this.dr.test(e.key)) {
if (null !== e.newValue) {
const t = this.Dr(e.key, e.newValue);
if (t) return this.Cr(t);
}
} else if (this._r.test(e.key)) {
if (null !== e.newValue) {
const t = this.Nr(e.key, e.newValue);
if (t) return this.kr(t);
}
} else if (e.key === this.wr) {
if (null !== e.newValue) {
const t = this.yr(e.newValue);
if (t) return this.pr(t);
}
} else if (e.key === this.hr) {
const t = function(t) {
let e = Mt.at;
if (null != t) try {
const n = JSON.parse(t);
F("number" == typeof n), e = n;
} catch (t) {
N("SharedClientState", "Failed to read sequence number from WebStorage", t);
}
return e;
}
/**
* `MemorySharedClientState` is a simple implementation of SharedClientState for
* clients using memory persistence. The state in this class remains fully
* isolated and no synchronization is performed.
*/ (e.newValue);
t !== Mt.at && this.sequenceNumberHandler(t);
} else if (e.key === this.mr) {
const t = this.Or(e.newValue);
await Promise.all(t.map((t => this.syncEngine.Mr(t))));
}
} else this.cr.push(e);
}));
}
}
get Ar() {
return this.ur.get(this.sr);
}
gr() {
this.setItem(this.ar, this.Ar.tr());
}
Tr(t, e, n) {
const s = new uu(this.currentUser, t, e, n), i = ru(this.persistenceKey, this.currentUser, t);
this.setItem(i, s.tr());
}
Er(t) {
const e = ru(this.persistenceKey, this.currentUser, t);
this.removeItem(e);
}
br(t) {
const e = {
clientId: this.sr,
onlineState: t
};
this.storage.setItem(this.wr, JSON.stringify(e));
}
Rr(t, e, n) {
const s = ou(this.persistenceKey, t), i = new cu(t, e, n);
this.setItem(s, i.tr());
}
Pr(t) {
const e = JSON.stringify(Array.from(t));
this.setItem(this.mr, e);
}
/**
* Parses a client state key in WebStorage. Returns null if the key does not
* match the expected key format.
*/ vr(t) {
const e = this.lr.exec(t);
return e ? e[1] : null;
}
/**
* Parses a client state in WebStorage. Returns 'null' if the value could not
* be parsed.
*/ Sr(t, e) {
const n = this.vr(t);
return au.Zi(n, e);
}
/**
* Parses a mutation batch state in WebStorage. Returns 'null' if the value
* could not be parsed.
*/ Dr(t, e) {
const n = this.dr.exec(t), s = Number(n[1]), i = void 0 !== n[2] ? n[2] : null;
return uu.Zi(new v(i), s, e);
}
/**
* Parses a query target state from WebStorage. Returns 'null' if the value
* could not be parsed.
*/ Nr(t, e) {
const n = this._r.exec(t), s = Number(n[1]);
return cu.Zi(s, e);
}
/**
* Parses an online state from WebStorage. Returns 'null' if the value
* could not be parsed.
*/ yr(t) {
return hu.Zi(t);
}
Or(t) {
return JSON.parse(t);
}
async Cr(t) {
if (t.user.uid === this.currentUser.uid) return this.syncEngine.Fr(t.batchId, t.state, t.error);
x("SharedClientState", `Ignoring mutation for non-active user ${t.user.uid}`);
}
kr(t) {
return this.syncEngine.$r(t.targetId, t.state, t.error);
}
Vr(t, e) {
const n = e ? this.ur.insert(t, e) : this.ur.remove(t), s = this.Ir(this.ur), i = this.Ir(n), r = [], o = [];
return i.forEach((t => {
s.has(t) || r.push(t);
})), s.forEach((t => {
i.has(t) || o.push(t);
})), this.syncEngine.Br(r, o).then((() => {
this.ur = n;
}));
}
pr(t) {
// We check whether the client that wrote this online state is still active
// by comparing its client ID to the list of clients kept active in
        // IndexedDb. If a client does not update its IndexedDb client state
// within 5 seconds, it is considered inactive and we don't emit an online
// state event.
this.ur.get(t.clientId) && this.onlineStateHandler(t.onlineState);
}
Ir(t) {
let e = Ps();
return t.forEach(((t, n) => {
e = e.unionWith(n.activeTargetIds);
})), e;
}
}
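/*
 * Minimal sketch (illustrative, not the SDK API) of the cross-tab signaling
 * the class above builds on: one tab writes a key to LocalStorage and every
 * other tab observes it via the window "storage" event, which does not fire
 * in the writing tab itself.
 */
function listenForPeerUpdatesExample(onChange) {
    const handler = t => {
        if (t.storageArea === window.localStorage && t.key && t.key.startsWith("firestore_"))
            onChange(t.key, t.newValue); // newValue is null when the key was removed
    };
    window.addEventListener("storage", handler);
    return () => window.removeEventListener("storage", handler);
}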
class du {
constructor() {
this.Lr = new lu, this.qr = {}, this.onlineStateHandler = null, this.sequenceNumberHandler = null;
}
addPendingMutation(t) {
// No op.
}
updateMutationState(t, e, n) {
// No op.
}
addLocalQueryTarget(t) {
return this.Lr.er(t), this.qr[t] || "not-current";
}
updateQueryState(t, e, n) {
this.qr[t] = e;
}
removeLocalQueryTarget(t) {
this.Lr.nr(t);
}
isLocalQueryTarget(t) {
return this.Lr.activeTargetIds.has(t);
}
clearQueryState(t) {
delete this.qr[t];
}
getAllActiveQueryTargets() {
return this.Lr.activeTargetIds;
}
isActiveQueryTarget(t) {
return this.Lr.activeTargetIds.has(t);
}
start() {
return this.Lr = new lu, Promise.resolve();
}
handleUserChange(t, e, n) {
// No op.
}
setOnlineState(t) {
// No op.
}
shutdown() {}
writeSequenceNumber(t) {}
notifyBundleLoaded(t) {
// No op.
}
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ class _u {
Ur(t) {
// No-op.
}
shutdown() {
// No-op.
}
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// References to `window` are guarded by BrowserConnectivityMonitor.isAvailable()
/* eslint-disable no-restricted-globals */
/**
* Browser implementation of ConnectivityMonitor.
*/
class wu {
constructor() {
this.Kr = () => this.Gr(), this.Qr = () => this.jr(), this.Wr = [], this.zr();
}
Ur(t) {
this.Wr.push(t);
}
shutdown() {
window.removeEventListener("online", this.Kr), window.removeEventListener("offline", this.Qr);
}
zr() {
window.addEventListener("online", this.Kr), window.addEventListener("offline", this.Qr);
}
Gr() {
x("ConnectivityMonitor", "Network connectivity changed: AVAILABLE");
for (const t of this.Wr) t(0 /* NetworkStatus.AVAILABLE */);
}
jr() {
x("ConnectivityMonitor", "Network connectivity changed: UNAVAILABLE");
for (const t of this.Wr) t(1 /* NetworkStatus.UNAVAILABLE */);
}
// TODO(chenbrian): Consider passing `window` either into this component or
// into this method, for testing via FakeWindow.
/** Checks that all used attributes of window are available. */
static C() {
return "undefined" != typeof window && void 0 !== window.addEventListener && void 0 !== window.removeEventListener;
}
}
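// Usage sketch (illustrative only): a ConnectivityMonitor consumer registers a
// callback via Ur() and receives 0 (AVAILABLE) or 1 (UNAVAILABLE) whenever the
// browser fires its 'online'/'offline' events.
function exampleWatchConnectivity(monitor) {
    monitor.Ur((status => {
        console.log(0 === status ? "network available" : "network unavailable");
    }));
}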
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */
/**
 * Maps RPC names to the corresponding REST endpoint name.
 *
 * We use array notation to avoid mangling.
 */ const mu = {
    BatchGetDocuments: "batchGet",
    Commit: "commit",
    RunQuery: "runQuery",
    RunAggregationQuery: "runAggregationQuery"
};
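// Illustrative sketch: how ho() further below combines the map above with a
// resource path to form a REST URL. The host and path values are made up for
// the example.
function exampleRestUrl(baseUrl, path, rpcName) {
    // e.g. exampleRestUrl("https://firestore.googleapis.com",
    //   "projects/p/databases/(default)/documents", "RunQuery")
    // => "https://firestore.googleapis.com/v1/projects/p/databases/(default)/documents:runQuery"
    return `${baseUrl}/v1/${path}:${mu[rpcName]}`;
}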
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides a simple helper class that implements the Stream interface to
* bridge to other implementations that are streams but do not implement the
* interface. The stream callbacks are invoked with the callOn... methods.
*/
class gu {
constructor(t) {
this.Hr = t.Hr, this.Jr = t.Jr;
}
Yr(t) {
this.Xr = t;
}
Zr(t) {
this.eo = t;
}
onMessage(t) {
this.no = t;
}
close() {
this.Jr();
}
send(t) {
this.Hr(t);
}
so() {
this.Xr();
}
io(t) {
this.eo(t);
}
ro(t) {
this.no(t);
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ class yu extends
/**
* Base class for all Rest-based connections to the backend (WebChannel and
* HTTP).
*/
class {
constructor(t) {
this.databaseInfo = t, this.databaseId = t.databaseId;
const e = t.ssl ? "https" : "http";
this.oo = e + "://" + t.host, this.uo = "projects/" + this.databaseId.projectId + "/databases/" + this.databaseId.database + "/documents";
}
get co() {
// Both `invokeRPC()` and `invokeStreamingRPC()` use their `path` arguments to determine
// where to run the query, and expect the `request` to NOT specify the "path".
return !1;
}
ao(t, e, n, s, i) {
const r = this.ho(t, e);
x("RestConnection", "Sending: ", r, n);
const o = {};
return this.lo(o, s, i), this.fo(t, r, o, n).then((t => (x("RestConnection", "Received: ", t),
t)), (e => {
throw k("RestConnection", `${t} failed with error: `, e, "url: ", r, "request:", n),
e;
}));
}
_o(t, e, n, s, i, r) {
// The REST API automatically aggregates all of the streamed results, so we
// can just use the normal invoke() method.
return this.ao(t, e, n, s, i);
}
/**
* Modifies the headers for a request, adding any authorization token if
* present and any additional headers for the request.
*/ lo(t, e, n) {
t["X-Goog-Api-Client"] = "gl-js/ fire/" + V,
// Content-Type: text/plain will avoid preflight requests which might
// mess with CORS and redirects by proxies. If we add custom headers
// we will need to change this code to potentially use the $httpOverwrite
// parameter supported by ESF to avoid triggering preflight requests.
t["Content-Type"] = "text/plain", this.databaseInfo.appId && (t["X-Firebase-GMPID"] = this.databaseInfo.appId),
e && e.headers.forEach(((e, n) => t[n] = e)), n && n.headers.forEach(((e, n) => t[n] = e));
}
ho(t, e) {
const n = mu[t];
return `${this.oo}/v1/${e}:${n}`;
}
} {
constructor(t) {
super(t), this.forceLongPolling = t.forceLongPolling, this.autoDetectLongPolling = t.autoDetectLongPolling,
this.useFetchStreams = t.useFetchStreams;
}
fo(t, e, n, s) {
return new Promise(((i, r) => {
const o = new g;
o.setWithCredentials(!0), o.listenOnce(y.COMPLETE, (() => {
try {
switch (o.getLastErrorCode()) {
case p.NO_ERROR:
const e = o.getResponseJson();
x("Connection", "XHR received:", JSON.stringify(e)), i(e);
break;
case p.TIMEOUT:
x("Connection", 'RPC "' + t + '" timed out'), r(new q(L.DEADLINE_EXCEEDED, "Request time out"));
break;
case p.HTTP_ERROR:
const n = o.getStatus();
if (x("Connection", 'RPC "' + t + '" failed with status:', n, "response text:", o.getResponseText()),
n > 0) {
let t = o.getResponseJson();
Array.isArray(t) && (t = t[0]);
const e = null == t ? void 0 : t.error;
if (e && e.status && e.message) {
const t = function(t) {
const e = t.toLowerCase().replace(/_/g, "-");
return Object.values(L).indexOf(e) >= 0 ? e : L.UNKNOWN;
}(e.status);
r(new q(t, e.message));
} else r(new q(L.UNKNOWN, "Server responded with status " + o.getStatus()));
} else
// If we received an HTTP_ERROR but there's no status code,
// it's most probably a connection issue
r(new q(L.UNAVAILABLE, "Connection failed."));
break;
default:
M();
}
} finally {
x("Connection", 'RPC "' + t + '" completed.');
}
}));
const u = JSON.stringify(s);
o.send(e, "POST", u, n, 15);
}));
}
wo(t, e, n) {
const s = [ this.oo, "/", "google.firestore.v1.Firestore", "/", t, "/channel" ], i = I(), r = T(), o = {
// Required for backend stickiness, routing behavior is based on this
// parameter.
httpSessionIdParam: "gsessionid",
initMessageHeaders: {},
messageUrlParams: {
// This param is used to improve routing and project isolation by the
// backend and must be included in every request.
database: `projects/${this.databaseId.projectId}/databases/${this.databaseId.database}`
},
sendRawJson: !0,
supportsCrossDomainXhr: !0,
internalChannelParams: {
// Override the default timeout (randomized between 10-20 seconds) since
// a large write batch on a slow internet connection may take a long
// time to send to the backend. Rather than have WebChannel impose a
// tight timeout which could lead to infinite timeouts and retries, we
// set it very large (5-10 minutes) and rely on the browser's builtin
// timeouts to kick in if the request isn't working.
forwardChannelRequestTimeoutMs: 6e5
},
forceLongPolling: this.forceLongPolling,
detectBufferingProxy: this.autoDetectLongPolling
};
this.useFetchStreams && (o.xmlHttpFactory = new E({})), this.lo(o.initMessageHeaders, e, n),
// Sending the custom headers we just added to request.initMessageHeaders
// (Authorization, etc.) will trigger the browser to make a CORS preflight
// request because the XHR will no longer meet the criteria for a "simple"
// CORS request:
// https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#Simple_requests
// Therefore to avoid the CORS preflight request (an extra network
// roundtrip), we use the encodeInitMessageHeaders option to specify that
// the headers should instead be encoded in the request's POST payload,
// which is recognized by the webchannel backend.
o.encodeInitMessageHeaders = !0;
const u = s.join("");
x("Connection", "Creating WebChannel: " + u, o);
const c = i.createWebChannel(u, o);
// WebChannel supports sending the first message with the handshake - saving
// a network round trip. However, it will have to call send in the same
// JS event loop as open. In order to enforce this, we delay actually
// opening the WebChannel until send is called. Whether we have called
// open is tracked with this variable.
let a = !1, h = !1;
// A flag to determine whether the stream was closed (by us or through an
// error/close event) to avoid delivering multiple close events or sending
// on a closed stream
const l = new gu({
Hr: t => {
h ? x("Connection", "Not sending because WebChannel is closed:", t) : (a || (x("Connection", "Opening WebChannel transport."),
c.open(), a = !0), x("Connection", "WebChannel sending:", t), c.send(t));
},
Jr: () => c.close()
}), f = (t, e, n) => {
// TODO(dimond): closure typing seems broken because WebChannel does
// not implement goog.events.Listenable
t.listen(e, (t => {
try {
n(t);
} catch (t) {
setTimeout((() => {
throw t;
}), 0);
}
}));
};
// Closure events are guarded and exceptions are swallowed, so catch any
// exception and rethrow using a setTimeout so they become visible again.
// Note that eventually this function could go away if we are confident
// enough the code is exception free.
return f(c, A.EventType.OPEN, (() => {
h || x("Connection", "WebChannel transport opened.");
})), f(c, A.EventType.CLOSE, (() => {
h || (h = !0, x("Connection", "WebChannel transport closed"), l.io());
})), f(c, A.EventType.ERROR, (t => {
h || (h = !0, k("Connection", "WebChannel transport errored:", t), l.io(new q(L.UNAVAILABLE, "The operation could not be completed")));
})), f(c, A.EventType.MESSAGE, (t => {
var e;
if (!h) {
const n = t.data[0];
F(!!n);
// TODO(b/35143891): There is a bug in One Platform that caused errors
// (and only errors) to be wrapped in an extra array. To be forward
// compatible with the bug we need to check either condition. The latter
// can be removed once the fix has been rolled out.
// Use any because msgData.error is not typed.
const s = n, i = s.error || (null === (e = s[0]) || void 0 === e ? void 0 : e.error);
if (i) {
x("Connection", "WebChannel received error:", i);
// error.status will be a string like 'OK' or 'NOT_FOUND'.
const t = i.status;
let e =
/**
* Maps an error Code from a GRPC status identifier like 'NOT_FOUND'.
*
* @returns The Code equivalent to the given status string or undefined if
* there is no match.
*/
function(t) {
// lookup by string
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const e = as[t];
if (void 0 !== e) return fs(e);
}(t), n = i.message;
void 0 === e && (e = L.INTERNAL, n = "Unknown error status: " + t + " with message " + i.message),
// Mark closed so no further events are propagated
h = !0, l.io(new q(e, n)), c.close();
} else x("Connection", "WebChannel received:", n), l.ro(n);
}
})), f(r, R.STAT_EVENT, (t => {
t.stat === b.PROXY ? x("Connection", "Detected buffering proxy") : t.stat === b.NOPROXY && x("Connection", "Detected no buffering proxy");
})), setTimeout((() => {
// Technically we could/should wait for the WebChannel opened event,
// but because we want to send the first message with the WebChannel
// handshake we pretend the channel opened here (asynchronously), and
// then delay the actual open until the first message is sent.
l.so();
}), 0), l;
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** The Platform's 'window' implementation or null if not available. */
function pu() {
// `window` is not always available, e.g. in ReactNative and WebWorkers.
// eslint-disable-next-line no-restricted-globals
return "undefined" != typeof window ? window : null;
}
/** The Platform's 'document' implementation or null if not available. */ function Iu() {
// `document` is not always available, e.g. in ReactNative and WebWorkers.
// eslint-disable-next-line no-restricted-globals
return "undefined" != typeof document ? document : null;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ function Tu(t) {
return new Bs(t, /* useProto3Json= */ !0);
}
/**
* A helper for running delayed tasks following an exponential backoff curve
* between attempts.
*
* Each delay is made up of a "base" delay which follows the exponential
* backoff curve, and a +/- 50% "jitter" that is calculated and added to the
* base delay. This prevents clients from accidentally synchronizing their
 * delays, which would cause spikes of load on the backend.
*/
class Eu {
constructor(
/**
* The AsyncQueue to run backoff operations on.
*/
t,
/**
* The ID to use when scheduling backoff operations on the AsyncQueue.
*/
e,
/**
* The initial delay (used as the base delay on the first retry attempt).
* Note that jitter will still be applied, so the actual delay could be as
* little as 0.5*initialDelayMs.
*/
n = 1e3
/**
* The multiplier to use to determine the extended base delay after each
* attempt.
*/ , s = 1.5
/**
* The maximum base delay after which no further backoff is performed.
* Note that jitter will still be applied, so the actual delay could be as
* much as 1.5*maxDelayMs.
*/ , i = 6e4) {
this.Hs = t, this.timerId = e, this.mo = n, this.yo = s, this.po = i, this.Io = 0,
this.To = null,
/** The last backoff attempt, as epoch milliseconds. */
this.Eo = Date.now(), this.reset();
}
/**
* Resets the backoff delay.
*
* The very next backoffAndWait() will have no delay. If it is called again
* (i.e. due to an error), initialDelayMs (plus jitter) will be used, and
* subsequent ones will increase according to the backoffFactor.
*/ reset() {
this.Io = 0;
}
/**
* Resets the backoff delay to the maximum delay (e.g. for use after a
* RESOURCE_EXHAUSTED error).
*/ Ao() {
this.Io = this.po;
}
/**
* Returns a promise that resolves after currentDelayMs, and increases the
* delay for any subsequent attempts. If there was a pending backoff operation
* already, it will be canceled.
*/ Ro(t) {
// Cancel any pending backoff operation.
this.cancel();
// First schedule using the current base (which may be 0 and should be
// honored as such).
const e = Math.floor(this.Io + this.bo()), n = Math.max(0, Date.now() - this.Eo), s = Math.max(0, e - n);
// Guard against lastAttemptTime being in the future due to a clock change.
s > 0 && x("ExponentialBackoff", `Backing off for ${s} ms (base delay: ${this.Io} ms, delay with jitter: ${e} ms, last attempt: ${n} ms ago)`),
this.To = this.Hs.enqueueAfterDelay(this.timerId, s, (() => (this.Eo = Date.now(),
t()))),
// Apply backoff factor to determine next delay and ensure it is within
// bounds.
this.Io *= this.yo, this.Io < this.mo && (this.Io = this.mo), this.Io > this.po && (this.Io = this.po);
}
Po() {
null !== this.To && (this.To.skipDelay(), this.To = null);
}
cancel() {
null !== this.To && (this.To.cancel(), this.To = null);
}
/** Returns a random value in the range [-currentBaseMs/2, currentBaseMs/2] */ bo() {
return (Math.random() - .5) * this.Io;
}
}
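// Illustrative sketch (not part of the SDK): the delay computation in Ro()
// above, in isolation. Each attempt waits base + jitter, where jitter is
// uniform in [-base/2, +base/2]; the base then grows by the backoff factor and
// is clamped to [initialDelayMs, maxDelayMs]. The first attempt has no delay.
function exampleBackoffDelays(attempts, initialDelayMs = 1e3, factor = 1.5, maxDelayMs = 6e4) {
    const delays = [];
    let base = 0; // reset() puts the base at 0, so the first attempt is immediate
    for (let i = 0; i < attempts; i++) {
        const jitter = (Math.random() - .5) * base;
        delays.push(Math.max(0, base + jitter));
        base = Math.min(maxDelayMs, Math.max(initialDelayMs, base * factor));
    }
    return delays;
}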
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A PersistentStream is an abstract base class that represents a streaming RPC
 * to the Firestore backend. It's built on top of the connection's own support
* for streaming RPCs, and adds several critical features for our clients:
*
* - Exponential backoff on failure
* - Authentication via CredentialsProvider
* - Dispatching all callbacks into the shared worker queue
* - Closing idle streams after 60 seconds of inactivity
*
* Subclasses of PersistentStream implement serialization of models to and
* from the JSON representation of the protocol buffers for a specific
* streaming RPC.
*
* ## Starting and Stopping
*
* Streaming RPCs are stateful and need to be start()ed before messages can
* be sent and received. The PersistentStream will call the onOpen() function
* of the listener once the stream is ready to accept requests.
*
* Should a start() fail, PersistentStream will call the registered onClose()
* listener with a FirestoreError indicating what went wrong.
*
* A PersistentStream can be started and stopped repeatedly.
*
* Generic types:
* SendType: The type of the outgoing message of the underlying
* connection stream
* ReceiveType: The type of the incoming message of the underlying
* connection stream
* ListenerType: The type of the listener that will be used for callbacks
*/
class Au {
constructor(t, e, n, s, i, r, o, u) {
this.Hs = t, this.vo = n, this.Vo = s, this.connection = i, this.authCredentialsProvider = r,
this.appCheckCredentialsProvider = o, this.listener = u, this.state = 0 /* PersistentStreamState.Initial */ ,
/**
* A close count that's incremented every time the stream is closed; used by
* getCloseGuardedDispatcher() to invalidate callbacks that happen after
* close.
*/
this.So = 0, this.Do = null, this.Co = null, this.stream = null, this.xo = new Eu(t, e);
}
/**
* Returns true if start() has been called and no error has occurred. True
* indicates the stream is open or in the process of opening (which
* encompasses respecting backoff, getting auth tokens, and starting the
* actual RPC). Use isOpen() to determine if the stream is open and ready for
* outbound requests.
*/ No() {
return 1 /* PersistentStreamState.Starting */ === this.state || 5 /* PersistentStreamState.Backoff */ === this.state || this.ko();
}
/**
* Returns true if the underlying RPC is open (the onOpen() listener has been
* called) and the stream is ready for outbound requests.
*/ ko() {
return 2 /* PersistentStreamState.Open */ === this.state || 3 /* PersistentStreamState.Healthy */ === this.state;
}
/**
* Starts the RPC. Only allowed if isStarted() returns false. The stream is
* not immediately ready for use: onOpen() will be invoked when the RPC is
* ready for outbound requests, at which point isOpen() will return true.
*
* When start returns, isStarted() will return true.
*/ start() {
4 /* PersistentStreamState.Error */ !== this.state ? this.auth() : this.Oo();
}
/**
* Stops the RPC. This call is idempotent and allowed regardless of the
* current isStarted() state.
*
* When stop returns, isStarted() and isOpen() will both return false.
*/ async stop() {
this.No() && await this.close(0 /* PersistentStreamState.Initial */);
}
/**
* After an error the stream will usually back off on the next attempt to
* start it. If the error warrants an immediate restart of the stream, the
* sender can use this to indicate that the receiver should not back off.
*
* Each error will call the onClose() listener. That function can decide to
* inhibit backoff if required.
*/ Mo() {
this.state = 0 /* PersistentStreamState.Initial */ , this.xo.reset();
}
/**
* Marks this stream as idle. If no further actions are performed on the
* stream for one minute, the stream will automatically close itself and
* notify the stream's onClose() handler with Status.OK. The stream will then
* be in a !isStarted() state, requiring the caller to start the stream again
* before further use.
*
* Only streams that are in state 'Open' can be marked idle, as all other
* states imply pending network operations.
*/ Fo() {
// Starts the idle timer if we are in state 'Open' and are not yet already
// running a timer (in which case the previous idle timeout still applies).
this.ko() && null === this.Do && (this.Do = this.Hs.enqueueAfterDelay(this.vo, 6e4, (() => this.$o())));
}
/** Sends a message to the underlying stream. */ Bo(t) {
this.Lo(), this.stream.send(t);
}
/** Called by the idle timer when the stream should close due to inactivity. */ async $o() {
if (this.ko())
// When timing out an idle stream there's no reason to force the stream into backoff when
// it restarts so set the stream state to Initial instead of Error.
return this.close(0 /* PersistentStreamState.Initial */);
}
/** Marks the stream as active again. */ Lo() {
this.Do && (this.Do.cancel(), this.Do = null);
}
/** Cancels the health check delayed operation. */ qo() {
this.Co && (this.Co.cancel(), this.Co = null);
}
/**
* Closes the stream and cleans up as necessary:
*
* * closes the underlying GRPC stream;
* * calls the onClose handler with the given 'error';
* * sets internal stream state to 'finalState';
* * adjusts the backoff timer based on the error
*
* A new stream can be opened by calling start().
*
* @param finalState - the intended state of the stream after closing.
* @param error - the error the connection was closed with.
*/ async close(t, e) {
// Cancel any outstanding timers (they're guaranteed not to execute).
this.Lo(), this.qo(), this.xo.cancel(),
// Invalidates any stream-related callbacks (e.g. from auth or the
// underlying stream), guaranteeing they won't execute.
this.So++, 4 /* PersistentStreamState.Error */ !== t ?
// If this is an intentional close ensure we don't delay our next connection attempt.
this.xo.reset() : e && e.code === L.RESOURCE_EXHAUSTED ? (
// Log the error. (Probably either 'quota exceeded' or 'max queue length reached'.)
N(e.toString()), N("Using maximum backoff delay to prevent overloading the backend."),
this.xo.Ao()) : e && e.code === L.UNAUTHENTICATED && 3 /* PersistentStreamState.Healthy */ !== this.state && (
// "unauthenticated" error means the token was rejected. This should rarely
// happen since both Auth and AppCheck ensure a sufficient TTL when we
// request a token. If a user manually resets their system clock this can
// fail, however. In this case, we should get a Code.UNAUTHENTICATED error
// before we received the first message and we need to invalidate the token
// to ensure that we fetch a new token.
this.authCredentialsProvider.invalidateToken(), this.appCheckCredentialsProvider.invalidateToken()),
// Clean up the underlying stream because we are no longer interested in events.
null !== this.stream && (this.Uo(), this.stream.close(), this.stream = null),
// This state must be assigned before calling onClose() to allow the callback to
// inhibit backoff or otherwise manipulate the state in its non-started state.
this.state = t,
// Notify the listener that the stream closed.
await this.listener.Zr(e);
}
/**
* Can be overridden to perform additional cleanup before the stream is closed.
* Calling super.tearDown() is not required.
*/ Uo() {}
auth() {
this.state = 1 /* PersistentStreamState.Starting */;
const t = this.Ko(this.So), e = this.So;
// TODO(mikelehen): Just use dispatchIfNotClosed, but see TODO below.
Promise.all([ this.authCredentialsProvider.getToken(), this.appCheckCredentialsProvider.getToken() ]).then((([t, n]) => {
// Stream can be stopped while waiting for authentication.
// TODO(mikelehen): We really should just use dispatchIfNotClosed
// and let this dispatch onto the queue, but that opened a spec test can
// of worms that I don't want to deal with in this PR.
this.So === e &&
// Normally we'd have to schedule the callback on the AsyncQueue.
// However, the following calls are safe to be called outside the
// AsyncQueue since they don't chain asynchronous calls
this.Go(t, n);
}), (e => {
t((() => {
const t = new q(L.UNKNOWN, "Fetching auth token failed: " + e.message);
return this.Qo(t);
}));
}));
}
Go(t, e) {
const n = this.Ko(this.So);
this.stream = this.jo(t, e), this.stream.Yr((() => {
n((() => (this.state = 2 /* PersistentStreamState.Open */ , this.Co = this.Hs.enqueueAfterDelay(this.Vo, 1e4, (() => (this.ko() && (this.state = 3 /* PersistentStreamState.Healthy */),
Promise.resolve()))), this.listener.Yr())));
})), this.stream.Zr((t => {
n((() => this.Qo(t)));
})), this.stream.onMessage((t => {
n((() => this.onMessage(t)));
}));
}
Oo() {
this.state = 5 /* PersistentStreamState.Backoff */ , this.xo.Ro((async () => {
this.state = 0 /* PersistentStreamState.Initial */ , this.start();
}));
}
// Visible for tests
Qo(t) {
// In theory the stream could close cleanly, however, in our current model
// we never expect this to happen because if we stop a stream ourselves,
// this callback will never be called. To prevent cases where we retry
// without a backoff accidentally, we set the stream to error in all cases.
return x("PersistentStream", `close with error: ${t}`), this.stream = null, this.close(4 /* PersistentStreamState.Error */ , t);
}
/**
* Returns a "dispatcher" function that dispatches operations onto the
* AsyncQueue but only runs them if closeCount remains unchanged. This allows
* us to turn auth / stream callbacks into no-ops if the stream is closed /
* re-opened, etc.
*/ Ko(t) {
return e => {
this.Hs.enqueueAndForget((() => this.So === t ? e() : (x("PersistentStream", "stream callback skipped by getCloseGuardedDispatcher."),
Promise.resolve())));
};
}
}
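// Illustrative sketch: the close-guarded dispatcher pattern used by Ko()
// above. A dispatcher captures the close count at creation time; if the stream
// has since been closed (the count moved on), any callback it is handed
// becomes a no-op instead of acting on a stale stream.
function exampleCloseGuardedDispatcher(getCloseCount) {
    const countAtCreation = getCloseCount();
    return (callback => {
        getCloseCount() === countAtCreation ? callback() : console.log("stale callback skipped");
    });
}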
/**
* A PersistentStream that implements the Listen RPC.
*
* Once the Listen stream has called the onOpen() listener, any number of
* listen() and unlisten() calls can be made to control what changes will be
* sent from the server for ListenResponses.
*/ class Ru extends Au {
constructor(t, e, n, s, i, r) {
super(t, "listen_stream_connection_backoff" /* TimerId.ListenStreamConnectionBackoff */ , "listen_stream_idle" /* TimerId.ListenStreamIdle */ , "health_check_timeout" /* TimerId.HealthCheckTimeout */ , e, n, s, r),
this.yt = i;
}
jo(t, e) {
return this.connection.wo("Listen", t, e);
}
onMessage(t) {
// A successful response means the stream is healthy
this.xo.reset();
const e = ei(this.yt, t), n = function(t) {
// We have only reached a consistent snapshot for the entire stream if there
// is a read_time set and it applies to all targets (i.e. the list of
// targets is empty). The backend is guaranteed to send such responses.
if (!("targetChange" in t)) return it.min();
const e = t.targetChange;
return e.targetIds && e.targetIds.length ? it.min() : e.readTime ? Ks(e.readTime) : it.min();
}(t);
return this.listener.Wo(e, n);
}
/**
* Registers interest in the results of the given target. If the target
* includes a resumeToken it will be included in the request. Results that
* affect the target will be streamed back as WatchChange messages that
* reference the targetId.
*/ zo(t) {
const e = {};
e.database = Js(this.yt), e.addTarget = function(t, e) {
let n;
const s = e.target;
return n = un(s) ? {
documents: ri(t, s)
} : {
query: oi(t, s)
}, n.targetId = e.targetId, e.resumeToken.approximateByteSize() > 0 ? n.resumeToken = qs(t, e.resumeToken) : e.snapshotVersion.compareTo(it.min()) > 0 && (
// TODO(wuandy): Consider removing above check because it is most likely true.
// Right now, many tests depend on this behaviour though (leaving min() out
// of serialization).
n.readTime = Ls(t, e.snapshotVersion.toTimestamp())), n;
}(this.yt, t);
const n = ci(this.yt, t);
n && (e.labels = n), this.Bo(e);
}
/**
* Unregisters interest in the results of the target associated with the
* given targetId.
*/ Ho(t) {
const e = {};
e.database = Js(this.yt), e.removeTarget = t, this.Bo(e);
}
}
/**
* A Stream that implements the Write RPC.
*
* The Write RPC requires the caller to maintain special streamToken
* state in between calls, to help the server understand which responses the
* client has processed by the time the next request is made. Every response
* will contain a streamToken; this value must be passed to the next
* request.
*
* After calling start() on this stream, the next request must be a handshake,
* containing whatever streamToken is on hand. Once a response to this
* request is received, all pending mutations may be submitted. When
* submitting multiple batches of mutations at the same time, it's
* okay to use the same streamToken for the calls to writeMutations.
*
* TODO(b/33271235): Use proto types
*/ class bu extends Au {
constructor(t, e, n, s, i, r) {
super(t, "write_stream_connection_backoff" /* TimerId.WriteStreamConnectionBackoff */ , "write_stream_idle" /* TimerId.WriteStreamIdle */ , "health_check_timeout" /* TimerId.HealthCheckTimeout */ , e, n, s, r),
this.yt = i, this.Jo = !1;
}
/**
* Tracks whether or not a handshake has been successfully exchanged and
* the stream is ready to accept mutations.
*/ get Yo() {
return this.Jo;
}
// Override of PersistentStream.start
start() {
this.Jo = !1, this.lastStreamToken = void 0, super.start();
}
Uo() {
this.Jo && this.Xo([]);
}
jo(t, e) {
return this.connection.wo("Write", t, e);
}
onMessage(t) {
if (
// Always capture the last stream token.
F(!!t.streamToken), this.lastStreamToken = t.streamToken, this.Jo) {
// A successful first write response means the stream is healthy.
// Note that we could consider a successful handshake healthy; however,
// the write itself might be causing an error we want to back off from.
this.xo.reset();
const e = ii(t.writeResults, t.commitTime), n = Ks(t.commitTime);
return this.listener.Zo(n, e);
}
// The first response is always the handshake response
return F(!t.writeResults || 0 === t.writeResults.length), this.Jo = !0, this.listener.tu();
}
/**
* Sends an initial streamToken to the server, performing the handshake
* required to make the StreamingWrite RPC work. Subsequent
 * calls should wait until onHandshakeComplete has been called.
*/ eu() {
// TODO(dimond): Support stream resumption. We intentionally do not set the
// stream token on the handshake, ignoring any stream token we might have.
const t = {};
t.database = Js(this.yt), this.Bo(t);
}
/** Sends a group of mutations to the Firestore backend to apply. */ Xo(t) {
const e = {
streamToken: this.lastStreamToken,
writes: t.map((t => ni(this.yt, t)))
};
this.Bo(e);
}
}
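// Illustrative sketch of the Write RPC protocol described above. The first
// request is a handshake carrying only the database name; each response
// carries a streamToken that must be echoed on the next request. The
// `requestResponse` helper and all names here are made up for the example.
async function exampleWriteProtocol(requestResponse, database, batches) {
    // Handshake: no writes and (intentionally) no stream token.
    let { streamToken } = await requestResponse({ database });
    for (const writes of batches) {
        // Echo the latest token so the server knows which responses we've processed.
        ({ streamToken } = await requestResponse({ streamToken, writes }));
    }
}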
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Datastore and its related methods are a wrapper around the external Google
* Cloud Datastore grpc API, which provides an interface that is more convenient
* for the rest of the client SDK architecture to consume.
*/
/**
* An implementation of Datastore that exposes additional state for internal
* consumption.
*/
class Pu extends class {} {
constructor(t, e, n, s) {
super(), this.authCredentials = t, this.appCheckCredentials = e, this.connection = n,
this.yt = s, this.nu = !1;
}
su() {
if (this.nu) throw new q(L.FAILED_PRECONDITION, "The client has already been terminated.");
}
/** Invokes the provided RPC with auth and AppCheck tokens. */ ao(t, e, n) {
return this.su(), Promise.all([ this.authCredentials.getToken(), this.appCheckCredentials.getToken() ]).then((([s, i]) => this.connection.ao(t, e, n, s, i))).catch((t => {
throw "FirebaseError" === t.name ? (t.code === L.UNAUTHENTICATED && (this.authCredentials.invalidateToken(),
this.appCheckCredentials.invalidateToken()), t) : new q(L.UNKNOWN, t.toString());
}));
}
/** Invokes the provided RPC with streamed results with auth and AppCheck tokens. */ _o(t, e, n, s) {
return this.su(), Promise.all([ this.authCredentials.getToken(), this.appCheckCredentials.getToken() ]).then((([i, r]) => this.connection._o(t, e, n, i, r, s))).catch((t => {
throw "FirebaseError" === t.name ? (t.code === L.UNAUTHENTICATED && (this.authCredentials.invalidateToken(),
this.appCheckCredentials.invalidateToken()), t) : new q(L.UNKNOWN, t.toString());
}));
}
terminate() {
this.nu = !0;
}
}
// TODO(firestorexp): Make sure there is only one Datastore instance per
// firestore-exp client.
async function vu(t, e) {
const n = B(t), s = function(t, e) {
const n = oi(t, e);
return {
structuredAggregationQuery: {
aggregations: [ {
count: {},
alias: "count_alias"
} ],
structuredQuery: n.structuredQuery
},
parent: n.parent
};
}(n.yt, pn(e)), i = s.parent;
n.connection.co || delete s.parent;
return (await n._o("RunAggregationQuery", i, s, /*expectedResponseCount=*/ 1)).filter((t => !!t.result)).map((t => t.result.aggregateFields));
}
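// Illustrative sketch (placeholder values): the request shape built in vu()
// above for a COUNT aggregation over a single collection.
const exampleAggregationRequest = {
    parent: "projects/my-project/databases/(default)/documents",
    structuredAggregationQuery: {
        structuredQuery: { from: [ { collectionId: "cities" } ] },
        aggregations: [ { count: {}, alias: "count_alias" } ]
    }
};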
/**
* A component used by the RemoteStore to track the OnlineState (that is,
* whether or not the client as a whole should be considered to be online or
* offline), implementing the appropriate heuristics.
*
* In particular, when the client is trying to connect to the backend, we
* allow up to MAX_WATCH_STREAM_FAILURES within ONLINE_STATE_TIMEOUT_MS for
* a connection to succeed. If we have too many failures or the timeout elapses,
* then we set the OnlineState to Offline, and the client will behave as if
* it is offline (get()s will return cached data, etc.).
*/
class Vu {
constructor(t, e) {
this.asyncQueue = t, this.onlineStateHandler = e,
/** The current OnlineState. */
this.state = "Unknown" /* OnlineState.Unknown */ ,
/**
* A count of consecutive failures to open the stream. If it reaches the
* maximum defined by MAX_WATCH_STREAM_FAILURES, we'll set the OnlineState to
* Offline.
*/
this.iu = 0,
/**
* A timer that elapses after ONLINE_STATE_TIMEOUT_MS, at which point we
* transition from OnlineState.Unknown to OnlineState.Offline without waiting
* for the stream to actually fail (MAX_WATCH_STREAM_FAILURES times).
*/
this.ru = null,
/**
* Whether the client should log a warning message if it fails to connect to
* the backend (initially true, cleared after a successful stream, or if we've
* logged the message already).
*/
this.ou = !0;
}
/**
* Called by RemoteStore when a watch stream is started (including on each
* backoff attempt).
*
* If this is the first attempt, it sets the OnlineState to Unknown and starts
* the onlineStateTimer.
*/ uu() {
0 === this.iu && (this.cu("Unknown" /* OnlineState.Unknown */), this.ru = this.asyncQueue.enqueueAfterDelay("online_state_timeout" /* TimerId.OnlineStateTimeout */ , 1e4, (() => (this.ru = null,
this.au("Backend didn't respond within 10 seconds."), this.cu("Offline" /* OnlineState.Offline */),
Promise.resolve()))));
}
/**
* Updates our OnlineState as appropriate after the watch stream reports a
* failure. The first failure moves us to the 'Unknown' state. We then may
* allow multiple failures (based on MAX_WATCH_STREAM_FAILURES) before we
* actually transition to the 'Offline' state.
*/ hu(t) {
"Online" /* OnlineState.Online */ === this.state ? this.cu("Unknown" /* OnlineState.Unknown */) : (this.iu++,
this.iu >= 1 && (this.lu(), this.au(`Connection failed 1 times. Most recent error: ${t.toString()}`),
this.cu("Offline" /* OnlineState.Offline */)));
}
/**
* Explicitly sets the OnlineState to the specified state.
*
* Note that this resets our timers / failure counters, etc. used by our
* Offline heuristics, so must not be used in place of
* handleWatchStreamStart() and handleWatchStreamFailure().
*/ set(t) {
this.lu(), this.iu = 0, "Online" /* OnlineState.Online */ === t && (
// We've connected to watch at least once. Don't warn the developer
// about being offline going forward.
this.ou = !1), this.cu(t);
}
cu(t) {
t !== this.state && (this.state = t, this.onlineStateHandler(t));
}
au(t) {
const e = `Could not reach Cloud Firestore backend. ${t}\nThis typically indicates that your device does not have a healthy Internet connection at the moment. The client will operate in offline mode until it is able to successfully connect to the backend.`;
this.ou ? (N(e), this.ou = !1) : x("OnlineStateTracker", e);
}
lu() {
null !== this.ru && (this.ru.cancel(), this.ru = null);
}
}
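// Illustrative sketch of the heuristic above, heavily simplified: the client
// reports Offline after MAX_WATCH_STREAM_FAILURES consecutive stream failures
// (inlined as 1 in hu() above) or once the online-state timer elapses,
// whichever comes first; a successful connection resets both.
function exampleOnlineHeuristic(failures, timerElapsed, maxFailures = 1) {
    return failures >= maxFailures || timerElapsed ? "Offline" : "Unknown";
}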
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ class Su {
constructor(
/**
* The local store, used to fill the write pipeline with outbound mutations.
*/
t,
/** The client-side proxy for interacting with the backend. */
e, n, s, i) {
this.localStore = t, this.datastore = e, this.asyncQueue = n, this.remoteSyncer = {},
/**
* A list of up to MAX_PENDING_WRITES writes that we have fetched from the
* LocalStore via fillWritePipeline() and have or will send to the write
* stream.
*
* Whenever writePipeline.length > 0 the RemoteStore will attempt to start or
* restart the write stream. When the stream is established the writes in the
* pipeline will be sent in order.
*
* Writes remain in writePipeline until they are acknowledged by the backend
* and thus will automatically be re-sent if the stream is interrupted /
* restarted before they're acknowledged.
*
* Write responses from the backend are linked to their originating request
* purely based on order, and so we can just shift() writes from the front of
* the writePipeline as we receive responses.
*/
this.fu = [],
/**
* A mapping of watched targets that the client cares about tracking and the
* user has explicitly called a 'listen' for this target.
*
* These targets may or may not have been sent to or acknowledged by the
* server. On re-establishing the listen stream, these targets should be sent
* to the server. The targets removed with unlistens are removed eagerly
* without waiting for confirmation from the listen stream.
*/
this.du = new Map,
/**
* A set of reasons for why the RemoteStore may be offline. If empty, the
* RemoteStore may start its network connections.
*/
this._u = new Set,
/**
* Event handlers that get called when the network is disabled or enabled.
*
* PORTING NOTE: These functions are used on the Web client to create the
* underlying streams (to support tree-shakeable streams). On Android and iOS,
* the streams are created during construction of RemoteStore.
*/
this.wu = [], this.mu = i, this.mu.Ur((t => {
n.enqueueAndForget((async () => {
// Porting Note: Unlike iOS, `restartNetwork()` is called even when the
// network becomes unreachable as we don't have any other way to tear
// down our streams.
$u(this) && (x("RemoteStore", "Restarting streams for network reachability change."),
await async function(t) {
const e = B(t);
e._u.add(4 /* OfflineCause.ConnectivityChange */), await Cu(e), e.gu.set("Unknown" /* OnlineState.Unknown */),
e._u.delete(4 /* OfflineCause.ConnectivityChange */), await Du(e);
}(this));
}));
})), this.gu = new Vu(n, s);
}
}
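// Illustrative sketch (not part of the SDK): because write responses arrive in
// the same order as their requests, the pipeline described in the constructor
// above can match each response to its originating batch with a simple shift()
// from the front.
function exampleAckNextWrite(writePipeline, response, onAck) {
    const batch = writePipeline.shift(); // oldest unacknowledged write
    onAck(batch, response);
}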
async function Du(t) {
if ($u(t)) for (const e of t.wu) await e(/* enabled= */ !0);
}
/**
* Temporarily disables the network. The network can be re-enabled using
* enableNetwork().
*/ async function Cu(t) {
for (const e of t.wu) await e(/* enabled= */ !1);
}
/**
 * Starts a new listen for the given target. Uses the resume token if provided.
 * It is a no-op if the target of the given `TargetData` is already being
 * listened to.
*/
function xu(t, e) {
const n = B(t);
n.du.has(e.targetId) || (
// Mark this as something the client is currently listening for.
n.du.set(e.targetId, e), Fu(n) ?
// The listen will be sent in onWatchStreamOpen
Mu(n) : nc(n).ko() && ku(n, e));
}
/**
* Removes the listen from server. It is a no-op if the given target id is
* not being listened to.
*/ function Nu(t, e) {
const n = B(t), s = nc(n);
n.du.delete(e), s.ko() && Ou(n, e), 0 === n.du.size && (s.ko() ? s.Fo() : $u(n) &&
// Revert to OnlineState.Unknown if the watch stream is not open and we
// have no listeners, since without any listens to send we cannot
// confirm if the stream is healthy and upgrade to OnlineState.Online.
n.gu.set("Unknown" /* OnlineState.Unknown */));
}
/**
 * We need to increment the expected number of pending responses we're due
* from watch so we wait for the ack to process any messages from this target.
*/ function ku(t, e) {
t.yu.Ot(e.targetId), nc(t).zo(e);
}
/**
* We need to increment the expected number of pending responses we're due
* from watch so we wait for the removal on the server before we process any
* messages from this target.
*/ function Ou(t, e) {
t.yu.Ot(e), nc(t).Ho(e);
}
function Mu(t) {
t.yu = new Ns({
getRemoteKeysForTarget: e => t.remoteSyncer.getRemoteKeysForTarget(e),
ne: e => t.du.get(e) || null
}), nc(t).start(), t.gu.uu();
}
/**
* Returns whether the watch stream should be started because it's necessary
* and has not yet been started.
*/ function Fu(t) {
return $u(t) && !nc(t).No() && t.du.size > 0;
}
function $u(t) {
return 0 === B(t)._u.size;
}
function Bu(t) {
t.yu = void 0;
}
async function Lu(t) {
t.du.forEach(((e, n) => {
ku(t, e);
}));
}
async function qu(t, e) {
Bu(t),
// If we still need the watch stream, retry the connection.
Fu(t) ? (t.gu.hu(e), Mu(t)) :
// No need to restart watch stream because there are no active targets.
// The online state is set to unknown because there is no active attempt
// at establishing a connection
t.gu.set("Unknown" /* OnlineState.Unknown */);
}
async function Uu(t, e, n) {
if (
// Mark the client as online since we got a message from the server
t.gu.set("Online" /* OnlineState.Online */), e instanceof Cs && 2 /* WatchTargetChangeState.Removed */ === e.state && e.cause)
// There was an error on a target, don't wait for a consistent snapshot
// to raise events
try {
await
/** Handles an error on a target */
async function(t, e) {
const n = e.cause;
for (const s of e.targetIds)
// A watched target might have been removed already.
t.du.has(s) && (await t.remoteSyncer.rejectListen(s, n), t.du.delete(s), t.yu.removeTarget(s));
}
(t, e);
} catch (n) {
x("RemoteStore", "Failed to remove targets %s: %s ", e.targetIds.join(","), n),
await Ku(t, n);
} else if (e instanceof Ss ? t.yu.Kt(e) : e instanceof Ds ? t.yu.Jt(e) : t.yu.jt(e),
!n.isEqual(it.min())) try {
const e = await jo(t.localStore);
n.compareTo(e) >= 0 &&
// We have received a target change with a global snapshot if the snapshot
// version is not equal to SnapshotVersion.min().
await
/**
* Takes a batch of changes from the Datastore, repackages them as a
* RemoteEvent, and passes that on to the listener, which is typically the
* SyncEngine.
*/
function(t, e) {
const n = t.yu.Zt(e);
// Update in-memory resume tokens. LocalStore will update the
// persistent view of these when applying the completed RemoteEvent.
return n.targetChanges.forEach(((n, s) => {
if (n.resumeToken.approximateByteSize() > 0) {
const i = t.du.get(s);
// A watched target might have been removed already.
i && t.du.set(s, i.withResumeToken(n.resumeToken, e));
}
})),
// Re-establish listens for the targets that have been invalidated by
// existence filter mismatches.
n.targetMismatches.forEach((e => {
const n = t.du.get(e);
if (!n)
// A watched target might have been removed already.
return;
// Clear the resume token for the target, since we're in a known mismatch
// state.
t.du.set(e, n.withResumeToken(Wt.EMPTY_BYTE_STRING, n.snapshotVersion)),
// Cause a hard reset by unwatching and rewatching immediately, but
// deliberately don't send a resume token so that we get a full update.
Ou(t, e);
// Mark the target we send as being on behalf of an existence filter
// mismatch, but don't actually retain that in listenTargets. This ensures
// that we flag the first re-listen this way without impacting future
// listens of this target (that might happen e.g. on reconnect).
const s = new Ji(n.target, e, 1 /* TargetPurpose.ExistenceFilterMismatch */ , n.sequenceNumber);
ku(t, s);
})), t.remoteSyncer.applyRemoteEvent(n);
}(t, n);
} catch (e) {
x("RemoteStore", "Failed to raise snapshot:", e), await Ku(t, e);
}
}
/**
* Recovery logic for IndexedDB errors that takes the network offline until
* `op` succeeds. Retries are scheduled with backoff using
* `enqueueRetryable()`. If `op()` is not provided, IndexedDB access is
* validated via a generic operation.
*
* The returned Promise is resolved once the network is disabled and before
* any retry attempt.
*/ async function Ku(t, e, n) {
if (!St(e)) throw e;
t._u.add(1 /* OfflineCause.IndexedDbFailed */),
// Disable network and raise offline snapshots
await Cu(t), t.gu.set("Offline" /* OnlineState.Offline */), n || (
// Use a simple read operation to determine if IndexedDB recovered.
// Ideally, we would expose a health check directly on SimpleDb, but
// RemoteStore only has access to persistence through LocalStore.
n = () => jo(t.localStore)),
// Probe IndexedDB periodically and re-enable network
t.asyncQueue.enqueueRetryable((async () => {
x("RemoteStore", "Retrying IndexedDB access"), await n(), t._u.delete(1 /* OfflineCause.IndexedDbFailed */),
await Du(t);
}));
}
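// Illustrative sketch of the recovery loop above: go offline, then probe with
// a cheap read on a retry queue until IndexedDB answers again, and only then
// re-enable the network. `enqueueRetryable` retries with backoff until the
// async op resolves; the other names here are made up for the example.
function exampleRecoverFromIndexedDbError(queue, probe, goOffline, goOnline) {
    goOffline();
    queue.enqueueRetryable((async () => {
        await probe(); // throws until IndexedDB has recovered
        goOnline();
    }));
}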
/**
* Executes `op`. If `op` fails, takes the network offline until `op`
* succeeds. Returns after the first attempt.
*/ function Gu(t, e) {
return e().catch((n => Ku(t, n, e)));
}
/**
 * Attempts to fill our write pipeline with writes from the LocalStore.
 *
 * Called internally to bootstrap or refill the write pipeline and by
 * SyncEngine whenever there are new mutations to process.
 *
 * Starts the write stream if necessary.
 */ async function Qu(t) {
const e = B(t), n = sc(e);
let s = e.fu.length > 0 ? e.fu[e.fu.length - 1].batchId : -1;
for (;ju(e); ) try {
const t = await Ho(e.localStore, s);
if (null === t) {
0 === e.fu.length && n.Fo();
break;
}
s = t.batchId, Wu(e, t);
} catch (t) {
await Ku(e, t);
}
zu(e) && Hu(e);
}
/**
* Returns true if we can add to the write pipeline (i.e. the network is
* enabled and the write pipeline is not full).
*/ function ju(t) {
return $u(t) && t.fu.length < 10;
}
/**
* Queues additional writes to be sent to the write stream, sending them
* immediately if the write stream is established.
*/ function Wu(t, e) {
t.fu.push(e);
const n = sc(t);
n.ko() && n.Yo && n.Xo(e.mutations);
}
function zu(t) {
return $u(t) && !sc(t).No() && t.fu.length > 0;
}
function Hu(t) {
sc(t).start();
}
async function Ju(t) {
sc(t).eu();
}
async function Yu(t) {
const e = sc(t);
// Send the write pipeline now that the stream is established.
for (const n of t.fu) e.Xo(n.mutations);
}
async function Xu(t, e, n) {
const s = t.fu.shift(), i = zi.from(s, e, n);
await Gu(t, (() => t.remoteSyncer.applySuccessfulWrite(i))),
// It's possible that with the completion of this mutation another
// slot has freed up.
await Qu(t);
}
async function Zu(t, e) {
// If the write stream closed after the write handshake completes, a write
// operation failed and we fail the pending operation.
e && sc(t).Yo &&
// This error affects the actual write.
await async function(t, e) {
// Only handle permanent errors here. If it's transient, just let the retry
// logic kick in.
if (n = e.code, ls(n) && n !== L.ABORTED) {
// This was a permanent error, the request itself was the problem
// so it's not going to succeed if we resend it.
const n = t.fu.shift();
// In this case it's also unlikely that the server itself is melting
// down -- this was just a bad request so inhibit backoff on the next
// restart.
sc(t).Mo(), await Gu(t, (() => t.remoteSyncer.rejectFailedWrite(n.batchId, e))),
// It's possible that with the completion of this mutation
// another slot has freed up.
await Qu(t);
}
var n;
}(t, e),
// The write stream might have been started by refilling the write
// pipeline for failed writes
zu(t) && Hu(t);
}
async function tc(t, e) {
const n = B(t);
n.asyncQueue.verifyOperationInProgress(), x("RemoteStore", "RemoteStore received new credentials");
const s = $u(n);
// Tear down and re-create our network streams. This will ensure we get a
// fresh auth token for the new user and re-fill the write pipeline with
// new mutations from the LocalStore (since mutations are per-user).
n._u.add(3 /* OfflineCause.CredentialChange */), await Cu(n), s &&
// Don't set the network status to Unknown if we are offline.
n.gu.set("Unknown" /* OnlineState.Unknown */), await n.remoteSyncer.handleCredentialChange(e),
n._u.delete(3 /* OfflineCause.CredentialChange */), await Du(n);
}
/**
* Toggles the network state when the client gains or loses its primary lease.
*/ async function ec(t, e) {
const n = B(t);
e ? (n._u.delete(2 /* OfflineCause.IsSecondary */), await Du(n)) : e || (n._u.add(2 /* OfflineCause.IsSecondary */),
await Cu(n), n.gu.set("Unknown" /* OnlineState.Unknown */));
}
/**
* If not yet initialized, registers the WatchStream and its network state
* callback with `remoteStoreImpl`. Returns the existing stream if one is
* already available.
*
* PORTING NOTE: On iOS and Android, the WatchStream gets registered on startup.
* This is not done on Web to allow it to be tree-shaken.
*/ function nc(t) {
return t.pu || (
// Create stream (but note that it is not started yet).
t.pu = function(t, e, n) {
const s = B(t);
return s.su(), new Ru(e, s.connection, s.authCredentials, s.appCheckCredentials, s.yt, n);
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ (t.datastore, t.asyncQueue, {
Yr: Lu.bind(null, t),
Zr: qu.bind(null, t),
Wo: Uu.bind(null, t)
}), t.wu.push((async e => {
e ? (t.pu.Mo(), Fu(t) ? Mu(t) : t.gu.set("Unknown" /* OnlineState.Unknown */)) : (await t.pu.stop(),
Bu(t));
}))), t.pu;
}
/**
* If not yet initialized, registers the WriteStream and its network state
* callback with `remoteStoreImpl`. Returns the existing stream if one is
* already available.
*
* PORTING NOTE: On iOS and Android, the WriteStream gets registered on startup.
* This is not done on Web to allow it to be tree-shaken.
*/ function sc(t) {
return t.Iu || (
// Create stream (but note that it is not started yet).
t.Iu = function(t, e, n) {
const s = B(t);
return s.su(), new bu(e, s.connection, s.authCredentials, s.appCheckCredentials, s.yt, n);
}(t.datastore, t.asyncQueue, {
Yr: Ju.bind(null, t),
Zr: Zu.bind(null, t),
tu: Yu.bind(null, t),
Zo: Xu.bind(null, t)
}), t.wu.push((async e => {
e ? (t.Iu.Mo(),
// This will start the write stream if necessary.
await Qu(t)) : (await t.Iu.stop(), t.fu.length > 0 && (x("RemoteStore", `Stopping write stream with ${t.fu.length} pending writes`),
t.fu = []));
}))), t.Iu;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents an operation scheduled to be run in the future on an AsyncQueue.
*
* It is created via DelayedOperation.createAndSchedule().
*
* Supports cancellation (via cancel()) and early execution (via skipDelay()).
*
* Note: We implement `PromiseLike` instead of `Promise`, as the `Promise` type
* in newer versions of TypeScript defines `finally`, which is not available in
* IE.
*/
class ic {
constructor(t, e, n, s, i) {
this.asyncQueue = t, this.timerId = e, this.targetTimeMs = n, this.op = s, this.removalCallback = i,
this.deferred = new U, this.then = this.deferred.promise.then.bind(this.deferred.promise),
// It's normal for the deferred promise to be canceled (due to cancellation)
// and so we attach a dummy catch callback to avoid
// 'UnhandledPromiseRejectionWarning' log spam.
this.deferred.promise.catch((t => {}));
}
/**
* Creates and returns a DelayedOperation that has been scheduled to be
* executed on the provided asyncQueue after the provided delayMs.
*
* @param asyncQueue - The queue to schedule the operation on.
* @param id - A Timer ID identifying the type of operation this is.
* @param delayMs - The delay (ms) before the operation should be scheduled.
* @param op - The operation to run.
* @param removalCallback - A callback to be called synchronously once the
* operation is executed or canceled, notifying the AsyncQueue to remove it
* from its delayedOperations list.
* PORTING NOTE: This exists to prevent making removeDelayedOperation() and
* the DelayedOperation class public.
*/ static createAndSchedule(t, e, n, s, i) {
const r = Date.now() + n, o = new ic(t, e, r, s, i);
return o.start(n), o;
}
/**
* Starts the timer. This is called immediately after construction by
* createAndSchedule().
*/ start(t) {
this.timerHandle = setTimeout((() => this.handleDelayElapsed()), t);
}
/**
* Queues the operation to run immediately (if it hasn't already been run or
* canceled).
*/ skipDelay() {
return this.handleDelayElapsed();
}
/**
* Cancels the operation if it hasn't already been executed or canceled. The
* promise will be rejected.
*
* As long as the operation has not yet been run, calling cancel() provides a
* guarantee that the operation will not be run.
*/ cancel(t) {
null !== this.timerHandle && (this.clearTimeout(), this.deferred.reject(new q(L.CANCELLED, "Operation cancelled" + (t ? ": " + t : ""))));
}
handleDelayElapsed() {
this.asyncQueue.enqueueAndForget((() => null !== this.timerHandle ? (this.clearTimeout(),
this.op().then((t => this.deferred.resolve(t)))) : Promise.resolve()));
}
clearTimeout() {
null !== this.timerHandle && (this.removalCallback(this), clearTimeout(this.timerHandle),
this.timerHandle = null);
}
}
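/**
 * Editor's sketch (illustrative, not part of the SDK): the DelayedOperation
 * contract implemented by `ic` above, written out with descriptive names.
 * `SketchDeferred` is a local stand-in for the SDK's own deferred helper;
 * callers would await `deferred.promise`.
 */
class SketchDeferred {
  constructor() {
    this.promise = new Promise((resolve, reject) => {
      this.resolve = resolve;
      this.reject = reject;
    });
  }
}
class SketchDelayedOperation {
  constructor(op, delayMs) {
    this.op = op;
    this.deferred = new SketchDeferred();
    // Cancellation rejects the promise; attach a no-op catch so callers that
    // never await it don't trigger unhandled-rejection warnings.
    this.deferred.promise.catch(() => {});
    this.timerHandle = setTimeout(() => this.handleDelayElapsed(), delayMs);
  }
  skipDelay() {
    this.handleDelayElapsed(); // run now, if not already run or canceled
  }
  cancel(reason) {
    if (this.timerHandle !== null) {
      this.clearTimeout();
      this.deferred.reject(new Error('Operation cancelled' + (reason ? ': ' + reason : '')));
    }
  }
  handleDelayElapsed() {
    if (this.timerHandle !== null) {
      this.clearTimeout();
      this.op().then(this.deferred.resolve, this.deferred.reject);
    }
  }
  clearTimeout() {
    clearTimeout(this.timerHandle); // the global, not this method
    this.timerHandle = null;
  }
}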
/**
* Returns a FirestoreError that can be surfaced to the user if the provided
* error is an IndexedDbTransactionError. Re-throws the error otherwise.
*/ function rc(t, e) {
if (N("AsyncQueue", `${e}: ${t}`), St(t)) return new q(L.UNAVAILABLE, `${e}: ${t}`);
throw t;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* DocumentSet is an immutable (copy-on-write) collection that holds documents
* in order specified by the provided comparator. We always add a document key
* comparator on top of what is provided to guarantee document equality based on
* the key.
*/ class oc {
/** The default ordering is by key if the comparator is omitted */
constructor(t) {
// We are adding the document key comparator to the end as it's the only
// guaranteed unique property of a document.
this.comparator = t ? (e, n) => t(e, n) || at.comparator(e.key, n.key) : (t, e) => at.comparator(t.key, e.key),
this.keyedMap = gs(), this.sortedSet = new je(this.comparator);
}
/**
* Returns an empty copy of the existing DocumentSet, using the same
* comparator.
*/ static emptySet(t) {
return new oc(t.comparator);
}
has(t) {
return null != this.keyedMap.get(t);
}
get(t) {
return this.keyedMap.get(t);
}
first() {
return this.sortedSet.minKey();
}
last() {
return this.sortedSet.maxKey();
}
isEmpty() {
return this.sortedSet.isEmpty();
}
/**
* Returns the index of the provided key in the document set, or -1 if the
* document key is not present in the set.
*/ indexOf(t) {
const e = this.keyedMap.get(t);
return e ? this.sortedSet.indexOf(e) : -1;
}
get size() {
return this.sortedSet.size;
}
/** Iterates documents in order defined by "comparator" */ forEach(t) {
this.sortedSet.inorderTraversal(((e, n) => (t(e), !1)));
}
/** Inserts or updates a document with the same key */ add(t) {
// First remove the element if we have it.
const e = this.delete(t.key);
return e.copy(e.keyedMap.insert(t.key, t), e.sortedSet.insert(t, null));
}
/** Deletes a document with a given key */ delete(t) {
const e = this.get(t);
return e ? this.copy(this.keyedMap.remove(t), this.sortedSet.remove(e)) : this;
}
isEqual(t) {
if (!(t instanceof oc)) return !1;
if (this.size !== t.size) return !1;
const e = this.sortedSet.getIterator(), n = t.sortedSet.getIterator();
for (;e.hasNext(); ) {
const t = e.getNext().key, s = n.getNext().key;
if (!t.isEqual(s)) return !1;
}
return !0;
}
toString() {
const t = [];
return this.forEach((e => {
t.push(e.toString());
})), 0 === t.length ? "DocumentSet ()" : "DocumentSet (\n " + t.join(" \n") + "\n)";
}
copy(t, e) {
const n = new oc;
return n.comparator = this.comparator, n.keyedMap = t, n.sortedSet = e, n;
}
}
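/**
 * Editor's sketch (illustrative only): the copy-on-write discipline used by
 * DocumentSet.add()/delete() above, with a plain Map and array standing in
 * for the SDK's immutable sorted structures. Note how the user comparator is
 * always extended with a key tiebreaker so the ordering is total.
 */
function sketchComparator(userCmp) {
  return (a, b) => userCmp(a, b) || (a.key < b.key ? -1 : a.key > b.key ? 1 : 0);
}
class SketchDocSet {
  constructor(cmp, byKey = new Map(), sorted = []) {
    this.cmp = cmp;
    this.byKey = byKey;   // key -> doc
    this.sorted = sorted; // docs ordered by cmp
  }
  add(doc) {
    const base = this.delete(doc.key); // first remove any existing entry
    const byKey = new Map(base.byKey).set(doc.key, doc);
    const sorted = [...base.sorted, doc].sort(this.cmp);
    return new SketchDocSet(this.cmp, byKey, sorted); // original untouched
  }
  delete(key) {
    if (!this.byKey.has(key)) return this; // unchanged sets share the instance
    const byKey = new Map(this.byKey);
    byKey.delete(key);
    return new SketchDocSet(this.cmp, byKey, this.sorted.filter(d => d.key !== key));
  }
}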
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* DocumentChangeSet keeps track of a set of changes to docs in a query, merging
* duplicate events for the same doc.
*/ class uc {
constructor() {
this.Tu = new je(at.comparator);
}
track(t) {
const e = t.doc.key, n = this.Tu.get(e);
n ?
// Merge the new change with the existing change.
0 /* ChangeType.Added */ !== t.type && 3 /* ChangeType.Metadata */ === n.type ? this.Tu = this.Tu.insert(e, t) : 3 /* ChangeType.Metadata */ === t.type && 1 /* ChangeType.Removed */ !== n.type ? this.Tu = this.Tu.insert(e, {
type: n.type,
doc: t.doc
}) : 2 /* ChangeType.Modified */ === t.type && 2 /* ChangeType.Modified */ === n.type ? this.Tu = this.Tu.insert(e, {
type: 2 /* ChangeType.Modified */ ,
doc: t.doc
}) : 2 /* ChangeType.Modified */ === t.type && 0 /* ChangeType.Added */ === n.type ? this.Tu = this.Tu.insert(e, {
type: 0 /* ChangeType.Added */ ,
doc: t.doc
}) : 1 /* ChangeType.Removed */ === t.type && 0 /* ChangeType.Added */ === n.type ? this.Tu = this.Tu.remove(e) : 1 /* ChangeType.Removed */ === t.type && 2 /* ChangeType.Modified */ === n.type ? this.Tu = this.Tu.insert(e, {
type: 1 /* ChangeType.Removed */ ,
doc: n.doc
}) : 0 /* ChangeType.Added */ === t.type && 1 /* ChangeType.Removed */ === n.type ? this.Tu = this.Tu.insert(e, {
type: 2 /* ChangeType.Modified */ ,
doc: t.doc
}) :
// This includes these cases, which don't make sense:
// Added->Added
// Removed->Removed
// Modified->Added
// Removed->Modified
// Metadata->Added
// Removed->Metadata
M() : this.Tu = this.Tu.insert(e, t);
}
Eu() {
const t = [];
return this.Tu.inorderTraversal(((e, n) => {
t.push(n);
})), t;
}
}
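/**
 * Editor's sketch: the merge table applied by track() above, written as a
 * readable function. Returns the merged change type, `null` when the pair
 * cancels out (added then removed), and throws for the pairs the original
 * comment lists as impossible (e.g. Added->Added, Removed->Removed).
 */
function sketchMergeChange(prevType, nextType) {
  if (nextType !== 'added' && prevType === 'metadata') return nextType;
  if (nextType === 'metadata' && prevType !== 'removed') return prevType;
  if (nextType === 'modified' && prevType === 'modified') return 'modified';
  if (nextType === 'modified' && prevType === 'added') return 'added';
  if (nextType === 'removed' && prevType === 'added') return null; // drop both
  if (nextType === 'removed' && prevType === 'modified') return 'removed';
  if (nextType === 'added' && prevType === 'removed') return 'modified';
  throw new Error(`unexpected change pair: ${prevType} -> ${nextType}`);
}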
class cc {
constructor(t, e, n, s, i, r, o, u, c) {
this.query = t, this.docs = e, this.oldDocs = n, this.docChanges = s, this.mutatedKeys = i,
this.fromCache = r, this.syncStateChanged = o, this.excludesMetadataChanges = u,
this.hasCachedResults = c;
}
/** Returns a view snapshot as if all documents in the snapshot were added. */ static fromInitialDocuments(t, e, n, s, i) {
const r = [];
return e.forEach((t => {
r.push({
type: 0 /* ChangeType.Added */ ,
doc: t
});
})), new cc(t, e, oc.emptySet(e), r, n, s,
/* syncStateChanged= */ !0,
/* excludesMetadataChanges= */ !1, i);
}
get hasPendingWrites() {
return !this.mutatedKeys.isEmpty();
}
isEqual(t) {
if (!(this.fromCache === t.fromCache && this.hasCachedResults === t.hasCachedResults && this.syncStateChanged === t.syncStateChanged && this.mutatedKeys.isEqual(t.mutatedKeys) && En(this.query, t.query) && this.docs.isEqual(t.docs) && this.oldDocs.isEqual(t.oldDocs))) return !1;
const e = this.docChanges, n = t.docChanges;
if (e.length !== n.length) return !1;
for (let t = 0; t < e.length; t++) if (e[t].type !== n[t].type || !e[t].doc.isEqual(n[t].doc)) return !1;
return !0;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Holds the listeners and the last received ViewSnapshot for a query being
* tracked by EventManager.
*/ class ac {
constructor() {
this.Au = void 0, this.listeners = [];
}
}
class hc {
constructor() {
this.queries = new ds((t => An(t)), En), this.onlineState = "Unknown" /* OnlineState.Unknown */ ,
this.Ru = new Set;
}
}
async function lc(t, e) {
const n = B(t), s = e.query;
let i = !1, r = n.queries.get(s);
if (r || (i = !0, r = new ac), i) try {
r.Au = await n.onListen(s);
} catch (t) {
const n = rc(t, `Initialization of query '${Rn(e.query)}' failed`);
return void e.onError(n);
}
if (n.queries.set(s, r), r.listeners.push(e),
// Run global snapshot listeners if a consistent snapshot has been emitted.
e.bu(n.onlineState), r.Au) {
e.Pu(r.Au) && wc(n);
}
}
async function fc(t, e) {
const n = B(t), s = e.query;
let i = !1;
const r = n.queries.get(s);
if (r) {
const t = r.listeners.indexOf(e);
t >= 0 && (r.listeners.splice(t, 1), i = 0 === r.listeners.length);
}
if (i) return n.queries.delete(s), n.onUnlisten(s);
}
function dc(t, e) {
const n = B(t);
let s = !1;
for (const t of e) {
const e = t.query, i = n.queries.get(e);
if (i) {
for (const e of i.listeners) e.Pu(t) && (s = !0);
i.Au = t;
}
}
s && wc(n);
}
function _c(t, e, n) {
const s = B(t), i = s.queries.get(e);
if (i) for (const t of i.listeners) t.onError(n);
// Remove all listeners. NOTE: We don't need to call syncEngine.unlisten()
// after an error.
s.queries.delete(e);
}
// Call all global snapshot listeners that have been set.
function wc(t) {
t.Ru.forEach((t => {
t.next();
}));
}
/**
* QueryListener takes a series of internal view snapshots and determines
* when to raise the event.
*
* It uses an Observer to dispatch events.
*/ class mc {
constructor(t, e, n) {
this.query = t, this.vu = e,
/**
* Initial snapshots (e.g. from cache) may not be propagated to the wrapped
* observer. This flag is set to true once we've actually raised an event.
*/
this.Vu = !1, this.Su = null, this.onlineState = "Unknown" /* OnlineState.Unknown */ ,
this.options = n || {};
}
/**
* Applies the new ViewSnapshot to this listener, raising a user-facing event
* if applicable (depending on what changed, whether the user has opted into
* metadata-only changes, etc.). Returns true if a user-facing event was
* indeed raised.
*/ Pu(t) {
if (!this.options.includeMetadataChanges) {
// Remove the metadata-only changes.
const e = [];
for (const n of t.docChanges) 3 /* ChangeType.Metadata */ !== n.type && e.push(n);
t = new cc(t.query, t.docs, t.oldDocs, e, t.mutatedKeys, t.fromCache, t.syncStateChanged,
/* excludesMetadataChanges= */ !0, t.hasCachedResults);
}
let e = !1;
return this.Vu ? this.Du(t) && (this.vu.next(t), e = !0) : this.Cu(t, this.onlineState) && (this.xu(t),
e = !0), this.Su = t, e;
}
onError(t) {
this.vu.error(t);
}
/** Returns whether a snapshot was raised. */ bu(t) {
this.onlineState = t;
let e = !1;
return this.Su && !this.Vu && this.Cu(this.Su, t) && (this.xu(this.Su), e = !0),
e;
}
Cu(t, e) {
// Always raise the first event when we're synced
if (!t.fromCache) return !0;
// NOTE: We consider OnlineState.Unknown as online (it should become Offline
// or Online if we wait long enough).
const n = "Offline" /* OnlineState.Offline */ !== e;
// Don't raise the event if we're online, aren't synced yet (checked
// above) and are waiting for a sync.
return (!this.options.Nu || !n) && (!t.docs.isEmpty() || t.hasCachedResults || "Offline" /* OnlineState.Offline */ === e);
// Raise data from cache if we have any documents, have cached results before,
// or we are offline.
}
Du(t) {
// We don't need to handle includeDocumentMetadataChanges here because
// the Metadata only changes have already been stripped out if needed.
// At this point the only changes we will see are the ones we should
// propagate.
if (t.docChanges.length > 0) return !0;
const e = this.Su && this.Su.hasPendingWrites !== t.hasPendingWrites;
return !(!t.syncStateChanged && !e) && !0 === this.options.includeMetadataChanges;
// Generally we should have hit one of the cases above, but it's possible
// to get here if there were only metadata docChanges and they got
// stripped out.
}
xu(t) {
t = cc.fromInitialDocuments(t.query, t.docs, t.mutatedKeys, t.fromCache, t.hasCachedResults),
this.Vu = !0, this.vu.next(t);
}
}
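/**
 * Editor's sketch: the post-initial-event decision made by Du() above. After
 * metadata-only changes have been stripped (when the listener did not opt
 * in), an event is raised for real doc changes, or for sync-state /
 * pending-writes transitions when includeMetadataChanges is set.
 */
function sketchShouldRaiseEvent(snap, prevSnap, includeMetadataChanges) {
  if (snap.docChanges.length > 0) return true;
  const pendingWritesChanged =
    !!prevSnap && prevSnap.hasPendingWrites !== snap.hasPendingWrites;
  return (snap.syncStateChanged || pendingWritesChanged) && includeMetadataChanges === true;
}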
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A complete element in the bundle stream, together with the byte length it
* occupies in the stream.
*/ class gc {
constructor(t,
// How many bytes this element takes to store in the bundle.
e) {
this.ku = t, this.byteLength = e;
}
Ou() {
return "metadata" in this.ku;
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Helper to convert objects from bundles to model objects in the SDK.
*/ class yc {
constructor(t) {
this.yt = t;
}
Ji(t) {
return Ws(this.yt, t);
}
/**
* Converts a BundleDocument to a MutableDocument.
*/ Yi(t) {
return t.metadata.exists ? Zs(this.yt, t.document, !1) : en.newNoDocument(this.Ji(t.metadata.name), this.Xi(t.metadata.readTime));
}
Xi(t) {
return Ks(t);
}
}
/**
* A class to process the elements from a bundle, load them into local
* storage and provide progress updates while loading.
*/ class pc {
constructor(t, e, n) {
this.Mu = t, this.localStore = e, this.yt = n,
/** Batched queries to be saved into storage */
this.queries = [],
/** Batched documents to be saved into storage */
this.documents = [],
/** The collection groups affected by this bundle. */
this.collectionGroups = new Set, this.progress = Ic(t);
}
/**
* Adds an element from the bundle to the loader.
*
* Returns an updated progress object when adding the element changes the
* number of loaded documents; otherwise returns null.
*/ Fu(t) {
this.progress.bytesLoaded += t.byteLength;
let e = this.progress.documentsLoaded;
if (t.ku.namedQuery) this.queries.push(t.ku.namedQuery); else if (t.ku.documentMetadata) {
this.documents.push({
metadata: t.ku.documentMetadata
}), t.ku.documentMetadata.exists || ++e;
const n = ot.fromString(t.ku.documentMetadata.name);
this.collectionGroups.add(n.get(n.length - 2));
} else t.ku.document && (this.documents[this.documents.length - 1].document = t.ku.document,
++e);
return e !== this.progress.documentsLoaded ? (this.progress.documentsLoaded = e,
Object.assign({}, this.progress)) : null;
}
$u(t) {
const e = new Map, n = new yc(this.yt);
for (const s of t) if (s.metadata.queries) {
const t = n.Ji(s.metadata.name);
for (const n of s.metadata.queries) {
const s = (e.get(n) || Rs()).add(t);
e.set(n, s);
}
}
return e;
}
/**
* Update the progress to 'Success' and return the updated progress.
*/ async complete() {
const t = await nu(this.localStore, new yc(this.yt), this.documents, this.Mu.id), e = this.$u(this.documents);
for (const t of this.queries) await su(this.localStore, t, e.get(t.name));
return this.progress.taskState = "Success", {
progress: this.progress,
Bu: this.collectionGroups,
Lu: t
};
}
}
/**
* Returns a `LoadBundleTaskProgress` representing the initial progress of
* loading a bundle.
*/ function Ic(t) {
return {
taskState: "Running",
documentsLoaded: 0,
bytesLoaded: 0,
totalDocuments: t.totalDocuments,
totalBytes: t.totalBytes
};
}
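/**
 * Editor's sketch: how the loader's Fu() above advances the progress object
 * that Ic() initializes (its shape mirrors the public LoadBundleTaskProgress
 * type, whose taskState is "Error" | "Running" | "Success"). The two
 * predicates on `element` are hypothetical stand-ins for the checks on
 * `ku.documentMetadata` / `ku.document` in the real code: a deleted document
 * counts as loaded as soon as its metadata arrives, a present document only
 * when its body arrives.
 */
function sketchApplyElement(progress, element) {
  progress.bytesLoaded += element.byteLength;
  if (element.isDeletedDocMetadata || element.isDocumentBody) {
    progress.documentsLoaded += 1; // one increment per logical document
  }
  return progress; // taskState stays "Running" until complete() flips it
}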
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class Tc {
constructor(t) {
this.key = t;
}
}
class Ec {
constructor(t) {
this.key = t;
}
}
/**
* View is responsible for computing the final merged truth of what docs are in
* a query. It gets notified of local and remote changes to docs, and applies
* the query filters and limits to determine the most correct possible results.
*/ class Ac {
constructor(t,
/** Documents included in the remote target */
e) {
this.query = t, this.qu = e, this.Uu = null, this.hasCachedResults = !1,
/**
* A flag whether the view is current with the backend. A view is considered
* current after it has seen the current flag from the backend and did not
* lose consistency within the watch stream (e.g. because of an existence
* filter mismatch).
*/
this.current = !1,
/** Documents in the view but not in the remote target */
this.Ku = Rs(),
/** Document Keys that have local changes */
this.mutatedKeys = Rs(), this.Gu = vn(t), this.Qu = new oc(this.Gu);
}
/**
* The set of remote documents that the server has told us belongs to the target associated with
* this view.
*/ get ju() {
return this.qu;
}
/**
* Iterates over a set of doc changes, applies the query limit, and computes
* what the new results should be, what the changes were, and whether we may
* need to go back to the local cache for more results. Does not make any
* changes to the view.
* @param docChanges - The doc changes to apply to this view.
* @param previousChanges - If this is being called with a refill, then start
* with this set of docs and changes instead of the current view.
* @returns a new set of docs, changes, and refill flag.
*/ Wu(t, e) {
const n = e ? e.zu : new uc, s = e ? e.Qu : this.Qu;
let i = e ? e.mutatedKeys : this.mutatedKeys, r = s, o = !1;
// Track the last doc in a (full) limit. This is necessary, because some
// update (a delete, or an update moving a doc past the old limit) might
// mean there is some other document in the local cache that either should
// come (1) between the old last limit doc and the new last document, in the
// case of updates, or (2) after the new last document, in the case of
// deletes. So we keep this doc at the old limit to compare the updates to.
// Note that this should never get used in a refill (when previousChanges is
// set), because there will only be adds -- no deletes or updates.
const u = "F" /* LimitType.First */ === this.query.limitType && s.size === this.query.limit ? s.last() : null, c = "L" /* LimitType.Last */ === this.query.limitType && s.size === this.query.limit ? s.first() : null;
// Drop documents out to meet limit/limitToLast requirement.
if (t.inorderTraversal(((t, e) => {
const a = s.get(t), h = bn(this.query, e) ? e : null, l = !!a && this.mutatedKeys.has(a.key), f = !!h && (h.hasLocalMutations ||
// We only consider committed mutations for documents that were
// mutated during the lifetime of the view.
this.mutatedKeys.has(h.key) && h.hasCommittedMutations);
let d = !1;
// Calculate change
if (a && h) {
a.data.isEqual(h.data) ? l !== f && (n.track({
type: 3 /* ChangeType.Metadata */ ,
doc: h
}), d = !0) : this.Hu(a, h) || (n.track({
type: 2 /* ChangeType.Modified */ ,
doc: h
}), d = !0, (u && this.Gu(h, u) > 0 || c && this.Gu(h, c) < 0) && (
// This doc moved from inside the limit to outside the limit.
// That means there may be some other doc in the local cache
// that should be included instead.
o = !0));
} else !a && h ? (n.track({
type: 0 /* ChangeType.Added */ ,
doc: h
}), d = !0) : a && !h && (n.track({
type: 1 /* ChangeType.Removed */ ,
doc: a
}), d = !0, (u || c) && (
// A doc was removed from a full limit query. We'll need to
// requery from the local cache to see if we know about some other
// doc that should be in the results.
o = !0));
d && (h ? (r = r.add(h), i = f ? i.add(t) : i.delete(t)) : (r = r.delete(t), i = i.delete(t)));
})), null !== this.query.limit) for (;r.size > this.query.limit; ) {
const t = "F" /* LimitType.First */ === this.query.limitType ? r.last() : r.first();
r = r.delete(t.key), i = i.delete(t.key), n.track({
type: 1 /* ChangeType.Removed */ ,
doc: t
});
}
return {
Qu: r,
zu: n,
$i: o,
mutatedKeys: i
};
}
Hu(t, e) {
// We suppress the initial change event for documents that were modified as
// part of a write acknowledgment (e.g. when the value of a server transform
// is applied) as Watch will send us the same document again.
// By suppressing the event, we only raise two user visible events (one with
// `hasPendingWrites` and the final state of the document) instead of three
// (one with `hasPendingWrites`, the modified document with
// `hasPendingWrites` and the final state of the document).
return t.hasLocalMutations && e.hasCommittedMutations && !e.hasLocalMutations;
}
/**
* Updates the view with the given ViewDocumentChanges and optionally updates
* limbo docs and sync state from the provided target change.
* @param docChanges - The set of changes to make to the view's docs.
* @param updateLimboDocuments - Whether to update limbo documents based on
* this change.
* @param targetChange - A target change to apply for computing limbo docs and
* sync state.
* @returns A new ViewChange with the given docs, changes, and sync state.
*/
// PORTING NOTE: The iOS/Android clients always compute limbo document changes.
applyChanges(t, e, n) {
const s = this.Qu;
this.Qu = t.Qu, this.mutatedKeys = t.mutatedKeys;
// Sort changes based on type and query comparator
const i = t.zu.Eu();
i.sort(((t, e) => function(t, e) {
const n = t => {
switch (t) {
case 0 /* ChangeType.Added */ :
return 1;
case 2 /* ChangeType.Modified */ :
case 3 /* ChangeType.Metadata */ :
// A metadata change is converted to a modified change at the public
// api layer. Since we sort by document key and then change type,
// metadata and modified changes must be sorted equivalently.
return 2;
case 1 /* ChangeType.Removed */ :
return 0;
default:
return M();
}
};
return n(t) - n(e);
}
(t.type, e.type) || this.Gu(t.doc, e.doc))), this.Ju(n);
const r = e ? this.Yu() : [], o = 0 === this.Ku.size && this.current ? 1 /* SyncState.Synced */ : 0 /* SyncState.Local */ , u = o !== this.Uu;
if (this.Uu = o, 0 !== i.length || u) {
return {
snapshot: new cc(this.query, t.Qu, s, i, t.mutatedKeys, 0 /* SyncState.Local */ === o, u,
/* excludesMetadataChanges= */ !1, !!n && n.resumeToken.approximateByteSize() > 0),
Xu: r
};
}
// no changes
return {
Xu: r
};
}
/**
* Applies an OnlineState change to the view, potentially generating a
* ViewChange if the view's syncState changes as a result.
*/ bu(t) {
return this.current && "Offline" /* OnlineState.Offline */ === t ? (
// If we're offline, set `current` to false and then call applyChanges()
// to refresh our syncState and generate a ViewChange as appropriate. We
// are guaranteed to get a new TargetChange that sets `current` back to
// true once the client is back online.
this.current = !1, this.applyChanges({
Qu: this.Qu,
zu: new uc,
mutatedKeys: this.mutatedKeys,
$i: !1
},
/* updateLimboDocuments= */ !1)) : {
Xu: []
};
}
/**
* Returns whether the doc for the given key should be in limbo.
*/ Zu(t) {
// If the remote end says it's part of this query, it's not in limbo.
return !this.qu.has(t) && (
// The local store doesn't think it's a result, so it shouldn't be in limbo.
!!this.Qu.has(t) && !this.Qu.get(t).hasLocalMutations);
}
/**
* Updates syncedDocuments, current, and limbo docs based on the given change.
* Returns the list of changes to which docs are in limbo.
*/ Ju(t) {
t && (t.addedDocuments.forEach((t => this.qu = this.qu.add(t))), t.modifiedDocuments.forEach((t => {})),
t.removedDocuments.forEach((t => this.qu = this.qu.delete(t))), this.current = t.current);
}
Yu() {
// We can only determine limbo documents when we're in-sync with the server.
if (!this.current) return [];
// TODO(klimt): Do this incrementally so that it's not quadratic when
// updating many documents.
const t = this.Ku;
this.Ku = Rs(), this.Qu.forEach((t => {
this.Zu(t.key) && (this.Ku = this.Ku.add(t.key));
}));
// Diff the new limbo docs with the old limbo docs.
const e = [];
return t.forEach((t => {
this.Ku.has(t) || e.push(new Ec(t));
})), this.Ku.forEach((n => {
t.has(n) || e.push(new Tc(n));
})), e;
}
/**
* Update the in-memory state of the current view with the state read from
* persistence.
*
* We update the query view whenever a client's primary status changes:
* - When a client transitions from primary to secondary, it can miss
* LocalStorage updates and its query views may temporarily not be
* synchronized with the state on disk.
* - For secondary to primary transitions, the client needs to update the list
* of `syncedDocuments` since secondary clients update their query views
* based purely on synthesized RemoteEvents.
*
* @param queryResult.documents - The documents that match the query according
* to the LocalStore.
* @param queryResult.remoteKeys - The keys of the documents that match the
* query according to the backend.
*
* @returns The ViewChange that resulted from this synchronization.
*/
// PORTING NOTE: Multi-tab only.
tc(t) {
this.qu = t.Hi, this.Ku = Rs();
const e = this.Wu(t.documents);
return this.applyChanges(e, /*updateLimboDocuments=*/ !0);
}
/**
* Returns a view snapshot as if this query was just listened to. Contains
* a document add for every existing document and the `fromCache` and
* `hasPendingWrites` status of the already established view.
*/
// PORTING NOTE: Multi-tab only.
ec() {
return cc.fromInitialDocuments(this.query, this.Qu, this.mutatedKeys, 0 /* SyncState.Local */ === this.Uu, this.hasCachedResults);
}
}
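/**
 * Editor's sketch: the limbo test from Zu() above. A key is in limbo when
 * the local view (`Qu`) still contains the document without pending local
 * mutations, yet the server has not confirmed it as part of the target
 * (`qu`). Parameter names here are descriptive stand-ins for those fields.
 */
function sketchShouldBeInLimbo(key, syncedKeys, viewDocs) {
  if (syncedKeys.has(key)) return false; // server says it matches the query
  const doc = viewDocs.get(key);
  return !!doc && !doc.hasLocalMutations; // local result the server can't explain
}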
/**
* QueryView contains all of the data that SyncEngine needs to keep track of for
* a particular query.
*/
class Rc {
constructor(
/**
* The query itself.
*/
t,
/**
* The target number created by the client that is used in the watch
* stream to identify this query.
*/
e,
/**
* The view is responsible for computing the final merged truth of what
* docs are in the query. It gets notified of local and remote changes,
* and applies the query filters and limits to determine the most correct
* possible results.
*/
n) {
this.query = t, this.targetId = e, this.view = n;
}
}
/** Tracks a limbo resolution. */ class bc {
constructor(t) {
this.key = t,
/**
* Set to true once we've received a document. This is used in
* getRemoteKeysForTarget() and ultimately used by WatchChangeAggregator to
* decide whether it needs to manufacture a delete event for the target once
* the target is CURRENT.
*/
this.nc = !1;
}
}
/**
* An implementation of `SyncEngine` coordinating with other parts of SDK.
*
* The parts of SyncEngine that act as a callback to RemoteStore need to be
* registered individually. This is done in `syncEngineWrite()` and
* `syncEngineListen()` (as well as `applyPrimaryState()`) as these methods
* serve as entry points to RemoteStore's functionality.
*
* Note: some fields defined in this class have public access levels, but the
* class is not exported, so they are only accessible from this module. This
* makes it possible to implement optional features (like bundles) in free
* functions, so that they remain tree-shakeable.
*/ class Pc {
constructor(t, e, n,
// PORTING NOTE: Manages state synchronization in multi-tab environments.
s, i, r) {
this.localStore = t, this.remoteStore = e, this.eventManager = n, this.sharedClientState = s,
this.currentUser = i, this.maxConcurrentLimboResolutions = r, this.sc = {}, this.ic = new ds((t => An(t)), En),
this.rc = new Map,
/**
* The keys of documents that are in limbo for which we haven't yet started a
* limbo resolution query. The strings in this set are the result of calling
* `key.path.canonicalString()` where `key` is a `DocumentKey` object.
*
* The `Set` type was chosen because it provides efficient lookup and removal
* of arbitrary elements and it also maintains insertion order, providing the
* desired queue-like FIFO semantics.
*/
this.oc = new Set,
/**
* Keeps track of the target ID for each document that is in limbo with an
* active target.
*/
this.uc = new je(at.comparator),
/**
* Keeps track of the information about an active limbo resolution for each
* active target ID that was started for the purpose of limbo resolution.
*/
this.cc = new Map, this.ac = new Ro,
/** Stores user completion handlers, indexed by User and BatchId. */
this.hc = {},
/** Stores user callbacks waiting for all pending writes to be acknowledged. */
this.lc = new Map, this.fc = Zr.vn(), this.onlineState = "Unknown" /* OnlineState.Unknown */ ,
// The primary state is set to `true` or `false` immediately after Firestore
// startup. In the interim, a client should only be considered primary if
// `isPrimary` is true.
this.dc = void 0;
}
get isPrimaryClient() {
return !0 === this.dc;
}
}
/**
* Initiates a new listen and resolves the promise once the listen is enqueued
* to the server. All subsequent view snapshots or errors are sent to the
* subscribed handlers. Returns the initial snapshot.
*/
async function vc(t, e) {
const n = na(t);
let s, i;
const r = n.ic.get(e);
if (r)
// PORTING NOTE: With Multi-Tab Web, it is possible that a query view
// already exists when EventManager calls us for the first time. This
// happens when the primary tab is already listening to this query on
// behalf of another tab and the user of the primary also starts listening
// to the query. EventManager will not have an assigned target ID in this
// case and calls `listen` to obtain this ID.
s = r.targetId, n.sharedClientState.addLocalQueryTarget(s), i = r.view.ec(); else {
const t = await Jo(n.localStore, pn(e));
n.isPrimaryClient && xu(n.remoteStore, t);
const r = n.sharedClientState.addLocalQueryTarget(t.targetId);
s = t.targetId, i = await Vc(n, e, s, "current" === r, t.resumeToken);
}
return i;
}
/**
* Registers a view for a previously unknown query and computes its initial
* snapshot.
*/ async function Vc(t, e, n, s, i) {
// PORTING NOTE: On Web only, we inject the code that registers new Limbo
// targets based on view changes. This allows us to only depend on Limbo
// changes when user code includes queries.
t._c = (e, n, s) => async function(t, e, n, s) {
let i = e.view.Wu(n);
i.$i && (
// The query has a limit and some docs were removed, so we need
// to re-run the query against the local store to make sure we
// didn't lose any good docs that had been past the limit.
i = await Xo(t.localStore, e.query,
/* usePreviousResults= */ !1).then((({documents: t}) => e.view.Wu(t, i))));
const r = s && s.targetChanges.get(e.targetId), o = e.view.applyChanges(i,
/* updateLimboDocuments= */ t.isPrimaryClient, r);
return qc(t, e.targetId, o.Xu), o.snapshot;
}(t, e, n, s);
const r = await Xo(t.localStore, e,
/* usePreviousResults= */ !0), o = new Ac(e, r.Hi), u = o.Wu(r.documents), c = Vs.createSynthesizedTargetChangeForCurrentChange(n, s && "Offline" /* OnlineState.Offline */ !== t.onlineState, i), a = o.applyChanges(u,
/* updateLimboDocuments= */ t.isPrimaryClient, c);
qc(t, n, a.Xu);
const h = new Rc(e, n, o);
return t.ic.set(e, h), t.rc.has(n) ? t.rc.get(n).push(e) : t.rc.set(n, [ e ]), a.snapshot;
}
/** Stops listening to the query. */ async function Sc(t, e) {
const n = B(t), s = n.ic.get(e), i = n.rc.get(s.targetId);
if (i.length > 1) return n.rc.set(s.targetId, i.filter((t => !En(t, e)))), void n.ic.delete(e);
// No other queries are mapped to the target, clean up the query and the target.
if (n.isPrimaryClient) {
// We need to remove the local query target first to allow us to verify
// whether any other client is still interested in this target.
n.sharedClientState.removeLocalQueryTarget(s.targetId);
n.sharedClientState.isActiveQueryTarget(s.targetId) || await Yo(n.localStore, s.targetId,
/*keepPersistedTargetData=*/ !1).then((() => {
n.sharedClientState.clearQueryState(s.targetId), Nu(n.remoteStore, s.targetId),
Bc(n, s.targetId);
})).catch(At);
} else Bc(n, s.targetId), await Yo(n.localStore, s.targetId,
/*keepPersistedTargetData=*/ !0);
}
/**
* Initiates the write of a local mutation batch, which involves adding the
* writes to the mutation queue, notifying the remote store about new
* mutations, and raising events for any changes this write caused.
*
* The promise returned by this call is resolved when the above steps
* have completed, *not* when the write was acked by the backend. The
* userCallback is resolved once the write was acked/rejected by the
* backend (or failed locally for any other reason).
*/ async function Dc(t, e, n) {
const s = sa(t);
try {
const t = await function(t, e) {
const n = B(t), s = st.now(), i = e.reduce(((t, e) => t.add(e.key)), Rs());
let r, o;
return n.persistence.runTransaction("Locally write mutations", "readwrite", (t => {
// Figure out which keys do not have a remote version in the cache, this
// is needed to create the right overlay mutation: if no remote version
// presents, we do not need to create overlays as patch mutations.
// TODO(Overlay): Is there a better way to determine this? Using the
// document version does not work because local mutations set them back
// to 0.
let u = ws(), c = Rs();
return n.Gi.getEntries(t, i).next((t => {
u = t, u.forEach(((t, e) => {
e.isValidDocument() || (c = c.add(t));
}));
})).next((() => n.localDocuments.getOverlayedDocuments(t, u))).next((i => {
r = i;
// For non-idempotent mutations (such as `FieldValue.increment()`),
// we record the base state in a separate patch mutation. This is
// later used to guarantee consistent values and prevents flicker
// even if the backend sends us an update that already includes our
// transform.
const o = [];
for (const t of e) {
const e = Zn(t, r.get(t.key).overlayedDocument);
null != e &&
// NOTE: The base state should only be applied if there's some
// existing document to override, so use a Precondition of
// exists=true
o.push(new ns(t.key, e, tn(e.value.mapValue), Wn.exists(!0)));
}
return n.mutationQueue.addMutationBatch(t, s, o, e);
})).next((e => {
o = e;
const s = e.applyToLocalDocumentSet(r, c);
return n.documentOverlayCache.saveOverlays(t, e.batchId, s);
}));
})).then((() => ({
batchId: o.batchId,
changes: ys(r)
})));
}(s.localStore, e);
s.sharedClientState.addPendingMutation(t.batchId), function(t, e, n) {
let s = t.hc[t.currentUser.toKey()];
s || (s = new je(tt));
s = s.insert(e, n), t.hc[t.currentUser.toKey()] = s;
}
/**
* Resolves or rejects the user callback for the given batch and then discards
* it.
*/ (s, t.batchId, n), await Gc(s, t.changes), await Qu(s.remoteStore);
} catch (t) {
// If we can't persist the mutation, we reject the user callback and
// don't send the mutation. The user can then retry the write.
const e = rc(t, "Failed to persist write");
n.reject(e);
}
}
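/**
 * Editor's sketch: the two completion points in Dc() above. The returned
 * promise settles when the batch is staged locally; `userCallback` settles
 * later, when kc()/Oc() see the backend acknowledge or reject the batch.
 * Every helper on `deps` is a descriptive stand-in, not an SDK API.
 */
async function sketchWrite(mutations, userCallback, deps) {
  const { batchId, changes } = await deps.stageLocally(mutations); // persist + overlays
  deps.callbacksByBatchId.set(batchId, userCallback); // resolved on ack/reject
  await deps.emitLocalSnapshots(changes); // raise local view snapshots now
  await deps.fillWritePipeline(); // ask RemoteStore to send the batch
}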
/**
* Applies one remote event to the sync engine, notifying any views of the
* changes, and releasing any pending mutation batches that would become
* visible because of the snapshot version the remote event contains.
*/ async function Cc(t, e) {
const n = B(t);
try {
const t = await Wo(n.localStore, e);
// Update `receivedDocument` as appropriate for any limbo targets.
e.targetChanges.forEach(((t, e) => {
const s = n.cc.get(e);
s && (
// Since this is a limbo resolution lookup, it's for a single document
// and it could be added, modified, or removed, but not a combination.
F(t.addedDocuments.size + t.modifiedDocuments.size + t.removedDocuments.size <= 1),
t.addedDocuments.size > 0 ? s.nc = !0 : t.modifiedDocuments.size > 0 ? F(s.nc) : t.removedDocuments.size > 0 && (F(s.nc),
s.nc = !1));
})), await Gc(n, t, e);
} catch (t) {
await At(t);
}
}
/**
* Applies an OnlineState change to the sync engine and notifies any views of
* the change.
*/ function xc(t, e, n) {
const s = B(t);
// If we are the secondary client, we explicitly ignore the remote store's
// online state (the local client may go offline, even though the primary
// tab remains online) and only apply the primary tab's online state from
// SharedClientState.
if (s.isPrimaryClient && 0 /* OnlineStateSource.RemoteStore */ === n || !s.isPrimaryClient && 1 /* OnlineStateSource.SharedClientState */ === n) {
const t = [];
s.ic.forEach(((n, s) => {
const i = s.view.bu(e);
i.snapshot && t.push(i.snapshot);
})), function(t, e) {
const n = B(t);
n.onlineState = e;
let s = !1;
n.queries.forEach(((t, n) => {
for (const t of n.listeners)
// Run global snapshot listeners if a consistent snapshot has been emitted.
t.bu(e) && (s = !0);
})), s && wc(n);
}(s.eventManager, e), t.length && s.sc.Wo(t), s.onlineState = e, s.isPrimaryClient && s.sharedClientState.setOnlineState(e);
}
}
/**
* Rejects the listen for the given targetID. This can be triggered by the
* backend for any active target.
*
* @param syncEngine - The sync engine implementation.
* @param targetId - The targetID corresponds to one previously initiated by the
* user as part of TargetData passed to listen() on RemoteStore.
* @param err - A description of the condition that has forced the rejection.
* Nearly always this will be an indication that the user is no longer
* authorized to see the data matching the target.
*/ async function Nc(t, e, n) {
const s = B(t);
// PORTING NOTE: Multi-tab only.
s.sharedClientState.updateQueryState(e, "rejected", n);
const i = s.cc.get(e), r = i && i.key;
if (r) {
// TODO(klimt): We really only should do the following on permission
// denied errors, but we don't have the cause code here.
// It's a limbo doc. Create a synthetic event saying it was deleted.
// This is kind of a hack. Ideally, we would have a method in the local
// store to purge a document. However, it would be tricky to keep all of
// the local store's invariants with another method.
let t = new je(at.comparator);
// TODO(b/217189216): This limbo document should ideally have a read time,
// so that it is picked up by any read-time based scans. The backend,
// however, does not send a read time for target removals.
t = t.insert(r, en.newNoDocument(r, it.min()));
const n = Rs().add(r), i = new vs(it.min(),
/* targetChanges= */ new Map,
/* targetMismatches= */ new He(tt), t, n);
await Cc(s, i),
// Since this query failed, we won't want to manually unlisten to it.
// We only remove it from bookkeeping after we successfully applied the
// RemoteEvent. If `applyRemoteEvent()` throws, we want to re-listen to
// this query when the RemoteStore restarts the Watch stream, which should
// re-trigger the target failure.
s.uc = s.uc.remove(r), s.cc.delete(e), Kc(s);
} else await Yo(s.localStore, e,
/* keepPersistedTargetData */ !1).then((() => Bc(s, e, n))).catch(At);
}
async function kc(t, e) {
const n = B(t), s = e.batch.batchId;
try {
const t = await Qo(n.localStore, e);
// The local store may or may not be able to apply the write result and
// raise events immediately (depending on whether the watcher is caught
// up), so we raise user callbacks first so that they consistently happen
// before listen events.
$c(n, s, /*error=*/ null), Fc(n, s), n.sharedClientState.updateMutationState(s, "acknowledged"),
await Gc(n, t);
} catch (t) {
await At(t);
}
}
async function Oc(t, e, n) {
const s = B(t);
try {
const t = await function(t, e) {
const n = B(t);
return n.persistence.runTransaction("Reject batch", "readwrite-primary", (t => {
let s;
return n.mutationQueue.lookupMutationBatch(t, e).next((e => (F(null !== e), s = e.keys(),
n.mutationQueue.removeMutationBatch(t, e)))).next((() => n.mutationQueue.performConsistencyCheck(t))).next((() => n.documentOverlayCache.removeOverlaysForBatchId(t, s, e))).next((() => n.localDocuments.recalculateAndSaveOverlaysForDocumentKeys(t, s))).next((() => n.localDocuments.getDocuments(t, s)));
}));
}
/**
* Returns the largest (latest) batch id in mutation queue that is pending
* server response.
*
* Returns `BATCHID_UNKNOWN` if the queue is empty.
*/ (s.localStore, e);
// The local store may or may not be able to apply the write result and
// raise events immediately (depending on whether the watcher is caught up),
// so we raise user callbacks first so that they consistently happen before
// listen events.
$c(s, e, n), Fc(s, e), s.sharedClientState.updateMutationState(e, "rejected", n),
await Gc(s, t);
} catch (n) {
await At(n);
}
}
/**
* Registers a user callback that resolves when all pending mutations at the moment of calling
* are acknowledged.
*/ async function Mc(t, e) {
const n = B(t);
$u(n.remoteStore) || x("SyncEngine", "The network is disabled. The task returned by 'awaitPendingWrites()' will not complete until the network is enabled.");
try {
const t = await function(t) {
const e = B(t);
return e.persistence.runTransaction("Get highest unacknowledged batch id", "readonly", (t => e.mutationQueue.getHighestUnacknowledgedBatchId(t)));
}(n.localStore);
if (-1 === t)
// Trigger the callback right away if there are no pending writes at the moment.
return void e.resolve();
const s = n.lc.get(t) || [];
s.push(e), n.lc.set(t, s);
} catch (t) {
const n = rc(t, "Initialization of waitForPendingWrites() operation failed");
e.reject(n);
}
}
/**
* Triggers the callbacks that are waiting for this batch id to get acknowledged by the server,
* if there are any.
*/ function Fc(t, e) {
(t.lc.get(e) || []).forEach((t => {
t.resolve();
})), t.lc.delete(e);
}
/** Reject all outstanding callbacks waiting for pending writes to complete. */ function $c(t, e, n) {
const s = B(t);
let i = s.hc[s.currentUser.toKey()];
// NOTE: Mutations restored from persistence won't have callbacks, so it's
// okay for there to be no callback for this ID.
if (i) {
const t = i.get(e);
t && (n ? t.reject(n) : t.resolve(), i = i.remove(e)), s.hc[s.currentUser.toKey()] = i;
}
}
function Bc(t, e, n = null) {
t.sharedClientState.removeLocalQueryTarget(e);
for (const s of t.rc.get(e)) t.ic.delete(s), n && t.sc.wc(s, n);
if (t.rc.delete(e), t.isPrimaryClient) {
t.ac.ls(e).forEach((e => {
t.ac.containsKey(e) ||
// We removed the last reference for this key
Lc(t, e);
}));
}
}
function Lc(t, e) {
t.oc.delete(e.path.canonicalString());
// It's possible that the target already got removed because the query failed. In that case,
// the key won't exist in `limboTargetsByKey`. Only do the cleanup if we still have the target.
const n = t.uc.get(e);
null !== n && (Nu(t.remoteStore, n), t.uc = t.uc.remove(e), t.cc.delete(n), Kc(t));
}
function qc(t, e, n) {
for (const s of n) if (s instanceof Tc) t.ac.addReference(s.key, e), Uc(t, s); else if (s instanceof Ec) {
x("SyncEngine", "Document no longer in limbo: " + s.key), t.ac.removeReference(s.key, e);
t.ac.containsKey(s.key) ||
// We removed the last reference for this key
Lc(t, s.key);
} else M();
}
function Uc(t, e) {
const n = e.key, s = n.path.canonicalString();
t.uc.get(n) || t.oc.has(s) || (x("SyncEngine", "New document in limbo: " + n), t.oc.add(s),
Kc(t));
}
/**
* Starts listens for documents in limbo that are enqueued for resolution,
* subject to a maximum number of concurrent resolutions.
*
* Without bounding the number of concurrent resolutions, the server can fail
* with "resource exhausted" errors which can lead to pathological client
* behavior as seen in https://github.com/firebase/firebase-js-sdk/issues/2683.
*/ function Kc(t) {
for (;t.oc.size > 0 && t.uc.size < t.maxConcurrentLimboResolutions; ) {
const e = t.oc.values().next().value;
t.oc.delete(e);
const n = new at(ot.fromString(e)), s = t.fc.next();
t.cc.set(s, new bc(n)), t.uc = t.uc.insert(n, s), xu(t.remoteStore, new Ji(pn(dn(n.path)), s, 2 /* TargetPurpose.LimboResolution */ , Mt.at));
}
}
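/**
 * Editor's sketch: the bounded pump in Kc() above. Enqueued limbo keys
 * (`oc`) drain in FIFO order -- JS Sets iterate in insertion order -- but
 * never more than `maxConcurrentLimboResolutions` lookups run at once (see
 * firebase/firebase-js-sdk#2683 for why this bound exists). `startLookup` is
 * a stand-in for allocating a limbo-resolution target on the watch stream.
 */
function sketchPumpLimboQueue(enqueued, active, max, startLookup) {
  while (enqueued.size > 0 && active.size < max) {
    const path = enqueued.values().next().value; // oldest enqueued key
    enqueued.delete(path);
    active.set(path, startLookup(path)); // remember the target id for the key
  }
}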
async function Gc(t, e, n) {
const s = B(t), i = [], r = [], o = [];
s.ic.isEmpty() || (s.ic.forEach(((t, u) => {
o.push(s._c(u, e, n).then((t => {
// Update views if there are actual changes.
if (
// If there are changes, or we are handling a global snapshot, notify
// secondary clients to update query state.
(t || n) && s.isPrimaryClient && s.sharedClientState.updateQueryState(u.targetId, (null == t ? void 0 : t.fromCache) ? "not-current" : "current"),
t) {
i.push(t);
const e = Lo.Ci(u.targetId, t);
r.push(e);
}
})));
})), await Promise.all(o), s.sc.Wo(i), await async function(t, e) {
const n = B(t);
try {
await n.persistence.runTransaction("notifyLocalViewChanges", "readwrite", (t => Rt.forEach(e, (e => Rt.forEach(e.Si, (s => n.persistence.referenceDelegate.addReference(t, e.targetId, s))).next((() => Rt.forEach(e.Di, (s => n.persistence.referenceDelegate.removeReference(t, e.targetId, s)))))))));
} catch (t) {
if (!St(t)) throw t;
// If `notifyLocalViewChanges` fails, we did not advance the sequence
// number for the documents that were included in this transaction.
// This might trigger them to be deleted earlier than they otherwise
// would have, but it should not invalidate the integrity of the data.
x("LocalStore", "Failed to update sequence numbers: " + t);
}
for (const t of e) {
const e = t.targetId;
if (!t.fromCache) {
const t = n.qi.get(e), s = t.snapshotVersion, i = t.withLastLimboFreeSnapshotVersion(s);
// Advance the last limbo free snapshot version
n.qi = n.qi.insert(e, i);
}
}
}(s.localStore, r));
}
async function Qc(t, e) {
const n = B(t);
if (!n.currentUser.isEqual(e)) {
x("SyncEngine", "User change. New user:", e.toKey());
const t = await Go(n.localStore, e);
n.currentUser = e,
// Fails tasks waiting for pending writes requested by previous user.
function(t, e) {
t.lc.forEach((t => {
t.forEach((t => {
t.reject(new q(L.CANCELLED, e));
}));
})), t.lc.clear();
}(n, "'waitForPendingWrites' promise is rejected due to a user change."),
// TODO(b/114226417): Consider calling this only in the primary tab.
n.sharedClientState.handleUserChange(e, t.removedBatchIds, t.addedBatchIds), await Gc(n, t.ji);
}
}
function jc(t, e) {
const n = B(t), s = n.cc.get(e);
if (s && s.nc) return Rs().add(s.key);
{
let t = Rs();
const s = n.rc.get(e);
if (!s) return t;
for (const e of s) {
const s = n.ic.get(e);
t = t.unionWith(s.view.ju);
}
return t;
}
}
/**
* Reconcile the list of synced documents in an existing view with those
* from persistence.
*/ async function Wc(t, e) {
const n = B(t), s = await Xo(n.localStore, e.query,
/* usePreviousResults= */ !0), i = e.view.tc(s);
return n.isPrimaryClient && qc(n, e.targetId, i.Xu), i;
}
/**
* Retrieves newly changed documents from remote document cache and raises
* snapshots if needed.
*/
// PORTING NOTE: Multi-Tab only.
async function zc(t, e) {
const n = B(t);
return tu(n.localStore, e).then((t => Gc(n, t)));
}
/** Applies a mutation state to an existing batch. */
// PORTING NOTE: Multi-Tab only.
async function Hc(t, e, n, s) {
const i = B(t), r = await function(t, e) {
const n = B(t), s = B(n.mutationQueue);
return n.persistence.runTransaction("Lookup mutation documents", "readonly", (t => s.Tn(t, e).next((e => e ? n.localDocuments.getDocuments(t, e) : Rt.resolve(null)))));
}
// PORTING NOTE: Multi-Tab only.
(i.localStore, e);
null !== r ? ("pending" === n ?
// If we are the primary client, we need to send this write to the
// backend. Secondary clients will ignore these writes since their remote
// connection is disabled.
await Qu(i.remoteStore) : "acknowledged" === n || "rejected" === n ? (
// NOTE: Both these methods are no-ops for batches that originated from
// other clients.
$c(i, e, s || null), Fc(i, e), function(t, e) {
B(B(t).mutationQueue).An(e);
}
// PORTING NOTE: Multi-Tab only.
(i.localStore, e)) : M(), await Gc(i, r)) :
// A throttled tab may not have seen the mutation before it was completed
// and removed from the mutation queue, in which case we won't have cached
// the affected documents. In this case we can safely ignore the update
// since that means we didn't apply the mutation locally at all (if we
// had, we would have cached the affected documents), and so we will just
// see any resulting document changes via normal remote document updates
// as applicable.
x("SyncEngine", "Cannot apply mutation batch with id: " + e);
}
/** Applies a primary-state change: this tab acquiring or losing the primary lease. */
// PORTING NOTE: Multi-Tab only.
async function Jc(t, e) {
const n = B(t);
if (na(n), sa(n), !0 === e && !0 !== n.dc) {
// Secondary tabs only maintain Views for their local listeners, and a
// View's internal state may not be 100% populated (in particular,
// secondary tabs don't track syncedDocuments, the set of documents the
// server considers to be in the target). So when a secondary becomes
// primary, we need to make sure that all views for all targets
// match the state on disk.
const t = n.sharedClientState.getAllActiveQueryTargets(), e = await Yc(n, t.toArray());
n.dc = !0, await ec(n.remoteStore, !0);
for (const t of e) xu(n.remoteStore, t);
} else if (!1 === e && !1 !== n.dc) {
const t = [];
let e = Promise.resolve();
n.rc.forEach(((s, i) => {
n.sharedClientState.isLocalQueryTarget(i) ? t.push(i) : e = e.then((() => (Bc(n, i),
Yo(n.localStore, i,
/*keepPersistedTargetData=*/ !0)))), Nu(n.remoteStore, i);
})), await e, await Yc(n, t),
// PORTING NOTE: Multi-Tab only.
function(t) {
const e = B(t);
e.cc.forEach(((t, n) => {
Nu(e.remoteStore, n);
})), e.ac.fs(), e.cc = new Map, e.uc = new je(at.comparator);
}
/**
* Reconcile the query views of the provided query targets with the state from
* persistence. Raises snapshots for any changes that affect the local
* client and returns the updated state of all target's query data.
*
* @param syncEngine - The sync engine implementation
* @param targets - the list of targets with views that need to be recomputed
* @param transitionToPrimary - `true` iff the tab transitions from a secondary
* tab to a primary tab
*/
// PORTING NOTE: Multi-Tab only.
(n), n.dc = !1, await ec(n.remoteStore, !1);
}
}
async function Yc(t, e, n) {
const s = B(t), i = [], r = [];
for (const t of e) {
let e;
const n = s.rc.get(t);
if (n && 0 !== n.length) {
// For queries that have a local View, we fetch their current state
// from LocalStore (as the resume token and the snapshot version
// might have changed) and reconcile their views with the persisted
// state (the list of syncedDocuments may have gotten out of sync).
e = await Jo(s.localStore, pn(n[0]));
for (const t of n) {
const e = s.ic.get(t), n = await Wc(s, e);
n.snapshot && r.push(n.snapshot);
}
} else {
// For queries that never executed on this client, we need to
// allocate the target in LocalStore and initialize a new View.
const n = await Zo(s.localStore, t);
e = await Jo(s.localStore, n), await Vc(s, Xc(n), t,
/*current=*/ !1, e.resumeToken);
}
i.push(e);
}
return s.sc.Wo(r), i;
}
/**
* Creates a `Query` object from the specified `Target`. There is no way to
* obtain the original `Query`, so we synthesize a `Query` from the `Target`
* object.
*
* The synthesized result might be different from the original `Query`, but
* since the synthesized `Query` should return the same results as the
* original one (only the presentation of results might differ), the potential
* difference will not cause issues.
*/
// PORTING NOTE: Multi-Tab only.
function Xc(t) {
return fn(t.path, t.collectionGroup, t.orderBy, t.filters, t.limit, "F" /* LimitType.First */ , t.startAt, t.endAt);
}
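/**
 * Editor's sketch: what Xc() above does, spelled out. Every constraint is
 * copied from the Target; only limitType is defaulted to "F" (LimitType.First)
 * because a Target does not record it, which can change presentation order
 * but not the matched document set.
 */
function sketchQueryFromTarget(target) {
  return {
    path: target.path,
    collectionGroup: target.collectionGroup,
    orderBy: target.orderBy,
    filters: target.filters,
    limit: target.limit,
    limitType: 'F', // LimitType.First
    startAt: target.startAt,
    endAt: target.endAt
  };
}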
/** Returns the IDs of the clients that are currently active. */
// PORTING NOTE: Multi-Tab only.
function Zc(t) {
const e = B(t);
return B(B(e.localStore).persistence).vi();
}
/** Applies a query target change from a different tab. */
// PORTING NOTE: Multi-Tab only.
async function ta(t, e, n, s) {
const i = B(t);
if (i.dc)
// If we receive a target state notification via WebStorage, we are
// either already secondary or another tab has taken the primary lease.
return void x("SyncEngine", "Ignoring unexpected query state notification.");
const r = i.rc.get(e);
if (r && r.length > 0) switch (n) {
case "current":
case "not-current":
{
const t = await tu(i.localStore, Pn(r[0])), s = vs.createSynthesizedRemoteEventForCurrentChange(e, "current" === n, Wt.EMPTY_BYTE_STRING);
await Gc(i, t, s);
break;
}
case "rejected":
await Yo(i.localStore, e,
/* keepPersistedTargetData */ !0), Bc(i, e, s);
break;
default:
M();
}
}
/** Adds or removes Watch targets for queries from different tabs. */ async function ea(t, e, n) {
const s = na(t);
if (s.dc) {
for (const t of e) {
if (s.rc.has(t)) {
// A target might have been added in a previous attempt
x("SyncEngine", "Adding an already active target " + t);
continue;
}
const e = await Zo(s.localStore, t), n = await Jo(s.localStore, e);
await Vc(s, Xc(e), n.targetId,
/*current=*/ !1, n.resumeToken), xu(s.remoteStore, n);
}
for (const t of n)
// Check that the target is still active since the target might have been
// removed if it has been rejected by the backend.
s.rc.has(t) &&
// Release queries that are still active.
await Yo(s.localStore, t,
/* keepPersistedTargetData */ !1).then((() => {
Nu(s.remoteStore, t), Bc(s, t);
})).catch(At);
}
}
function na(t) {
const e = B(t);
return e.remoteStore.remoteSyncer.applyRemoteEvent = Cc.bind(null, e), e.remoteStore.remoteSyncer.getRemoteKeysForTarget = jc.bind(null, e),
e.remoteStore.remoteSyncer.rejectListen = Nc.bind(null, e), e.sc.Wo = dc.bind(null, e.eventManager),
e.sc.wc = _c.bind(null, e.eventManager), e;
}
function sa(t) {
const e = B(t);
return e.remoteStore.remoteSyncer.applySuccessfulWrite = kc.bind(null, e), e.remoteStore.remoteSyncer.rejectFailedWrite = Oc.bind(null, e),
e;
}
/**
* Loads a Firestore bundle into the SDK. The returned promise resolves when
* the bundle finished loading.
*
* @param syncEngine - SyncEngine to use.
* @param bundleReader - Bundle to load into the SDK.
* @param task - LoadBundleTask used to update the loading progress to public API.
*/ function ia(t, e, n) {
const s = B(t);
// eslint-disable-next-line @typescript-eslint/no-floating-promises
(
/** Loads a bundle and returns the list of affected collection groups. */
async function(t, e, n) {
try {
const s = await e.getMetadata();
if (await function(t, e) {
const n = B(t), s = Ks(e.createTime);
return n.persistence.runTransaction("hasNewerBundle", "readonly", (t => n.Ns.getBundleMetadata(t, e.id))).then((t => !!t && t.createTime.compareTo(s) >= 0));
}
/**
* Saves the given `BundleMetadata` to local persistence.
*/ (t.localStore, s)) return await e.close(), n._completeWith(function(t) {
return {
taskState: "Success",
documentsLoaded: t.totalDocuments,
bytesLoaded: t.totalBytes,
totalDocuments: t.totalDocuments,
totalBytes: t.totalBytes
};
}(s)), Promise.resolve(new Set);
n._updateProgress(Ic(s));
const i = new pc(s, t.localStore, e.yt);
let r = await e.mc();
for (;r; ) {
const t = await i.Fu(r);
t && n._updateProgress(t), r = await e.mc();
}
const o = await i.complete();
return await Gc(t, o.Lu,
/* remoteEvent */ void 0),
// Save metadata, so loading the same bundle will skip.
await function(t, e) {
const n = B(t);
return n.persistence.runTransaction("Save bundle", "readwrite", (t => n.Ns.saveBundleMetadata(t, e)));
}
/**
* Returns a promise of a `NamedQuery` associated with given query name. Promise
* resolves to undefined if no persisted data can be found.
*/ (t.localStore, s), n._completeWith(o.progress), Promise.resolve(o.Bu);
} catch (t) {
return k("SyncEngine", `Loading bundle failed with ${t}`), n._failWith(t), Promise.resolve(new Set);
}
}
)(s, e, n).then((t => {
s.sharedClientState.notifyBundleLoaded(t);
}));
}
/**
 * Provides all components needed for Firestore with in-memory persistence.
 * Uses EagerGC garbage collection.
 */
class ra {
constructor() {
this.synchronizeTabs = !1;
}
async initialize(t) {
this.yt = Tu(t.databaseInfo.databaseId), this.sharedClientState = this.gc(t), this.persistence = this.yc(t),
await this.persistence.start(), this.localStore = this.Ic(t), this.gcScheduler = this.Tc(t, this.localStore),
this.indexBackfillerScheduler = this.Ec(t, this.localStore);
}
Tc(t, e) {
return null;
}
Ec(t, e) {
return null;
}
Ic(t) {
return Ko(this.persistence, new qo, t.initialUser, this.yt);
}
yc(t) {
return new Do(xo.Bs, this.yt);
}
gc(t) {
return new du;
}
async terminate() {
this.gcScheduler && this.gcScheduler.stop(), await this.sharedClientState.shutdown(),
await this.persistence.shutdown();
}
}
/**
* Provides all components needed for Firestore with IndexedDB persistence.
*/ class oa extends ra {
constructor(t, e, n) {
super(), this.Ac = t, this.cacheSizeBytes = e, this.forceOwnership = n, this.synchronizeTabs = !1;
}
async initialize(t) {
await super.initialize(t), await this.Ac.initialize(this, t),
// Enqueue writes from a previous session
await sa(this.Ac.syncEngine), await Qu(this.Ac.remoteStore),
// NOTE: This will immediately call the listener, so we make sure to
// set it after localStore / remoteStore are started.
await this.persistence.li((() => (this.gcScheduler && !this.gcScheduler.started && this.gcScheduler.start(),
this.indexBackfillerScheduler && !this.indexBackfillerScheduler.started && this.indexBackfillerScheduler.start(),
Promise.resolve())));
}
Ic(t) {
return Ko(this.persistence, new qo, t.initialUser, this.yt);
}
Tc(t, e) {
const n = this.persistence.referenceDelegate.garbageCollector;
return new oo(n, t.asyncQueue, e);
}
Ec(t, e) {
const n = new Ot(e, this.persistence);
return new kt(t.asyncQueue, n);
}
yc(t) {
const e = Bo(t.databaseInfo.databaseId, t.databaseInfo.persistenceKey), n = void 0 !== this.cacheSizeBytes ? Qr.withCacheSize(this.cacheSizeBytes) : Qr.DEFAULT;
return new Mo(this.synchronizeTabs, e, t.clientId, n, t.asyncQueue, pu(), Iu(), this.yt, this.sharedClientState, !!this.forceOwnership);
}
gc(t) {
return new du;
}
}
/**
* Provides all components needed for Firestore with multi-tab IndexedDB
* persistence.
*
* In the legacy client, this provider is used to provide both multi-tab and
* non-multi-tab persistence since we cannot tell at build time whether
* `synchronizeTabs` will be enabled.
*/ class ua extends oa {
constructor(t, e) {
super(t, e, /* forceOwnership= */ !1), this.Ac = t, this.cacheSizeBytes = e, this.synchronizeTabs = !0;
}
async initialize(t) {
await super.initialize(t);
const e = this.Ac.syncEngine;
this.sharedClientState instanceof fu && (this.sharedClientState.syncEngine = {
Fr: Hc.bind(null, e),
$r: ta.bind(null, e),
Br: ea.bind(null, e),
vi: Zc.bind(null, e),
Mr: zc.bind(null, e)
}, await this.sharedClientState.start()),
// NOTE: This will immediately call the listener, so we make sure to
// set it after localStore / remoteStore are started.
await this.persistence.li((async t => {
await Jc(this.Ac.syncEngine, t), this.gcScheduler && (t && !this.gcScheduler.started ? this.gcScheduler.start() : t || this.gcScheduler.stop()),
this.indexBackfillerScheduler && (t && !this.indexBackfillerScheduler.started ? this.indexBackfillerScheduler.start() : t || this.indexBackfillerScheduler.stop());
}));
}
gc(t) {
const e = pu();
if (!fu.C(e)) throw new q(L.UNIMPLEMENTED, "IndexedDB persistence is only available on platforms that support LocalStorage.");
const n = Bo(t.databaseInfo.databaseId, t.databaseInfo.persistenceKey);
return new fu(e, t.asyncQueue, n, t.clientId, t.initialUser);
}
}
/**
* Initializes and wires the components that are needed to interface with the
* network.
*/ class ca {
async initialize(t, e) {
this.localStore || (this.localStore = t.localStore, this.sharedClientState = t.sharedClientState,
this.datastore = this.createDatastore(e), this.remoteStore = this.createRemoteStore(e),
this.eventManager = this.createEventManager(e), this.syncEngine = this.createSyncEngine(e,
/* startAsPrimary=*/ !t.synchronizeTabs), this.sharedClientState.onlineStateHandler = t => xc(this.syncEngine, t, 1 /* OnlineStateSource.SharedClientState */),
this.remoteStore.remoteSyncer.handleCredentialChange = Qc.bind(null, this.syncEngine),
await ec(this.remoteStore, this.syncEngine.isPrimaryClient));
}
createEventManager(t) {
return new hc;
}
createDatastore(t) {
const e = Tu(t.databaseInfo.databaseId), n = (s = t.databaseInfo, new yu(s));
var s;
/** Return the Platform-specific connectivity monitor. */ return function(t, e, n, s) {
return new Pu(t, e, n, s);
}(t.authCredentials, t.appCheckCredentials, n, e);
}
createRemoteStore(t) {
return e = this.localStore, n = this.datastore, s = t.asyncQueue, i = t => xc(this.syncEngine, t, 0 /* OnlineStateSource.RemoteStore */),
r = wu.C() ? new wu : new _u, new Su(e, n, s, i, r);
var e, n, s, i, r;
/** Re-enables the network. Idempotent. */ }
createSyncEngine(t, e) {
return function(t, e, n,
// PORTING NOTE: Manages state synchronization in multi-tab environments.
s, i, r, o) {
const u = new Pc(t, e, n, s, i, r);
return o && (u.dc = !0), u;
}(this.localStore, this.remoteStore, this.eventManager, this.sharedClientState, t.initialUser, t.maxConcurrentLimboResolutions, e);
}
terminate() {
return async function(t) {
const e = B(t);
x("RemoteStore", "RemoteStore shutting down."), e._u.add(5 /* OfflineCause.Shutdown */),
await Cu(e), e.mu.shutdown(),
// Set the OnlineState to Unknown (rather than Offline) to avoid potentially
// triggering spurious listener events with cached data, etc.
e.gu.set("Unknown" /* OnlineState.Unknown */);
}(this.remoteStore);
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ function aa(t, e, n) {
if (!n) throw new q(L.INVALID_ARGUMENT, `Function ${t}() cannot be called with an empty ${e}.`);
}
/**
* Validates that two boolean options are not set at the same time.
* @internal
*/ function ha(t, e, n, s) {
if (!0 === e && !0 === s) throw new q(L.INVALID_ARGUMENT, `${t} and ${n} cannot be used together.`);
}
/**
* Validates that `path` refers to a document (indicated by the fact it contains
 * an even number of segments).
*/ function la(t) {
if (!at.isDocumentKey(t)) throw new q(L.INVALID_ARGUMENT, `Invalid document reference. Document references must have an even number of segments, but ${t} has ${t.length}.`);
}
/**
* Validates that `path` refers to a collection (indicated by the fact it
 * contains an odd number of segments).
*/ function fa(t) {
if (at.isDocumentKey(t)) throw new q(L.INVALID_ARGUMENT, `Invalid collection reference. Collection references must have an odd number of segments, but ${t} has ${t.length}.`);
}
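/**
 * Illustrative sketch (not part of the SDK source) of the segment-count rule
 * the two validators above enforce. `db` and the paths are example
 * assumptions.
 *
 * @example
 * doc(db, "users/alice");              // 2 segments (even): a document path
 * collection(db, "users");             // 1 segment (odd): a collection path
 * collection(db, "users/alice/posts"); // 3 segments (odd): a subcollection
 */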
/**
* Returns true if it's a non-null object without a custom prototype
* (i.e. excludes Array, Date, etc.).
*/
/** Returns a string describing the type / value of the provided input. */
function da(t) {
if (void 0 === t) return "undefined";
if (null === t) return "null";
if ("string" == typeof t) return t.length > 20 && (t = `${t.substring(0, 20)}...`),
JSON.stringify(t);
if ("number" == typeof t || "boolean" == typeof t) return "" + t;
if ("object" == typeof t) {
if (t instanceof Array) return "an array";
{
const e =
/** try to get the constructor name for an object. */
function(t) {
if (t.constructor) return t.constructor.name;
return null;
}
 (t);
return e ? `a custom ${e} object` : "an object";
}
}
return "function" == typeof t ? "a function" : M();
}
/**
 * Casts `obj` to `T`, optionally unwrapping Compat types to expose the
 * underlying instance. Throws if `obj` is not an instance of `T`.
 *
 * This cast is used in the Lite and Full SDK to verify instance types for
 * arguments passed to the public API.
 * @internal
 */
function _a(t,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
e) {
if ("_delegate" in t && (
// Unwrap Compat types
// eslint-disable-next-line @typescript-eslint/no-explicit-any
t = t._delegate), !(t instanceof e)) {
if (e.name === t.constructor.name) throw new q(L.INVALID_ARGUMENT, "Type does not match the expected instance. Did you pass a reference from a different Firestore SDK?");
{
const n = da(t);
throw new q(L.INVALID_ARGUMENT, `Expected type '${e.name}', but it was: ${n}`);
}
}
return t;
}
function wa(t, e) {
if (e <= 0) throw new q(L.INVALID_ARGUMENT, `Function ${t}() requires a positive number, but it was: ${e}.`);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ const ma = new Map;
/**
* An instance map that ensures only one Datastore exists per Firestore
* instance.
*/
/**
* A concrete type describing all the values that can be applied via a
* user-supplied `FirestoreSettings` object. This is a separate type so that
* defaults can be supplied and the value can be checked for equality.
*/
class ga {
constructor(t) {
var e;
if (void 0 === t.host) {
if (void 0 !== t.ssl) throw new q(L.INVALID_ARGUMENT, "Can't provide ssl option if host option is not set");
this.host = "firestore.googleapis.com", this.ssl = true;
} else this.host = t.host, this.ssl = null === (e = t.ssl) || void 0 === e || e;
if (this.credentials = t.credentials, this.ignoreUndefinedProperties = !!t.ignoreUndefinedProperties,
void 0 === t.cacheSizeBytes) this.cacheSizeBytes = 41943040; else {
if (-1 !== t.cacheSizeBytes && t.cacheSizeBytes < 1048576) throw new q(L.INVALID_ARGUMENT, "cacheSizeBytes must be at least 1048576");
this.cacheSizeBytes = t.cacheSizeBytes;
}
this.experimentalForceLongPolling = !!t.experimentalForceLongPolling, this.experimentalAutoDetectLongPolling = !!t.experimentalAutoDetectLongPolling,
this.useFetchStreams = !!t.useFetchStreams, ha("experimentalForceLongPolling", t.experimentalForceLongPolling, "experimentalAutoDetectLongPolling", t.experimentalAutoDetectLongPolling);
}
isEqual(t) {
return this.host === t.host && this.ssl === t.ssl && this.credentials === t.credentials && this.cacheSizeBytes === t.cacheSizeBytes && this.experimentalForceLongPolling === t.experimentalForceLongPolling && this.experimentalAutoDetectLongPolling === t.experimentalAutoDetectLongPolling && this.ignoreUndefinedProperties === t.ignoreUndefinedProperties && this.useFetchStreams === t.useFetchStreams;
}
}
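/**
 * Illustrative sketch (not part of the SDK source): a user-supplied settings
 * object this class normalizes. Field values are example assumptions;
 * omitted fields fall back to the defaults applied above (host
 * "firestore.googleapis.com", ssl enabled, cacheSizeBytes 41943040).
 *
 * @example
 * initializeFirestore(app, {
 *   host: "firestore.example.com", // hypothetical custom host
 *   ssl: true,
 *   cacheSizeBytes: 1048576, // the enforced minimum
 *   ignoreUndefinedProperties: true
 * });
 */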
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The Cloud Firestore service interface.
*
* Do not call this constructor directly. Instead, use {@link (getFirestore:1)}.
*/ class ya {
/** @hideconstructor */
constructor(t, e, n, s) {
this._authCredentials = t, this._appCheckCredentials = e, this._databaseId = n,
this._app = s,
/**
* Whether it's a Firestore or Firestore Lite instance.
*/
this.type = "firestore-lite", this._persistenceKey = "(lite)", this._settings = new ga({}),
this._settingsFrozen = !1;
}
/**
* The {@link @firebase/app#FirebaseApp} associated with this `Firestore` service
* instance.
*/ get app() {
if (!this._app) throw new q(L.FAILED_PRECONDITION, "Firestore was not initialized using the Firebase SDK. 'app' is not available");
return this._app;
}
get _initialized() {
return this._settingsFrozen;
}
get _terminated() {
return void 0 !== this._terminateTask;
}
_setSettings(t) {
if (this._settingsFrozen) throw new q(L.FAILED_PRECONDITION, "Firestore has already been started and its settings can no longer be changed. You can only modify settings before calling any other methods on a Firestore object.");
this._settings = new ga(t), void 0 !== t.credentials && (this._authCredentials = function(t) {
if (!t) return new G;
switch (t.type) {
case "gapi":
const e = t.client;
return new z(e, t.sessionIndex || "0", t.iamToken || null, t.authTokenFactory || null);
case "provider":
return t.client;
default:
throw new q(L.INVALID_ARGUMENT, "makeAuthCredentialsProvider failed due to invalid credential type");
}
}(t.credentials));
}
_getSettings() {
return this._settings;
}
_freezeSettings() {
return this._settingsFrozen = !0, this._settings;
}
_delete() {
return this._terminateTask || (this._terminateTask = this._terminate()), this._terminateTask;
}
/** Returns a JSON-serializable representation of this `Firestore` instance. */ toJSON() {
return {
app: this._app,
databaseId: this._databaseId,
settings: this._settings
};
}
/**
* Terminates all components used by this client. Subclasses can override
* this method to clean up their own dependencies, but must also call this
* method.
*
* Only ever called once.
*/ _terminate() {
/**
* Removes all components associated with the provided instance. Must be called
* when the `Firestore` instance is terminated.
*/
return function(t) {
const e = ma.get(t);
e && (x("ComponentProvider", "Removing Datastore"), ma.delete(t), e.terminate());
}(this), Promise.resolve();
}
}
/**
* Modify this instance to communicate with the Cloud Firestore emulator.
*
* Note: This must be called before this instance has been used to do any
* operations.
*
* @param firestore - The `Firestore` instance to configure to connect to the
* emulator.
* @param host - the emulator host (ex: localhost).
* @param port - the emulator port (ex: 9000).
* @param options.mockUserToken - the mock auth token to use for unit testing
* Security Rules.
*/ function pa(t, e, n, s = {}) {
var i;
const r = (t = _a(t, ya))._getSettings();
if ("firestore.googleapis.com" !== r.host && r.host !== e && k("Host has been set in both settings() and useEmulator(), emulator host will be used"),
t._setSettings(Object.assign(Object.assign({}, r), {
host: `${e}:${n}`,
ssl: !1
})), s.mockUserToken) {
let e, n;
if ("string" == typeof s.mockUserToken) e = s.mockUserToken, n = v.MOCK_USER; else {
// Let createMockUserToken validate first (catches common mistakes like
// invalid field "uid" and missing field "sub" / "user_id".)
e = d(s.mockUserToken, null === (i = t._app) || void 0 === i ? void 0 : i.options.projectId);
const r = s.mockUserToken.sub || s.mockUserToken.user_id;
if (!r) throw new q(L.INVALID_ARGUMENT, "mockUserToken must contain 'sub' or 'user_id' field!");
n = new v(r);
}
t._authCredentials = new Q(new K(e, n));
}
}
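/**
 * Illustrative sketch (not part of the SDK source): pointing a client at a
 * local emulator before any other operation. Host, port and token contents
 * are example assumptions.
 *
 * @example
 * const db = getFirestore(app);
 * connectFirestoreEmulator(db, "localhost", 8080, {
 *   mockUserToken: { sub: "alice" } // must contain 'sub' or 'user_id'
 * });
 */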
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A `DocumentReference` refers to a document location in a Firestore database
* and can be used to write, read, or listen to the location. The document at
* the referenced location may or may not exist.
*/ class Ia {
/** @hideconstructor */
constructor(t,
/**
* If provided, the `FirestoreDataConverter` associated with this instance.
*/
e, n) {
this.converter = e, this._key = n,
/** The type of this Firestore reference. */
this.type = "document", this.firestore = t;
}
get _path() {
return this._key.path;
}
/**
* The document's identifier within its collection.
*/ get id() {
return this._key.path.lastSegment();
}
/**
* A string representing the path of the referenced document (relative
* to the root of the database).
*/ get path() {
return this._key.path.canonicalString();
}
/**
* The collection this `DocumentReference` belongs to.
*/ get parent() {
return new Ea(this.firestore, this.converter, this._key.path.popLast());
}
withConverter(t) {
return new Ia(this.firestore, t, this._key);
}
}
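/**
 * Illustrative sketch (not part of the SDK source): the derived properties a
 * `DocumentReference` exposes. The path is an example assumption.
 *
 * @example
 * const ref = doc(db, "users/alice");
 * ref.id;          // "alice"
 * ref.path;        // "users/alice"
 * ref.parent.path; // "users"
 */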
/**
* A `Query` refers to a query which you can read or listen to. You can also
* construct refined `Query` objects by adding filters and ordering.
*/ class Ta {
// This is the lite version of the Query class in the main SDK.
/** @hideconstructor protected */
constructor(t,
/**
* If provided, the `FirestoreDataConverter` associated with this instance.
*/
e, n) {
this.converter = e, this._query = n,
/** The type of this Firestore reference. */
this.type = "query", this.firestore = t;
}
withConverter(t) {
return new Ta(this.firestore, t, this._query);
}
}
/**
* A `CollectionReference` object can be used for adding documents, getting
* document references, and querying for documents (using {@link query}).
*/ class Ea extends Ta {
/** @hideconstructor */
constructor(t, e, n) {
super(t, e, dn(n)), this._path = n,
/** The type of this Firestore reference. */
this.type = "collection";
}
/** The collection's identifier. */ get id() {
return this._query.path.lastSegment();
}
/**
* A string representing the path of the referenced collection (relative
* to the root of the database).
*/ get path() {
return this._query.path.canonicalString();
}
/**
* A reference to the containing `DocumentReference` if this is a
* subcollection. If this isn't a subcollection, the reference is null.
*/ get parent() {
const t = this._path.popLast();
return t.isEmpty() ? null : new Ia(this.firestore,
/* converter= */ null, new at(t));
}
withConverter(t) {
return new Ea(this.firestore, t, this._path);
}
}
function Aa(t, e, ...n) {
if (t = _(t), aa("collection", "path", e), t instanceof ya) {
const s = ot.fromString(e, ...n);
return fa(s), new Ea(t, /* converter= */ null, s);
}
{
if (!(t instanceof Ia || t instanceof Ea)) throw new q(L.INVALID_ARGUMENT, "Expected first argument to collection() to be a CollectionReference, a DocumentReference or FirebaseFirestore");
const s = t._path.child(ot.fromString(e, ...n));
return fa(s), new Ea(t.firestore,
/* converter= */ null, s);
}
}
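/**
 * Illustrative sketch (not part of the SDK source): the accepted call forms
 * of `collection()`. `userDoc` and `usersColl` are example assumptions.
 *
 * @example
 * collection(db, "users");              // from a Firestore instance
 * collection(userDoc, "posts");         // subcollection of a document
 * collection(usersColl, "alice/posts"); // relative path with odd segment count
 */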
// TODO(firestorelite): Consider using ErrorFactory -
// https://github.com/firebase/firebase-js-sdk/blob/0131e1f/packages/util/src/errors.ts#L106
/**
* Creates and returns a new `Query` instance that includes all documents in the
* database that are contained in a collection or subcollection with the
* given `collectionId`.
*
* @param firestore - A reference to the root `Firestore` instance.
* @param collectionId - Identifies the collections to query over. Every
* collection or subcollection with this ID as the last segment of its path
* will be included. Cannot contain a slash.
* @returns The created `Query`.
*/ function Ra(t, e) {
if (t = _a(t, ya), aa("collectionGroup", "collection id", e), e.indexOf("/") >= 0) throw new q(L.INVALID_ARGUMENT, `Invalid collection ID '${e}' passed to function collectionGroup(). Collection IDs must not contain '/'.`);
return new Ta(t,
/* converter= */ null, function(t) {
return new ln(ot.emptyPath(), t);
}(e));
}
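/**
 * Illustrative sketch (not part of the SDK source): querying every
 * collection whose ID is "posts", at any nesting depth. The ID is an example
 * assumption; IDs containing '/' are rejected by the check above.
 *
 * @example
 * const allPosts = collectionGroup(db, "posts"); // matches users/{id}/posts, etc.
 */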
function ba(t, e, ...n) {
if (t = _(t),
// We allow omission of 'pathString' but explicitly prohibit passing in both
// 'undefined' and 'null'.
1 === arguments.length && (e = Z.R()), aa("doc", "path", e), t instanceof ya) {
const s = ot.fromString(e, ...n);
return la(s), new Ia(t,
/* converter= */ null, new at(s));
}
{
if (!(t instanceof Ia || t instanceof Ea)) throw new q(L.INVALID_ARGUMENT, "Expected first argument to doc() to be a CollectionReference, a DocumentReference or FirebaseFirestore");
const s = t._path.child(ot.fromString(e, ...n));
return la(s), new Ia(t.firestore, t instanceof Ea ? t.converter : null, new at(s));
}
}
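/**
 * Illustrative sketch (not part of the SDK source): the call forms of
 * `doc()`, including the auto-ID form that omits the path entirely.
 * `usersColl` is an example assumption.
 *
 * @example
 * doc(db, "users/alice");    // absolute path from the root
 * doc(db, "users", "alice"); // path segments as separate arguments
 * doc(usersColl, "alice");   // relative to a CollectionReference
 * doc(usersColl);            // auto-generated document ID
 */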
/**
* Returns true if the provided references are equal.
*
* @param left - A reference to compare.
* @param right - A reference to compare.
* @returns true if the references point to the same location in the same
* Firestore database.
*/ function Pa(t, e) {
return t = _(t), e = _(e), (t instanceof Ia || t instanceof Ea) && (e instanceof Ia || e instanceof Ea) && (t.firestore === e.firestore && t.path === e.path && t.converter === e.converter);
}
/**
* Returns true if the provided queries point to the same collection and apply
* the same constraints.
*
* @param left - A `Query` to compare.
* @param right - A `Query` to compare.
* @returns true if the references point to the same location in the same
* Firestore database.
*/ function va(t, e) {
return t = _(t), e = _(e), t instanceof Ta && e instanceof Ta && (t.firestore === e.firestore && En(t._query, e._query) && t.converter === e.converter);
}
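/**
 * Illustrative sketch (not part of the SDK source): equality is structural,
 * comparing Firestore instance, path or query, and converter. `coll` is an
 * example assumption.
 *
 * @example
 * refEqual(doc(db, "users/alice"), doc(db, "users/alice")); // true
 * queryEqual(query(coll, where("a", "==", 1)),
 *            query(coll, where("a", "==", 1)));             // true
 */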
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* How many bytes to read each time when `ReadableStreamReader.read()` is
* called. Only applicable for byte streams that we control (e.g. those backed
 * by a Uint8Array).
*/
/**
 * Builds a `ByteStreamReader` from a Uint8Array.
* @param source - The data source to use.
* @param bytesPerRead - How many bytes each `read()` from the returned reader
* will read.
*/
function Va(t, e = 10240) {
let n = 0;
// The TypeScript definition for ReadableStreamReader changed. We use
// `any` here to allow this code to compile with different versions.
// See https://github.com/microsoft/TypeScript/issues/42970
// eslint-disable-next-line @typescript-eslint/no-explicit-any
return {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
async read() {
if (n < t.byteLength) {
const s = {
value: t.slice(n, n + e),
done: !1
};
return n += e, s;
}
return {
done: !0
};
},
async cancel() {},
releaseLock() {},
closed: Promise.reject("unimplemented")
};
}
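/**
 * Illustrative sketch (not part of the SDK source): draining the reader
 * built above. The input bytes and chunk size are example assumptions; each
 * read() yields at most `bytesPerRead` bytes until `done` is reported.
 *
 * @example
 * const reader = Va(new TextEncoder().encode("hello world"), 4);
 * for (let r = await reader.read(); !r.done; r = await reader.read()) {
 *   console.log(r.value); // Uint8Array chunks of up to 4 bytes
 * }                       // ("hell", "o wo", "rld" as text)
 */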
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* On web, a `ReadableStream` is wrapped around by a `ByteStreamReader`.
*/
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* A wrapper implementation of Observer that will dispatch events
* asynchronously. To allow immediate silencing, a mute call is added which
 * causes scheduled events to no longer be raised.
*/
class Sa {
constructor(t) {
this.observer = t,
/**
* When set to true, will not raise future events. Necessary to deal with
* async detachment of listener.
*/
this.muted = !1;
}
next(t) {
this.observer.next && this.Rc(this.observer.next, t);
}
error(t) {
this.observer.error ? this.Rc(this.observer.error, t) : N("Uncaught Error in snapshot listener:", t.toString());
}
bc() {
this.muted = !0;
}
Rc(t, e) {
this.muted || setTimeout((() => {
this.muted || t(e);
}), 0);
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A class representing a bundle.
*
* Takes a bundle stream or buffer, and presents abstractions to read bundled
* elements out of the underlying content.
*/ class Da {
constructor(
/** The reader to read from underlying binary bundle data source. */
t, e) {
this.Pc = t, this.yt = e,
/** Cached bundle metadata. */
this.metadata = new U,
/**
* Internal buffer to hold bundle content, accumulating incomplete element
* content.
*/
this.buffer = new Uint8Array, this.vc = new TextDecoder("utf-8"),
// Read the metadata (which is the first element).
this.Vc().then((t => {
t && t.Ou() ? this.metadata.resolve(t.ku.metadata) : this.metadata.reject(new Error(`The first element of the bundle is not bundle metadata; it is\n ${JSON.stringify(null == t ? void 0 : t.ku)}`));
}), (t => this.metadata.reject(t)));
}
close() {
return this.Pc.cancel();
}
async getMetadata() {
return this.metadata.promise;
}
async mc() {
// Makes sure metadata is read before proceeding.
return await this.getMetadata(), this.Vc();
}
/**
 * Reads from the head of the internal buffer, pulling more data from the
 * underlying stream if a complete element cannot be found, until an
 * element (including the prefixed length and the JSON string) is found.
 *
 * Once a complete element is read, it is dropped from the internal buffer.
*
* Returns either the bundled element, or null if we have reached the end of
* the stream.
*/ async Vc() {
const t = await this.Sc();
if (null === t) return null;
const e = this.vc.decode(t), n = Number(e);
isNaN(n) && this.Dc(`length string (${e}) is not a valid number`);
const s = await this.Cc(n);
return new gc(JSON.parse(s), t.length + n);
}
/** First index of '{' from the underlying buffer. */ xc() {
return this.buffer.findIndex((t => t === "{".charCodeAt(0)));
}
/**
 * Reads from the beginning of the internal buffer up to the first '{' and
 * returns the content.
 *
 * Returns null if the end of the stream has been reached.
*/ async Sc() {
for (;this.xc() < 0; ) {
if (await this.Nc()) break;
}
// Broke out of the loop because underlying stream is closed, and there
// happens to be no more data to process.
if (0 === this.buffer.length) return null;
const t = this.xc();
// Broke out of the loop because underlying stream is closed, but still
// cannot find an open bracket.
t < 0 && this.Dc("Reached the end of the bundle when a length string is expected.");
const e = this.buffer.slice(0, t);
// Update the internal buffer to drop the read length.
return this.buffer = this.buffer.slice(t), e;
}
/**
 * Reads a specified number of bytes from the internal buffer, pulling more
 * data from the underlying stream if needed.
*
* Returns a string decoded from the read bytes.
*/ async Cc(t) {
for (;this.buffer.length < t; ) {
await this.Nc() && this.Dc("Reached the end of the bundle when more data is expected.");
}
const e = this.vc.decode(this.buffer.slice(0, t));
// Update the internal buffer to drop the read json string.
return this.buffer = this.buffer.slice(t), e;
}
Dc(t) {
// eslint-disable-next-line @typescript-eslint/no-floating-promises
throw this.Pc.cancel(), new Error(`Invalid bundle format: ${t}`);
}
/**
* Pulls more data from underlying stream to internal buffer.
* Returns a boolean indicating whether the stream is finished.
*/ async Nc() {
const t = await this.Pc.read();
if (!t.done) {
const e = new Uint8Array(this.buffer.length + t.value.length);
e.set(this.buffer), e.set(t.value, this.buffer.length), this.buffer = e;
}
return t.done;
}
}
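/**
 * Illustrative sketch (not part of the SDK source): the length-prefixed
 * framing this reader parses. Each element is a decimal length string
 * followed immediately by that many bytes of JSON, with the metadata element
 * first. The content below is an invented example, not a real bundle.
 *
 * @example
 * // 39{"metadata":{...}}58{"namedQuery":{...}}...
 * //   Sc() consumes the digits up to the first '{'; Cc(n) then decodes the
 * //   next n bytes as the element's JSON payload.
 */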
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents an aggregation that can be performed by Firestore.
*/
// eslint-disable-next-line @typescript-eslint/no-unused-vars
class Ca {
constructor() {
/** A type string to uniquely identify instances of this class. */
this.type = "AggregateField";
}
}
/**
* The results of executing an aggregation query.
*/ class xa {
/** @hideconstructor */
constructor(t, e) {
this._data = e,
/** A type string to uniquely identify instances of this class. */
this.type = "AggregateQuerySnapshot", this.query = t;
}
/**
* Returns the results of the aggregations performed over the underlying
* query.
*
* The keys of the returned object will be the same as those of the
* `AggregateSpec` object specified to the aggregation method, and the values
* will be the corresponding aggregation result.
*
* @returns The results of the aggregations performed over the underlying
* query.
*/ data() {
return this._data;
}
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* CountQueryRunner encapsulates the logic needed to run the count aggregation
* queries.
*/ class Na {
constructor(t, e, n) {
this.query = t, this.datastore = e, this.userDataWriter = n;
}
run() {
return vu(this.datastore, this.query._query).then((t => {
F(void 0 !== t[0]);
const e = Object.entries(t[0]).filter((([t, e]) => "count_alias" === t)).map((([t, e]) => this.userDataWriter.convertValue(e)))[0];
return F("number" == typeof e), Promise.resolve(new xa(this.query, {
count: e
}));
}));
}
}
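/**
 * Illustrative sketch (not part of the SDK source): running the count
 * aggregation through the public `getCountFromServer()` API. The query is an
 * example assumption.
 *
 * @example
 * const snap = await getCountFromServer(collection(db, "users"));
 * snap.data().count; // a number, per the assertions in run() above
 */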
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal transaction object responsible for accumulating the mutations to
* perform and the base versions for any documents read.
*/ class ka {
constructor(t) {
this.datastore = t,
// The version of each document that was read during this transaction.
this.readVersions = new Map, this.mutations = [], this.committed = !1,
/**
* A deferred usage error that occurred previously in this transaction that
* will cause the transaction to fail once it actually commits.
*/
this.lastWriteError = null,
/**
* Set of documents that have been written in the transaction.
*
* When there's more than one write to the same key in a transaction, any
* writes after the first are handled differently.
*/
this.writtenDocs = new Set;
}
async lookup(t) {
if (this.ensureCommitNotCalled(), this.mutations.length > 0) throw new q(L.INVALID_ARGUMENT, "Firestore transactions require all reads to be executed before all writes.");
const e = await async function(t, e) {
const n = B(t), s = Js(n.yt) + "/documents", i = {
documents: e.map((t => js(n.yt, t)))
}, r = await n._o("BatchGetDocuments", s, i, e.length), o = new Map;
r.forEach((t => {
const e = ti(n.yt, t);
o.set(e.key.toString(), e);
}));
const u = [];
return e.forEach((t => {
const e = o.get(t.toString());
F(!!e), u.push(e);
})), u;
}(this.datastore, t);
return e.forEach((t => this.recordVersion(t))), e;
}
set(t, e) {
this.write(e.toMutation(t, this.precondition(t))), this.writtenDocs.add(t.toString());
}
update(t, e) {
try {
this.write(e.toMutation(t, this.preconditionForUpdate(t)));
} catch (t) {
this.lastWriteError = t;
}
this.writtenDocs.add(t.toString());
}
delete(t) {
this.write(new os(t, this.precondition(t))), this.writtenDocs.add(t.toString());
}
async commit() {
if (this.ensureCommitNotCalled(), this.lastWriteError) throw this.lastWriteError;
const t = this.readVersions;
// For each mutation, note that the doc was written.
this.mutations.forEach((e => {
t.delete(e.key.toString());
})),
// For each document that was read but not written to, we want to perform
// a `verify` operation.
t.forEach(((t, e) => {
const n = at.fromPath(e);
this.mutations.push(new us(n, this.precondition(n)));
})), await async function(t, e) {
const n = B(t), s = Js(n.yt) + "/documents", i = {
writes: e.map((t => ni(n.yt, t)))
};
await n.ao("Commit", s, i);
}(this.datastore, this.mutations), this.committed = !0;
}
recordVersion(t) {
let e;
if (t.isFoundDocument()) e = t.version; else {
if (!t.isNoDocument()) throw M();
// Represent a deleted doc using SnapshotVersion.min().
e = it.min();
}
const n = this.readVersions.get(t.key.toString());
if (n) {
if (!e.isEqual(n))
// This transaction will fail no matter what.
throw new q(L.ABORTED, "Document version changed between two reads.");
} else this.readVersions.set(t.key.toString(), e);
}
/**
* Returns the version of this document when it was read in this transaction,
* as a precondition, or no precondition if it was not read.
*/ precondition(t) {
const e = this.readVersions.get(t.toString());
return !this.writtenDocs.has(t.toString()) && e ? e.isEqual(it.min()) ? Wn.exists(!1) : Wn.updateTime(e) : Wn.none();
}
/**
* Returns the precondition for a document if the operation is an update.
*/ preconditionForUpdate(t) {
const e = this.readVersions.get(t.toString());
// The first time a document is written, we want to take into account the
// read time and existence
if (!this.writtenDocs.has(t.toString()) && e) {
if (e.isEqual(it.min()))
// The document doesn't exist, so fail the transaction.
// This has to be validated locally because you can't send a
// precondition that a document does not exist without changing the
// semantics of the backend write to be an insert. This is the reverse
// of what we want, since we want to assert that the document doesn't
// exist but then send the update and have it fail. Since we can't
// express that to the backend, we have to validate locally.
// Note: this can change once we can send separate verify writes in the
// transaction.
throw new q(L.INVALID_ARGUMENT, "Can't update a document that doesn't exist.");
// Document exists, base precondition on document update time.
return Wn.updateTime(e);
}
// Document was not read, so we just use the preconditions for a blind
// update.
return Wn.exists(!0);
}
write(t) {
this.ensureCommitNotCalled(), this.mutations.push(t);
}
ensureCommitNotCalled() {}
}
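/**
 * Illustrative sketch (not part of the SDK source): the read-before-write
 * contract this class enforces, exercised through the public
 * `runTransaction()` API. The document shape is an example assumption.
 *
 * @example
 * await runTransaction(db, async (tx) => {
 *   // All reads must precede all writes, or lookup() above throws.
 *   const snap = await tx.get(doc(db, "counters/visits"));
 *   tx.update(doc(db, "counters/visits"), { n: (snap.data().n || 0) + 1 });
 * });
 */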
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* TransactionRunner encapsulates the logic needed to run and retry transactions
* with backoff.
*/ class Oa {
constructor(t, e, n, s, i) {
this.asyncQueue = t, this.datastore = e, this.options = n, this.updateFunction = s,
this.deferred = i, this.kc = n.maxAttempts, this.xo = new Eu(this.asyncQueue, "transaction_retry" /* TimerId.TransactionRetry */);
}
/** Runs the transaction and sets the result on deferred. */ run() {
this.kc -= 1, this.Oc();
}
Oc() {
this.xo.Ro((async () => {
const t = new ka(this.datastore), e = this.Mc(t);
e && e.then((e => {
this.asyncQueue.enqueueAndForget((() => t.commit().then((() => {
this.deferred.resolve(e);
})).catch((t => {
this.Fc(t);
}))));
})).catch((t => {
this.Fc(t);
}));
}));
}
Mc(t) {
try {
const e = this.updateFunction(t);
return !Ut(e) && e.catch && e.then ? e : (this.deferred.reject(Error("Transaction callback must return a Promise")),
null);
} catch (t) {
// Do not retry errors thrown by user provided updateFunction.
return this.deferred.reject(t), null;
}
}
Fc(t) {
this.kc > 0 && this.$c(t) ? (this.kc -= 1, this.asyncQueue.enqueueAndForget((() => (this.Oc(),
Promise.resolve())))) : this.deferred.reject(t);
}
$c(t) {
if ("FirebaseError" === t.name) {
// In transactions, the backend will fail outdated reads with FAILED_PRECONDITION and
// non-matching document versions with ABORTED. These errors should be retried.
const e = t.code;
return "aborted" === e || "failed-precondition" === e || "already-exists" === e || !ls(e);
}
return !1;
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* FirestoreClient is a top-level class that constructs and owns all of the
* pieces of the client SDK architecture. It is responsible for creating the
* async queue that is shared by all of the other components in the system.
*/
class Ma {
constructor(t, e,
/**
* Asynchronous queue responsible for all of our internal processing. When
* we get incoming work from the user (via public API) or the network
* (incoming GRPC messages), we should always schedule onto this queue.
* This ensures all of our work is properly serialized (e.g. we don't
* start processing a new operation while the previous one is waiting for
* an async I/O to complete).
*/
n, s) {
this.authCredentials = t, this.appCheckCredentials = e, this.asyncQueue = n, this.databaseInfo = s,
this.user = v.UNAUTHENTICATED, this.clientId = Z.R(), this.authCredentialListener = () => Promise.resolve(),
this.appCheckCredentialListener = () => Promise.resolve(), this.authCredentials.start(n, (async t => {
x("FirestoreClient", "Received user=", t.uid), await this.authCredentialListener(t),
this.user = t;
})), this.appCheckCredentials.start(n, (t => (x("FirestoreClient", "Received new app check token=", t),
this.appCheckCredentialListener(t, this.user))));
}
async getConfiguration() {
return {
asyncQueue: this.asyncQueue,
databaseInfo: this.databaseInfo,
clientId: this.clientId,
authCredentials: this.authCredentials,
appCheckCredentials: this.appCheckCredentials,
initialUser: this.user,
maxConcurrentLimboResolutions: 100
};
}
setCredentialChangeListener(t) {
this.authCredentialListener = t;
}
setAppCheckTokenChangeListener(t) {
this.appCheckCredentialListener = t;
}
/**
* Checks that the client has not been terminated. Ensures that other methods on
* this class cannot be called after the client is terminated.
*/ verifyNotTerminated() {
if (this.asyncQueue.isShuttingDown) throw new q(L.FAILED_PRECONDITION, "The client has already been terminated.");
}
terminate() {
this.asyncQueue.enterRestrictedMode();
const t = new U;
return this.asyncQueue.enqueueAndForgetEvenWhileRestricted((async () => {
try {
this.onlineComponents && await this.onlineComponents.terminate(), this.offlineComponents && await this.offlineComponents.terminate(),
// The credentials provider must be terminated after shutting down the
// RemoteStore as it will prevent the RemoteStore from retrieving auth
// tokens.
this.authCredentials.shutdown(), this.appCheckCredentials.shutdown(), t.resolve();
} catch (e) {
const n = rc(e, "Failed to shutdown persistence");
t.reject(n);
}
})), t.promise;
}
}
async function Fa(t, e) {
t.asyncQueue.verifyOperationInProgress(), x("FirestoreClient", "Initializing OfflineComponentProvider");
const n = await t.getConfiguration();
await e.initialize(n);
let s = n.initialUser;
t.setCredentialChangeListener((async t => {
s.isEqual(t) || (await Go(e.localStore, t), s = t);
})),
// When a user calls clearPersistence() in one client, all other clients
// need to be terminated to allow the delete to succeed.
e.persistence.setDatabaseDeletedListener((() => t.terminate())), t.offlineComponents = e;
}
async function $a(t, e) {
t.asyncQueue.verifyOperationInProgress();
const n = await Ba(t);
x("FirestoreClient", "Initializing OnlineComponentProvider");
const s = await t.getConfiguration();
await e.initialize(n, s),
// The CredentialChangeListener of the online component provider takes
// precedence over the offline component provider.
t.setCredentialChangeListener((t => tc(e.remoteStore, t))), t.setAppCheckTokenChangeListener(((t, n) => tc(e.remoteStore, n))),
t.onlineComponents = e;
}
async function Ba(t) {
return t.offlineComponents || (x("FirestoreClient", "Using default OfflineComponentProvider"),
await Fa(t, new ra)), t.offlineComponents;
}
async function La(t) {
return t.onlineComponents || (x("FirestoreClient", "Using default OnlineComponentProvider"),
await $a(t, new ca)), t.onlineComponents;
}
function qa(t) {
return Ba(t).then((t => t.persistence));
}
function Ua(t) {
return Ba(t).then((t => t.localStore));
}
function Ka(t) {
return La(t).then((t => t.remoteStore));
}
function Ga(t) {
return La(t).then((t => t.syncEngine));
}
function Qa(t) {
return La(t).then((t => t.datastore));
}
async function ja(t) {
const e = await La(t), n = e.eventManager;
return n.onListen = vc.bind(null, e.syncEngine), n.onUnlisten = Sc.bind(null, e.syncEngine),
n;
}
/** Enables the network connection and re-enqueues all pending operations. */ function Wa(t) {
return t.asyncQueue.enqueue((async () => {
const e = await qa(t), n = await Ka(t);
return e.setNetworkEnabled(!0), function(t) {
const e = B(t);
return e._u.delete(0 /* OfflineCause.UserDisabled */), Du(e);
}(n);
}));
}
/** Disables the network connection. Pending operations will not complete. */ function za(t) {
return t.asyncQueue.enqueue((async () => {
const e = await qa(t), n = await Ka(t);
return e.setNetworkEnabled(!1), async function(t) {
const e = B(t);
e._u.add(0 /* OfflineCause.UserDisabled */), await Cu(e),
// Set the OnlineState to Offline so get()s return from cache, etc.
e.gu.set("Offline" /* OnlineState.Offline */);
}(n);
}));
}
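/**
 * Illustrative sketch (not part of the SDK source): the public wrappers over
 * the two functions above. `db` is an example assumption.
 *
 * @example
 * await disableNetwork(db); // reads are served from cache; writes are queued
 * await enableNetwork(db);  // idempotent; re-enqueues pending operations
 */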
/**
* Returns a Promise that resolves when all writes that were pending at the time
* this method was called received server acknowledgement. An acknowledgement
* can be either acceptance or rejection.
*/ function Ha(t, e) {
const n = new U;
return t.asyncQueue.enqueueAndForget((async () => async function(t, e, n) {
try {
const s = await function(t, e) {
const n = B(t);
return n.persistence.runTransaction("read document", "readonly", (t => n.localDocuments.getDocument(t, e)));
}(t, e);
s.isFoundDocument() ? n.resolve(s) : s.isNoDocument() ? n.resolve(null) : n.reject(new q(L.UNAVAILABLE, "Failed to get document from cache. (However, this document may exist on the server. Run again without setting 'source' in the GetOptions to attempt to retrieve the document from the server.)"));
} catch (t) {
const s = rc(t, `Failed to get document '${e}' from cache`);
n.reject(s);
}
}
/**
* Retrieves a latency-compensated document from the backend via a
* SnapshotListener.
*/ (await Ua(t), e, n))), n.promise;
}
function Ja(t, e, n = {}) {
const s = new U;
return t.asyncQueue.enqueueAndForget((async () => function(t, e, n, s, i) {
const r = new Sa({
next: r => {
// Remove query first before passing event to user to avoid
// user actions affecting the now stale query.
e.enqueueAndForget((() => fc(t, o)));
const u = r.docs.has(n);
!u && r.fromCache ?
// TODO(dimond): If we're online and the document doesn't
// exist then we resolve with a doc.exists set to false. If
// we're offline however, we reject the Promise in this
// case. Two options: 1) Cache the negative response from
// the server so we can deliver that even when you're
// offline 2) Actually reject the Promise in the online case
// if the document doesn't exist.
i.reject(new q(L.UNAVAILABLE, "Failed to get document because the client is offline.")) : u && r.fromCache && s && "server" === s.source ? i.reject(new q(L.UNAVAILABLE, 'Failed to get document from server. (However, this document does exist in the local cache. Run again without setting source to "server" to retrieve the cached document.)')) : i.resolve(r);
},
error: t => i.reject(t)
}), o = new mc(dn(n.path), r, {
includeMetadataChanges: !0,
Nu: !0
});
return lc(t, o);
}(await ja(t), t.asyncQueue, e, n, s))), s.promise;
}
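/**
 * Illustrative sketch (not part of the SDK source): the public read variants
 * backed by the two helpers above. The reference is an example assumption.
 *
 * @example
 * const ref = doc(db, "users/alice");
 * await getDocFromCache(ref);  // cache only; rejects when the doc is not cached
 * await getDocFromServer(ref); // server only; rejects while offline
 * await getDoc(ref);           // default: server result, cache fallback offline
 */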
function Ya(t, e) {
const n = new U;
return t.asyncQueue.enqueueAndForget((async () => async function(t, e, n) {
try {
const s = await Xo(t, e,
/* usePreviousResults= */ !0), i = new Ac(e, s.Hi), r = i.Wu(s.documents), o = i.applyChanges(r,
/* updateLimboDocuments= */ !1);
n.resolve(o.snapshot);
} catch (t) {
const s = rc(t, `Failed to execute query '${e}' against cache`);
n.reject(s);
}
}
/**
* Retrieves a latency-compensated query snapshot from the backend via a
* SnapshotListener.
*/ (await Ua(t), e, n))), n.promise;
}
function Xa(t, e, n = {}) {
const s = new U;
return t.asyncQueue.enqueueAndForget((async () => function(t, e, n, s, i) {
const r = new Sa({
next: n => {
// Remove query first before passing event to user to avoid
// user actions affecting the now stale query.
e.enqueueAndForget((() => fc(t, o))), n.fromCache && "server" === s.source ? i.reject(new q(L.UNAVAILABLE, 'Failed to get documents from server. (However, these documents may exist in the local cache. Run again without setting source to "server" to retrieve the cached documents.)')) : i.resolve(n);
},
error: t => i.reject(t)
}), o = new mc(n, r, {
includeMetadataChanges: !0,
Nu: !0
});
return lc(t, o);
}(await ja(t), t.asyncQueue, e, n, s))), s.promise;
}
function Za(t, e) {
const n = new Sa(e);
return t.asyncQueue.enqueueAndForget((async () => function(t, e) {
B(t).Ru.add(e),
// Immediately fire an initial event, indicating all existing listeners
// are in-sync.
e.next();
}(await ja(t), n))), () => {
n.bc(), t.asyncQueue.enqueueAndForget((async () => function(t, e) {
B(t).Ru.delete(e);
}(await ja(t), n)));
};
}
/**
* Takes an updateFunction in which a set of reads and writes can be performed
* atomically. In the updateFunction, the client can read and write values
* using the supplied transaction object. After the updateFunction, all
* changes will be committed. If a retryable error occurs (ex: some other
* client has changed any of the data referenced), then the updateFunction
* will be called again after a backoff. If the updateFunction still fails
* after all retries, then the transaction will be rejected.
*
* The transaction object passed to the updateFunction contains methods for
* accessing documents and collections. Unlike other datastore access, data
* accessed with the transaction will not reflect local changes that have not
* been committed. For this reason, it is required that all reads are
* performed before any writes. Transactions must be performed while online.
*/ function th(t, e, n, s) {
const i = function(t, e) {
let n;
n = "string" == typeof t ? (new TextEncoder).encode(t) : t;
return function(t, e) {
return new Da(t, e);
}(function(t, e) {
if (t instanceof Uint8Array) return Va(t, e);
if (t instanceof ArrayBuffer) return Va(new Uint8Array(t), e);
if (t instanceof ReadableStream) return t.getReader();
throw new Error("Source of `toByteStreamReader` has to be a ArrayBuffer or ReadableStream");
}(n), e);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ (n, Tu(e));
t.asyncQueue.enqueueAndForget((async () => {
ia(await Ga(t), i, s);
}));
}
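/**
 * Illustrative usage sketch (not part of the SDK source): driving the loader
 * above through the public `loadBundle()` API. `db` and `bundleData` are
 * assumptions for the example only.
 *
 * @example
 * const task = loadBundle(db, bundleData); // string, ArrayBuffer or stream
 * task.onProgress((p) => {
 *   console.log(`${p.documentsLoaded}/${p.totalDocuments} documents loaded`);
 * });
 * await task; // resolves once the bundle has finished loading
 */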
function eh(t, e) {
return t.asyncQueue.enqueue((async () => function(t, e) {
const n = B(t);
return n.persistence.runTransaction("Get named query", "readonly", (t => n.Ns.getNamedQuery(t, e)));
}(await Ua(t), e)));
}
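/**
 * Illustrative sketch (not part of the SDK source): resolving a query that
 * was packaged inside a previously loaded bundle via the public
 * `namedQuery()` API. The query name is an example assumption.
 *
 * @example
 * const q = await namedQuery(db, "latest-stories"); // null if not persisted
 * if (q) {
 *   const snap = await getDocs(q);
 * }
 */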
class nh {
constructor() {
// The last promise in the queue.
this.Bc = Promise.resolve(),
// A list of retryable operations. Retryable operations are run in order and
// retried with backoff.
this.Lc = [],
// Is this AsyncQueue being shut down? Once it is set to true, it will not
// be changed again.
this.qc = !1,
// Operations scheduled to be queued in the future. Operations are
// automatically removed after they are run or canceled.
this.Uc = [],
// visible for testing
this.Kc = null,
// Flag set while there's an outstanding AsyncQueue operation, used for
// assertion sanity-checks.
this.Gc = !1,
// Enabled during shutdown on Safari to prevent future access to IndexedDB.
this.Qc = !1,
// List of TimerIds to fast-forward delays for.
this.jc = [],
// Backoff timer used to schedule retries for retryable operations
this.xo = new Eu(this, "async_queue_retry" /* TimerId.AsyncQueueRetry */),
// Visibility handler that triggers an immediate retry of all retryable
// operations. Meant to speed up recovery when we regain file system access
// after page comes into foreground.
this.Wc = () => {
const t = Iu();
t && x("AsyncQueue", "Visibility state changed to " + t.visibilityState), this.xo.Po();
};
const t = Iu();
t && "function" == typeof t.addEventListener && t.addEventListener("visibilitychange", this.Wc);
}
get isShuttingDown() {
return this.qc;
}
/**
* Adds a new operation to the queue without waiting for it to complete (i.e.
* we ignore the Promise result).
*/ enqueueAndForget(t) {
// eslint-disable-next-line @typescript-eslint/no-floating-promises
this.enqueue(t);
}
enqueueAndForgetEvenWhileRestricted(t) {
this.zc(),
// eslint-disable-next-line @typescript-eslint/no-floating-promises
this.Hc(t);
}
enterRestrictedMode(t) {
if (!this.qc) {
this.qc = !0, this.Qc = t || !1;
const e = Iu();
e && "function" == typeof e.removeEventListener && e.removeEventListener("visibilitychange", this.Wc);
}
}
enqueue(t) {
if (this.zc(), this.qc)
// Return a Promise which never resolves.
return new Promise((() => {}));
// Create a deferred Promise that we can return to the callee. This
// allows us to return a "hanging Promise" only to the callee and still
// advance the queue even when the operation is not run.
const e = new U;
return this.Hc((() => this.qc && this.Qc ? Promise.resolve() : (t().then(e.resolve, e.reject),
e.promise))).then((() => e.promise));
}
enqueueRetryable(t) {
this.enqueueAndForget((() => (this.Lc.push(t), this.Jc())));
}
/**
* Runs the next operation from the retryable queue. If the operation fails,
* reschedules with backoff.
*/ async Jc() {
if (0 !== this.Lc.length) {
try {
await this.Lc[0](), this.Lc.shift(), this.xo.reset();
} catch (t) {
if (!St(t)) throw t;
// Failure will be handled by AsyncQueue
x("AsyncQueue", "Operation failed with retryable error: " + t);
}
this.Lc.length > 0 &&
// If there are additional operations, we re-schedule `retryNextOp()`.
// This is necessary to run retryable operations that failed during
// their initial attempt since we don't know whether they are already
// enqueued. If, for example, `op1`, `op2`, `op3` are enqueued and `op1`
// needs to be re-run, we will run `op1`, `op1`, `op2` using the
// already enqueued calls to `retryNextOp()`. `op3()` will then run in the
// call scheduled here.
// Since `backoffAndRun()` cancels an existing backoff and schedules a
// new backoff on every call, there is only ever a single additional
// operation in the queue.
this.xo.Ro((() => this.Jc()));
}
}
Hc(t) {
const e = this.Bc.then((() => (this.Gc = !0, t().catch((t => {
this.Kc = t, this.Gc = !1;
const e =
/**
* Chrome includes Error.message in Error.stack. Other browsers do not.
* This returns expected output of message + stack when available.
* @param error - Error or FirestoreError
*/
function(t) {
let e = t.message || "";
t.stack && (e = t.stack.includes(t.message) ? t.stack : t.message + "\n" + t.stack);
return e;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ (t);
// Re-throw the error so that this.tail becomes a rejected Promise and
// all further attempts to chain (via .then) will just short-circuit
// and return the rejected Promise.
throw N("INTERNAL UNHANDLED ERROR: ", e), t;
})).then((t => (this.Gc = !1, t))))));
return this.Bc = e, e;
}
enqueueAfterDelay(t, e, n) {
this.zc(),
// Fast-forward delays for timerIds that have been overridden.
this.jc.indexOf(t) > -1 && (e = 0);
const s = ic.createAndSchedule(this, t, e, n, (t => this.Yc(t)));
return this.Uc.push(s), s;
}
zc() {
this.Kc && M();
}
verifyOperationInProgress() {}
/**
* Waits until all currently queued tasks are finished executing. Delayed
* operations are not run.
*/ async Xc() {
// Operations in the queue prior to draining may have enqueued additional
// operations. Keep draining the queue until the tail is no longer advanced,
// which indicates that no more new operations were enqueued and that all
// operations were executed.
let t;
do {
t = this.Bc, await t;
} while (t !== this.Bc);
}
/**
* For Tests: Determine if a delayed operation with a particular TimerId
* exists.
*/ Zc(t) {
for (const e of this.Uc) if (e.timerId === t) return !0;
return !1;
}
/**
* For Tests: Runs some or all delayed operations early.
*
* @param lastTimerId - Delayed operations up to and including this TimerId
* will be drained. Pass TimerId.All to run all delayed operations.
* @returns a Promise that resolves once all operations have been run.
*/ ta(t) {
// Note that draining may generate more delayed ops, so we do that first.
return this.Xc().then((() => {
// Run ops in the same order they'd run if they ran naturally.
this.Uc.sort(((t, e) => t.targetTimeMs - e.targetTimeMs));
for (const e of this.Uc) if (e.skipDelay(), "all" /* TimerId.All */ !== t && e.timerId === t) break;
return this.Xc();
}));
}
/**
* For Tests: Skip all subsequent delays for a timer id.
*/ ea(t) {
this.jc.push(t);
}
/** Called once a DelayedOperation is run or canceled. */ Yc(t) {
// NOTE: indexOf / slice are O(n), but delayedOperations is expected to be small.
const e = this.Uc.indexOf(t);
this.Uc.splice(e, 1);
}
}
function sh(t) {
/**
* Returns true if obj is an object and contains at least one of the specified
* methods.
*/
return function(t, e) {
if ("object" != typeof t || null === t) return !1;
const n = t;
for (const t of e) if (t in n && "function" == typeof n[t]) return !0;
return !1;
}(t, [ "next", "error", "complete" ]);
}

/**
 * Represents the task of loading a Firestore bundle. It provides progress of bundle
 * loading, as well as task completion and error events.
 *
 * The API is compatible with `Promise`.
 */
class ih {
constructor() {
this._progressObserver = {}, this._taskCompletionResolver = new U, this._lastProgress = {
taskState: "Running",
totalBytes: 0,
totalDocuments: 0,
bytesLoaded: 0,
documentsLoaded: 0
};
}
/**
* Registers functions to listen to bundle loading progress events.
* @param next - Called when there is a progress update from bundle loading. Typically `next` calls occur
* each time a Firestore document is loaded from the bundle.
* @param error - Called when an error occurs during bundle loading. The task aborts after reporting the
* error, and there should be no more updates after this.
* @param complete - Called when the loading task is complete.
*/ onProgress(t, e, n) {
this._progressObserver = {
next: t,
error: e,
complete: n
};
}
/**
* Implements the `Promise.catch` interface.
*
* @param onRejected - Called when an error occurs during bundle loading.
*/ catch(t) {
return this._taskCompletionResolver.promise.catch(t);
}
/**
* Implements the `Promise.then` interface.
*
* @param onFulfilled - Called on the completion of the loading task with a final `LoadBundleTaskProgress` update.
* The update will always have its `taskState` set to `"Success"`.
* @param onRejected - Called when an error occurs during bundle loading.
*/ then(t, e) {
return this._taskCompletionResolver.promise.then(t, e);
}
/**
* Notifies all observers that bundle loading has completed, with a provided
* `LoadBundleTaskProgress` object.
*
* @private
*/ _completeWith(t) {
this._updateProgress(t), this._progressObserver.complete && this._progressObserver.complete(),
this._taskCompletionResolver.resolve(t);
}
/**
* Notifies all observers that bundle loading has failed, with a provided
* `Error` as the reason.
*
* @private
*/ _failWith(t) {
this._lastProgress.taskState = "Error", this._progressObserver.next && this._progressObserver.next(this._lastProgress),
this._progressObserver.error && this._progressObserver.error(t), this._taskCompletionResolver.reject(t);
}
/**
* Notifies a progress update of loading a bundle.
* @param progress - The new progress.
*
* @private
*/ _updateProgress(t) {
this._lastProgress = t, this._progressObserver.next && this._progressObserver.next(t);
}
}
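/*
 * Usage sketch (assumes this bundle's `ih` backs the public `LoadBundleTask`
 * returned by `loadBundle()`; `db` and `bundleData` are hypothetical):
 *
 *   const task = loadBundle(db, bundleData);
 *   task.onProgress((p) => {
 *     console.log(`loaded ${p.documentsLoaded} of ${p.totalDocuments} docs`);
 *   });
 *   // The task is also promise-like:
 *   const finalProgress = await task; // finalProgress.taskState === "Success"
 */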
/**
 * Constant used to indicate that LRU garbage collection should be disabled.
 * Set this value as the `cacheSizeBytes` on the settings passed to the
 * {@link Firestore} instance.
 */ const rh = -1;
/**
* The Cloud Firestore service interface.
*
* Do not call this constructor directly. Instead, use {@link (getFirestore:1)}.
*/
class oh extends ya {
/** @hideconstructor */
constructor(t, e, n, s) {
super(t, e, n, s),
/**
* Whether it's a {@link Firestore} or Firestore Lite instance.
*/
this.type = "firestore", this._queue = new nh, this._persistenceKey = (null == s ? void 0 : s.name) || "[DEFAULT]";
}
_terminate() {
return this._firestoreClient ||
// The client must be initialized to ensure that all subsequent API
// usage throws an exception.
hh(this), this._firestoreClient.terminate();
}
}
/**
* Initializes a new instance of {@link Firestore} with the provided settings.
* Can only be called before any other function, including
* {@link (getFirestore:1)}. If the custom settings are empty, this function is
* equivalent to calling {@link (getFirestore:1)}.
*
* @param app - The {@link @firebase/app#FirebaseApp} with which the {@link Firestore} instance will
* be associated.
* @param settings - A settings object to configure the {@link Firestore} instance.
 * @param databaseId - The name of the database.
* @returns A newly initialized {@link Firestore} instance.
*/ function uh(t, e, n) {
n || (n = "(default)");
const s = _getProvider(t, "firestore");
if (s.isInitialized(n)) {
const t = s.getImmediate({
identifier: n
}), i = s.getOptions(n);
if (w(i, e)) return t;
throw new q(L.FAILED_PRECONDITION, "initializeFirestore() has already been called with different options. To avoid this error, call initializeFirestore() with the same options as when it was originally called, or call getFirestore() to return the already initialized instance.");
}
if (void 0 !== e.cacheSizeBytes && -1 !== e.cacheSizeBytes && e.cacheSizeBytes < 1048576) throw new q(L.INVALID_ARGUMENT, "cacheSizeBytes must be at least 1048576");
return s.initialize({
options: e,
instanceIdentifier: n
});
}
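/*
 * Usage sketch (assumes `uh` backs the public `initializeFirestore` export
 * and `app` is an initialized FirebaseApp):
 *
 *   const db = initializeFirestore(app, {
 *     ignoreUndefinedProperties: true,
 *     cacheSizeBytes: 50 * 1048576, // must be >= 1048576 (1 MiB), or -1 to disable GC
 *   });
 *   // Calling it again with different options throws "failed-precondition".
 */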
function ch(e, n) {
const s = "object" == typeof e ? e : t(), i = "string" == typeof e ? e : n || "(default)", r = _getProvider(s, "firestore").getImmediate({
identifier: i
});
if (!r._initialized) {
const t = m("firestore");
t && pa(r, ...t);
}
return r;
}
/**
* @internal
*/ function ah(t) {
return t._firestoreClient || hh(t), t._firestoreClient.verifyNotTerminated(), t._firestoreClient;
}
function hh(t) {
var e;
const n = t._freezeSettings(), s = function(t, e, n, s) {
return new Ft(t, e, n, s.host, s.ssl, s.experimentalForceLongPolling, s.experimentalAutoDetectLongPolling, s.useFetchStreams);
}
// settings() defaults:
(t._databaseId, (null === (e = t._app) || void 0 === e ? void 0 : e.options.appId) || "", t._persistenceKey, n);
t._firestoreClient = new Ma(t._authCredentials, t._appCheckCredentials, t._queue, s);
}
/**
* Attempts to enable persistent storage, if possible.
*
 * Must be called before any other functions (other than
 * {@link initializeFirestore}, {@link (getFirestore:1)} or
 * {@link clearIndexedDbPersistence}).
 *
 * If this fails, `enableIndexedDbPersistence()` will reject the promise it
 * returns. Note that even after this failure, the {@link Firestore} instance will
 * remain usable; however, offline persistence will be disabled.
*
* There are several reasons why this can fail, which can be identified by
* the `code` on the error.
*
* * failed-precondition: The app is already open in another browser tab.
* * unimplemented: The browser is incompatible with the offline
* persistence implementation.
*
* @param firestore - The {@link Firestore} instance to enable persistence for.
* @param persistenceSettings - Optional settings object to configure
* persistence.
* @returns A `Promise` that represents successfully enabling persistent storage.
*/ function lh(t, e) {
Th(t = _a(t, oh));
const n = ah(t), s = t._freezeSettings(), i = new ca;
return dh(n, i, new oa(i, s.cacheSizeBytes, null == e ? void 0 : e.forceOwnership));
}
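/*
 * Usage sketch (assumes `lh` backs the public `enableIndexedDbPersistence`
 * export and `db` is an initialized Firestore instance):
 *
 *   enableIndexedDbPersistence(db).catch((err) => {
 *     if (err.code === "failed-precondition") {
 *       // Another tab already has persistence enabled.
 *     } else if (err.code === "unimplemented") {
 *       // The browser lacks the required IndexedDb features.
 *     }
 *   });
 *   // Either way the instance stays usable; on failure it simply runs
 *   // without a persistent cache.
 */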
/**
* Attempts to enable multi-tab persistent storage, if possible. If enabled
* across all tabs, all operations share access to local persistence, including
* shared execution of queries and latency-compensated local document updates
* across all connected instances.
*
 * If this fails, `enableMultiTabIndexedDbPersistence()` will reject the promise
 * it returns. Note that even after this failure, the {@link Firestore} instance will
 * remain usable; however, offline persistence will be disabled.
*
* There are several reasons why this can fail, which can be identified by
* the `code` on the error.
*
* * failed-precondition: The app is already open in another browser tab and
* multi-tab is not enabled.
* * unimplemented: The browser is incompatible with the offline
* persistence implementation.
*
* @param firestore - The {@link Firestore} instance to enable persistence for.
* @returns A `Promise` that represents successfully enabling persistent
* storage.
*/ function fh(t) {
Th(t = _a(t, oh));
const e = ah(t), n = t._freezeSettings(), s = new ca;
return dh(e, s, new ua(s, n.cacheSizeBytes));
}
/**
* Registers both the `OfflineComponentProvider` and `OnlineComponentProvider`.
* If the operation fails with a recoverable error (see
* `canRecoverFromIndexedDbError()` below), the returned Promise is rejected
* but the client remains usable.
*/ function dh(t, e, n) {
const s = new U;
return t.asyncQueue.enqueue((async () => {
try {
await Fa(t, n), await $a(t, e), s.resolve();
} catch (t) {
const e = t;
if (!
/**
* Decides whether the provided error allows us to gracefully disable
* persistence (as opposed to crashing the client).
*/
function(t) {
if ("FirebaseError" === t.name) return t.code === L.FAILED_PRECONDITION || t.code === L.UNIMPLEMENTED;
if ("undefined" != typeof DOMException && t instanceof DOMException)
// There are a few known circumstances where we can open IndexedDb but
// trying to read/write will fail (e.g. quota exceeded). For
// well-understood cases, we attempt to detect these and then gracefully
// fall back to memory persistence.
// NOTE: Rather than continue to add to this list, we could decide to
// always fall back, with the risk that we might accidentally hide errors
// representing actual SDK bugs.
// When the browser is out of quota we could get either quota exceeded
// or an aborted error depending on whether the error happened during
// schema migration.
return 22 === t.code || 20 === t.code ||
// Firefox Private Browsing mode disables IndexedDb and returns
// INVALID_STATE for any usage.
11 === t.code;
return !0;
}(e)) throw e;
k("Error enabling offline persistence. Falling back to persistence disabled: " + e),
s.reject(e);
}
})).then((() => s.promise));
}
/**
 * Clears the persistent storage. This includes pending writes and cached
 * documents.
 *
 * Must be called while the {@link Firestore} instance is not started (after the app is
 * terminated or when the app is first initialized). On startup, this function
 * must be called before other functions (other than {@link
 * initializeFirestore} or {@link (getFirestore:1)}). If the {@link Firestore}
 * instance is still running, the promise will be rejected with the error code
 * of `failed-precondition`.
 *
 * Note: `clearIndexedDbPersistence()` is primarily intended to help write
 * reliable tests that use Cloud Firestore. It uses an efficient mechanism for
 * dropping existing data but does not attempt to securely overwrite or
 * otherwise make cached data unrecoverable. For applications that are sensitive
 * to the disclosure of cached data in between user sessions, we strongly
 * recommend not enabling persistence at all.
 *
 * @param firestore - The {@link Firestore} instance to clear persistence for.
 * @returns A `Promise` that is resolved when the persistent storage is
 * cleared. Otherwise, the promise is rejected with an error.
 */
function _h(t) {
if (t._initialized && !t._terminated) throw new q(L.FAILED_PRECONDITION, "Persistence can only be cleared before a Firestore instance is initialized or after it is terminated.");
const e = new U;
return t._queue.enqueueAndForgetEvenWhileRestricted((async () => {
try {
await async function(t) {
if (!Pt.C()) return Promise.resolve();
const e = t + "main";
await Pt.delete(e);
}(Bo(t._databaseId, t._persistenceKey)), e.resolve();
} catch (t) {
e.reject(t);
}
})), e.promise;
}
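/*
 * Usage sketch for test teardown (assumes `_h` backs the public
 * `clearIndexedDbPersistence` export and `yh` backs `terminate`):
 *
 *   await terminate(db);                 // instance must not be running
 *   await clearIndexedDbPersistence(db); // drops cached docs and pending writes
 *   // Calling it while the instance is started rejects with
 *   // "failed-precondition".
 */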
/**
* Waits until all currently pending writes for the active user have been
* acknowledged by the backend.
*
* The returned promise resolves immediately if there are no outstanding writes.
* Otherwise, the promise waits for all previously issued writes (including
* those written in a previous app session), but it does not wait for writes
* that were added after the function is called. If you want to wait for
* additional writes, call `waitForPendingWrites()` again.
*
* Any outstanding `waitForPendingWrites()` promises are rejected during user
* changes.
*
* @returns A `Promise` which resolves when all currently pending writes have been
* acknowledged by the backend.
*/ function wh(t) {
return function(t) {
const e = new U;
return t.asyncQueue.enqueueAndForget((async () => Mc(await Ga(t), e))), e.promise;
}(ah(t = _a(t, oh)));
}
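/*
 * Usage sketch (assumes `wh` backs the public `waitForPendingWrites` export;
 * `updateDoc` and `doc` are the usual modular imports):
 *
 *   await updateDoc(doc(db, "cities/SF"), { population: 1000000 });
 *   await waitForPendingWrites(db); // resolves once the backend has acked
 *   // Writes issued after this call are not waited for; call it again
 *   // if needed.
 */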
/**
* Re-enables use of the network for this {@link Firestore} instance after a prior
* call to {@link disableNetwork}.
*
* @returns A `Promise` that is resolved once the network has been enabled.
*/ function mh(t) {
return Wa(ah(t = _a(t, oh)));
}
/**
* Disables network usage for this instance. It can be re-enabled via {@link
* enableNetwork}. While the network is disabled, any snapshot listeners,
* `getDoc()` or `getDocs()` calls will return results from cache, and any write
* operations will be queued until the network is restored.
*
* @returns A `Promise` that is resolved once the network has been disabled.
*/ function gh(t) {
return za(ah(t = _a(t, oh)));
}
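/*
 * Usage sketch (assumes `mh` and `gh` back the public `enableNetwork` /
 * `disableNetwork` exports):
 *
 *   await disableNetwork(db);
 *   const snap = await getDoc(doc(db, "cities/SF")); // served from cache
 *   await enableNetwork(db);                         // queued writes resume
 */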
/**
* Terminates the provided {@link Firestore} instance.
*
* After calling `terminate()` only the `clearIndexedDbPersistence()` function
* may be used. Any other function will throw a `FirestoreError`.
*
* To restart after termination, create a new instance of FirebaseFirestore with
* {@link (getFirestore:1)}.
*
* Termination does not cancel any pending writes, and any promises that are
* awaiting a response from the server will not be resolved. If you have
* persistence enabled, the next time you start this instance, it will resume
* sending these writes to the server.
*
* Note: Under normal circumstances, calling `terminate()` is not required. This
* function is useful only when you want to force this instance to release all
* of its resources or in combination with `clearIndexedDbPersistence()` to
* ensure that all local state is destroyed between test runs.
*
* @returns A `Promise` that is resolved when the instance has been successfully
* terminated.
*/ function yh(t) {
return e(t.app, "firestore", t._databaseId.database), t._delete();
}
/**
* Loads a Firestore bundle into the local cache.
*
* @param firestore - The {@link Firestore} instance to load bundles for.
* @param bundleData - An object representing the bundle to be loaded. Valid
* objects are `ArrayBuffer`, `ReadableStream` or `string`.
*
* @returns A `LoadBundleTask` object, which notifies callers with progress
* updates, and completion or error events. It can be used as a
* `Promise`.
*/ function ph(t, e) {
const n = ah(t = _a(t, oh)), s = new ih;
return th(n, t._databaseId, e, s), s;
}
/**
* Reads a Firestore {@link Query} from local cache, identified by the given
* name.
*
* The named queries are packaged into bundles on the server side (along
* with resulting documents), and loaded to local cache using `loadBundle`. Once
* in local cache, use this method to extract a {@link Query} by name.
*
* @param firestore - The {@link Firestore} instance to read the query from.
* @param name - The name of the query.
* @returns A `Promise` that is resolved with the Query or `null`.
*/ function Ih(t, e) {
return eh(ah(t = _a(t, oh)), e).then((e => e ? new Ta(t, null, e.query) : null));
}
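/*
 * Usage sketch combining `loadBundle` and `namedQuery` (assumes `ph` and
 * `Ih` back those public exports; `bundleData` is a hypothetical bundle):
 *
 *   await loadBundle(db, bundleData);
 *   const q = await namedQuery(db, "latest-stories-query");
 *   if (q) {
 *     const snapshot = await getDocsFromCache(q); // read the bundled docs
 *   }
 */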
function Th(t) {
if (t._initialized || t._terminated) throw new q(L.FAILED_PRECONDITION, "Firestore has already been started and persistence can no longer be enabled. You can only enable persistence before calling any other methods on a Firestore object.");
}
/**
* An immutable object representing an array of bytes.
*/
class Eh {
/** @hideconstructor */
constructor(t) {
this._byteString = t;
}
/**
* Creates a new `Bytes` object from the given Base64 string, converting it to
* bytes.
*
* @param base64 - The Base64 string used to create the `Bytes` object.
*/ static fromBase64String(t) {
try {
return new Eh(Wt.fromBase64String(t));
} catch (t) {
throw new q(L.INVALID_ARGUMENT, "Failed to construct data from Base64 string: " + t);
}
}
/**
* Creates a new `Bytes` object from the given Uint8Array.
*
* @param array - The Uint8Array used to create the `Bytes` object.
*/ static fromUint8Array(t) {
return new Eh(Wt.fromUint8Array(t));
}
/**
* Returns the underlying bytes as a Base64-encoded string.
*
* @returns The Base64-encoded string created from the `Bytes` object.
*/ toBase64() {
return this._byteString.toBase64();
}
/**
* Returns the underlying bytes in a new `Uint8Array`.
*
* @returns The Uint8Array created from the `Bytes` object.
*/ toUint8Array() {
return this._byteString.toUint8Array();
}
/**
* Returns a string representation of the `Bytes` object.
*
* @returns A string representation of the `Bytes` object.
*/ toString() {
return "Bytes(base64: " + this.toBase64() + ")";
}
/**
* Returns true if this `Bytes` object is equal to the provided one.
*
* @param other - The `Bytes` object to compare against.
* @returns true if this `Bytes` object is equal to the provided one.
*/ isEqual(t) {
return this._byteString.isEqual(t._byteString);
}
}
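/*
 * Usage sketch (assumes `Eh` backs the public `Bytes` export):
 *
 *   const b = Bytes.fromBase64String("aGVsbG8=");       // "hello"
 *   b.toUint8Array();                                   // Uint8Array(5)
 *   b.isEqual(Bytes.fromUint8Array(b.toUint8Array()));  // true
 */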
/**
* A `FieldPath` refers to a field in a document. The path may consist of a
* single field name (referring to a top-level field in the document), or a
* list of field names (referring to a nested field in the document).
*
* Create a `FieldPath` by providing field names. If more than one field
* name is provided, the path will point to a nested field in a document.
*/ class Ah {
/**
* Creates a `FieldPath` from the provided field names. If more than one field
* name is provided, the path will point to a nested field in a document.
*
* @param fieldNames - A list of field names.
*/
constructor(...t) {
for (let e = 0; e < t.length; ++e) if (0 === t[e].length) throw new q(L.INVALID_ARGUMENT, `Invalid field name at argument ${e + 1}. Field names must not be empty.`);
this._internalPath = new ct(t);
}
/**
* Returns true if this `FieldPath` is equal to the provided one.
*
* @param other - The `FieldPath` to compare against.
* @returns true if this `FieldPath` is equal to the provided one.
*/ isEqual(t) {
return this._internalPath.isEqual(t._internalPath);
}
}
/**
* Returns a special sentinel `FieldPath` to refer to the ID of a document.
* It can be used in queries to sort or filter by the document ID.
*/ function Rh() {
return new Ah("__name__");
}
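/*
 * Usage sketch (assumes `Ah` backs the public `FieldPath` export and `Rh`
 * backs `documentId`):
 *
 *   // Nested field "address.city" without dot-string parsing:
 *   const path = new FieldPath("address", "city");
 *   // Filter by document ID using the sentinel path:
 *   const q = query(collection(db, "cities"), where(documentId(), "==", "SF"));
 */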
/**
* Sentinel values that can be used when writing document fields with `set()`
* or `update()`.
*/ class bh {
/**
* @param _methodName - The public API endpoint that returns this class.
* @hideconstructor
*/
constructor(t) {
this._methodName = t;
}
}
/**
* An immutable object representing a geographic location in Firestore. The
* location is represented as latitude/longitude pair.
*
* Latitude values are in the range of [-90, 90].
* Longitude values are in the range of [-180, 180].
*/ class Ph {
/**
* Creates a new immutable `GeoPoint` object with the provided latitude and
* longitude values.
* @param latitude - The latitude as number between -90 and 90.
* @param longitude - The longitude as number between -180 and 180.
*/
constructor(t, e) {
if (!isFinite(t) || t < -90 || t > 90) throw new q(L.INVALID_ARGUMENT, "Latitude must be a number between -90 and 90, but was: " + t);
if (!isFinite(e) || e < -180 || e > 180) throw new q(L.INVALID_ARGUMENT, "Longitude must be a number between -180 and 180, but was: " + e);
this._lat = t, this._long = e;
}
/**
* The latitude of this `GeoPoint` instance.
*/ get latitude() {
return this._lat;
}
/**
* The longitude of this `GeoPoint` instance.
*/ get longitude() {
return this._long;
}
/**
* Returns true if this `GeoPoint` is equal to the provided one.
*
* @param other - The `GeoPoint` to compare against.
* @returns true if this `GeoPoint` is equal to the provided one.
*/ isEqual(t) {
return this._lat === t._lat && this._long === t._long;
}
/** Returns a JSON-serializable representation of this GeoPoint. */ toJSON() {
return {
latitude: this._lat,
longitude: this._long
};
}
/**
* Actually private to JS consumers of our API, so this function is prefixed
* with an underscore.
*/ _compareTo(t) {
return tt(this._lat, t._lat) || tt(this._long, t._long);
}
}
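/*
 * Usage sketch (assumes `Ph` backs the public `GeoPoint` export):
 *
 *   const sf = new GeoPoint(37.7749, -122.4194);
 *   sf.latitude;         // 37.7749
 *   sf.toJSON();         // { latitude: 37.7749, longitude: -122.4194 }
 *   new GeoPoint(91, 0); // throws INVALID_ARGUMENT (latitude > 90)
 */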
/** Matches field names reserved by Firestore (strings that begin and end with "__"). */ const vh = /^__.*__$/;
/** The result of parsing document data (e.g. for a setData call). */ class Vh {
constructor(t, e, n) {
this.data = t, this.fieldMask = e, this.fieldTransforms = n;
}
toMutation(t, e) {
return null !== this.fieldMask ? new ns(t, this.data, this.fieldMask, e, this.fieldTransforms) : new es(t, this.data, e, this.fieldTransforms);
}
}
/** The result of parsing "update" data (i.e. for an updateData call). */ class Sh {
constructor(t,
// The fieldMask does not include document transforms.
e, n) {
this.data = t, this.fieldMask = e, this.fieldTransforms = n;
}
toMutation(t, e) {
return new ns(t, this.data, this.fieldMask, e, this.fieldTransforms);
}
}
function Dh(t) {
switch (t) {
case 0 /* UserDataSource.Set */ :
// fall through
case 2 /* UserDataSource.MergeSet */ :
// fall through
case 1 /* UserDataSource.Update */ :
return !0;
case 3 /* UserDataSource.Argument */ :
case 4 /* UserDataSource.ArrayArgument */ :
return !1;
default:
throw M();
}
}
/** A "context" object passed around while parsing user data. */ class Ch {
/**
* Initializes a ParseContext with the given source and path.
*
* @param settings - The settings for the parser.
* @param databaseId - The database ID of the Firestore instance.
* @param serializer - The serializer to use to generate the Value proto.
* @param ignoreUndefinedProperties - Whether to ignore undefined properties
* rather than throw.
* @param fieldTransforms - A mutable list of field transforms encountered
* while parsing the data.
* @param fieldMask - A mutable list of field paths encountered while parsing
* the data.
*
* TODO(b/34871131): We don't support array paths right now, so path can be
* null to indicate the context represents any location within an array (in
* which case certain features will not work and errors will be somewhat
* compromised).
*/
constructor(t, e, n, s, i, r) {
this.settings = t, this.databaseId = e, this.yt = n, this.ignoreUndefinedProperties = s,
// Minor hack: If fieldTransforms is undefined, we assume this is an
// external call and we need to validate the entire path.
void 0 === i && this.na(), this.fieldTransforms = i || [], this.fieldMask = r || [];
}
get path() {
return this.settings.path;
}
get sa() {
return this.settings.sa;
}
/** Returns a new context with the specified settings overwritten. */ ia(t) {
return new Ch(Object.assign(Object.assign({}, this.settings), t), this.databaseId, this.yt, this.ignoreUndefinedProperties, this.fieldTransforms, this.fieldMask);
}
ra(t) {
var e;
const n = null === (e = this.path) || void 0 === e ? void 0 : e.child(t), s = this.ia({
path: n,
oa: !1
});
return s.ua(t), s;
}
ca(t) {
var e;
const n = null === (e = this.path) || void 0 === e ? void 0 : e.child(t), s = this.ia({
path: n,
oa: !1
});
return s.na(), s;
}
aa(t) {
// TODO(b/34871131): We don't support array paths right now; so make path
// undefined.
return this.ia({
path: void 0,
oa: !0
});
}
ha(t) {
return Yh(t, this.settings.methodName, this.settings.la || !1, this.path, this.settings.fa);
}
/** Returns 'true' if 'fieldPath' was traversed when creating this context. */ contains(t) {
return void 0 !== this.fieldMask.find((e => t.isPrefixOf(e))) || void 0 !== this.fieldTransforms.find((e => t.isPrefixOf(e.field)));
}
na() {
// TODO(b/34871131): Remove null check once we have proper paths for fields
// within arrays.
if (this.path) for (let t = 0; t < this.path.length; t++) this.ua(this.path.get(t));
}
ua(t) {
if (0 === t.length) throw this.ha("Document fields must not be empty");
if (Dh(this.sa) && vh.test(t)) throw this.ha('Document fields cannot begin and end with "__"');
}
}
/**
* Helper for parsing raw user input (provided via the API) into internal model
* classes.
*/ class xh {
constructor(t, e, n) {
this.databaseId = t, this.ignoreUndefinedProperties = e, this.yt = n || Tu(t);
}
/** Creates a new top-level parse context. */ da(t, e, n, s = !1) {
return new Ch({
sa: t,
methodName: e,
fa: n,
path: ct.emptyPath(),
oa: !1,
la: s
}, this.databaseId, this.yt, this.ignoreUndefinedProperties);
}
}
function Nh(t) {
const e = t._freezeSettings(), n = Tu(t._databaseId);
return new xh(t._databaseId, !!e.ignoreUndefinedProperties, n);
}
/** Parse document data from a set() call. */ function kh(t, e, n, s, i, r = {}) {
const o = t.da(r.merge || r.mergeFields ? 2 /* UserDataSource.MergeSet */ : 0 /* UserDataSource.Set */ , e, n, i);
Wh("Data must be an object, but it was:", o, s);
const u = Qh(s, o);
let c, a;
if (r.merge) c = new Xe(o.fieldMask), a = o.fieldTransforms; else if (r.mergeFields) {
const t = [];
for (const s of r.mergeFields) {
const i = zh(e, s, n);
if (!o.contains(i)) throw new q(L.INVALID_ARGUMENT, `Field '${i}' is specified in your field mask but missing from your input data.`);
Xh(t, i) || t.push(i);
}
c = new Xe(t), a = o.fieldTransforms.filter((t => c.covers(t.field)));
} else c = null, a = o.fieldTransforms;
return new Vh(new Ze(u), c, a);
}
class Oh extends bh {
_toFieldTransform(t) {
if (2 /* UserDataSource.MergeSet */ !== t.sa) throw 1 /* UserDataSource.Update */ === t.sa ? t.ha(`${this._methodName}() can only appear at the top level of your update data`) : t.ha(`${this._methodName}() cannot be used with set() unless you pass {merge:true}`);
// No transform to add for a delete, but we need to add it to our
// fieldMask so it gets deleted.
return t.fieldMask.push(t.path), null;
}
isEqual(t) {
return t instanceof Oh;
}
}
/**
* Creates a child context for parsing SerializableFieldValues.
*
* This is different than calling `ParseContext.contextWith` because it keeps
* the fieldTransforms and fieldMask separate.
*
* The created context has its `dataSource` set to `UserDataSource.Argument`.
* Although these values are used with writes, any elements in these FieldValues
* are not considered writes since they cannot contain any FieldValue sentinels,
* etc.
*
* @param fieldValue - The sentinel FieldValue for which to create a child
* context.
* @param context - The parent context.
 * @param arrayElement - Whether or not the FieldValue is being parsed inside an array.
*/ function Mh(t, e, n) {
return new Ch({
sa: 3 /* UserDataSource.Argument */ ,
fa: e.settings.fa,
methodName: t._methodName,
oa: n
}, e.databaseId, e.yt, e.ignoreUndefinedProperties);
}
class Fh extends bh {
_toFieldTransform(t) {
return new Gn(t.path, new Mn);
}
isEqual(t) {
return t instanceof Fh;
}
}
class $h extends bh {
constructor(t, e) {
super(t), this._a = e;
}
_toFieldTransform(t) {
const e = Mh(this, t,
/*array=*/ !0), n = this._a.map((t => Gh(t, e))), s = new Fn(n);
return new Gn(t.path, s);
}
isEqual(t) {
// TODO(mrschmidt): Implement isEquals
return this === t;
}
}
class Bh extends bh {
constructor(t, e) {
super(t), this._a = e;
}
_toFieldTransform(t) {
const e = Mh(this, t,
/*array=*/ !0), n = this._a.map((t => Gh(t, e))), s = new Bn(n);
return new Gn(t.path, s);
}
isEqual(t) {
// TODO(mrschmidt): Implement isEquals
return this === t;
}
}
class Lh extends bh {
constructor(t, e) {
super(t), this.wa = e;
}
_toFieldTransform(t) {
const e = new qn(t.yt, Cn(t.yt, this.wa));
return new Gn(t.path, e);
}
isEqual(t) {
// TODO(mrschmidt): Implement isEquals
return this === t;
}
}
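/*
 * Usage sketch for the sentinel transforms defined above (assumes `Fh`,
 * `$h`, `Bh`, and `Lh` back the public `serverTimestamp`, `arrayUnion`,
 * `arrayRemove`, and `increment` exports):
 *
 *   await updateDoc(doc(db, "posts/p1"), {
 *     updatedAt: serverTimestamp(),
 *     tags: arrayUnion("firestore"),
 *     drafts: arrayRemove("old-tag"),
 *     views: increment(1),
 *   });
 */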
/** Parse update data from an update() call. */ function qh(t, e, n, s) {
const i = t.da(1 /* UserDataSource.Update */ , e, n);
Wh("Data must be an object, but it was:", i, s);
const r = [], o = Ze.empty();
Lt(s, ((t, s) => {
const u = Jh(e, t, n);
// For Compat types, we have to "extract" the underlying types before
// performing validation.
s = _(s);
const c = i.ca(u);
if (s instanceof Oh)
// Add it to the field mask, but don't add anything to updateData.
r.push(u); else {
const t = Gh(s, c);
null != t && (r.push(u), o.set(u, t));
}
}));
const u = new Xe(r);
return new Sh(o, u, i.fieldTransforms);
}
/** Parse update data from a list of field/value arguments. */ function Uh(t, e, n, s, i, r) {
const o = t.da(1 /* UserDataSource.Update */ , e, n), u = [ zh(e, s, n) ], c = [ i ];
if (r.length % 2 != 0) throw new q(L.INVALID_ARGUMENT, `Function ${e}() needs to be called with an even number of arguments that alternate between field names and values.`);
for (let t = 0; t < r.length; t += 2) u.push(zh(e, r[t])), c.push(r[t + 1]);
const a = [], h = Ze.empty();
// We iterate in reverse order to pick the last value for a field if the
// user specified the field multiple times.
for (let t = u.length - 1; t >= 0; --t) if (!Xh(a, u[t])) {
const e = u[t];
let n = c[t];
// For Compat types, we have to "extract" the underlying types before
// performing validation.
n = _(n);
const s = o.ca(e);
if (n instanceof Oh)
// Add it to the field mask, but don't add anything to updateData.
a.push(e); else {
const t = Gh(n, s);
null != t && (a.push(e), h.set(e, t));
}
}
const l = new Xe(a);
return new Sh(h, l, o.fieldTransforms);
}
/**
* Parse a "query value" (e.g. value in a where filter or a value in a cursor
* bound).
*
* @param allowArrays - Whether the query value is an array that may directly
* contain additional arrays (e.g. the operand of an `in` query).
*/ function Kh(t, e, n, s = !1) {
return Gh(n, t.da(s ? 4 /* UserDataSource.ArrayArgument */ : 3 /* UserDataSource.Argument */ , e));
}
/**
* Parses user data to Protobuf Values.
*
* @param input - Data to be parsed.
* @param context - A context object representing the current path being parsed,
* the source of the data being parsed, etc.
* @returns The parsed value, or null if the value was a FieldValue sentinel
* that should not be included in the resulting parsed data.
*/ function Gh(t, e) {
if (jh(
// Unwrap the API type from the Compat SDK. This will return the API type
// from firestore-exp.
t = _(t))) return Wh("Unsupported field value:", e, t), Qh(t, e);
if (t instanceof bh)
// FieldValues usually parse into transforms (except deleteField())
// in which case we do not want to include this field in our parsed data
// (as doing so will overwrite the field directly prior to the transform
// trying to transform it). So we don't add this location to
// context.fieldMask and we return null as our parsing result.
/**
* "Parses" the provided FieldValueImpl, adding any necessary transforms to
* context.fieldTransforms.
*/
return function(t, e) {
// Sentinels are only supported with writes, and not within arrays.
if (!Dh(e.sa)) throw e.ha(`${t._methodName}() can only be used with update() and set()`);
if (!e.path) throw e.ha(`${t._methodName}() is not currently supported inside arrays`);
const n = t._toFieldTransform(e);
n && e.fieldTransforms.push(n);
}(t, e), null;
if (void 0 === t && e.ignoreUndefinedProperties)
// If the input is undefined it can never participate in the fieldMask, so
// don't handle this below. If `ignoreUndefinedProperties` is false,
// `parseScalarValue` will reject an undefined value.
return null;
if (
// If context.path is null we are inside an array and we don't support
// field mask paths more granular than the top-level array.
e.path && e.fieldMask.push(e.path), t instanceof Array) {
// TODO(b/34871131): Include the path containing the array in the error
// message.
// In the case of IN queries, the parsed data is an array (representing
// the set of values to be included for the IN query) that may directly
// contain additional arrays (each representing an individual field
// value), so we disable this validation.
if (e.settings.oa && 4 /* UserDataSource.ArrayArgument */ !== e.sa) throw e.ha("Nested arrays are not supported");
return function(t, e) {
const n = [];
let s = 0;
for (const i of t) {
let t = Gh(i, e.aa(s));
null == t && (
// Just include nulls in the array for fields being replaced with a
// sentinel.
t = {
nullValue: "NULL_VALUE"
}), n.push(t), s++;
}
return {
arrayValue: {
values: n
}
};
}(t, e);
}
/**
 * Helper to parse a scalar value (i.e. not an Object, Array, or FieldValue).
 *
 * @returns The parsed value
 */
return function(t, e) {
if (null === (t = _(t))) return {
nullValue: "NULL_VALUE"
};
if ("number" == typeof t) return Cn(e.yt, t);
if ("boolean" == typeof t) return {
booleanValue: t
};
if ("string" == typeof t) return {
stringValue: t
};
if (t instanceof Date) {
const n = st.fromDate(t);
return {
timestampValue: Ls(e.yt, n)
};
}
if (t instanceof st) {
// Firestore backend truncates precision down to microseconds. To ensure
// offline mode works the same with regards to truncation, perform the
// truncation immediately without waiting for the backend to do that.
const n = new st(t.seconds, 1e3 * Math.floor(t.nanoseconds / 1e3));
return {
timestampValue: Ls(e.yt, n)
};
}
if (t instanceof Ph) return {
geoPointValue: {
latitude: t.latitude,
longitude: t.longitude
}
};
if (t instanceof Eh) return {
bytesValue: qs(e.yt, t._byteString)
};
if (t instanceof Ia) {
const n = e.databaseId, s = t.firestore._databaseId;
if (!s.isEqual(n)) throw e.ha(`Document reference is for database ${s.projectId}/${s.database} but should be for database ${n.projectId}/${n.database}`);
return {
referenceValue: Gs(t.firestore._databaseId || e.databaseId, t._key.path)
};
}
throw e.ha(`Unsupported field value: ${da(t)}`);
}(t, e);
}
function Qh(t, e) {
const n = {};
return qt(t) ?
// If we encounter an empty object, we explicitly add it to the update
// mask to ensure that the server creates a map entry.
e.path && e.path.length > 0 && e.fieldMask.push(e.path) : Lt(t, ((t, s) => {
const i = Gh(s, e.ra(t));
null != i && (n[t] = i);
})), {
mapValue: {
fields: n
}
};
}
/**
 * Checks whether an object looks like a JSON object that should be converted
 * into a struct. Normal class/prototype instances are considered to look like
 * JSON objects since they should be converted to a struct value. Arrays, Dates,
 * GeoPoints, etc. are not considered to look like JSON objects since they map
 * to specific FieldValue types other than ObjectValue.
 */
function jh(t) {
return !("object" != typeof t || null === t || t instanceof Array || t instanceof Date || t instanceof st || t instanceof Ph || t instanceof Eh || t instanceof Ia || t instanceof bh);
}
function Wh(t, e, n) {
if (!jh(n) || !function(t) {
return "object" == typeof t && null !== t && (Object.getPrototypeOf(t) === Object.prototype || null === Object.getPrototypeOf(t));
}(n)) {
const s = da(n);
throw "an object" === s ? e.ha(t + " a custom object") : e.ha(t + " " + s);
}
}
/**
* Helper that calls fromDotSeparatedString() but wraps any error thrown.
*/ function zh(t, e, n) {
if ((
// If required, replace the FieldPath Compat class with the firestore-exp
// FieldPath.
e = _(e)) instanceof Ah) return e._internalPath;
if ("string" == typeof e) return Jh(t, e);
throw Yh("Field path arguments must be of type string or ", t,
/* hasConverter= */ !1,
/* path= */ void 0, n);
}
/**
* Matches any characters in a field path string that are reserved.
*/ const Hh = new RegExp("[~\\*/\\[\\]]");
/**
 * Wraps `fromDotSeparatedString()` and rethrows any error with a message
 * that names the public method that was called.
* @param methodName - The publicly visible method name
* @param path - The dot-separated string form of a field path which will be
* split on dots.
* @param targetDoc - The document against which the field path will be
* evaluated.
*/ function Jh(t, e, n) {
if (e.search(Hh) >= 0) throw Yh(`Invalid field path (${e}). Paths must not contain '~', '*', '/', '[', or ']'`, t,
/* hasConverter= */ !1,
/* path= */ void 0, n);
try {
return new Ah(...e.split("."))._internalPath;
} catch (s) {
throw Yh(`Invalid field path (${e}). Paths must not be empty, begin with '.', end with '.', or contain '..'`, t,
/* hasConverter= */ !1,
/* path= */ void 0, n);
}
}
function Yh(t, e, n, s, i) {
const r = s && !s.isEmpty(), o = void 0 !== i;
let u = `Function ${e}() called with invalid data`;
n && (u += " (via `toFirestore()`)"), u += ". ";
let c = "";
return (r || o) && (c += " (found", r && (c += ` in field ${s}`), o && (c += ` in document ${i}`),
c += ")"), new q(L.INVALID_ARGUMENT, u + t + c);
}
/** Checks `haystack` if FieldPath `needle` is present. Runs in O(n). */ function Xh(t, e) {
return t.some((t => t.isEqual(e)));
}
/**
* A `DocumentSnapshot` contains data read from a document in your Firestore
* database. The data can be extracted with `.data()` or `.get()` to
* get a specific field.
*
* For a `DocumentSnapshot` that points to a non-existing document, any data
* access will return 'undefined'. You can use the `exists()` method to
* explicitly verify a document's existence.
*/ class Zh {
// Note: This class is stripped down version of the DocumentSnapshot in
// the legacy SDK. The changes are:
// - No support for SnapshotMetadata.
// - No support for SnapshotOptions.
/** @hideconstructor protected */
constructor(t, e, n, s, i) {
this._firestore = t, this._userDataWriter = e, this._key = n, this._document = s,
this._converter = i;
}
/** Property of the `DocumentSnapshot` that provides the document's ID. */ get id() {
return this._key.path.lastSegment();
}
/**
* The `DocumentReference` for the document included in the `DocumentSnapshot`.
*/ get ref() {
return new Ia(this._firestore, this._converter, this._key);
}
/**
* Signals whether or not the document at the snapshot's location exists.
*
* @returns true if the document exists.
*/ exists() {
return null !== this._document;
}
/**
* Retrieves all fields in the document as an `Object`. Returns `undefined` if
* the document doesn't exist.
*
* @returns An `Object` containing all fields in the document or `undefined`
* if the document doesn't exist.
*/ data() {
if (this._document) {
if (this._converter) {
// We only want to use the converter and create a new DocumentSnapshot
// if a converter has been provided.
const t = new tl(this._firestore, this._userDataWriter, this._key, this._document,
/* converter= */ null);
return this._converter.fromFirestore(t);
}
return this._userDataWriter.convertValue(this._document.data.value);
}
}
/**
* Retrieves the field specified by `fieldPath`. Returns `undefined` if the
* document or field doesn't exist.
*
* @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
* field.
* @returns The data at the specified field location or undefined if no such
* field exists in the document.
*/
// We are using `any` here to avoid an explicit cast by our users.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
get(t) {
if (this._document) {
const e = this._document.data.field(el("DocumentSnapshot.get", t));
if (null !== e) return this._userDataWriter.convertValue(e);
}
}
}
/**
* A `QueryDocumentSnapshot` contains data read from a document in your
* Firestore database as part of a query. The document is guaranteed to exist
* and its data can be extracted with `.data()` or `.get()` to get a
* specific field.
*
* A `QueryDocumentSnapshot` offers the same API surface as a
* `DocumentSnapshot`. Since query results contain only existing documents, the
* `exists` property will always be true and `data()` will never return
* 'undefined'.
*/ class tl extends Zh {
/**
* Retrieves all fields in the document as an `Object`.
*
* @override
* @returns An `Object` containing all fields in the document.
*/
data() {
return super.data();
}
}
/**
* Helper that calls `fromDotSeparatedString()` but wraps any error thrown.
*/ function el(t, e) {
return "string" == typeof e ? Jh(t, e) : e instanceof Ah ? e._internalPath : e._delegate._internalPath;
}
/** Validates that a `limitToLast()` query specifies at least one `orderBy()` clause. */ function nl(t) {
if ("L" /* LimitType.Last */ === t.limitType && 0 === t.explicitOrderBy.length) throw new q(L.UNIMPLEMENTED, "limitToLast() queries require specifying at least one orderBy() clause");
}
/**
* An `AppliableConstraint` is an abstraction of a constraint that can be applied
* to a Firestore query.
*/ class sl {}
/**
* A `QueryConstraint` is used to narrow the set of documents returned by a
* Firestore query. `QueryConstraint`s are created by invoking {@link where},
* {@link orderBy}, {@link startAt}, {@link startAfter}, {@link
* endBefore}, {@link endAt}, {@link limit}, {@link limitToLast} and
* can then be passed to {@link query} to create a new query instance that
* also contains this `QueryConstraint`.
*/ class il extends sl {}
function rl(t, e, ...n) {
let s = [];
e instanceof sl && s.push(e), s = s.concat(n), function(t) {
const e = t.filter((t => t instanceof cl)).length, n = t.filter((t => t instanceof ol)).length;
if (e > 1 || e > 0 && n > 0) throw new q(L.INVALID_ARGUMENT, "Invalid query. When using composite filters, you cannot use more than one filter at the top level. Consider nesting the multiple filters within an `and(...)` statement. For example: change `query(query, where(...), or(...))` to `query(query, and(where(...), or(...)))`.");
}(s);
for (const e of s) t = e._apply(t);
return t;
}
/**
* A `QueryFieldFilterConstraint` is used to narrow the set of documents returned by
* a Firestore query by filtering on one or more document fields.
* `QueryFieldFilterConstraint`s are created by invoking {@link where} and can then
* be passed to {@link query} to create a new query instance that also contains
* this `QueryFieldFilterConstraint`.
*/ class ol extends il {
/**
* @internal
*/
constructor(t, e, n) {
super(), this._field = t, this._op = e, this._value = n,
/** The type of this query constraint */
this.type = "where";
}
static _create(t, e, n) {
return new ol(t, e, n);
}
_apply(t) {
const e = this._parse(t);
return bl(t._query, e), new Ta(t.firestore, t.converter, In(t._query, e));
}
_parse(t) {
const e = Nh(t.firestore), n = function(t, e, n, s, i, r, o) {
let u;
if (i.isKeyField()) {
if ("array-contains" /* Operator.ARRAY_CONTAINS */ === r || "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */ === r) throw new q(L.INVALID_ARGUMENT, `Invalid Query. You can't perform '${r}' queries on documentId().`);
if ("in" /* Operator.IN */ === r || "not-in" /* Operator.NOT_IN */ === r) {
Rl(o, r);
const e = [];
for (const n of o) e.push(Al(s, t, n));
u = {
arrayValue: {
values: e
}
};
} else u = Al(s, t, o);
} else "in" /* Operator.IN */ !== r && "not-in" /* Operator.NOT_IN */ !== r && "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */ !== r || Rl(o, r),
u = Kh(n, e, o,
/* allowArrays= */ "in" /* Operator.IN */ === r || "not-in" /* Operator.NOT_IN */ === r);
return Pe.create(i, r, u);
}(t._query, "where", e, t.firestore._databaseId, this._field, this._op, this._value);
return n;
}
}
/**
* Creates a {@link QueryFieldFilterConstraint} that enforces that documents
* must contain the specified field and that the value should satisfy the
* relation constraint provided.
*
* @param fieldPath - The path to compare
 * @param opStr - The operation string (e.g "<", "<=", "==", ">",
 * ">=", "!=").
* @param value - The value for comparison
* @returns The created {@link QueryFieldFilterConstraint}.
*/ function ul(t, e, n) {
const s = e, i = el("where", t);
return ol._create(i, s, n);
}
/**
* A `QueryCompositeFilterConstraint` is used to narrow the set of documents
* returned by a Firestore query by performing the logical OR or AND of multiple
* {@link QueryFieldFilterConstraint}s or {@link QueryCompositeFilterConstraint}s.
* `QueryCompositeFilterConstraint`s are created by invoking {@link or} or
* {@link and} and can then be passed to {@link query} to create a new query
* instance that also contains the `QueryCompositeFilterConstraint`.
* @internal TODO remove this internal tag with OR Query support in the server
*/ class cl extends sl {
/**
* @internal
*/
constructor(
/** The type of this query constraint */
t, e) {
super(), this.type = t, this._queryConstraints = e;
}
static _create(t, e) {
return new cl(t, e);
}
_parse(t) {
const e = this._queryConstraints.map((e => e._parse(t))).filter((t => t.getFilters().length > 0));
return 1 === e.length ? e[0] : ve.create(e, this._getOperator());
}
_apply(t) {
const e = this._parse(t);
return 0 === e.getFilters().length ? t : (function(t, e) {
let n = t;
const s = e.getFlattenedFilters();
for (const t of s) bl(n, t), n = In(n, t);
}
(t._query, e), new Ta(t.firestore, t.converter, In(t._query, e)));
}
_getQueryConstraints() {
return this._queryConstraints;
}
_getOperator() {
return "and" === this.type ? "and" /* CompositeOperator.AND */ : "or" /* CompositeOperator.OR */;
}
}
/**
* Creates a new {@link QueryCompositeFilterConstraint} that is a disjunction of
* the given filter constraints. A disjunction filter includes a document if it
* satisfies any of the given filters.
*
* @param queryConstraints - Optional. The list of
* {@link QueryFilterConstraint}s to perform a disjunction for. These must be
* created with calls to {@link where}, {@link or}, or {@link and}.
* @returns The newly created {@link QueryCompositeFilterConstraint}.
* @internal TODO remove this internal tag with OR Query support in the server
*/ function al(...t) {
// Only support QueryFilterConstraints
return t.forEach((t => vl("or", t))), cl._create("or" /* CompositeOperator.OR */ , t);
}
/**
* Creates a new {@link QueryCompositeFilterConstraint} that is a conjunction of
* the given filter constraints. A conjunction filter includes a document if it
* satisfies all of the given filters.
*
* @param queryConstraints - Optional. The list of
* {@link QueryFilterConstraint}s to perform a conjunction for. These must be
* created with calls to {@link where}, {@link or}, or {@link and}.
* @returns The newly created {@link QueryCompositeFilterConstraint}.
* @internal TODO remove this internal tag with OR Query support in the server
*/ function hl(...t) {
// Only support QueryFilterConstraints
return t.forEach((t => vl("and", t))), cl._create("and" /* CompositeOperator.AND */ , t);
}
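/*
 * Usage sketch for composite filters (assumes `ul`, `al`, and `hl` back the
 * public `where`, `or`, and `and` exports; `or`/`and` were still marked
 * @internal at this SDK version):
 *
 *   const q = query(
 *     collection(db, "cities"),
 *     and(where("state", "==", "CA"),
 *         or(where("capital", "==", true), where("population", ">", 1000000)))
 *   );
 *   // Note: only one composite filter may appear at the top level of query().
 */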
/**
* A `QueryOrderByConstraint` is used to sort the set of documents returned by a
* Firestore query. `QueryOrderByConstraint`s are created by invoking
* {@link orderBy} and can then be passed to {@link query} to create a new query
* instance that also contains this `QueryOrderByConstraint`.
*
* Note: Documents that do not contain the orderBy field will not be present in
* the query result.
*/ class ll extends il {
/**
* @internal
*/
constructor(t, e) {
super(), this._field = t, this._direction = e,
/** The type of this query constraint */
this.type = "orderBy";
}
static _create(t, e) {
return new ll(t, e);
}
_apply(t) {
const e = function(t, e, n) {
if (null !== t.startAt) throw new q(L.INVALID_ARGUMENT, "Invalid query. You must not call startAt() or startAfter() before calling orderBy().");
if (null !== t.endAt) throw new q(L.INVALID_ARGUMENT, "Invalid query. You must not call endAt() or endBefore() before calling orderBy().");
const s = new Ge(e, n);
return function(t, e) {
if (null === wn(t)) {
// This is the first order by. It must match any inequality.
const n = mn(t);
null !== n && Pl(t, n, e.field);
}
}(t, s), s;
}(t._query, this._field, this._direction);
return new Ta(t.firestore, t.converter, function(t, e) {
// TODO(dimond): validate that orderBy does not list the same key twice.
const n = t.explicitOrderBy.concat([ e ]);
return new ln(t.path, t.collectionGroup, n, t.filters.slice(), t.limit, t.limitType, t.startAt, t.endAt);
}(t._query, e));
}
}
/**
* Creates a {@link QueryOrderByConstraint} that sorts the query result by the
* specified field, optionally in descending order instead of ascending.
*
* Note: Documents that do not contain the specified field will not be present
* in the query result.
*
* @param fieldPath - The field to sort by.
* @param directionStr - Optional direction to sort by ('asc' or 'desc'). If
* not specified, order will be ascending.
* @returns The created {@link QueryOrderByConstraint}.
*/ function fl(t, e = "asc") {
const n = e, s = el("orderBy", t);
return ll._create(s, n);
}
/**
* A `QueryLimitConstraint` is used to limit the number of documents returned by
* a Firestore query.
* `QueryLimitConstraint`s are created by invoking {@link limit} or
* {@link limitToLast} and can then be passed to {@link query} to create a new
* query instance that also contains this `QueryLimitConstraint`.
*/ class dl extends il {
/**
* @internal
*/
constructor(
/** The type of this query constraint */
t, e, n) {
super(), this.type = t, this._limit = e, this._limitType = n;
}
static _create(t, e, n) {
return new dl(t, e, n);
}
_apply(t) {
return new Ta(t.firestore, t.converter, Tn(t._query, this._limit, this._limitType));
}
}
/**
* Creates a {@link QueryLimitConstraint} that only returns the first matching
* documents.
*
* @param limit - The maximum number of items to return.
* @returns The created {@link QueryLimitConstraint}.
*/ function _l(t) {
return wa("limit", t), dl._create("limit", t, "F" /* LimitType.First */);
}
/**
* Creates a {@link QueryLimitConstraint} that only returns the last matching
* documents.
*
* You must specify at least one `orderBy` clause for `limitToLast` queries,
* otherwise an exception will be thrown during execution.
*
* @param limit - The maximum number of items to return.
* @returns The created {@link QueryLimitConstraint}.
*/ function wl(t) {
return wa("limitToLast", t), dl._create("limitToLast", t, "L" /* LimitType.Last */);
}
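/**
 * A minimal sketch contrasting `limit` and `limitToLast`, assuming the same
 * hypothetical `db` and 'cities' collection. Note that `limitToLast` requires
 * at least one `orderBy` clause, otherwise execution throws:
 *
 * @example
 * // The three cities with the smallest population.
 * const first3 = query(collection(db, 'cities'), orderBy('population'), limit(3));
 * // The three cities with the largest population, still in ascending order.
 * const last3 = query(collection(db, 'cities'), orderBy('population'), limitToLast(3));
 */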
/**
* A `QueryStartAtConstraint` is used to exclude documents from the start of a
* result set returned by a Firestore query.
* `QueryStartAtConstraint`s are created by invoking {@link (startAt:1)} or
* {@link (startAfter:1)} and can then be passed to {@link query} to create a
* new query instance that also contains this `QueryStartAtConstraint`.
*/ class ml extends il {
/**
* @internal
*/
constructor(
/** The type of this query constraint */
t, e, n) {
super(), this.type = t, this._docOrFields = e, this._inclusive = n;
}
static _create(t, e, n) {
return new ml(t, e, n);
}
_apply(t) {
const e = El(t, this.type, this._docOrFields, this._inclusive);
return new Ta(t.firestore, t.converter, function(t, e) {
return new ln(t.path, t.collectionGroup, t.explicitOrderBy.slice(), t.filters.slice(), t.limit, t.limitType, e, t.endAt);
}(t._query, e));
}
}
/**
 * Creates a {@link QueryStartAtConstraint} that modifies the result set to
 * start at the provided fields relative to the order of the query. The order
 * of the field values must match the order of the order-by clauses of the
 * query.
 */ function gl(...t) {
return ml._create("startAt", t,
/*inclusive=*/ !0);
}
/**
 * Creates a {@link QueryStartAtConstraint} that modifies the result set to
 * start after the provided fields relative to the order of the query.
 */ function yl(...t) {
return ml._create("startAfter", t,
/*inclusive=*/ !1);
}
/**
* A `QueryEndAtConstraint` is used to exclude documents from the end of a
* result set returned by a Firestore query.
* `QueryEndAtConstraint`s are created by invoking {@link (endAt:1)} or
* {@link (endBefore:1)} and can then be passed to {@link query} to create a new
* query instance that also contains this `QueryEndAtConstraint`.
*/ class pl extends il {
/**
* @internal
*/
constructor(
/** The type of this query constraint */
t, e, n) {
super(), this.type = t, this._docOrFields = e, this._inclusive = n;
}
static _create(t, e, n) {
return new pl(t, e, n);
}
_apply(t) {
const e = El(t, this.type, this._docOrFields, this._inclusive);
return new Ta(t.firestore, t.converter, function(t, e) {
return new ln(t.path, t.collectionGroup, t.explicitOrderBy.slice(), t.filters.slice(), t.limit, t.limitType, t.startAt, e);
}(t._query, e));
}
}
/**
 * Creates a {@link QueryEndAtConstraint} that modifies the result set to end
 * before the provided fields relative to the order of the query.
 */ function Il(...t) {
return pl._create("endBefore", t,
/*inclusive=*/ !1);
}
/**
 * Creates a {@link QueryEndAtConstraint} that modifies the result set to end
 * at the provided fields relative to the order of the query.
 */ function Tl(...t) {
return pl._create("endAt", t,
/*inclusive=*/ !0);
}
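/**
 * A minimal cursor-pagination sketch combining `startAfter`/`endAt` with
 * field values and with a `DocumentSnapshot`, assuming the hypothetical
 * 'cities' collection from the sketches above:
 *
 * @example
 * // Field-value cursors: population in (1000000, 5000000].
 * const byValue = query(collection(db, 'cities'),
 *   orderBy('population'), startAfter(1000000), endAt(5000000));
 *
 * // Snapshot cursor: fetch the page that starts right after `lastVisible`.
 * const page1 = await getDocs(query(collection(db, 'cities'), orderBy('population'), limit(25)));
 * const lastVisible = page1.docs[page1.docs.length - 1];
 * const page2 = query(collection(db, 'cities'),
 *   orderBy('population'), startAfter(lastVisible), limit(25));
 */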
/** Helper function to create a bound from a document or fields */ function El(t, e, n, s) {
if (n[0] = _(n[0]), n[0] instanceof Zh) return function(t, e, n, s, i) {
if (!s) throw new q(L.NOT_FOUND, `Can't use a DocumentSnapshot that doesn't exist for ${n}().`);
const r = [];
// Because people expect to continue/end a query at the exact document
// provided, we need to use the implicit sort order rather than the explicit
// sort order, because it's guaranteed to contain the document key. That way
// the position becomes unambiguous and the query continues/ends exactly at
// the provided document. Without the key (by using the explicit sort
// orders), multiple documents could match the position, yielding duplicate
// results.
for (const n of yn(t)) if (n.field.isKeyField()) r.push(he(e, s.key)); else {
const t = s.data.field(n.field);
if (Xt(t)) throw new q(L.INVALID_ARGUMENT, 'Invalid query. You are trying to start or end a query using a document for which the field "' + n.field + '" is an uncommitted server timestamp. (Since the value of this field is unknown, you cannot start/end a query with it.)');
if (null === t) {
const t = n.field.canonicalString();
throw new q(L.INVALID_ARGUMENT, `Invalid query. You are trying to start or end a query using a document for which the field '${t}' (used as the orderBy) does not exist.`);
}
r.push(t);
}
return new Ee(r, i);
}
/**
* Converts a list of field values to a `Bound` for the given query.
*/ (t._query, t.firestore._databaseId, e, n[0]._document, s);
{
const i = Nh(t.firestore);
return function(t, e, n, s, i, r) {
// Use explicit order by's because it has to match the query the user made
const o = t.explicitOrderBy;
if (i.length > o.length) throw new q(L.INVALID_ARGUMENT, `Too many arguments provided to ${s}(). The number of arguments must be less than or equal to the number of orderBy() clauses`);
const u = [];
for (let r = 0; r < i.length; r++) {
const c = i[r];
if (o[r].field.isKeyField()) {
if ("string" != typeof c) throw new q(L.INVALID_ARGUMENT, `Invalid query. Expected a string for document ID in ${s}(), but got a ${typeof c}`);
if (!gn(t) && -1 !== c.indexOf("/")) throw new q(L.INVALID_ARGUMENT, `Invalid query. When querying a collection and ordering by documentId(), the value passed to ${s}() must be a plain document ID, but '${c}' contains a slash.`);
const n = t.path.child(ot.fromString(c));
if (!at.isDocumentKey(n)) throw new q(L.INVALID_ARGUMENT, `Invalid query. When querying a collection group and ordering by documentId(), the value passed to ${s}() must result in a valid document path, but '${n}' is not because it contains an odd number of segments.`);
const i = new at(n);
u.push(he(e, i));
} else {
const t = Kh(n, s, c);
u.push(t);
}
}
return new Ee(u, r);
}
/**
* Parses the given `documentIdValue` into a `ReferenceValue`, throwing
* appropriate errors if the value is anything other than a `DocumentReference`
* or `string`, or if the string is malformed.
*/ (t._query, t.firestore._databaseId, i, e, n, s);
}
}
function Al(t, e, n) {
if ("string" == typeof (n = _(n))) {
if ("" === n) throw new q(L.INVALID_ARGUMENT, "Invalid query. When querying with documentId(), you must provide a valid document ID, but it was an empty string.");
if (!gn(e) && -1 !== n.indexOf("/")) throw new q(L.INVALID_ARGUMENT, `Invalid query. When querying a collection by documentId(), you must provide a plain document ID, but '${n}' contains a '/' character.`);
const s = e.path.child(ot.fromString(n));
if (!at.isDocumentKey(s)) throw new q(L.INVALID_ARGUMENT, `Invalid query. When querying a collection group by documentId(), the value provided must result in a valid document path, but '${s}' is not because it has an odd number of segments (${s.length}).`);
return he(t, new at(s));
}
if (n instanceof Ia) return he(t, n._key);
throw new q(L.INVALID_ARGUMENT, `Invalid query. When querying with documentId(), you must provide a valid string or a DocumentReference, but it was: ${da(n)}.`);
}
/**
* Validates that the value passed into a disjunctive filter satisfies all
* array requirements.
*/ function Rl(t, e) {
if (!Array.isArray(t) || 0 === t.length) throw new q(L.INVALID_ARGUMENT, `Invalid Query. A non-empty array is required for '${e.toString()}' filters.`);
if (t.length > 10) throw new q(L.INVALID_ARGUMENT, `Invalid Query. '${e.toString()}' filters support a maximum of 10 elements in the value array.`);
}
/**
* Given an operator, returns the set of operators that cannot be used with it.
*
* Operators in a query must adhere to the following set of rules:
* 1. Only one array operator is allowed.
* 2. Only one disjunctive operator is allowed.
* 3. `NOT_EQUAL` cannot be used with another `NOT_EQUAL` operator.
* 4. `NOT_IN` cannot be used with array, disjunctive, or `NOT_EQUAL` operators.
*
* Array operators: `ARRAY_CONTAINS`, `ARRAY_CONTAINS_ANY`
* Disjunctive operators: `IN`, `ARRAY_CONTAINS_ANY`, `NOT_IN`
*/ function bl(t, e) {
if (e.isInequality()) {
const n = mn(t), s = e.field;
if (null !== n && !n.isEqual(s)) throw new q(L.INVALID_ARGUMENT, `Invalid query. All where filters with an inequality (<, <=, !=, not-in, >, or >=) must be on the same field. But you have inequality filters on '${n.toString()}' and '${s.toString()}'`);
const i = wn(t);
null !== i && Pl(t, s, i);
}
const n = function(t, e) {
for (const n of t) for (const t of n.getFlattenedFilters()) if (e.indexOf(t.op) >= 0) return t.op;
return null;
}(t.filters, function(t) {
switch (t) {
case "!=" /* Operator.NOT_EQUAL */ :
return [ "!=" /* Operator.NOT_EQUAL */ , "not-in" /* Operator.NOT_IN */ ];
case "array-contains" /* Operator.ARRAY_CONTAINS */ :
return [ "array-contains" /* Operator.ARRAY_CONTAINS */ , "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */ , "not-in" /* Operator.NOT_IN */ ];
case "in" /* Operator.IN */ :
return [ "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */ , "in" /* Operator.IN */ , "not-in" /* Operator.NOT_IN */ ];
case "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */ :
return [ "array-contains" /* Operator.ARRAY_CONTAINS */ , "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */ , "in" /* Operator.IN */ , "not-in" /* Operator.NOT_IN */ ];
case "not-in" /* Operator.NOT_IN */ :
return [ "array-contains" /* Operator.ARRAY_CONTAINS */ , "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */ , "in" /* Operator.IN */ , "not-in" /* Operator.NOT_IN */ , "!=" /* Operator.NOT_EQUAL */ ];
default:
return [];
}
}(e.op));
if (null !== n)
// Special case when it's a duplicate op to give a slightly clearer error message.
throw n === e.op ? new q(L.INVALID_ARGUMENT, `Invalid query. You cannot use more than one '${e.op.toString()}' filter.`) : new q(L.INVALID_ARGUMENT, `Invalid query. You cannot use '${e.op.toString()}' filters with '${n.toString()}' filters.`);
}
function Pl(t, e, n) {
if (!n.isEqual(e)) throw new q(L.INVALID_ARGUMENT, `Invalid query. You have a where filter with an inequality (<, <=, !=, not-in, >, or >=) on field '${e.toString()}' and so you must also use '${e.toString()}' as your first argument to orderBy(), but your first orderBy() is on field '${n.toString()}' instead.`);
}
function vl(t, e) {
if (!(e instanceof ol || e instanceof cl)) throw new q(L.INVALID_ARGUMENT, `Function ${t}() requires AppliableConstraints created with a call to 'where(...)', 'or(...)', or 'and(...)'.`);
}
class Vl {
convertValue(t, e = "none") {
switch (se(t)) {
case 0 /* TypeOrder.NullValue */ :
return null;
case 1 /* TypeOrder.BooleanValue */ :
return t.booleanValue;
case 2 /* TypeOrder.NumberValue */ :
return Jt(t.integerValue || t.doubleValue);
case 3 /* TypeOrder.TimestampValue */ :
return this.convertTimestamp(t.timestampValue);
case 4 /* TypeOrder.ServerTimestampValue */ :
return this.convertServerTimestamp(t, e);
case 5 /* TypeOrder.StringValue */ :
return t.stringValue;
case 6 /* TypeOrder.BlobValue */ :
return this.convertBytes(Yt(t.bytesValue));
case 7 /* TypeOrder.RefValue */ :
return this.convertReference(t.referenceValue);
case 8 /* TypeOrder.GeoPointValue */ :
return this.convertGeoPoint(t.geoPointValue);
case 9 /* TypeOrder.ArrayValue */ :
return this.convertArray(t.arrayValue, e);
case 10 /* TypeOrder.ObjectValue */ :
return this.convertObject(t.mapValue, e);
default:
throw M();
}
}
convertObject(t, e) {
const n = {};
return Lt(t.fields, ((t, s) => {
n[t] = this.convertValue(s, e);
})), n;
}
convertGeoPoint(t) {
return new Ph(Jt(t.latitude), Jt(t.longitude));
}
convertArray(t, e) {
return (t.values || []).map((t => this.convertValue(t, e)));
}
convertServerTimestamp(t, e) {
switch (e) {
case "previous":
const n = Zt(t);
return null == n ? null : this.convertValue(n, e);
case "estimate":
return this.convertTimestamp(te(t));
default:
return null;
}
}
convertTimestamp(t) {
const e = Ht(t);
return new st(e.seconds, e.nanos);
}
convertDocumentKey(t, e) {
const n = ot.fromString(t);
F(gi(n));
const s = new $t(n.get(1), n.get(3)), i = new at(n.popFirst(5));
return s.isEqual(e) ||
// TODO(b/64130202): Somehow support foreign references.
N(`Document ${i} contains a document reference within a different database (${s.projectId}/${s.database}) which is not supported. It will be treated as a reference in the current database (${e.projectId}/${e.database}) instead.`),
i;
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Converts custom model object of type T into `DocumentData` by applying the
* converter if it exists.
*
* This function is used when converting user objects to `DocumentData`
* because we want to provide the user with a more specific error message if
 * their `set()` or `update()` call fails due to invalid data originating from a `toFirestore()`
* call.
*/ function Sl(t, e, n) {
let s;
// Cast to `any` in order to satisfy the union type constraint on
// toFirestore().
// eslint-disable-next-line @typescript-eslint/no-explicit-any
return s = t ? n && (n.merge || n.mergeFields) ? t.toFirestore(e, n) : t.toFirestore(e) : e,
s;
}
class Dl extends Vl {
constructor(t) {
super(), this.firestore = t;
}
convertBytes(t) {
return new Eh(t);
}
convertReference(t) {
const e = this.convertDocumentKey(t, this.firestore._databaseId);
return new Ia(this.firestore, /* converter= */ null, e);
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Metadata about a snapshot, describing the state of the snapshot.
*/ class Cl {
/** @hideconstructor */
constructor(t, e) {
this.hasPendingWrites = t, this.fromCache = e;
}
/**
* Returns true if this `SnapshotMetadata` is equal to the provided one.
*
* @param other - The `SnapshotMetadata` to compare against.
* @returns true if this `SnapshotMetadata` is equal to the provided one.
*/ isEqual(t) {
return this.hasPendingWrites === t.hasPendingWrites && this.fromCache === t.fromCache;
}
}
/**
* A `DocumentSnapshot` contains data read from a document in your Firestore
 * database. The data can be extracted with `.data()` or `.get(<field>)` to
* get a specific field.
*
* For a `DocumentSnapshot` that points to a non-existing document, any data
* access will return 'undefined'. You can use the `exists()` method to
* explicitly verify a document's existence.
*/ class xl extends Zh {
/** @hideconstructor protected */
constructor(t, e, n, s, i, r) {
super(t, e, n, s, r), this._firestore = t, this._firestoreImpl = t, this.metadata = i;
}
/**
 * Returns whether or not the document exists: `true` if it does, `false` otherwise.
*/ exists() {
return super.exists();
}
/**
* Retrieves all fields in the document as an `Object`. Returns `undefined` if
* the document doesn't exist.
*
* By default, `serverTimestamp()` values that have not yet been
* set to their final value will be returned as `null`. You can override
* this by passing an options object.
*
* @param options - An options object to configure how data is retrieved from
* the snapshot (for example the desired behavior for server timestamps that
* have not yet been set to their final value).
* @returns An `Object` containing all fields in the document or `undefined` if
* the document doesn't exist.
*/ data(t = {}) {
if (this._document) {
if (this._converter) {
// We only want to use the converter and create a new DocumentSnapshot
// if a converter has been provided.
const e = new Nl(this._firestore, this._userDataWriter, this._key, this._document, this.metadata,
/* converter= */ null);
return this._converter.fromFirestore(e, t);
}
return this._userDataWriter.convertValue(this._document.data.value, t.serverTimestamps);
}
}
/**
* Retrieves the field specified by `fieldPath`. Returns `undefined` if the
* document or field doesn't exist.
*
* By default, a `serverTimestamp()` that has not yet been set to
* its final value will be returned as `null`. You can override this by
* passing an options object.
*
* @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
* field.
* @param options - An options object to configure how the field is retrieved
* from the snapshot (for example the desired behavior for server timestamps
* that have not yet been set to their final value).
* @returns The data at the specified field location or undefined if no such
* field exists in the document.
*/
// We are using `any` here to avoid an explicit cast by our users.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
get(t, e = {}) {
if (this._document) {
const n = this._document.data.field(el("DocumentSnapshot.get", t));
if (null !== n) return this._userDataWriter.convertValue(n, e.serverTimestamps);
}
}
}
/**
* A `QueryDocumentSnapshot` contains data read from a document in your
* Firestore database as part of a query. The document is guaranteed to exist
 * and its data can be extracted with `.data()` or `.get(<field>)` to get a
* specific field.
*
* A `QueryDocumentSnapshot` offers the same API surface as a
* `DocumentSnapshot`. Since query results contain only existing documents, the
* `exists` property will always be true and `data()` will never return
* 'undefined'.
*/ class Nl extends xl {
/**
* Retrieves all fields in the document as an `Object`.
*
* By default, `serverTimestamp()` values that have not yet been
* set to their final value will be returned as `null`. You can override
* this by passing an options object.
*
* @override
* @param options - An options object to configure how data is retrieved from
* the snapshot (for example the desired behavior for server timestamps that
* have not yet been set to their final value).
* @returns An `Object` containing all fields in the document.
*/
data(t = {}) {
return super.data(t);
}
}
/**
* A `QuerySnapshot` contains zero or more `DocumentSnapshot` objects
* representing the results of a query. The documents can be accessed as an
* array via the `docs` property or enumerated using the `forEach` method. The
* number of documents can be determined via the `empty` and `size`
* properties.
*/ class kl {
/** @hideconstructor */
constructor(t, e, n, s) {
this._firestore = t, this._userDataWriter = e, this._snapshot = s, this.metadata = new Cl(s.hasPendingWrites, s.fromCache),
this.query = n;
}
/** An array of all the documents in the `QuerySnapshot`. */ get docs() {
const t = [];
return this.forEach((e => t.push(e))), t;
}
/** The number of documents in the `QuerySnapshot`. */ get size() {
return this._snapshot.docs.size;
}
/** True if there are no documents in the `QuerySnapshot`. */ get empty() {
return 0 === this.size;
}
/**
* Enumerates all of the documents in the `QuerySnapshot`.
*
* @param callback - A callback to be called with a `QueryDocumentSnapshot` for
* each document in the snapshot.
* @param thisArg - The `this` binding for the callback.
*/ forEach(t, e) {
this._snapshot.docs.forEach((n => {
t.call(e, new Nl(this._firestore, this._userDataWriter, n.key, n, new Cl(this._snapshot.mutatedKeys.has(n.key), this._snapshot.fromCache), this.query.converter));
}));
}
/**
 * Returns an array of the document changes since the last snapshot. If this
* is the first snapshot, all documents will be in the list as 'added'
* changes.
*
* @param options - `SnapshotListenOptions` that control whether metadata-only
* changes (i.e. only `DocumentSnapshot.metadata` changed) should trigger
* snapshot events.
*/ docChanges(t = {}) {
const e = !!t.includeMetadataChanges;
if (e && this._snapshot.excludesMetadataChanges) throw new q(L.INVALID_ARGUMENT, "To include metadata changes with your document changes, you must also pass { includeMetadataChanges:true } to onSnapshot().");
return this._cachedChanges && this._cachedChangesIncludeMetadataChanges === e || (this._cachedChanges =
/** Calculates the array of `DocumentChange`s for a given `ViewSnapshot`. */
function(t, e) {
if (t._snapshot.oldDocs.isEmpty()) {
let e = 0;
return t._snapshot.docChanges.map((n => {
const s = new Nl(t._firestore, t._userDataWriter, n.doc.key, n.doc, new Cl(t._snapshot.mutatedKeys.has(n.doc.key), t._snapshot.fromCache), t.query.converter);
return n.doc, {
type: "added",
doc: s,
oldIndex: -1,
newIndex: e++
};
}));
}
{
// A `DocumentSet` that is updated incrementally as changes are applied to use
// to lookup the index of a document.
let n = t._snapshot.oldDocs;
return t._snapshot.docChanges.filter((t => e || 3 /* ChangeType.Metadata */ !== t.type)).map((e => {
const s = new Nl(t._firestore, t._userDataWriter, e.doc.key, e.doc, new Cl(t._snapshot.mutatedKeys.has(e.doc.key), t._snapshot.fromCache), t.query.converter);
let i = -1, r = -1;
return 0 /* ChangeType.Added */ !== e.type && (i = n.indexOf(e.doc.key), n = n.delete(e.doc.key)),
1 /* ChangeType.Removed */ !== e.type && (n = n.add(e.doc), r = n.indexOf(e.doc.key)),
{
type: Ol(e.type),
doc: s,
oldIndex: i,
newIndex: r
};
}));
}
}(this, e), this._cachedChangesIncludeMetadataChanges = e), this._cachedChanges;
}
}
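/**
 * A minimal sketch of consuming `docChanges()` inside a snapshot listener,
 * assuming the hypothetical `db` and 'cities' collection:
 *
 * @example
 * onSnapshot(collection(db, 'cities'), (snapshot) => {
 *   snapshot.docChanges().forEach((change) => {
 *     if (change.type === 'added') console.log('new city:', change.doc.data());
 *     if (change.type === 'modified') console.log('modified city:', change.doc.data());
 *     if (change.type === 'removed') console.log('removed city:', change.doc.id);
 *   });
 * });
 */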
function Ol(t) {
switch (t) {
case 0 /* ChangeType.Added */ :
return "added";
case 2 /* ChangeType.Modified */ :
case 3 /* ChangeType.Metadata */ :
return "modified";
case 1 /* ChangeType.Removed */ :
return "removed";
default:
return M();
}
}
// TODO(firestoreexp): Add tests for snapshotEqual with different snapshot
// metadata
/**
* Returns true if the provided snapshots are equal.
*
* @param left - A snapshot to compare.
* @param right - A snapshot to compare.
* @returns true if the snapshots are equal.
*/ function Ml(t, e) {
return t instanceof xl && e instanceof xl ? t._firestore === e._firestore && t._key.isEqual(e._key) && (null === t._document ? null === e._document : t._document.isEqual(e._document)) && t._converter === e._converter : t instanceof kl && e instanceof kl && (t._firestore === e._firestore && va(t.query, e.query) && t.metadata.isEqual(e.metadata) && t._snapshot.isEqual(e._snapshot));
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Reads the document referred to by this `DocumentReference`.
*
* Note: `getDoc()` attempts to provide up-to-date data when possible by waiting
* for data from the server, but it may return cached data or fail if you are
* offline and the server cannot be reached. To specify this behavior, invoke
* {@link getDocFromCache} or {@link getDocFromServer}.
*
* @param reference - The reference of the document to fetch.
* @returns A Promise resolved with a `DocumentSnapshot` containing the
* current document contents.
*/ function Fl(t) {
t = _a(t, Ia);
const e = _a(t.firestore, oh);
return Ja(ah(e), t._key).then((n => Yl(e, t, n)));
}
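/**
 * A minimal `getDoc` sketch, assuming an initialized `db` and a hypothetical
 * document 'cities/SF':
 *
 * @example
 * const snap = await getDoc(doc(db, 'cities', 'SF'));
 * if (snap.exists()) {
 *   console.log('document data:', snap.data());
 * } else {
 *   console.log('no such document');
 * }
 */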
class $l extends Vl {
constructor(t) {
super(), this.firestore = t;
}
convertBytes(t) {
return new Eh(t);
}
convertReference(t) {
const e = this.convertDocumentKey(t, this.firestore._databaseId);
return new Ia(this.firestore, /* converter= */ null, e);
}
}
/**
* Reads the document referred to by this `DocumentReference` from cache.
* Returns an error if the document is not currently cached.
*
* @returns A `Promise` resolved with a `DocumentSnapshot` containing the
* current document contents.
*/ function Bl(t) {
t = _a(t, Ia);
const e = _a(t.firestore, oh), n = ah(e), s = new $l(e);
return Ha(n, t._key).then((n => new xl(e, s, t._key, n, new Cl(null !== n && n.hasLocalMutations,
/* fromCache= */ !0), t.converter)));
}
/**
* Reads the document referred to by this `DocumentReference` from the server.
* Returns an error if the network is not available.
*
* @returns A `Promise` resolved with a `DocumentSnapshot` containing the
* current document contents.
*/ function Ll(t) {
t = _a(t, Ia);
const e = _a(t.firestore, oh);
return Ja(ah(e), t._key, {
source: "server"
}).then((n => Yl(e, t, n)));
}
/**
* Executes the query and returns the results as a `QuerySnapshot`.
*
* Note: `getDocs()` attempts to provide up-to-date data when possible by
* waiting for data from the server, but it may return cached data or fail if
* you are offline and the server cannot be reached. To specify this behavior,
* invoke {@link getDocsFromCache} or {@link getDocsFromServer}.
*
* @returns A `Promise` that will be resolved with the results of the query.
*/ function ql(t) {
t = _a(t, Ta);
const e = _a(t.firestore, oh), n = ah(e), s = new $l(e);
return nl(t._query), Xa(n, t._query).then((n => new kl(e, s, t, n)));
}
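/**
 * A minimal `getDocs` sketch, assuming the hypothetical 'cities' collection
 * with a boolean 'capital' field:
 *
 * @example
 * const snapshot = await getDocs(
 *   query(collection(db, 'cities'), where('capital', '==', true)));
 * snapshot.forEach((docSnap) => console.log(docSnap.id, '=>', docSnap.data()));
 */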
/**
* Executes the query and returns the results as a `QuerySnapshot` from cache.
* Returns an empty result set if no documents matching the query are currently
* cached.
*
* @returns A `Promise` that will be resolved with the results of the query.
*/ function Ul(t) {
t = _a(t, Ta);
const e = _a(t.firestore, oh), n = ah(e), s = new $l(e);
return Ya(n, t._query).then((n => new kl(e, s, t, n)));
}
/**
* Executes the query and returns the results as a `QuerySnapshot` from the
* server. Returns an error if the network is not available.
*
* @returns A `Promise` that will be resolved with the results of the query.
*/ function Kl(t) {
t = _a(t, Ta);
const e = _a(t.firestore, oh), n = ah(e), s = new $l(e);
return Xa(n, t._query, {
source: "server"
}).then((n => new kl(e, s, t, n)));
}
/**
 * Writes to the document referred to by the specified `DocumentReference`. If
 * the document does not yet exist, it will be created. If you provide `merge`
 * or `mergeFields` in the options, the provided data is merged into the
 * existing document.
 */ function Gl(t, e, n) {
t = _a(t, Ia);
const s = _a(t.firestore, oh), i = Sl(t.converter, e, n);
return Jl(s, [ kh(Nh(s), "setDoc", t._key, i, null !== t.converter, n).toMutation(t._key, Wn.none()) ]);
}
/**
 * Updates fields in the document referred to by the specified
 * `DocumentReference`. The update will fail if applied to a document that
 * does not exist.
 */ function Ql(t, e, n, ...s) {
t = _a(t, Ia);
const i = _a(t.firestore, oh), r = Nh(i);
let o;
o = "string" == typeof (
// For Compat types, we have to "extract" the underlying types before
// performing validation.
e = _(e)) || e instanceof Ah ? Uh(r, "updateDoc", t._key, e, n, s) : qh(r, "updateDoc", t._key, e);
return Jl(i, [ o.toMutation(t._key, Wn.exists(!0)) ]);
}
/**
* Deletes the document referred to by the specified `DocumentReference`.
*
* @param reference - A reference to the document to delete.
* @returns A Promise resolved once the document has been successfully
* deleted from the backend (note that it won't resolve while you're offline).
*/ function jl(t) {
return Jl(_a(t.firestore, oh), [ new os(t._key, Wn.none()) ]);
}
/**
* Add a new document to specified `CollectionReference` with the given data,
* assigning it a document ID automatically.
*
* @param reference - A reference to the collection to add this document to.
* @param data - An Object containing the data for the new document.
* @returns A `Promise` resolved with a `DocumentReference` pointing to the
* newly created document after it has been written to the backend (Note that it
* won't resolve while you're offline).
*/ function Wl(t, e) {
const n = _a(t.firestore, oh), s = ba(t), i = Sl(t.converter, e);
return Jl(n, [ kh(Nh(t.firestore), "addDoc", s._key, i, null !== t.converter, {}).toMutation(s._key, Wn.exists(!1)) ]).then((() => s));
}
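/**
 * A minimal `addDoc`/`deleteDoc` sketch, assuming the hypothetical 'cities'
 * collection:
 *
 * @example
 * const ref = await addDoc(collection(db, 'cities'), { name: 'Tokyo', country: 'Japan' });
 * console.log('created with auto-generated ID:', ref.id);
 * await deleteDoc(ref);
 */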
/**
 * Attaches a listener for `DocumentSnapshot` or `QuerySnapshot` events. The
 * callbacks can be passed individually (`onNext`, `onError`, `onCompletion`)
 * or as a single observer object, optionally preceded by a
 * `SnapshotListenOptions` object. Returns a function that detaches the
 * listener when called.
 */ function zl(t, ...e) {
var n, s, i;
t = _(t);
let r = {
includeMetadataChanges: !1
}, o = 0;
"object" != typeof e[o] || sh(e[o]) || (r = e[o], o++);
const u = {
includeMetadataChanges: r.includeMetadataChanges
};
if (sh(e[o])) {
const t = e[o];
e[o] = null === (n = t.next) || void 0 === n ? void 0 : n.bind(t), e[o + 1] = null === (s = t.error) || void 0 === s ? void 0 : s.bind(t),
e[o + 2] = null === (i = t.complete) || void 0 === i ? void 0 : i.bind(t);
}
let c, a, h;
if (t instanceof Ia) a = _a(t.firestore, oh), h = dn(t._key.path), c = {
next: n => {
e[o] && e[o](Yl(a, t, n));
},
error: e[o + 1],
complete: e[o + 2]
}; else {
const n = _a(t, Ta);
a = _a(n.firestore, oh), h = n._query;
const s = new $l(a);
c = {
next: t => {
e[o] && e[o](new kl(a, s, n, t));
},
error: e[o + 1],
complete: e[o + 2]
}, nl(t._query);
}
return function(t, e, n, s) {
const i = new Sa(s), r = new mc(e, i, n);
return t.asyncQueue.enqueueAndForget((async () => lc(await ja(t), r))), () => {
i.bc(), t.asyncQueue.enqueueAndForget((async () => fc(await ja(t), r)));
};
}(ah(a), h, u, c);
}
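/**
 * A minimal `onSnapshot` sketch showing the options overload and the returned
 * unsubscribe function, assuming a hypothetical document 'cities/SF':
 *
 * @example
 * const unsubscribe = onSnapshot(
 *   doc(db, 'cities', 'SF'),
 *   { includeMetadataChanges: true },
 *   (snap) => console.log(snap.metadata.fromCache ? 'cache' : 'server', snap.data()),
 *   (err) => console.error('listen failed:', err));
 * // Later, detach the listener:
 * unsubscribe();
 */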
/**
 * Attaches a listener for a snapshots-in-sync event, which fires when all
 * listeners affected by a given change have fired, even if a single
 * server-generated change affects multiple listeners. Returns a function that
 * detaches the listener when called.
 */ function Hl(t, e) {
return Za(ah(t = _a(t, oh)), sh(e) ? e : {
next: e
});
}
/**
* Locally writes `mutations` on the async queue.
* @internal
*/ function Jl(t, e) {
return function(t, e) {
const n = new U;
return t.asyncQueue.enqueueAndForget((async () => Dc(await Ga(t), e, n))), n.promise;
}(ah(t), e);
}
/**
* Converts a {@link ViewSnapshot} that contains the single document specified by `ref`
* to a {@link DocumentSnapshot}.
*/ function Yl(t, e, n) {
const s = n.docs.get(e._key), i = new $l(t);
return new xl(t, i, e._key, s, new Cl(n.hasPendingWrites, n.fromCache), e.converter);
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Compares two `AggregateQuerySnapshot` instances for equality.
*
* Two `AggregateQuerySnapshot` instances are considered "equal" if they have
* underlying queries that compare equal, and the same data.
*
* @param left - The first `AggregateQuerySnapshot` to compare.
* @param right - The second `AggregateQuerySnapshot` to compare.
*
* @returns `true` if the objects are "equal", as defined above, or `false`
* otherwise.
*/ function Xl(t, e) {
return va(t.query, e.query) && w(t.data(), e.data());
}
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Calculates the number of documents in the result set of the given query,
* without actually downloading the documents.
*
* Using this function to count the documents is efficient because only the
* final count, not the documents' data, is downloaded. This function can even
* count the documents if the result set would be prohibitively large to
* download entirely (e.g. thousands of documents).
*
* The result received from the server is presented, unaltered, without
* considering any local state. That is, documents in the local cache are not
* taken into consideration, neither are local modifications not yet
* synchronized with the server. Previously-downloaded results, if any, are not
* used: every request using this source necessarily involves a round trip to
* the server.
*
* @param query - The query whose result set size to calculate.
* @returns A Promise that will be resolved with the count; the count can be
* retrieved from `snapshot.data().count`, where `snapshot` is the
* `AggregateQuerySnapshot` to which the returned Promise resolves.
*/ function Zl(t) {
const e = _a(t.firestore, oh);
return function(t, e, n) {
const s = new U;
return t.asyncQueue.enqueueAndForget((async () => {
try {
if ($u(await Ka(t))) {
const i = await Qa(t), r = new Na(e, i, n).run();
s.resolve(r);
} else s.reject(new q(L.UNAVAILABLE, "Failed to get count result because the client is offline."));
} catch (t) {
s.reject(t);
}
})), s.promise;
}(ah(e), t, new $l(e));
}
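/**
 * A minimal count-aggregation sketch, assuming the hypothetical 'cities'
 * collection; only the final count travels over the wire, not the documents:
 *
 * @example
 * const countSnap = await getCountFromServer(collection(db, 'cities'));
 * console.log('city count:', countSnap.data().count);
 */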
/**
* @license
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ const tf = {
maxAttempts: 5
};
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A write batch, used to perform multiple writes as a single atomic unit.
*
* A `WriteBatch` object can be acquired by calling {@link writeBatch}. It
* provides methods for adding writes to the write batch. None of the writes
* will be committed (or visible locally) until {@link WriteBatch.commit} is
* called.
*/
class ef {
/** @hideconstructor */
constructor(t, e) {
this._firestore = t, this._commitHandler = e, this._mutations = [], this._committed = !1,
this._dataReader = Nh(t);
}
/**
     * Writes to the document referred to by the provided {@link DocumentReference}.
     * If the document does not exist yet, it will be created.
     *
     * @returns This `WriteBatch` instance. Used for chaining method calls.
     */ set(t, e, n) {
this._verifyNotCommitted();
const s = nf(t, this._firestore), i = Sl(s.converter, e, n), r = kh(this._dataReader, "WriteBatch.set", s._key, i, null !== s.converter, n);
return this._mutations.push(r.toMutation(s._key, Wn.none())), this;
}
/**
     * Updates fields in the document referred to by the provided
     * {@link DocumentReference}. The update will fail if applied to a document
     * that does not exist.
     *
     * @returns This `WriteBatch` instance. Used for chaining method calls.
     */ update(t, e, n, ...s) {
this._verifyNotCommitted();
const i = nf(t, this._firestore);
// For Compat types, we have to "extract" the underlying types before
// performing validation.
let r;
return r = "string" == typeof (e = _(e)) || e instanceof Ah ? Uh(this._dataReader, "WriteBatch.update", i._key, e, n, s) : qh(this._dataReader, "WriteBatch.update", i._key, e),
this._mutations.push(r.toMutation(i._key, Wn.exists(!0))), this;
}
/**
* Deletes the document referred to by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be deleted.
* @returns This `WriteBatch` instance. Used for chaining method calls.
*/ delete(t) {
this._verifyNotCommitted();
const e = nf(t, this._firestore);
return this._mutations = this._mutations.concat(new os(e._key, Wn.none())), this;
}
/**
* Commits all of the writes in this write batch as a single atomic unit.
*
* The result of these writes will only be reflected in document reads that
* occur after the returned promise resolves. If the client is offline, the
* write fails. If you would like to see local modifications or buffer writes
* until the client is online, use the full Firestore SDK.
*
* @returns A `Promise` resolved once all of the writes in the batch have been
* successfully written to the backend as an atomic unit (note that it won't
* resolve while you're offline).
*/ commit() {
return this._verifyNotCommitted(), this._committed = !0, this._mutations.length > 0 ? this._commitHandler(this._mutations) : Promise.resolve();
}
_verifyNotCommitted() {
if (this._committed) throw new q(L.FAILED_PRECONDITION, "A write batch can no longer be used after commit() has been called.");
}
}
/**
 * Validates that the given document reference belongs to the given `Firestore`
 * instance, unwrapping Compat references first.
 */ function nf(t, e) {
if ((t = _(t)).firestore !== e) throw new q(L.INVALID_ARGUMENT, "Provided document reference is from a different Firestore instance.");
return t;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// TODO(mrschmidt) Consider using `BaseTransaction` as the base class in the
// legacy SDK.
/**
* A reference to a transaction.
*
* The `Transaction` object passed to a transaction's `updateFunction` provides
* the methods to read and write data within the transaction context. See
* {@link runTransaction}.
*/
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A reference to a transaction.
*
* The `Transaction` object passed to a transaction's `updateFunction` provides
* the methods to read and write data within the transaction context. See
* {@link runTransaction}.
*/
class sf extends class {
/** @hideconstructor */
constructor(t, e) {
this._firestore = t, this._transaction = e, this._dataReader = Nh(t);
}
/**
* Reads the document referenced by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be read.
* @returns A `DocumentSnapshot` with the read data.
*/ get(t) {
const e = nf(t, this._firestore), n = new Dl(this._firestore);
return this._transaction.lookup([ e._key ]).then((t => {
if (!t || 1 !== t.length) return M();
const s = t[0];
if (s.isFoundDocument()) return new Zh(this._firestore, n, s.key, s, e.converter);
if (s.isNoDocument()) return new Zh(this._firestore, n, e._key, null, e.converter);
throw M();
}));
}
/**
     * Writes to the document referred to by the provided {@link DocumentReference}.
     * If the document does not exist yet, it will be created.
     *
     * @returns This `Transaction` instance. Used for chaining method calls.
     */ set(t, e, n) {
const s = nf(t, this._firestore), i = Sl(s.converter, e, n), r = kh(this._dataReader, "Transaction.set", s._key, i, null !== s.converter, n);
return this._transaction.set(s._key, r), this;
}
/**
     * Updates fields in the document referred to by the provided
     * {@link DocumentReference}. The update will fail if applied to a document
     * that does not exist.
     *
     * @returns This `Transaction` instance. Used for chaining method calls.
     */ update(t, e, n, ...s) {
const i = nf(t, this._firestore);
// For Compat types, we have to "extract" the underlying types before
// performing validation.
let r;
return r = "string" == typeof (e = _(e)) || e instanceof Ah ? Uh(this._dataReader, "Transaction.update", i._key, e, n, s) : qh(this._dataReader, "Transaction.update", i._key, e),
this._transaction.update(i._key, r), this;
}
/**
* Deletes the document referred to by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be deleted.
* @returns This `Transaction` instance. Used for chaining method calls.
*/ delete(t) {
const e = nf(t, this._firestore);
return this._transaction.delete(e._key), this;
}
} {
// This class implements the same logic as the Transaction API in the Lite SDK
// but is subclassed in order to return its own DocumentSnapshot types.
/** @hideconstructor */
constructor(t, e) {
super(t, e), this._firestore = t;
}
/**
* Reads the document referenced by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be read.
* @returns A `DocumentSnapshot` with the read data.
*/ get(t) {
const e = nf(t, this._firestore), n = new $l(this._firestore);
return super.get(t).then((t => new xl(this._firestore, n, e._key, t._document, new Cl(
/* hasPendingWrites= */ !1,
/* fromCache= */ !1), e.converter)));
}
}
/**
* Executes the given `updateFunction` and then attempts to commit the changes
* applied within the transaction. If any document read within the transaction
* has changed, Cloud Firestore retries the `updateFunction`. If it fails to
* commit after 5 attempts, the transaction fails.
*
* The maximum number of writes allowed in a single transaction is 500.
*
* @param firestore - A reference to the Firestore database to run this
* transaction against.
* @param updateFunction - The function to execute within the transaction
* context.
* @param options - An options object to configure maximum number of attempts to
* commit.
* @returns If the transaction completed successfully or was explicitly aborted
* (the `updateFunction` returned a failed promise), the promise returned by the
 * `updateFunction` is returned here. Otherwise, if the transaction failed, a
* rejected promise with the corresponding failure error is returned.
*/ function rf(t, e, n) {
t = _a(t, oh);
const s = Object.assign(Object.assign({}, tf), n);
!function(t) {
if (t.maxAttempts < 1) throw new q(L.INVALID_ARGUMENT, "Max attempts must be at least 1");
}(s);
return function(t, e, n) {
const s = new U;
return t.asyncQueue.enqueueAndForget((async () => {
const i = await Qa(t);
new Oa(t.asyncQueue, i, n, e, s).run();
})), s.promise;
}(ah(t), (n => e(new sf(t, n))), s);
}
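/**
 * A minimal `runTransaction` sketch, assuming a hypothetical document
 * 'cities/SF' that exists and has a numeric 'population' field; the update
 * function may be retried, so it must not carry side effects:
 *
 * @example
 * const newPopulation = await runTransaction(db, async (transaction) => {
 *   const snap = await transaction.get(doc(db, 'cities', 'SF'));
 *   const next = snap.data().population + 1;
 *   transaction.update(doc(db, 'cities', 'SF'), { population: next });
 *   return next;
 * });
 */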
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Returns a sentinel for use with {@link @firebase/firestore/lite#(updateDoc:1)} or
* {@link @firebase/firestore/lite#(setDoc:1)} with `{merge: true}` to mark a field for deletion.
*/ function of() {
return new Oh("deleteField");
}
/**
* Returns a sentinel used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link @firebase/firestore/lite#(updateDoc:1)} to
* include a server-generated timestamp in the written data.
*/ function uf() {
return new Fh("serverTimestamp");
}
/**
* Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
* @firebase/firestore/lite#(updateDoc:1)} that tells the server to union the given elements with any array
* value that already exists on the server. Each specified element that doesn't
* already exist in the array will be added to the end. If the field being
* modified is not already an array it will be overwritten with an array
* containing exactly the specified elements.
*
* @param elements - The elements to union into the array.
* @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
* `updateDoc()`.
*/ function cf(...t) {
// NOTE: We don't actually parse the data until it's used in set() or
// update() since we'd need the Firestore instance to do this.
return new $h("arrayUnion", t);
}
/**
 * Returns a special value that can be used with {@link (setDoc:1)} or
 * {@link (updateDoc:1)} that tells the server to remove the given elements from any
* array value that already exists on the server. All instances of each element
* specified will be removed from the array. If the field being modified is not
* already an array it will be overwritten with an empty array.
*
* @param elements - The elements to remove from the array.
* @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
 * `updateDoc()`.
*/ function af(...t) {
// NOTE: We don't actually parse the data until it's used in set() or
// update() since we'd need the Firestore instance to do this.
return new Bh("arrayRemove", t);
}
/**
* Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
* @firebase/firestore/lite#(updateDoc:1)} that tells the server to increment the field's current value by
* the given value.
*
* If either the operand or the current field value uses floating point
* precision, all arithmetic follows IEEE 754 semantics. If both values are
* integers, values outside of JavaScript's safe number range
* (`Number.MIN_SAFE_INTEGER` to `Number.MAX_SAFE_INTEGER`) are also subject to
* precision loss. Furthermore, once processed by the Firestore backend, all
* integer operations are capped between -2^63 and 2^63-1.
*
* If the current field value is not of type `number`, or if the field does not
* yet exist, the transformation sets the field to the given value.
*
* @param n - The value to increment by.
* @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
 * `updateDoc()`.
*/ function hf(t) {
return new Lh("increment", t);
}
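/**
 * A minimal sketch combining the field-value sentinels above in a single
 * `updateDoc` call, assuming a hypothetical document 'cities/SF':
 *
 * @example
 * await updateDoc(doc(db, 'cities', 'SF'), {
 *   population: increment(1),             // numeric transform
 *   regions: arrayUnion('north_coast'),   // add element if absent
 *   oldRegions: arrayRemove('east_bay'),  // remove all instances
 *   updatedAt: serverTimestamp(),         // backend-generated timestamp
 *   deprecatedField: deleteField()        // delete the field on update
 * });
 */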
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Creates a write batch, used for performing multiple writes as a single
* atomic operation. The maximum number of writes allowed in a single {@link WriteBatch}
* is 500.
*
* Unlike transactions, write batches are persisted offline and therefore are
* preferable when you don't need to condition your writes on read data.
*
* @returns A {@link WriteBatch} that can be used to atomically execute multiple
* writes.
*/ function lf(t) {
return ah(t = _a(t, oh)), new ef(t, (e => Jl(t, e)));
}
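/**
 * A minimal `writeBatch` sketch, assuming hypothetical documents under the
 * 'cities' collection; all writes in the batch commit atomically or not at
 * all:
 *
 * @example
 * const batch = writeBatch(db);
 * batch.set(doc(db, 'cities', 'NYC'), { name: 'New York City' });
 * batch.update(doc(db, 'cities', 'SF'), { population: 1000000 });
 * batch.delete(doc(db, 'cities', 'LA'));
 * await batch.commit();
 */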
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */
/**
 * Configures indexing for local query execution. Any previous index
 * configuration is overridden; the returned `Promise` resolves once the index
 * configuration has been persisted. Indexes are only used with IndexedDb
 * persistence; when persistence is disabled this is a no-op that logs a
 * warning.
 */ function ff(t, e) {
var n;
const s = ah(t = _a(t, oh));
// PORTING NOTE: We don't return an error if the user has not enabled
// persistence since `enableIndexeddbPersistence()` can fail on the Web.
if (!(null === (n = s.offlineComponents) || void 0 === n ? void 0 : n.indexBackfillerScheduler)) return k("Cannot enable indexes when persistence is disabled"),
Promise.resolve();
const i = function(t) {
const e = "string" == typeof t ? function(t) {
try {
return JSON.parse(t);
} catch (t) {
throw new q(L.INVALID_ARGUMENT, "Failed to parse JSON: " + (null == t ? void 0 : t.message));
}
}(t) : t, n = [];
if (Array.isArray(e.indexes)) for (const t of e.indexes) {
const e = df(t, "collectionGroup"), s = [];
if (Array.isArray(t.fields)) for (const e of t.fields) {
const t = Jh("setIndexConfiguration", df(e, "fieldPath"));
"CONTAINS" === e.arrayConfig ? s.push(new _t(t, 2 /* IndexKind.CONTAINS */)) : "ASCENDING" === e.order ? s.push(new _t(t, 0 /* IndexKind.ASCENDING */)) : "DESCENDING" === e.order && s.push(new _t(t, 1 /* IndexKind.DESCENDING */));
}
n.push(new ht(ht.UNKNOWN_ID, e, s, mt.empty()));
}
return n;
}(e);
return Ua(s).then((t => async function(t, e) {
const n = B(t), s = n.indexManager, i = [];
return n.persistence.runTransaction("Configure indexes", "readwrite", (t => s.getFieldIndexes(t).next((n => function(t, e, n, s, i) {
t = [ ...t ], e = [ ...e ], t.sort(n), e.sort(n);
const r = t.length, o = e.length;
let u = 0, c = 0;
for (;u < o && c < r; ) {
const r = n(t[c], e[u]);
r < 0 ?
// The element was removed if the next element in our ordered
// walkthrough is only in `before`.
i(t[c++]) : r > 0 ?
// The element was added if the next element in our ordered walkthrough
// is only in `after`.
s(e[u++]) : (u++, c++);
}
for (;u < o; ) s(e[u++]);
for (;c < r; ) i(t[c++]);
}(n, e, dt, (e => {
i.push(s.addFieldIndex(t, e));
}), (e => {
i.push(s.deleteFieldIndex(t, e));
})))).next((() => Rt.waitFor(i)))));
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The format of the LocalStorage key that stores the client state is:
// firestore_clients__
(t, i)));
}
function df(t, e) {
if ("string" != typeof t[e]) throw new q(L.INVALID_ARGUMENT, "Missing string value for: " + e);
return t[e];
}
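/**
 * A minimal `setIndexConfiguration` sketch showing the JSON shape parsed
 * above ('indexes' entries with a 'collectionGroup' string and 'fields'
 * entries carrying a 'fieldPath' plus either 'order' or 'arrayConfig'); the
 * collection and field names are hypothetical:
 *
 * @example
 * await setIndexConfiguration(db, {
 *   indexes: [{
 *     collectionGroup: 'cities',
 *     fields: [
 *       { fieldPath: 'country', order: 'ASCENDING' },
 *       { fieldPath: 'regions', arrayConfig: 'CONTAINS' }
 *     ]
 *   }]
 * });
 */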
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ !function(t, e = !0) {
!function(t) {
V = t;
}(i), n(new r("firestore", ((t, {instanceIdentifier: n, options: s}) => {
const i = t.getProvider("app").getImmediate(), r = new oh(new j(t.getProvider("auth-internal")), new J(t.getProvider("app-check-internal")), function(t, e) {
if (!Object.prototype.hasOwnProperty.apply(t.options, [ "projectId" ])) throw new q(L.INVALID_ARGUMENT, '"projectId" not provided in firebase.initializeApp.');
return new $t(t.options.projectId, e);
}(i, n), i);
return s = Object.assign({
useFetchStreams: e
}, s), r._setSettings(s), r;
}), "PUBLIC").setMultipleInstances(!0)), s(P, "3.8.1", t),
// BUILD_TARGET will be replaced by values like esm5, esm2017, cjs5, etc. during compilation
s(P, "3.8.1", "esm2017");
}("rn", /* useFetchStreams= */ !1);
export { Vl as AbstractUserDataWriter, Ca as AggregateField, xa as AggregateQuerySnapshot, Eh as Bytes, rh as CACHE_SIZE_UNLIMITED, Ea as CollectionReference, Ia as DocumentReference, xl as DocumentSnapshot, Ah as FieldPath, bh as FieldValue, oh as Firestore, q as FirestoreError, Ph as GeoPoint, ih as LoadBundleTask, Ta as Query, cl as QueryCompositeFilterConstraint, il as QueryConstraint, Nl as QueryDocumentSnapshot, pl as QueryEndAtConstraint, ol as QueryFieldFilterConstraint, dl as QueryLimitConstraint, ll as QueryOrderByConstraint, kl as QuerySnapshot, ml as QueryStartAtConstraint, Cl as SnapshotMetadata, st as Timestamp, sf as Transaction, ef as WriteBatch, $t as _DatabaseId, at as _DocumentKey, Y as _EmptyAppCheckTokenProvider, G as _EmptyAuthCredentialsProvider, ct as _FieldPath, _a as _cast, $ as _debugAssert, jt as _isBase64Available, k as _logWarn, ha as _validateIsNotUsedTogether, Wl as addDoc, Xl as aggregateQuerySnapshotEqual, hl as and, af as arrayRemove, cf as arrayUnion, _h as clearIndexedDbPersistence, Aa as collection, Ra as collectionGroup, pa as connectFirestoreEmulator, jl as deleteDoc, of as deleteField, gh as disableNetwork, ba as doc, Rh as documentId, lh as enableIndexedDbPersistence, fh as enableMultiTabIndexedDbPersistence, mh as enableNetwork, Tl as endAt, Il as endBefore, ah as ensureFirestoreConfigured, Jl as executeWrite, Zl as getCountFromServer, Fl as getDoc, Bl as getDocFromCache, Ll as getDocFromServer, ql as getDocs, Ul as getDocsFromCache, Kl as getDocsFromServer, ch as getFirestore, hf as increment, uh as initializeFirestore, _l as limit, wl as limitToLast, ph as loadBundle, Ih as namedQuery, zl as onSnapshot, Hl as onSnapshotsInSync, al as or, fl as orderBy, rl as query, va as queryEqual, Pa as refEqual, rf as runTransaction, uf as serverTimestamp, Gl as setDoc, ff as setIndexConfiguration, C as setLogLevel, Ml as snapshotEqual, yl as startAfter, gl as startAt, yh as terminate, Ql as updateDoc, wh as waitForPendingWrites, ul as where, lf as writeBatch };
//# sourceMappingURL=index.rn.js.map