// unordered.js
'use strict';

const common = require('./common');
const BulkOperationBase = common.BulkOperationBase;
const utils = require('../utils');
const toError = utils.toError;
const handleCallback = utils.handleCallback;
const BulkWriteResult = common.BulkWriteResult;
const Batch = common.Batch;
const mergeBatchResults = common.mergeBatchResults;
const executeOperation = utils.executeOperation;
const MongoWriteConcernError = require('mongodb-core').MongoWriteConcernError;
const handleMongoWriteConcernError = require('./common').handleMongoWriteConcernError;
const bson = common.bson;
/**
 * Add to the internal list of operations
 *
 * @param {UnorderedBulkOperation} bulkOperation
 * @param {number} docType number indicating the document type
 * @param {object} document
 * @return {UnorderedBulkOperation}
 */
function addToOperationsList(bulkOperation, docType, document) {
  // Get the bsonSize
  const bsonSize = bson.calculateObjectSize(document, {
    checkKeys: false
  });

  // Throw error if the doc is bigger than the max BSON size
  if (bsonSize >= bulkOperation.s.maxBatchSizeBytes)
    throw toError('document is larger than the maximum size ' + bulkOperation.s.maxBatchSizeBytes);

  // Holds the current batch
  bulkOperation.s.currentBatch = null;
  // Get the right type of batch
  if (docType === common.INSERT) {
    bulkOperation.s.currentBatch = bulkOperation.s.currentInsertBatch;
  } else if (docType === common.UPDATE) {
    bulkOperation.s.currentBatch = bulkOperation.s.currentUpdateBatch;
  } else if (docType === common.REMOVE) {
    bulkOperation.s.currentBatch = bulkOperation.s.currentRemoveBatch;
  }

  // Create a new batch object if we don't have a current one
  if (bulkOperation.s.currentBatch == null)
    bulkOperation.s.currentBatch = new Batch(docType, bulkOperation.s.currentIndex);

  // Check if we need to create a new batch: the current batch is full, adding
  // this document would exceed the byte limit, or the operation type changed
  if (
    bulkOperation.s.currentBatch.size + 1 >= bulkOperation.s.maxWriteBatchSize ||
    bulkOperation.s.currentBatch.sizeBytes + bsonSize >= bulkOperation.s.maxBatchSizeBytes ||
    bulkOperation.s.currentBatch.batchType !== docType
  ) {
    // Save the batch to the execution stack
    bulkOperation.s.batches.push(bulkOperation.s.currentBatch);

    // Create a new batch
    bulkOperation.s.currentBatch = new Batch(docType, bulkOperation.s.currentIndex);
  }

  // A single operation must be a document, not an array of documents
  if (Array.isArray(document)) {
    throw toError('operation passed in cannot be an Array');
  } else {
    bulkOperation.s.currentBatch.operations.push(document);
    bulkOperation.s.currentBatch.originalIndexes.push(bulkOperation.s.currentIndex);
    bulkOperation.s.currentIndex = bulkOperation.s.currentIndex + 1;
  }

  // Save back the current batch under the right type
  if (docType === common.INSERT) {
    bulkOperation.s.currentInsertBatch = bulkOperation.s.currentBatch;
    bulkOperation.s.bulkResult.insertedIds.push({
      index: bulkOperation.s.bulkResult.insertedIds.length,
      _id: document._id
    });
  } else if (docType === common.UPDATE) {
    bulkOperation.s.currentUpdateBatch = bulkOperation.s.currentBatch;
  } else if (docType === common.REMOVE) {
    bulkOperation.s.currentRemoveBatch = bulkOperation.s.currentBatch;
  }

  // Update current batch size
  bulkOperation.s.currentBatch.size = bulkOperation.s.currentBatch.size + 1;
  bulkOperation.s.currentBatch.sizeBytes = bulkOperation.s.currentBatch.sizeBytes + bsonSize;

  return bulkOperation;
}
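
// Illustrative sketch (not part of the driver): how addToOperationsList
// splits queued work into batches. Assuming a server-reported
// maxWriteBatchSize of 1000, queuing 2500 inserts yields three insert
// batches (1000, 1000, 500). Because each Batch holds a single batchType,
// the unordered implementation keeps one current batch per type, so
// interleaved operation types coalesce rather than forcing new batches:
//
//   const bulk = collection.initializeUnorderedBulkOp();
//   bulk.insert({ a: 1 });                          // starts the insert batch
//   bulk.find({ a: 1 }).update({ $set: { b: 1 } }); // starts the update batch
//   bulk.insert({ a: 2 });                          // appended to the existing insert batch
//
// The result is two batches, not three; the server may execute them in any order.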
/**
 * Create a new UnorderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @property {number} length Get the number of operations in the bulk.
 * @return {UnorderedBulkOperation} an UnorderedBulkOperation instance.
 */
class UnorderedBulkOperation extends BulkOperationBase {
  constructor(topology, collection, options) {
    options = options || {};
    options = Object.assign(options, { addToOperationsList });

    super(topology, collection, options, false);
  }

  /**
   * The callback format for results
   * @callback UnorderedBulkOperation~resultCallback
   * @param {MongoError} error An error instance representing the error during the execution.
   * @param {BulkWriteResult} result The bulk write result.
   */

  /**
   * Execute the unordered bulk operation
   *
   * @method
   * @param {object} [options] Optional settings.
   * @param {(number|string)} [options.w] The write concern.
   * @param {number} [options.wtimeout] The write concern timeout.
   * @param {boolean} [options.j=false] Specify a journal write concern.
   * @param {boolean} [options.fsync=false] Specify a file sync write concern.
   * @param {UnorderedBulkOperation~resultCallback} [callback] The result callback
   * @throws {MongoError}
   * @return {Promise} returns Promise if no callback passed
   */
  execute(_writeConcern, options, callback) {
    const ret = this.bulkExecute(_writeConcern, options, callback);
    options = ret.options;
    callback = ret.callback;

    return executeOperation(this.s.topology, executeBatches, [this, options, callback]);
  }
}
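
// A minimal sketch of calling execute() with an explicit write concern,
// assuming `bulk` is an UnorderedBulkOperation obtained from a collection.
// The first argument is the write concern; omit the callback to get a Promise:
//
//   bulk.execute({ w: 'majority', wtimeout: 5000 }, function(err, result) {
//     // `result` is a BulkWriteResult
//   });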
/**
 * Execute a single command (batch)
 *
 * @param {UnorderedBulkOperation} bulkOperation
 * @param {object} batch
 * @param {object} options
 * @param {function} callback
 */
function executeBatch(bulkOperation, batch, options, callback) {
  function resultHandler(err, result) {
    // If this is a driver-related error rather than a bulk op error, terminate
    if (((err && err.driver) || (err && err.message)) && !(err instanceof MongoWriteConcernError)) {
      return handleCallback(callback, err);
    }

    // If we have an error, mark the result as not ok
    if (err) err.ok = 0;
    if (err instanceof MongoWriteConcernError) {
      return handleMongoWriteConcernError(batch, bulkOperation.s.bulkResult, false, err, callback);
    }

    handleCallback(
      callback,
      null,
      mergeBatchResults(false, batch, bulkOperation.s.bulkResult, err, result)
    );
  }

  bulkOperation.finalOptionsHandler({ options, batch, resultHandler }, callback);
}
/**
 * Execute all the commands
 *
 * @param {UnorderedBulkOperation} bulkOperation
 * @param {object} options
 * @param {function} callback
 */
function executeBatches(bulkOperation, options, callback) {
  let numberOfCommandsToExecute = bulkOperation.s.batches.length;

  // Execute over all the batches
  for (let i = 0; i < bulkOperation.s.batches.length; i++) {
    executeBatch(bulkOperation, bulkOperation.s.batches[i], options, function(err) {
      // Count down the number of commands left to execute
      numberOfCommandsToExecute = numberOfCommandsToExecute - 1;

      // All batches have reported back; surface the merged result
      if (numberOfCommandsToExecute === 0) {
        // Driver level error
        if (err) return handleCallback(callback, err);

        const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult);
        if (bulkOperation.handleWriteError(callback, writeResult)) return;

        return handleCallback(callback, null, writeResult);
      }
    });
  }
}
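
// Because the loop above issues every batch before waiting on any result,
// the batches are in flight concurrently and may complete in any order;
// the countdown ensures the caller's callback fires exactly once, after
// the final batch reports back.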
/**
 * Returns an unordered bulk operation instance
 * @ignore
 */
function initializeUnorderedBulkOp(topology, collection, options) {
  return new UnorderedBulkOperation(topology, collection, options);
}

initializeUnorderedBulkOp.UnorderedBulkOperation = UnorderedBulkOperation;
module.exports = initializeUnorderedBulkOp;
module.exports.Bulk = UnorderedBulkOperation;
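
// A minimal end-to-end usage sketch, assuming a mongod on localhost and a
// `test.items` collection (names here are illustrative, not part of this module):
//
//   const MongoClient = require('mongodb').MongoClient;
//
//   MongoClient.connect('mongodb://localhost:27017', function(err, client) {
//     const col = client.db('test').collection('items');
//     const bulk = col.initializeUnorderedBulkOp();
//
//     bulk.insert({ a: 1 });
//     bulk.find({ a: 1 }).update({ $set: { b: 2 } });
//     bulk.find({ a: 2 }).remove();
//
//     bulk.execute(function(err, result) {
//       // With an unordered bulk, per-operation errors are collected and
//       // reported together instead of stopping at the first failure.
//       client.close();
//     });
//   });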