I'm using the C++ driver to do a bulk `replace_one` operation with "_id" as the only filter, and after the program has been running for a while I always get an E11000 (duplicate key) error.
Since I'm generating the IDs myself, I thought my keys were simply colliding. But 1) they aren't — I triple-checked by logging all operations and analyzing the log with a purpose-written tool; and 2) even if they were colliding, `replace_one` with a filter on "_id" should simply replace the existing document, right?
So how is this error even possible? I'm at a loss.
bool CMongoDbInterface::registerTokens(const std::vector<std::string>& tokens)
{
if (tokens.empty())
return true;
try {
auto client = get_client();
auto db = client->database("web");
auto tokenCollection = db.collection("tokens");
mongocxx::bulk_write bulk{};
for (auto item = tokens.cbegin(), end = tokens.cend(); item != end; ++item)
{
const auto nextItem = std::next(item); // <- The source token list is already sorted, so duplicate items shall not pass
if (nextItem != end && *item == *nextItem)
continue;
const auto tokenId = hash128bit(*item);
logHash(item->text);// <- I'm watching all the hashes for collisions, there weren't any!
basic::document basic_builder{};
basic_builder.append(kvp("_id", binaryFromArray(tokenId))); // binaryFromArray constructs bsoncxx::types::b_binary from std::array
basic_builder.append(kvp("token", bsonstring(item->text)));
mongocxx::model::replace_one upsert_op{ stream::document{} << "_id" << binaryFromArray(tokenId) << stream::finalize, basic_builder.view() };
upsert_op.upsert(true);
bulk.append(upsert_op);
}
const auto result = tokenCollection.bulk_write(bulk);
return (bool)result; //&& (result->upserted_count() + result->matched_count() == uniqueItems);
}
catch (mongocxx::bulk_write_exception& e) {
assert_unconditional_r(std::string("MongoDB bulk_write exception: ") + e.what());
return false;
}
}