Unless otherwise noted, everything here applies to MongoDB 3.4, with notes where 3.6 differs.
// This prints whole documents (with braces):
db.p.find({}, {city: 1, _id: 0}).sort({city: 1})

// To print just the field value (no braces), iterate the cursor:
var Y = db.p.find({}, {city: 1, _id: 0}).sort({city: 1});
while (Y.hasNext()) { print(Y.next().city); }

// Or another way:
var Z = db.p.find({}, {city: 1, _id: 0}).sort({city: 1});
Z.forEach(function (obj) { print(obj.city); });
select l.first_label, l.second_label, c.color, sum(h.hits)
from label l
join label_color_hit h on (l.label_id = h.label_id)
join color c on (c.color_id = h.color_id)
group by l.first_label, l.second_label, c.color
order by l.first_label, l.second_label, c.color;
+-------------+--------------+-------+-------------+
| first_label | second_label | color | sum(h.hits) |
+-------------+--------------+-------+-------------+
| a           | a            | red   |          11 |
| a           | b            | green |          22 |
| a           | c            | blue  |          33 |
| b           | b            | red   |          44 |
| b           | c            | green |          55 |
+-------------+--------------+-------+-------------+
5 rows in set (0.01 sec)
MongoSource: '[color label_color_hit label]' (db: 'SAMPLE', collection: '[color label_color_hit label]') as '[c h l]':
{"$match":{"color_id":{"$ne":null}}},
{"$lookup":{"as":"__joined_h","foreignField":"color_id","from":"label_color_hit","localField":"color_id"}},
{"$unwind":{"path":"$__joined_h","preserveNullAndEmptyArrays":false}},
{"$match":{"__joined_h.label_id":{"$ne":null}}},
{"$lookup":{"as":"__joined_l","foreignField":"label_id","from":"label","localField":"__joined_h.label_id"}},
{"$unwind":{"path":"$__joined_l","preserveNullAndEmptyArrays":false}},
{"$group":{"_id":{"l_DOT_first_label":"$__joined_l.first_label","l_DOT_second_label":"$__joined_l.second_label","c_DOT_color":"$color"},"sum(h_DOT_hits)":{"$sum":"$__joined_h.hits"},"sum(h_DOT_hits)_count":{"$sum":{"$cond":[{"$eq":[{"$ifNull":["$__joined_h.hits",null]},null]},0,1]}}}},
{"$project":{"_id":0,"c_DOT_color":"$_id.c_DOT_color","l_DOT_first_label":"$_id.l_DOT_first_label","l_DOT_second_label":"$_id.l_DOT_second_label","sum(h_DOT_hits)":{"$cond":[{"$or":[{"$eq":[{"$ifNull":["$sum(h_DOT_hits)_count",null]},null]},{"$eq":["$sum(h_DOT_hits)_count",0]},{"$eq":["$sum(h_DOT_hits)_count",false]}]},{"$literal":null},"$sum(h_DOT_hits)"]}}},
{"$sort":{"l_DOT_first_label":1,"l_DOT_second_label":1,"c_DOT_color":1}},
{"$project":{"c_DOT_color":"$c_DOT_color","l_DOT_first_label":"$l_DOT_first_label","l_DOT_second_label":"$l_DOT_second_label","sum(h_DOT_hits)":"$sum(h_DOT_hits)"}}
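For comparison, a hand-written (and simplified) version of the same join and group can be run directly in the mongo shell against the SAMPLE database. Collection and field names (color, label_color_hit, label, color_id, label_id, hits) come from the generated pipeline above; treat this as a sketch, not an exact equivalent of the BI Connector plan:

// use SAMPLE
db.color.aggregate([
    // join color -> label_color_hit on color_id
    { $lookup: { from: "label_color_hit", localField: "color_id",
                 foreignField: "color_id", as: "h" } },
    { $unwind: "$h" },
    // join the hits to label on label_id
    { $lookup: { from: "label", localField: "h.label_id",
                 foreignField: "label_id", as: "l" } },
    { $unwind: "$l" },
    // mirror GROUP BY first_label, second_label, color and SUM(hits)
    { $group: { _id: { first_label: "$l.first_label",
                       second_label: "$l.second_label",
                       color: "$color" },
                hits: { $sum: "$h.hits" } } },
    { $sort: { "_id.first_label": 1, "_id.second_label": 1, "_id.color": 1 } }
])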
MySQL [SAMPLE_embed]> show tables;
+------------------------+
| Tables_in_SAMPLE_embed |
+------------------------+
| label                  |
| label_hit_list         |
+------------------------+
2 rows in set (0.00 sec)
select l.first_label, l.second_label,
       lhl.`hit_list.color` as color,
       sum(lhl.`hit_list.hits`) as count
from label l
join label_hit_list lhl on (l._id = lhl._id)
group by l.first_label, l.second_label, lhl.`hit_list.color`
order by l.first_label, l.second_label, lhl.`hit_list.color`;
+-------------+--------------+-------+-------+
| first_label | second_label | color | count |
+-------------+--------------+-------+-------+
| a           | a            | red   |    11 |
| a           | b            | green |    22 |
| a           | c            | blue  |    33 |
| b           | b            | red   |    44 |
| b           | c            | green |    55 |
+-------------+--------------+-------+-------+
MongoSource: '[label_hit_list label]' (db: 'SAMPLE_embed', collection: '[label label]') as '[lhl l]':
{"$unwind":{"includeArrayIndex":"hit_list_idx","path":"$hit_list"}},
{"$group":{"_id":{"l_DOT_first_label":"$first_label","l_DOT_second_label":"$second_label","lhl_DOT_hit_list_DOT_color":"$hit_list.color"},"sum(lhl_DOT_hit_list_DOT_hits)":{"$sum":"$hit_list.hits"},"sum(lhl_DOT_hit_list_DOT_hits)_count":{"$sum":{"$cond":[{"$eq":[{"$ifNull":["$hit_list.hits",null]},null]},0,1]}}}},
{"$project":{"_id":0,"l_DOT_first_label":"$_id.l_DOT_first_label","l_DOT_second_label":"$_id.l_DOT_second_label","lhl_DOT_hit_list_DOT_color":"$_id.lhl_DOT_hit_list_DOT_color","sum(lhl_DOT_hit_list_DOT_hits)":{"$cond":[{"$or":[{"$eq":[{"$ifNull":["$sum(lhl_DOT_hit_list_DOT_hits)_count",null]},null]},{"$eq":["$sum(lhl_DOT_hit_list_DOT_hits)_count",0]},{"$eq":["$sum(lhl_DOT_hit_list_DOT_hits)_count",false]}]},{"$literal":null},"$sum(lhl_DOT_hit_list_DOT_hits)"]}}},
{"$sort":{"l_DOT_first_label":1,"l_DOT_second_label":1,"lhl_DOT_hit_list_DOT_color":1}},
{"$project":{"l_DOT_first_label":"$l_DOT_first_label","l_DOT_second_label":"$l_DOT_second_label","lhl_DOT_hit_list_DOT_color":"$lhl_DOT_hit_list_DOT_color","sum(lhl_DOT_hit_list_DOT_hits)":"$sum(lhl_DOT_hit_list_DOT_hits)"}}
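The embedded model needs no $lookup at all. A rough shell equivalent, again just a sketch based on the hit_list array shown in the pipeline above, unwinds the embedded array and groups:

// use SAMPLE_embed
db.label.aggregate([
    // explode the embedded hit_list array into one document per element
    { $unwind: "$hit_list" },
    // group by the label fields plus the embedded color, summing the embedded hits
    { $group: { _id: { first_label: "$first_label",
                       second_label: "$second_label",
                       color: "$hit_list.color" },
                count: { $sum: "$hit_list.hits" } } },
    { $sort: { "_id.first_label": 1, "_id.second_label": 1, "_id.color": 1 } }
])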
security:
    redactClientLogData: true
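Log redaction is an Enterprise feature in 3.4; the same setting can also be flipped on a running mongod via setParameter. A quick sketch, assuming you have the required privileges:

// enable log redaction at runtime
db.adminCommand({ setParameter: 1, redactClientLogData: true })

// and turn it back off
db.adminCommand({ setParameter: 1, redactClientLogData: false })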
// Convert a price stored in p1 into a NumberDecimal field pd, one document at a time:
db.p.find({ p1: { $exists: true } }).forEach(function (d) {
    d.pd = NumberDecimal(d.p1 / 100);
    delete d.p1;
    db.p.save(d);

    // or just update it in place where the _id matches, and do a $set:
    // var pd = NumberDecimal(d.p1 / 100);
    // db.p.update({ _id: d._id }, { $set: { pd: pd } });

    // or, if p1 is a string with a decimal in it like "10.230":
    // d.pd = NumberDecimal(d.p1);
    // delete d.p1;
    // db.p.save(d);
});

// Another useful approach which saves resources by batching the writes.
// If p1 is a string, build the updates and send them in one bulkWrite():
var ups = [];
db.p.find({ p1: { "$type": "string" } }).forEach(function (d1) {
    ups.push({ "updateOne": { "filter": { _id: d1._id },
                              "update": { "$set": { "price": NumberDecimal(d1.p1) } } } });
});
db.p.bulkWrite(ups);

// If p1 is an int, a single updateMany() will do:
db.p.updateMany({ p1: { "$type": "int" } }, { "$mul": { p1: NumberDecimal(0.001) } });
# pymongo: skip the first 5 results, return at most 10, sorted descending on 'people'
cursor = stuff.find().skip(5).limit(10).sort([('people', pymongo.DESCENDING)])
> Object.keys(db.serverStatus())
[
    "host",
    "version",
    "process",
    "pid",
    "uptime",
    "uptimeMillis",
    "uptimeEstimate",
    "localTime",
    "asserts",          -- when errors happen, first place to look
    "connections",      -- good place to look
    "extra_info",
    "globalLock",       -- good place, order by length of time
    "locks",            -- good place, order by length of time; r,w = intent shared and exclusive, R,W = shared and exclusive locks
    "network",
    "opLatencies",      -- good to look at
    "opcounters",       -- activity, good to know what is going on
    "opcountersRepl",
    "storageEngine",
    "tcmalloc",
    "wiredTiger",       -- look at cache: "eviction calls to get a page", "maximum bytes configured", "bytes currently in the cache"
    "mem",
    "metrics",          -- cursor and document counts; opcounters counts one per operation, metrics.document counts all the documents touched by an operation
    "ok"
]
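Rather than dumping the whole document, pull just the interesting subsections from the shell. A small sketch (field names as of 3.4; they can shift between versions):

db.serverStatus().connections
db.serverStatus().globalLock
db.serverStatus().opcounters
db.serverStatus().opLatencies
// wiredTiger cache stats use long descriptive key names:
db.serverStatus().wiredTiger.cache["maximum bytes configured"]
db.serverStatus().wiredTiger.cache["bytes currently in the cache"]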
bsondump      -- convert BSON dump files to human-readable form
mongo
mongod
mongodecrypt
mongodump     -- binary dump
mongoexport   -- JSON files out
mongofiles    -- for GridFS
mongoimport   -- JSON files in
mongoldap *   -- lets you validate the connection to an LDAP server
mongooplog    -- lets you pull an oplog and apply it
mongoperf *   -- performance testing tool for disk I/O
mongoreplay * -- network diagnostic tool to monitor, record, replay and debug traffic; heavily used
mongorestore  -- restore a binary dump
mongos
mongostat *
mongotop *
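Typical invocations for the dump/transfer and monitoring tools; the database, collection, and paths below are just placeholders:

# binary dump and restore of one database
mongodump --db SAMPLE --out /backups/sample
mongorestore --db SAMPLE /backups/sample/SAMPLE

# JSON export and import of one collection
mongoexport --db SAMPLE --collection p --out p.json
mongoimport --db SAMPLE --collection p --file p.json

# live monitoring, refreshing every 5 seconds
mongostat 5
mongotop 5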
| Class | Status | Due date | Notes |
|-------|--------|----------|-------|
| M001: MongoDB Basics | DONE | | |
| M034: New Features and Tools in MongoDB 3.4 | DONE | Nov 07 | SAVE |
| M101P: MongoDB for Developers | | Dec 12th | REDO (time) |
| M102: MongoDB for DBAs | DONE | Dec 5th | NOTE each page. Waiting for confirmation. |
| M121: The MongoDB Aggregation Framework | Starting | Dec 19th | Make doc notes. |
| M123: Getting Started with MongoDB Atlas | Starting | Dec 12th | Everything due at end. |
| M201: MongoDB Performance | DONE | Nov 7th | SAVE. Review all. |
| M310: MongoDB Security | | Nov 7th | REDO (time) |
| M312: Diagnostics and Debugging | Week 3 | Dec 19th | Make doc notes. |
1. Ops Manager -- why? Who cares. It's cool, it's nice, it's great and you should learn Ops Manager, but a real DBA shouldn't need it and should be able to do things manually. Some would argue time; I am for ability first. Ops Manager is great, but easy as hell. No great ability.
2. Command to see if an upgrade can happen: db.upgradeCheckAllDBs() or db.upgradeCheck(). Who cares. If you follow the upgrade steps, you will do this anyway. In addition, this was only for 2.6.
3. Compass -- it's nice and I recommend using it. But a real DBA doesn't need it.
4. Atlas -- same thing.
5. Mongo isolation levels -- who cares. Mongo releases locks between connections, giving inconsistent results anyway. Two aggregate functions with write locks or shared locks run; what happens? https://docs.mongodb.com/v3.4/faq/concurrency/ https://docs.mongodb.com/v3.4/core/write-operations-atomicity/ Run a write test: 2 queries updating data, which should take 10 seconds. The 1st starts at the beginning, the 2nd starts 90% of the way through and then does the rest. Count the documents after.
6. How can you configure HA for 3 servers and 2 data centers? You can't, and an arbiter (still one of the 3) doesn't help. The person was thinking of https://docs.mongodb.com/manual/core/replica-set-architecture-geographically-distributed/ but you can't have HA with two data centers if one goes down.
7. What happens, specifically, between journaling, writing to the oplog, and a system crash? Journaling keeps the oplog consistent up to the crash. But this is why who cares: it's just a way for a junior DBA to show he/she is superior when they are not. Mongo is atomic only one document at a time; it is not atomic at the collection level with locks and such. Crashes are helped by journaling, which keeps the oplog consistent with the data. Read more about journaling; you never need to know the specifics in the real world.
8. Have you installed SSL, Kerberos, LDAP, etc.? Who cares, this is easy and most people don't use them. A lot don't even use passwords.
9. updateMany() -- why is this useless? It's an alias for update() with the multi option, new in 3.2 (see the sketch below).
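Re item 9, the equivalence is easy to see side by side; a minimal sketch where the collection db.p, the filter, and the values are just placeholders:

// old style: update with the multi flag so every matching document is changed
db.p.update({ city: "NYC" }, { $set: { flag: true } }, { multi: true });

// 3.2+ equivalent: updateMany always applies to every matching document
db.p.updateMany({ city: "NYC" }, { $set: { flag: true } });

Legitimate questions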