From 8eec24c555bc296ce5b3a5fe3a6c0a2684760092 Mon Sep 17 00:00:00 2001 From: Aaron Madsen Date: Wed, 21 Nov 2018 10:53:54 -0700 Subject: [PATCH 1/3] WIP: split the engine and the store via an API --- .gitignore | 1 + bin/digd.js | 4 +- lib/digd.js | 4 +- lib/httpd.js | 7 + lib/{store.json.js => store/index.js} | 268 +++++++++++++------------- lib/store/store.json.js | 167 ++++++++++++++++ 6 files changed, 310 insertions(+), 141 deletions(-) rename lib/{store.json.js => store/index.js} (68%) create mode 100644 lib/store/store.json.js diff --git a/.gitignore b/.gitignore index 97c3e07..1151208 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ node_modules .*.sw* +local-db.js diff --git a/bin/digd.js b/bin/digd.js index 8f90164..3663abb 100755 --- a/bin/digd.js +++ b/bin/digd.js @@ -387,7 +387,7 @@ cli.main(function (args, cli) { } try { - engine = engine || require('../lib/store.json.js').create(engineOpts); + engine = engine || require('../lib/store').create(engineOpts); } catch(e) { respondWithResults(e); return; @@ -413,7 +413,7 @@ cli.main(function (args, cli) { } if (cli.http) { try { - engine = engine || require('../lib/store.json.js').create(engineOpts); + engine = engine || require('../lib/store').create(engineOpts); } catch(e) { console.error(e); return; diff --git a/lib/digd.js b/lib/digd.js index 63c797b..7761c56 100644 --- a/lib/digd.js +++ b/lib/digd.js @@ -184,7 +184,7 @@ function getNs(engine, zs, results, cb) { // d.vanityNs should only be vanity nameservers (pointing to this same server) if (z.vanityNs || results.authority.some(function (ns) { console.log('[debug] ns', ns); - return -1 !== engine.primaryNameservers.indexOf(ns.data.toLowerCase()); + return -1 !== engine.primaryNameservers().indexOf(ns.data.toLowerCase()); })) { results.authority.length = 0; results.authority.push(engine.zones._toSoa(z)); @@ -359,7 +359,7 @@ module.exports.query = function (engine, query, cb) { // NOTE: I think that the issue here is EXTERNAL vs INTERNAL vanity 
NS // We _should_ reply for EXTERNAL vanity NS... but not when it's listed on the SOA internally? // It's surrounding the problem of what if I do sub domain delegation to the same server. - if (-1 === engine.primaryNameservers.indexOf(r.data.toLowerCase())) { + if (-1 === engine.primaryNameservers().indexOf(r.data.toLowerCase())) { console.log("It's a vanity NS"); return false; } diff --git a/lib/httpd.js b/lib/httpd.js index 7594286..c2c1476 100644 --- a/lib/httpd.js +++ b/lib/httpd.js @@ -230,6 +230,11 @@ module.exports.create = function (cli, engine/*, dnsd*/) { zone.class = zone.className; zone.type = zone.typeName; zone.soa = true; + + // TODO: consider sending a predicate object through the engine + // to the actual store in case it is highly inefficient to transfer + // a large number of records from the store that will just be + // thrown away. engine.records.all(function (err, records) { records = records.filter(function (r) { return r.zone === zonename; @@ -239,6 +244,8 @@ module.exports.create = function (cli, engine/*, dnsd*/) { }); }); }); + + // I wonder what an API that gets ALL records from all zones is for app.get('/api/records', function (req, res) { engine.records.all(function (err, records) { res.send({ records: records.map(mapRecord) }); diff --git a/lib/store.json.js b/lib/store/index.js similarity index 68% rename from lib/store.json.js rename to lib/store/index.js index 672ebcb..6507812 100644 --- a/lib/store.json.js +++ b/lib/store/index.js @@ -1,101 +1,80 @@ 'use strict'; +var crypto = require('crypto'); +var dns = require('dns'); +var os = require('os'); +var path = require('path'); + +var pathResolvers = { + '.': function fromCwd(relPath) { + return path.join(process.cwd(), relPath); + }, + '~': function fromHomedir(relPath) { + if (!os.homedir) { + throw new Error( + 'Resolving home directory relative paths is not supported in this version of node.' 
+ ); + } + return path.join(os.homedir(), relPath); + }, + noop: function (p) { return p; } +} + module.exports.create = function (opts) { // opts = { filepath }; + // `opts.filepath` is a module id or path to a module that contains a store plugin or file + var pathFn = pathResolvers[opts.filepath[0]] || pathResolvers.noop; + var storeId = pathFn(opts.filepath); + var pathToStore = require.resolve(storeId); + var engine = { db: null }; function notDeleted(r) { return !r.revokedAt && !r.deletedAt; } - var db = require(opts.filepath); - var stat = require('fs').statSync(opts.filepath); - var crypto = require('crypto'); - // - // Manual Migration - // - db.primaryNameservers.forEach(function (ns, i, arr) { - if ('string' === typeof ns) { - ns = { name: ns }; - arr[i] = ns; - } - if (!ns.id) { - ns.id = crypto.randomBytes(16).toString('hex'); - } - }); - db.zones = db.zones || []; - if (db.domains) { - db.zones = db.zones.concat(db.domains); - } - db.zones.forEach(function (zone) { - if (!zone.name) { - zone.name = zone.id; - zone.id = null; - } - if (!zone.id) { - zone.id = crypto.randomBytes(16).toString('hex'); - } - if (!zone.createdAt) { zone.createdAt = stat.mtime.valueOf(); } - if (!zone.updatedAt) { zone.updatedAt = stat.mtime.valueOf(); } - }); - db.records.forEach(function (record) { - if (!record.id) { - record.id = crypto.randomBytes(16).toString('hex'); - } - }); - require('fs').writeFileSync(opts.filepath, JSON.stringify(db, null, 2)); - // - // End Migration - // + // instantiate the DB module + var db = (pathToStore.slice(-5) === '.json') ? 
+ // JSON files should be loaded using our built in store.json.js + require('./store.json.js')(pathToStore) : + // everything else should be loaded as a module and passed our opts object + require(storeId)(opts); - db.save = function (cb) { - if (db.save._saving) { - console.log('make pending'); - db.save._pending.push(cb); - return; - } + // TODO: examine usage of engine.primaryNameservers to see if we are supporting it right + engine.primaryNameservers = db.primaryNameservers.list; - db.save._saving = true; - require('fs').writeFile(opts.filepath, JSON.stringify(db, null, 2), function (err) { - console.log('done writing'); - var pending = db.save._pending.splice(0); - db.save._saving = false; - cb(err); - if (!pending.length) { - return; - } - db.save(function (err) { - console.log('double save'); - pending.forEach(function (cb) { cb(err); }); - }); - }); - }; - db.save._pending = []; - - engine.primaryNameservers = db.primaryNameservers; engine.peers = { all: function (cb) { - var dns = require('dns'); - var count = db.primaryNameservers.length; - function gotRecord() { - count -= 1; - if (!count) { - cb(null, db.primaryNameservers); - } - } - function getRecord(ns) { + var pNS = db.primaryNameservers.list(); + + function getRecord(ns, done) { dns.resolve4(ns.name, function (err, addresses) { console.log('ns addresses:'); console.log(addresses); - if (err) { console.error(err); gotRecord(); return; } + if (err) { console.error(err); done(); return; } ns.type = 'A'; ns.address = addresses[0]; - gotRecord(); + done(); }); } - db.primaryNameservers.forEach(getRecord); + + // resolve addreses for all of the primary nameservers in parallel + pNS.forEach(function (ns) { + var status = { pending: true }; + function done() { + status.pending = false; + // TODO: determine if the locally stored records should get updated + var incomplete = tasks.filter(function (s) { return s.pending; }); + if (incomplete.length < 1) { + cb(null, pNS); + } + } + getRecord(ns, done); + 
return status; + }); } }; + engine.zones = { _immutableKeys: [ 'id', 'name', 'primary', 'serial', 'revokedAt', 'changedAt', 'insertedAt', 'updatedAt', 'deletedAt' ] , _mutableKeys: [ 'admin', 'expiration', 'minimum', 'refresh', 'retry', 'ttl', 'vanity' ] @@ -105,8 +84,9 @@ module.exports.create = function (opts) { // epoch in seconds will do return parseInt(Math.round(date/1000).toString().slice(-10), 10); } + // NOTE/TODO: despite the _, _toSoa is used outside this file (in lib/digd.js and lib/httpd.js) , _toSoa: function (domain) { - var nameservers = domain.vanityNs || engine.primaryNameservers.map(function (n) { return n.name; }); + var nameservers = domain.vanityNs || engine.primaryNameservers().map(function (n) { return n.name; }); var index = Math.floor(Math.random() * nameservers.length) % nameservers.length; var nameserver = nameservers[index]; @@ -122,6 +102,7 @@ module.exports.create = function (opts) { , name_server: nameserver // admin -- email address or domain for admin + // default is effectively admin@{domain name} , admin: domain.admin || ('admin.' + domain.name) , email_addr: domain.admin || ('admin.' 
+ domain.name) @@ -148,7 +129,7 @@ module.exports.create = function (opts) { } , all: function (cb) { process.nextTick(function () { - cb(null, db.zones.slice(0).filter(notDeleted)); + cb(null, db.zones().filter(notDeleted)); }); } , get: function (queries, cb) { @@ -157,7 +138,7 @@ module.exports.create = function (opts) { return { name: n }; }); } - var myDomains = db.zones.filter(function (d) { + var myDomains = db.zones().filter(function (d) { return queries.some(function (q) { return (d.name.toLowerCase() === q.name) && notDeleted(d); }); @@ -167,19 +148,17 @@ module.exports.create = function (opts) { }); } , touch: function (zone, cb) { - var existing; - db.zones.some(function (z) { - if (z.id && zone.id === z.id) { existing = z; return true; } - if (z.name && zone.name === z.name) { existing = z; return true; } - }); - if (!existing) { - cb(null, null); + db.zones.get(zone, function (err, existing) { + if (err || !existing) { + cb(err, null); + return; + } + existing.updatedAt = new Date().valueOf(); // toISOString(); + console.log('touch saving...'); + db.zone.update(existing, function (err) { + cb(err, !err && existing || null); + }); return; - } - existing.updatedAt = new Date().valueOf(); // toISOString(); - console.log('touch saving...'); - db.save(function (err) { - cb(err, !err && existing || null); }); } , save: function (zone, cb) { @@ -191,65 +170,69 @@ module.exports.create = function (opts) { } } , update: function (zone, cb) { - var existing; - var dirty; + db.zones.get({ id: zone.id }, function (err, found) { + var dirty; - db.zones.some(function (z) { - if (z.id === zone.id) { - existing = z; - return true; + if (err) { + console.log('error finding zone'); + cb(new Error("Error finding zone for '" + zone.id + "'"), null); + return; } - }); - if (!existing) { - console.log('no existing zone'); - cb(new Error("zone for '" + zone.id + "' does not exist"), null); - return; - } - - console.log('found existing zone'); - console.log(existing); - 
console.log(zone); - Object.keys(zone).forEach(function (key) { - if (-1 !== engine.zones._immutableKeys.indexOf(key)) { return; } - if (existing[key] !== zone[key]) { - dirty = true; - console.log('existing key', key, existing[key], zone[key]); - existing[key] = zone[key]; + if (!found) { + console.log('no existing zone'); + cb(new Error("zone for '" + zone.id + "' does not exist"), null); + return; } - }); - zone.updatedAt = new Date().valueOf(); // toISOString(); // Math.round(Date.now() / 1000); - if (dirty) { - zone.changedAt = zone.updatedAt; - } + console.log('found existing zone'); + console.log(found); + console.log(zone); + Object.keys(zone).forEach(function (key) { + if (-1 !== engine.zones._immutableKeys.indexOf(key)) { return; } + if (found[key] !== zone[key]) { + dirty = true; + console.log('existing key', key, found[key], zone[key]); + found[key] = zone[key]; + } + }); - console.log('saving...'); - db.save(function (err) { - cb(err, !err && existing || null); + found.updatedAt = new Date().valueOf(); // toISOString(); // Math.round(Date.now() / 1000); + if (dirty) { + found.changedAt = found.updatedAt; + } + + console.log('saving...'); + db.zones.update(found, function (err) { + cb(err, !err && found || null); + }); }); } , create: function (zone, cb) { - var newZone = { id: crypto.randomBytes(16).toString('hex') }; - var existing; - var nss = []; - - zone.name = (zone.name||'').toLowerCase(); - db.zones.some(function (z) { - if (z.name === zone.name) { - existing = z; - return true; - } - }); - - if (existing) { - cb(new Error("tried to create new zone, but '" + existing.name + "' already exists")); + var zoneName = (zone.name||'').toLowerCase(); + db.zones.get({ name: zoneName }, function (err, found) { + if (err) { + console.error(err); + cb(new Error("error attempting to create new zone '" + zoneName + "'")); return; } - newZone.name = zone.name; + if (found) { + cb(new Error("tried to create new zone, but '" + found.name + "' already exists")); 
+ return; + } + + var newZone = { + id: crypto.randomBytes(16).toString('hex'), + name: zoneName + }; + var nss = []; + newZone.createdAt = Date.now(); newZone.updatedAt = newZone.createdAt; + /* + Set only the mutable keys in the new zone from the proposed zone object + */ Object.keys(zone).forEach(function (key) { //if (-1 !== engine.zones._immutableKeys.indexOf(key)) { return; } if (-1 === engine.zones._mutableKeys.indexOf(key)) { return; } @@ -262,7 +245,12 @@ module.exports.create = function (opts) { } else { newZone.vanity = false; } - db.primaryNameservers.forEach(function (ns, i) { + + // TODO: distinguish between primary and secondary zones + // TODO: determine if we need to do anything special for delegation + + // create records for the primary nameservers (or vanity name servers) + db.primaryNameservers.list().forEach(function (ns, i) { var nsx = 'ns' + (i + 1); var nsZone; var ttl = 43200; // 12h // TODO pick a well-reasoned number @@ -302,7 +290,13 @@ module.exports.create = function (opts) { }); }); - db.zones.push(newZone); + db.zones.create(newZone, function (err) { + // WIP: going to need to figure out how to manage this as a transaction + // Significant benefit to having records owned by the zone is we won't have + // records for zones that don't otherwise exist - at least at the engine level. + + // every line below this one is not yet modified... + }); nss.forEach(function (ns) { db.records.push(ns); }); diff --git a/lib/store/store.json.js b/lib/store/store.json.js new file mode 100644 index 0000000..392ce18 --- /dev/null +++ b/lib/store/store.json.js @@ -0,0 +1,167 @@ +'use strict'; + +function jsonDeepClone(target) { + return JSON.parse( + JSON.stringify(target) + ); +} +/* +init() should return an object with: { + save: function -> undefined - changes to in memory representation should be persisted + This could be considered the equivalent of committing a transaction to the database. 
+ primaryNameservers: { + list: function -> list nameservers + }, + zones: { + list: function -> list zones, + find: function -> read zone by ???, + create: + update: + delete: + }, + records: { + list: function -> list records, + find: function -> read record by ???, + create: + update: + delete: + } +} + +All lists will be a deep copy of the data actually stored. + */ + +module.exports = function init (opts) { + // opts = { filepath }; + + var db = require(opts.filepath); + var stat = require('fs').statSync(opts.filepath); + var crypto = require('crypto'); + // + // Manual Migration + // + + // Convert the primary nameservers from strings to objects with names and IDs. + db.primaryNameservers.forEach(function (ns, i, arr) { + if ('string' === typeof ns) { + ns = { name: ns }; + arr[i] = ns; + } + if (!ns.id) { + ns.id = crypto.randomBytes(16).toString('hex'); + } + }); + + // Convert domains to zones and ensure that they have proper IDs and timestamps + db.zones = db.zones || []; + if (db.domains) { + db.zones = db.zones.concat(db.domains); + } + db.zones.forEach(function (zone) { + if (!zone.name) { + zone.name = zone.id; + zone.id = null; + } + if (!zone.id) { + zone.id = crypto.randomBytes(16).toString('hex'); + } + if (!zone.createdAt) { zone.createdAt = stat.mtime.valueOf(); } + if (!zone.updatedAt) { zone.updatedAt = stat.mtime.valueOf(); } + }); + + // Records belong to zones, but they (currently) refer to them by a zone property. + // NOTE/TODO: This may pose problems where the whole list of records is not easily + // filtered / kept in memory / indexed and/or retrieved by zone. Traditionally, + // records are stored "within a zone" in a zone file. We may wish to have the + // DB API behave more traditionally, even though some stores (like a SQL database + // table) might actually store the zone as a property of a record as we currently do. 
+ db.records.forEach(function (record) { + if (!record.id) { + record.id = crypto.randomBytes(16).toString('hex'); + } + }); + + // Write the migrated data + require('fs').writeFileSync(opts.filepath, JSON.stringify(db, null, 2)); + // + // End Migration + // + + var save = function save (cb) { + if (save._saving) { + console.log('make pending'); + save._pending.push(cb); + return; + } + + save._saving = true; + // TODO: replace with something not destructive to original non-json data + require('fs').writeFile(opts.filepath, JSON.stringify(db, null, 2), function (err) { + console.log('done writing'); + var pending = save._pending.splice(0); + save._saving = false; + cb(err); + if (!pending.length) { + return; + } + save(function (err) { + console.log('double save'); + pending.forEach(function (cb) { cb(err); }); + }); + }); + }; + save._pending = []; + + var dbApi = { + save: function () { + // hide _pending and _saving from callers + var args = [].slice.call(arguments); + return save.apply(null, args); + }, + // primaryNameservers really isn't editable - it's literally the list of FQDN's + // that this database is replicated to in a multi-master fashion. + // + // However, lib/store/index.js does plenty to update these records in support + // of the SOA records that are built from them (as does this file in the "migration" + // section). I'm toying with the idea of not storing them seperately or creating the + // SOA records somewhat immediately. 
+ primaryNameservers: { + list: function listNameservers() { + return jsonDeepClone(db.primaryNameservers); + } + }, + zones: { + list: function listZones() { + return jsonDeepClone(db.zones); + }, + find: function getZone(predicate, cb) { + var found; + db.zones.some(function (z) { + if (z.id && predicate.id === z.id) { found = z; return true; } + if (z.name && predicate.name === z.name) { found = z; return true; } + }); + if (!found) { + cb(null, null); + return; + } + cb(null, jsonDeepClone(found)); + return; + }, + create: function() {}, + update: function() {}, + delete: function() {} + }, + records: { + list: function listRecords() { + return jsonDeepClone(db.records); + }, + find: function getRecord(predicate, cb) { + }, + create: function() {}, + update: function() {}, + delete: function() {} + } + }; + + return dbApi; +}; -- 2.38.5 From e43c169257e47e813fff1fe4c2f188141e5f699d Mon Sep 17 00:00:00 2001 From: Aaron Madsen Date: Sat, 24 Nov 2018 22:36:04 -0700 Subject: [PATCH 2/3] WIP: keep iterating on the store API --- lib/store/store.json.js | 393 +++++++++++++++++++++++++++++++++------- 1 file changed, 331 insertions(+), 62 deletions(-) diff --git a/lib/store/store.json.js b/lib/store/store.json.js index 392ce18..d6883b5 100644 --- a/lib/store/store.json.js +++ b/lib/store/store.json.js @@ -1,10 +1,99 @@ 'use strict'; +var crypto = require('crypto'); + function jsonDeepClone(target) { return JSON.parse( JSON.stringify(target) ); } + +function mergeObjects() { + // arguments should be an array of objects. We + // reverse it because the last argument to set + // a value wins. 
+ var args = [].slice.call(arguments).reverse(); + var len = args.length; + if (len === 1) { + return args[0]; + } + + // gather the set of keys from all arguments + var keyLists = args.map(function (arg) { + return Object.keys(arg); + }); + + var keys = Object.keys(keyLists.reduce(function (all, list) { + list.forEach(function (k) { + all[k] = true; + }); + return all; + }, {})); + + // for each key + return keys.reduce(function (target, k) { + // find the first argument (because of the reverse() above) with the key set + var values = []; + var isObject = false; + for (var i = 0; i < len; i++) { + var v = args[i]; + var vType = typeof v; + + if (vType === 'object') { + if (!v) { + // typeof null is object. null is the only falsey object. null represents + // a delete or the end of our argument list; + break; + } + // we need to collect values until we get a non-object, so we can merge them + values.push(v); + isObject = true; + } else if (!isObject) { + if (vType === 'undefined') { + // if the arg actually has the key set this is effectively a "delete" + if (keyList[i].indexOf(k) != -1) { + break; + } + // otherwise we need to check the next argument's value, so we don't break the loop + } else { + values.push(v); + break; + } + } else { + // a previous value was an object, this one isn't + // That means we are done collecting values. 
+ break; + } + } + + if (values.length > 0) { + target[k] = mergeObjects.apply(null, values); + } + + return target; + }, {}); +} + +function prepareZone(zone, options) { + var opts = options || {}; + var timestamp = opts.timestamp || Date.now(); + if (!zone.name) { + zone.name = zone.id; + zone.id = null; + } + if (!zone.id) { + zone.id = crypto.randomBytes(16).toString('hex'); + } + if (!zone.createdAt) { zone.createdAt = timestamp; } + if (!zone.updatedAt || opts.isUpdate) { zone.updatedAt = timestamp; } + + // create a names set for the zone, keyed by record name mapped to + // an object for the various records with that name, by type (A, MX, TXT, etc.) + zone.records = zone.records || {}; + + return zone; +} + /* init() should return an object with: { save: function -> undefined - changes to in memory representation should be persisted @@ -14,14 +103,12 @@ init() should return an object with: { }, zones: { list: function -> list zones, - find: function -> read zone by ???, create: update: delete: }, records: { list: function -> list records, - find: function -> read record by ???, create: update: delete: @@ -35,51 +122,71 @@ module.exports = function init (opts) { // opts = { filepath }; var db = require(opts.filepath); - var stat = require('fs').statSync(opts.filepath); - var crypto = require('crypto'); + var mtime = require('fs').statSync(opts.filepath).mtime.valueOf(); + // - // Manual Migration + // Migration from other formats // - // Convert the primary nameservers from strings to objects with names and IDs. - db.primaryNameservers.forEach(function (ns, i, arr) { - if ('string' === typeof ns) { - ns = { name: ns }; - arr[i] = ns; - } - if (!ns.id) { - ns.id = crypto.randomBytes(16).toString('hex'); - } + // Convert the primary nameservers from an array of strings to objects with names and IDs. + // also switch to the 'peers' name, since we are really interested in the other FQDNs that + // use the same data store and are kept in sync. 
+ var peerList = (!db.peers || Array.isArray(db.peers))? db.peers : Object.keys(db.peers).map(function (p) { + return db.peers[p]; }); + db.peers = [].concat(db.primaryNameservers, peerList).filter(function (p) { + // filer out empty strings, undefined, etc. + return !!p; + }).map(function (ns) { + var peer = ('string' === typeof ns)? ns : { name: ns }; + if (!ns.id) { + peer.id = crypto.randomBytes(16).toString('hex'); + } + return peer; + }).reduce(function (peers, p) { + peers[p.name] = p; + return peers; + }, {}); + delete db.primaryNameservers; // Convert domains to zones and ensure that they have proper IDs and timestamps - db.zones = db.zones || []; - if (db.domains) { - db.zones = db.zones.concat(db.domains); - } - db.zones.forEach(function (zone) { - if (!zone.name) { - zone.name = zone.id; - zone.id = null; - } - if (!zone.id) { - zone.id = crypto.randomBytes(16).toString('hex'); - } - if (!zone.createdAt) { zone.createdAt = stat.mtime.valueOf(); } - if (!zone.updatedAt) { zone.updatedAt = stat.mtime.valueOf(); } + // Organize zones as a set of zone names + var zoneList = (!db.zones || Array.isArray(db.zones))? db.zones : Object.keys(db.zones).map(function (z) { + return db.zones[z]; }); - // Records belong to zones, but they (currently) refer to them by a zone property. - // NOTE/TODO: This may pose problems where the whole list of records is not easily + db.zones = [].concat(db.domains, zoneList).filter(function (z) { + // filer out empty strings, undefined, etc. + return !!z; + }).map(function (zone) { + return prepareZone(zone, { timestamp: mtime }); + }).reduce(function (zones, z) { + zones[z.name] = z; + return zones; + }, {}); + delete db.domains; + + // NOTE: Records belong to zones, but they previously referred to them only by a + // zone property. This may pose problems where the whole list of records is not easily // filtered / kept in memory / indexed and/or retrieved by zone. 
Traditionally, - // records are stored "within a zone" in a zone file. We may wish to have the - // DB API behave more traditionally, even though some stores (like a SQL database + // records are stored "within a zone" in a zone file. We want to have the store API + // behave more traditionally, even though some stores (like a SQL database // table) might actually store the zone as a property of a record as we currently do. - db.records.forEach(function (record) { + // (This fits with the somewhat unexpected and confusing logic of wildcard records.) + (db.records || []).forEach(function (record) { + // make sure the record has an ID if (!record.id) { record.id = crypto.randomBytes(16).toString('hex'); } + + // put it in it's zone - synthesize one if needed + db.zones[record.zone] = db.zones[record.zone] || prepareZone({ name: record.zone }); + var zone = db.zones[record.zone]; + zone.records[record.name] = zone.records[record.name] || []; + var recordsForName = zone.records[record.name]; + recordsForName.push(record); }); + delete db.records; // Write the migrated data require('fs').writeFileSync(opts.filepath, JSON.stringify(db, null, 2)); @@ -95,7 +202,6 @@ module.exports = function init (opts) { } save._saving = true; - // TODO: replace with something not destructive to original non-json data require('fs').writeFile(opts.filepath, JSON.stringify(db, null, 2), function (err) { console.log('done writing'); var pending = save._pending.splice(0); @@ -112,54 +218,217 @@ module.exports = function init (opts) { }; save._pending = []; + function matchPredicate(predicate) { + return function (toCheck) { + // which items match the predicate? 
+ if (!toCheck) { + return false; + } + + // check all the keys in the predicate - only supporting exact match + // of at least one listed option for all keys right now + if (Object.keys(predicate || {}).some(function (k) { + return [].concat(predicate[k]).indexOf(toCheck[k]) === -1; + })) { + return false; + } + + // we have a match + return true; + }; + } + + function matchZone(predicate) { + var zonenames = !!predicate.name ? [].concat(predicate.name) : Object.keys(db.zones); + var check = matchPredicate(predicate); + // TODO: swap the filter() for a functional style "loop" recursive function + // that lets us return early if we have a limit, etc. + var found = zonenames.filter(function (zonename) { + /* + if (predicate.id && predicate.id !== z.id) { return false; } + if (predicate.name && predicate.name !== z.name) { return false; } + */ + return check(db.zones[zonename]); + }).map(function (zonename) { + return db.zones[zonename]; + }); + + return found; + } + var dbApi = { save: function () { // hide _pending and _saving from callers var args = [].slice.call(arguments); return save.apply(null, args); }, - // primaryNameservers really isn't editable - it's literally the list of FQDN's + // peers really isn't editable - it's literally the list of FQDN's // that this database is replicated to in a multi-master fashion. // // However, lib/store/index.js does plenty to update these records in support // of the SOA records that are built from them (as does this file in the "migration" // section). I'm toying with the idea of not storing them seperately or creating the // SOA records somewhat immediately. - primaryNameservers: { - list: function listNameservers() { - return jsonDeepClone(db.primaryNameservers); - } + peers: function listPeers(cb) { + // Most data stores are going to have an asynchronous storage API. If we need + // synchronous access to the data it is going to have to be cached. 
If it is + // cached, there is still the issue the cache getting out of sync (a legitimate + // issue anyway). If we explicitly make all of these operations async then we + // have greater flexibility for store implmentations to address these issues. + return setImmediate(cb, null, jsonDeepClone(db.peers)); }, zones: { - list: function listZones() { - return jsonDeepClone(db.zones); - }, - find: function getZone(predicate, cb) { - var found; - db.zones.some(function (z) { - if (z.id && predicate.id === z.id) { found = z; return true; } - if (z.name && predicate.name === z.name) { found = z; return true; } + /* + I'm fairly certan that zone names must be unique and therefore are legitimately + IDs within the zones namespace. This is similarly true of record names within a zone. + I'm not certain that having a distinct ID adds value and it may add confusion / complexity. + */ + // NOTE: `opts` exists so we can add options - like properties to read - easily in the future + // without modifying the function signature + list: function listZones(predicate, opts, cb) { + // TODO: consider whether we should just return the zone names + var found = jsonDeepClone(matchZone(predicate)).map(function (z) { + // This is fairly inefficient!! Consider alternative storage + // that does not require deleting the records like this. + delete z.records; + return z; }); - if (!found) { - cb(null, null); - return; - } - cb(null, jsonDeepClone(found)); - return; + return setImmediate(cb, null, found); }, - create: function() {}, - update: function() {}, - delete: function() {} + // // NOTE: I'm not sure we need a distinct 'find()' operation in the API + // // unless we are going to limit the output of the + // // 'list()' operation in some incompatible way. 
// Patch-1 store API for zones and records (superseded by the `lock()`-based
// API in the follow-up patch, but corrected here so the intermediate commit
// is functional).
//
// BUG FIX: matchZone(predicate) is synchronous and *returns* its matches; it
// never invokes a callback. The original passed it a callback, so every zone
// create/update/delete waited forever and the caller was never called back.
var zonesApi = {
  // Create a new zone; fails if a zone with the same name already exists.
  // TODO: needs a lock mechanism that works for simultaneous requests and
  // multiple processes.
  create: function createZone(zone, cb) {
    var found = matchZone({ name: zone.name })[0];
    if (found) {
      return setImmediate(cb, new Error('Zone ' + zone.name + ' already exists'));
    }
    db.zones[zone.name] = prepareZone(zone);
    return setImmediate(function () {
      cb(null, jsonDeepClone(db.zones[zone.name]));
      // release lock
    });
  },
  // Merge updates into an existing zone. Records cannot be written through
  // this interface.
  update: function updateZone(zone, cb) {
    var found = matchZone({ name: zone.name })[0];
    if (!found) {
      return setImmediate(cb, new Error('Zone not found'));
    }
    // make sure we are not writing records through this interface
    delete zone.records;
    var combined = mergeObjects(found, zone);
    db.zones[zone.name] = prepareZone(combined, { isUpdate: true });
    return setImmediate(function () {
      cb(null, jsonDeepClone(db.zones[zone.name]));
      // release lock
    });
  },
  // Remove a zone (and all of its records) entirely.
  delete: function deleteZone(zone, cb) {
    var found = matchZone({ name: zone.name })[0];
    if (!found) {
      return setImmediate(cb, new Error('Zone not found'));
    }
    delete db.zones[zone.name];
    return setImmediate(function () {
      cb();
      // release lock
    });
  }
};

var recordsApi = {
  // List the records matching the predicate within the matching zone(s).
  // NOTE(review): this returns an array of per-name arrays (not a flat list)
  // and returns [] when rPredicate.name is undefined — confirm that callers
  // expect this shape.
  list: function listRecords(rPredicate, cb) {
    var recordNames = [].concat(rPredicate.name);
    var check = matchPredicate(rPredicate);

    var found = matchZone({ name: rPredicate.zone }).reduce(function (records, zone) {
      // get the records from the zone that match the record predicate
      var zFound = recordNames.filter(function (name) {
        return !!zone.records[name];
      }).map(function (name) {
        return zone.records[name].filter(check);
      });
      return records.concat(zFound);
    }, []);

    return setImmediate(cb, null, jsonDeepClone(found));
  },
  // Create a record; rejects an exact duplicate within the zone.
  create: function createRecord(record, cb) {
    var zone = matchZone({ name: record.zone })[0];
    if (!zone) {
      return setImmediate(cb, new Error('Unable to find zone ' + record.zone + ' to create record'));
    }

    var records = zone.records[record.name] = zone.records[record.name] || [];
    var check = matchPredicate(record);
    if (records.filter(check)[0]) {
      return setImmediate(cb, new Error('Exact record already exists in zone ' + record.zone));
    }

    // BUG FIX: the original returned an undefined `found` identifier and
    // never stored the record. Store it (with an ID) and return a copy of
    // what was stored.
    if (!record.id) {
      record.id = crypto.randomBytes(16).toString('hex');
    }
    records.push(record);
    return setImmediate(cb, null, jsonDeepClone(record));
  },
  // Not implemented in this patch; fail loudly instead of never calling back.
  update: function updateRecord(record, cb) {
    return setImmediate(cb, new Error('record update is not implemented yet'));
  },
  delete: function deleteRecord(record, cb) {
    return setImmediate(cb, new Error('record delete is not implemented yet'));
  }
};
/*
 * Migration of the legacy on-disk format (the body of `init()` up to the
 * `lock()` factory), expressed as a helper so it can be tested in isolation.
 *
 * `init()` loads the JSON store from `opts.filepath`, runs this migration,
 * and writes the normalized result back with
 * `require('fs').writeFileSync(opts.filepath, JSON.stringify(fsDb, null, 2))`.
 *
 * Normalizes three legacy shapes in place:
 *   - `primaryNameservers` (array of strings) merged into `peers`
 *     (an object keyed by FQDN; these are the servers we expect to be in
 *     sync with this one),
 *   - `domains` (array) merged into `zones` (an object keyed by zone name,
 *     each stamped via prepareZone() with the file's mtime),
 *   - top-level `records` (array) moved into their zones as
 *     `zone.records[name][id]` — each name may hold multiple records, but
 *     each record must have a unique ID.
 *
 * BUG FIX: the peer-normalization ternary had its branches swapped. A bare
 * string was used as the peer object (so `peer.id = ...` assigned to a
 * string primitive and `peers[p.name]` keyed on `undefined`), while an
 * object was wrapped a second time as `{ name: <object> }`. Strings must be
 * wrapped; objects must be used as-is.
 *
 * @param {Object} fsDb  - raw object loaded from the JSON store file
 * @param {number} mtime - the store file's mtime, used as the zone timestamp
 * @returns {Object} the same (mutated) fsDb, now in the new shape
 */
function migrateLegacyDb(fsDb, mtime) {
  // Accept peers as an array or as an object keyed by name.
  var peerList = (!fsDb.peers || Array.isArray(fsDb.peers)) ? fsDb.peers : Object.keys(fsDb.peers).map(function (p) {
    return fsDb.peers[p];
  });
  fsDb.peers = [].concat(fsDb.primaryNameservers, peerList).filter(function (p) {
    // filter out empty strings, undefined, etc.
    return !!p;
  }).map(function (ns) {
    var peer = ('string' === typeof ns) ? { name: ns } : ns;
    if (!peer.id) {
      peer.id = crypto.randomBytes(16).toString('hex');
    }
    return peer;
  }).reduce(function (peers, p) {
    peers[p.name] = p;
    return peers;
  }, {});
  delete fsDb.primaryNameservers;

  // Convert legacy `domains` to `zones`, keyed by zone name, with proper IDs
  // and timestamps.
  var zoneList = (!fsDb.zones || Array.isArray(fsDb.zones)) ? fsDb.zones : Object.keys(fsDb.zones).map(function (z) {
    return fsDb.zones[z];
  });
  fsDb.zones = [].concat(fsDb.domains, zoneList).filter(function (z) {
    // filter out empty strings, undefined, etc.
    return !!z;
  }).map(function (zone) {
    return prepareZone(zone, { timestamp: mtime });
  }).reduce(function (zones, z) {
    zones[z.name] = z;
    return zones;
  }, {});
  delete fsDb.domains;

  // Records previously referred to their zone only by a `zone` property.
  // Store them in their zone so the whole structure behaves traditionally.
  (fsDb.records || []).forEach(function (record) {
    // make sure the record has an ID
    if (!record.id) {
      record.id = crypto.randomBytes(16).toString('hex');
    }
    // Put it in its zone - synthesize one if needed
    fsDb.zones[record.zone] = fsDb.zones[record.zone] || prepareZone({ name: record.zone });
    var zone = fsDb.zones[record.zone];
    // Keep in mind that each name may have multiple records (whether or not
    // they are of different types, classes, etc.), but each record must have
    // a unique ID.
    zone.records[record.name] = zone.records[record.name] || {};
    zone.records[record.name][record.id] = record;
  });
  delete fsDb.records;

  return fsDb;
}
/*
 * lock(forOps) internals: persistence queue and predicate helpers.
 *
 * `forOps` describes which portions of the database a write lock is needed
 * for; when it is undefined the caller only reads. Locking itself is still
 * WIP — per the patch notes: a whole-zone lock excludes name/record locks
 * inside that zone; a name lock excludes record locks under that name;
 * per-record locks only conflict on the same record ID; and creating a new
 * zone/name/record requires locking its key (name or ID) before it exists,
 * to prevent two requests/processes creating the same resource at once.
 * Locks will likely be lockfiles on the filesystem (with watches) so they
 * can be communicated across processes.
 *
 * The working copy these helpers operate on is created at the top of
 * `lock()` as `var db = mergeObjects(fsDb);` so edits can be saved or
 * discarded as a unit.
 */

// Persist the working copy to disk. Saves requested while a write is in
// flight are queued and all satisfied by a single follow-up write.
// NOTE(review): assumes `cb` is always provided — a save() with no callback
// would throw inside the fs.writeFile handler; confirm callers.
var save = function save(cb) {
  if (save._saving) {
    console.log('make pending');
    save._pending.push(cb);
    return;
  }

  save._saving = true;
  require('fs').writeFile(opts.filepath, JSON.stringify(db, null, 2), function (err) {
    console.log('done writing');
    var queued = save._pending.splice(0);
    save._saving = false;
    cb(err);
    if (queued.length) {
      // Something changed while we were writing; write once more and then
      // notify everyone who queued up.
      save(function (err2) {
        console.log('double save');
        queued.forEach(function (queuedCb) { queuedCb(err2); });
      });
    }
  });
};
save._pending = [];

// Build a checker that answers: does `toCheck` satisfy every key of the
// predicate? Each predicate key may list one value or an array of accepted
// values; only exact matches are supported right now.
function matchPredicate(predicate) {
  return function (toCheck) {
    if (!toCheck) {
      return false;
    }
    var mismatched = Object.keys(predicate || {}).some(function (key) {
      return [].concat(predicate[key]).indexOf(toCheck[key]) === -1;
    });
    return !mismatched;
  };
}

// Return the zones (live references, not copies) matching the predicate.
// When the predicate has no name, every zone is considered.
// TODO: swap the filter() for a recursive "loop" so we can return early
// once a limit is reached.
function matchZone(predicate) {
  var zonenames = predicate.name ? [].concat(predicate.name) : Object.keys(db.zones);
  var check = matchPredicate(predicate);
  return zonenames.filter(function (zonename) {
    return check(db.zones[zonename]);
  }).map(function (zonename) {
    return db.zones[zonename];
  });
}
// NOTE: `opts` exists so we can add options - like properties to read -
// easily in the future without modifying the function signature
function listZones(predicate, opts, cb) {
  var found = jsonDeepClone(matchZone(predicate));
  return setImmediate(cb, null, found);
}

// Create-or-update a zone by name.
// BUG FIX: matchZone(predicate) is synchronous and *returns* its matches;
// the original passed it a callback that was never invoked, so writeZone
// (and deleteZone below) never called back.
function writeZone(zone, cb) {
  var found = matchZone({ name: zone.name })[0];
  var isUpdate = !!found;

  var combined = mergeObjects(found || {}, zone);
  db.zones[zone.name] = prepareZone(combined, { isUpdate: isUpdate });
  return setImmediate(function () {
    cb(null, jsonDeepClone(db.zones[zone.name]));
  });
}

// Remove a zone (and all of its records) entirely.
function deleteZone(zone, cb) {
  var found = matchZone({ name: zone.name })[0];
  if (!found) {
    return setImmediate(cb, new Error('Zone not found'));
  }

  delete db.zones[zone.name];
  return setImmediate(function () {
    cb();
  });
}

// List records matching the predicate. Records are stored per zone as
// `zone.records[name][id]`.
// NOTE(review): this returns an array of per-name arrays (not a flat list)
// and returns [] when rPredicate.name is undefined — confirm callers expect
// that shape.
function listRecords(rPredicate, cb) {
  var recordNames = [].concat(rPredicate.name);
  var check = matchPredicate(rPredicate);

  var found = matchZone({ name: rPredicate.zone }).reduce(function (records, zone) {
    // get the records from the zone that match the record predicate
    var zFound = recordNames.filter(function (name) {
      return !!zone.records[name];
    }).map(function (name) {
      return Object.keys(zone.records[name]).map(function (id) {
        return zone.records[name][id];
      }).filter(check);
    });
    return records.concat(zFound);
  }, []);

  return setImmediate(cb, null, jsonDeepClone(found));
}

// Shared create/update/delete implementation for records.
// BUG FIXES vs. the original patch:
//   - a delete no longer generates a fresh ID (the `!isUpdate` branch used
//     to clobber the very ID that was supposed to be deleted),
//   - the record is now actually removed on delete,
//   - the zone update is keyed by the *zone* name (it used the record name,
//     so writeZone would have synthesized a bogus zone named after the
//     record),
//   - `keep` was an undefined identifier; the live per-name record set is
//     written through instead.
function modifyRecords(record, options, cb) {
  var modifyOpts = options || {};
  var isDelete = !!modifyOpts.isDelete;
  if (!record.zone) {
    return setImmediate(cb, new Error('No zone specified for record'));
  }
  if (!record.name) {
    return setImmediate(cb, new Error('No name specified for record'));
  }
  if (isDelete && !record.id) {
    return setImmediate(cb, new Error('No id specified to delete record'));
  }

  var zone = matchZone({ name: record.zone })[0];
  if (!zone) {
    return setImmediate(cb, new Error('Unable to find zone ' + record.zone + ' for record'));
  }

  var isUpdate = !!(record.id && !isDelete);
  if (!isUpdate && !isDelete) {
    // brand-new record: mint a unique ID for it
    record.id = crypto.randomBytes(16).toString('hex');
  }

  var recordsForName = zone.records[record.name] = zone.records[record.name] || {};
  var found = recordsForName[record.id];

  if ((isUpdate || isDelete) && !found) {
    return setImmediate(cb, new Error('Unable to find record with ID: ' + record.id));
  }

  if (isDelete) {
    delete recordsForName[record.id];
  } else {
    recordsForName[record.id] = mergeObjects(found || {}, record);
  }

  // Funnel the change through writeZone so the zone metadata (timestamps,
  // SOA serial, etc.) is refreshed by prepareZone().
  var zoneUpdate = {
    name: record.zone,
    records: {}
  };
  zoneUpdate.records[record.name] = recordsForName;
  return writeZone(zoneUpdate, function (err) {
    if (err) {
      return cb(err);
    }

    return cb(
      null,
      isDelete ? null : jsonDeepClone(recordsForName[record.id])
    );
  });
}

function writeRecord(record, cb) {
  modifyRecords(record, null, cb);
}

function deleteRecord(record, cb) {
  modifyRecords(record, { isDelete: true }, cb);
}

// The public handle returned by lock(): `save` persists the working copy,
// everything else reads or writes it in memory.
var dbApi = {
  save: function () {
    // hide _pending and _saving from callers
    var args = [].slice.call(arguments);
    return save.apply(null, args);
  },
  // peers really isn't editable - it's literally the list of FQDNs
  // that this database is replicated to in a multi-master fashion.
  //
  // However, lib/store/index.js does plenty to update these records in
  // support of the SOA records that are built from them (as does this file
  // in the "migration" section). I'm toying with the idea of not storing
  // them separately or creating the SOA records somewhat immediately.
  peers: function listPeers(cb) {
    // Most data stores are going to have an asynchronous storage API. If we
    // need synchronous access to the data it is going to have to be cached.
    // If it is cached, there is still the issue of the cache getting out of
    // sync (a legitimate issue anyway). If we explicitly make all of these
    // operations async then we have greater flexibility for store
    // implementations to address these issues.
    return setImmediate(cb, null, jsonDeepClone(db.peers));
  },
  zones: {
    list: listZones,
    write: writeZone,
    delete: deleteZone
  },
  records: {
    list: listRecords,
    write: writeRecord,
    delete: deleteRecord
  }
};
// (dbApi is returned from lock() in the enclosing scope)