Skip to content

Commit

Permalink
don't log known slow queries
Browse files Browse the repository at this point in the history
  • Loading branch information
zadam committed Oct 20, 2023
1 parent 5c10fc2 commit 4773f93
Show file tree
Hide file tree
Showing 4 changed files with 65 additions and 33 deletions.
51 changes: 28 additions & 23 deletions src/becca/becca_loader.js
Original file line number Diff line number Diff line change
Expand Up @@ -15,44 +15,49 @@ const entityConstructor = require("../becca/entity_constructor");

const beccaLoaded = new Promise((res, rej) => {
sqlInit.dbReady.then(() => {
load();
cls.init(() => {
load();

cls.init(() => require('../services/options_init').initStartupOptions());
require('../services/options_init').initStartupOptions();

res();
res();
});
});
});

function load() {
const start = Date.now();
becca.reset();

// using a raw query and passing arrays to avoid allocating new objects,
// this is worth it for the becca load since it happens every run and blocks the app until finished
// we know this is slow and the total becca load time is logged
sql.disableSlowQueryLogging(() => {
// using a raw query and passing arrays to avoid allocating new objects,
// this is worth it for the becca load since it happens every run and blocks the app until finished

for (const row of sql.getRawRows(`SELECT noteId, title, type, mime, isProtected, blobId, dateCreated, dateModified, utcDateCreated, utcDateModified FROM notes WHERE isDeleted = 0`)) {
new BNote().update(row).init();
}
for (const row of sql.getRawRows(`SELECT noteId, title, type, mime, isProtected, blobId, dateCreated, dateModified, utcDateCreated, utcDateModified FROM notes WHERE isDeleted = 0`)) {
new BNote().update(row).init();
}

const branchRows = sql.getRawRows(`SELECT branchId, noteId, parentNoteId, prefix, notePosition, isExpanded, utcDateModified FROM branches WHERE isDeleted = 0`);
// in-memory sort is faster than in the DB
branchRows.sort((a, b) => a.notePosition - b.notePosition);
const branchRows = sql.getRawRows(`SELECT branchId, noteId, parentNoteId, prefix, notePosition, isExpanded, utcDateModified FROM branches WHERE isDeleted = 0`);
// in-memory sort is faster than in the DB
branchRows.sort((a, b) => a.notePosition - b.notePosition);

for (const row of branchRows) {
new BBranch().update(row).init();
}
for (const row of branchRows) {
new BBranch().update(row).init();
}

for (const row of sql.getRawRows(`SELECT attributeId, noteId, type, name, value, isInheritable, position, utcDateModified FROM attributes WHERE isDeleted = 0`)) {
new BAttribute().update(row).init();
}
for (const row of sql.getRawRows(`SELECT attributeId, noteId, type, name, value, isInheritable, position, utcDateModified FROM attributes WHERE isDeleted = 0`)) {
new BAttribute().update(row).init();
}

for (const row of sql.getRows(`SELECT name, value, isSynced, utcDateModified FROM options`)) {
new BOption(row);
}
for (const row of sql.getRows(`SELECT name, value, isSynced, utcDateModified FROM options`)) {
new BOption(row);
}

for (const row of sql.getRows(`SELECT etapiTokenId, name, tokenHash, utcDateCreated, utcDateModified FROM etapi_tokens WHERE isDeleted = 0`)) {
new BEtapiToken(row);
}
for (const row of sql.getRows(`SELECT etapiTokenId, name, tokenHash, utcDateCreated, utcDateModified FROM etapi_tokens WHERE isDeleted = 0`)) {
new BEtapiToken(row);
}
});

for (const noteId in becca.notes) {
becca.notes[noteId].sortParents();
Expand Down
10 changes: 10 additions & 0 deletions src/services/cls.js
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,14 @@ function isEntityEventsDisabled() {
return !!namespace.get('disableEntityEvents');
}

/**
 * Stores the slow-query-logging suppression flag in the current CLS context.
 *
 * @param {boolean} value - true to suppress slow-query log messages for the
 *                          duration of the current continuation-local context
 */
function disableSlowQueryLogging(value) {
    namespace.set('disableSlowQueryLogging', value);
}

/**
 * Reports whether slow-query logging is suppressed in the current CLS context.
 *
 * @returns {boolean} true when logging of slow queries has been disabled
 */
function isSlowQueryLoggingDisabled() {
    // coerce: namespace.get() yields undefined when the flag was never set
    return Boolean(namespace.get('disableSlowQueryLogging'));
}

function getAndClearEntityChangeIds() {
const entityChangeIds = namespace.get('entityChangeIds') || [];

Expand Down Expand Up @@ -93,4 +101,6 @@ module.exports = {
getAndClearEntityChangeIds,
putEntityChange,
ignoreEntityChangeIds,
disableSlowQueryLogging,
isSlowQueryLoggingDisabled
};
19 changes: 11 additions & 8 deletions src/services/content_hash.js
Original file line number Diff line number Diff line change
Expand Up @@ -11,14 +11,17 @@ function getEntityHashes() {

const startTime = new Date();

const hashRows = sql.getRawRows(`
SELECT entityName,
entityId,
hash,
isErased
FROM entity_changes
WHERE isSynced = 1
AND entityName != 'note_reordering'`);
// we know this is slow and the total content hash calculation time is logged
const hashRows = sql.disableSlowQueryLogging(
() => sql.getRawRows(`
SELECT entityName,
entityId,
hash,
isErased
FROM entity_changes
WHERE isSynced = 1
AND entityName != 'note_reordering'`)
);

// sorting is faster in memory
// sorting by entityId is enough, hashes will be segmented by entityName later on anyway
Expand Down
18 changes: 16 additions & 2 deletions src/services/sql.js
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ function wrap(query, func) {

const milliseconds = Date.now() - startTimestamp;

if (milliseconds >= 20) {
if (milliseconds >= 20 && !cls.isSlowQueryLoggingDisabled()) {
if (query.includes("WITH RECURSIVE")) {
log.info(`Slow recursive query took ${milliseconds}ms.`);
}
Expand Down Expand Up @@ -295,6 +295,19 @@ async function copyDatabase(targetFilePath) {
await dbConnection.backup(targetFilePath);
}

/**
 * Runs the given callback with slow-query logging suppressed, then restores
 * the previous suppression state — even if the callback throws.
 *
 * Intended for queries that are known to be slow (e.g. full becca load),
 * where logging each one would only add noise.
 *
 * @param {Function} cb - callback executed with slow-query logging disabled
 * @returns {*} whatever the callback returns
 */
function disableSlowQueryLogging(cb) {
    // remember the prior flag so nested calls restore correctly
    const previousState = cls.isSlowQueryLoggingDisabled();

    try {
        cls.disableSlowQueryLogging(true);
        return cb();
    }
    finally {
        cls.disableSlowQueryLogging(previousState);
    }
}

module.exports = {
dbConnection,
insert,
Expand Down Expand Up @@ -367,5 +380,6 @@ module.exports = {
transactional,
upsert,
fillParamList,
copyDatabase
copyDatabase,
disableSlowQueryLogging
};

0 comments on commit 4773f93

Please sign in to comment.