Overview
Comment:     Early detection of implausibly sized records to avoid unnecessary large memory allocations.
SHA3-256:    2c8769c69f301307db6663adb8b7c0b8
User & Date: drh 2019-02-04 21:10:24.890
Context
2019-02-05
  08:55  Update test file "resetdb.test" so that it works with the "prepare" permutation. (check-in: 95d338124b user: dan tags: trunk)
2019-02-04
  21:10  Early detection of implausibly sized records to avoid unnecessary large memory allocations. (check-in: 2c8769c69f user: drh tags: trunk)
  19:52  Mention the new -memtrace command-line option in the -help output of the CLI. (check-in: ada91aefe3 user: drh tags: trunk)
Changes
Changes to src/btree.c.
︙
 */
 u32 sqlite3BtreePayloadSize(BtCursor *pCur){
   assert( cursorHoldsMutex(pCur) );
   assert( pCur->eState==CURSOR_VALID );
   getCellInfo(pCur);
   return pCur->info.nPayload;
 }
+
+/*
+** Return an upper bound on the size of any record for the table
+** that the cursor is pointing into.
+**
+** This is an optimization. Everything will still work if this
+** routine always returns 2147483647 (which is the largest record
+** that SQLite can handle) or more. But returning a smaller value might
+** prevent large memory allocations when trying to interpret a
+** corrupt datrabase.
+**
+** The current implementation merely returns the size of the underlying
+** database file.
+*/
+sqlite3_int64 sqlite3BtreeMaxRecordSize(BtCursor *pCur){
+  assert( cursorHoldsMutex(pCur) );
+  assert( pCur->eState==CURSOR_VALID );
+  return pCur->pBt->pageSize * (sqlite3_int64)pCur->pBt->nPage;
+}
 
 /*
 ** Given the page number of an overflow page in the database (parameter
 ** ovfl), this function finds the page number of the next page in the
 ** linked list of overflow pages. If possible, it uses the auto-vacuum
 ** pointer-map data instead of reading the content of page ovfl to do so.
 **
︙
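The bound returned by the new routine is simply the total size of the database file: no single record, even counting its overflow pages, can occupy more bytes than the file holds. The standalone sketch below is not part of the check-in; max_record_bound() is a hypothetical stand-in for sqlite3BtreeMaxRecordSize(), which reads the same two fields (pageSize and nPage) from pCur->pBt.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for sqlite3BtreeMaxRecordSize(): page size times
** page count, widened to 64 bits so a large database cannot overflow. */
static int64_t max_record_bound(uint32_t pageSize, uint32_t nPage){
  return (int64_t)pageSize * (int64_t)nPage;
}

int main(void){
  int64_t bound   = max_record_bound(4096, 100); /* a 409,600-byte database */
  int64_t claimed = 2000000000;                  /* payload size claimed by a corrupt record */
  /* The claimed size exceeds the file size, so it can be rejected before
  ** any attempt to allocate a 2 GB buffer. */
  printf("bound=%lld claimed=%lld implausible=%d\n",
         (long long)bound, (long long)claimed, claimed > bound);
  return 0;
}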
Changes to src/btree.h.
︙
 i64 sqlite3BtreeIntegerKey(BtCursor*);
 #ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
 i64 sqlite3BtreeOffset(BtCursor*);
 #endif
 int sqlite3BtreePayload(BtCursor*, u32 offset, u32 amt, void*);
 const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt);
 u32 sqlite3BtreePayloadSize(BtCursor*);
+sqlite3_int64 sqlite3BtreeMaxRecordSize(BtCursor*);
 char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*);
 struct Pager *sqlite3BtreePager(Btree*);
 i64 sqlite3BtreeRowCountEst(BtCursor*);
 #ifndef SQLITE_OMIT_INCRBLOB
 int sqlite3BtreePayloadChecked(BtCursor*, u32 offset, u32 amt, void*);
︙
Changes to src/vdbemem.c.
︙
   BtCursor *pCur,   /* Cursor pointing at record to retrieve. */
   u32 offset,       /* Offset from the start of data to return bytes from. */
   u32 amt,          /* Number of bytes to return. */
   Mem *pMem         /* OUT: Return data in this Mem structure. */
 ){
   int rc;
   pMem->flags = MEM_Null;
+  if( sqlite3BtreeMaxRecordSize(pCur)<offset+amt ){
+    return SQLITE_CORRUPT_BKPT;
+  }
   if( SQLITE_OK==(rc = sqlite3VdbeMemClearAndResize(pMem, amt+1)) ){
     rc = sqlite3BtreePayload(pCur, offset, amt, pMem->z);
     if( rc==SQLITE_OK ){
       pMem->z[amt] = 0;   /* Overrun area used when reading malformed records */
       pMem->flags = MEM_Blob;
       pMem->n = (int)amt;
     }else{
︙
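The vdbemem.c call site shows the pattern this check-in applies: compare the requested byte range against a cheap upper bound before allocating, so a corrupt size field returns SQLITE_CORRUPT immediately instead of driving a multi-gigabyte allocation that is doomed to fail. A minimal sketch of that pattern follows; fetch_payload(), read_payload() and the payload_bound parameter are illustrative names, not SQLite routines.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-in for sqlite3BtreePayload(): zero-fills the buffer. */
static int read_payload(uint32_t offset, uint32_t amt, unsigned char *buf){
  (void)offset;
  memset(buf, 0, amt);
  return 0;
}

/* Early-rejection pattern: validate against the upper bound, then allocate. */
static int fetch_payload(uint32_t offset, uint32_t amt, int64_t payload_bound,
                         unsigned char **ppOut){
  if( (int64_t)offset + (int64_t)amt > payload_bound ) return 1;  /* corrupt */
  *ppOut = malloc((size_t)amt + 1);
  if( *ppOut==0 ) return 2;                                       /* out of memory */
  if( read_payload(offset, amt, *ppOut) ){ free(*ppOut); return 1; }
  (*ppOut)[amt] = 0;  /* nul-terminate the extra overrun byte */
  return 0;
}

int main(void){
  unsigned char *p = 0;
  /* With a 409,600-byte bound, a request for 2 GB is rejected up front. */
  printf("rc=%d\n", fetch_payload(0, 2000000000u, 409600, &p));
  free(p);
  return 0;
}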