author     George Hazan <ghazan@miranda.im>   2017-10-08 23:19:54 +0300
committer  George Hazan <ghazan@miranda.im>   2017-10-08 23:20:01 +0300
commit     7aeec32a33811541b330a75352958267f22fa0a3 (patch)
tree       397d770b0956933ea4ec4963943ea9827e2cf881
parent     73b775b4863e6498bc7794c9de27ef88529c835f (diff)
moving lmdb to mdbx engine
24 files changed, 17377 insertions, 13443 deletions
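
The bulk of the mechanical churn in this commit comes from the value-descriptor layout: LMDB's MDB_val stores the size first, while MDBX's MDBX_val follows the struct iovec layout with the pointer first. A minimal sketch of the two layouts (abridged declarations with the field names used below, not the full library headers):

```cpp
// Value descriptors as referenced throughout this diff (abridged).
typedef struct MDB_val  { size_t  mv_size;  void   *mv_data; } MDB_val;   // LMDB: size first
typedef struct MDBX_val { void   *iov_base; size_t  iov_len; } MDBX_val;  // MDBX: iovec-style, pointer first

// Hence every aggregate initializer swaps its members:
//   MDB_val  key = { sizeof(MCONTACT), &contactID };   // old
//   MDBX_val key = { &contactID, sizeof(MCONTACT) };   // new
// ...and accessors change from key.mv_size / key.mv_data to key.iov_len / key.iov_base.
```
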
diff --git a/plugins/Dbx_mdb/dbx_lmdb.vcxproj b/plugins/Dbx_mdb/dbx_lmdb.vcxproj
index 11f517d307..41afd957be 100644
--- a/plugins/Dbx_mdb/dbx_lmdb.vcxproj
+++ b/plugins/Dbx_mdb/dbx_lmdb.vcxproj
@@ -19,17 +19,17 @@
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
- <ProjectName>Dbx_lmdb</ProjectName>
+ <ProjectName>Dbx_mdbx</ProjectName>
<ProjectGuid>{E0ACDEA0-0AC9-4431-8CA3-6B0CCACB2E18}</ProjectGuid>
</PropertyGroup>
<ImportGroup Label="PropertySheets">
<Import Project="$(ProjectDir)..\..\build\vc.common\plugin.props" />
</ImportGroup>
<ItemGroup>
- <ClCompile Include="src\lmdb\*.c">
+ <ClCompile Include="src\mdbx\*.c">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
- <ClCompile Include="src\lmdb\*.cpp">
+ <ClCompile Include="src\mdbx\*.cpp">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
</ItemGroup>
diff --git a/plugins/Dbx_mdb/src/dbcontacts.cpp b/plugins/Dbx_mdb/src/dbcontacts.cpp
index 04b2879b53..3ae1d7c973 100644
--- a/plugins/Dbx_mdb/src/dbcontacts.cpp
+++ b/plugins/Dbx_mdb/src/dbcontacts.cpp
@@ -100,31 +100,31 @@ STDMETHODIMP_(LONG) CDbxMdb::DeleteContact(MCONTACT contactID)
}
}
{
- MDB_val key, data;
+ MDBX_val key, data;
DBSettingKey keyS = { contactID, 0 };
txn_ptr txn(m_pMdbEnv);
cursor_ptr cursor(txn, m_dbSettings);
- key.mv_size = sizeof(keyS); key.mv_data = &keyS;
+ key.iov_len = sizeof(keyS); key.iov_base = &keyS;
- for (int res = mdb_cursor_get(cursor, &key, &data, MDB_SET_RANGE); res == MDB_SUCCESS; res = mdb_cursor_get(cursor, &key, &data, MDB_NEXT))
+ for (int res = mdbx_cursor_get(cursor, &key, &data, MDBX_SET_RANGE); res == MDBX_SUCCESS; res = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT))
{
- const DBSettingKey *pKey = (const DBSettingKey*)key.mv_data;
+ const DBSettingKey *pKey = (const DBSettingKey*)key.iov_base;
if (pKey->hContact != contactID)
break;
- mdb_cursor_del(cursor, 0);
+ mdbx_cursor_del(cursor, 0);
}
txn.commit();
}
- MDB_val key = { sizeof(MCONTACT), &contactID };
+ MDBX_val key = { &contactID, sizeof(MCONTACT) };
for (;; Remap())
{
txn_ptr trnlck(m_pMdbEnv);
- MDB_CHECK(mdb_del(trnlck, m_dbContacts, &key, nullptr), 1);
- if (trnlck.commit() == MDB_SUCCESS)
+ MDBX_CHECK(mdbx_del(trnlck, m_dbContacts, &key, nullptr), 1);
+ if (trnlck.commit() == MDBX_SUCCESS)
break;
}
@@ -139,13 +139,13 @@ STDMETHODIMP_(MCONTACT) CDbxMdb::AddContact()
DBCachedContact *cc = m_cache->AddContactToCache(dwContactId);
- MDB_val key = { sizeof(MCONTACT), &dwContactId };
- MDB_val data = { sizeof(cc->dbc), &cc->dbc };
+ MDBX_val key = { &dwContactId, sizeof(MCONTACT) };
+ MDBX_val data = { &cc->dbc, sizeof(cc->dbc) };
for (;; Remap()) {
txn_ptr trnlck(m_pMdbEnv);
- MDB_CHECK(mdb_put(trnlck, m_dbContacts, &key, &data, 0), 0);
- if (trnlck.commit() == MDB_SUCCESS)
+ MDBX_CHECK(mdbx_put(trnlck, m_dbContacts, &key, &data, 0), 0);
+ if (trnlck.commit() == MDBX_SUCCESS)
break;
}
@@ -179,14 +179,14 @@ BOOL CDbxMdb::MetaSetDefault(DBCachedContact *cc)
void CDbxMdb::GatherContactHistory(MCONTACT hContact, LIST<EventItem> &list)
{
DBEventSortingKey keyVal = { 0, 0, hContact };
- MDB_val key = { sizeof(keyVal), &keyVal }, data;
+ MDBX_val key = { &keyVal, sizeof(keyVal) }, data;
txn_ptr_ro trnlck(m_txn);
cursor_ptr_ro cursor(m_curEventsSort);
- for (int res = mdb_cursor_get(cursor, &key, &data, MDB_SET_RANGE); res == MDB_SUCCESS; res = mdb_cursor_get(cursor, &key, &data, MDB_NEXT))
+ for (int res = mdbx_cursor_get(cursor, &key, &data, MDBX_SET_RANGE); res == MDBX_SUCCESS; res = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT))
{
- const DBEventSortingKey *pKey = (const DBEventSortingKey*)key.mv_data;
+ const DBEventSortingKey *pKey = (const DBEventSortingKey*)key.iov_base;
if (pKey->hContact != hContact)
return;
@@ -206,22 +206,22 @@ BOOL CDbxMdb::MetaMergeHistory(DBCachedContact *ccMeta, DBCachedContact *ccSub)
{
txn_ptr trnlck(m_pMdbEnv);
DBEventSortingKey insVal = { EI->eventId, EI->ts, ccMeta->contactID };
- MDB_val key = { sizeof(insVal), &insVal }, data = { 1, (void*)"" };
- mdb_put(trnlck, m_dbEventsSort, &key, &data, 0);
- if (trnlck.commit() == MDB_SUCCESS)
+ MDBX_val key = { &insVal, sizeof(insVal) }, data = { (void*)"", 1 };
+ mdbx_put(trnlck, m_dbEventsSort, &key, &data, 0);
+ if (trnlck.commit() == MDBX_SUCCESS)
break;
}
ccMeta->dbc.dwEventCount++;
delete EI;
}
- MDB_val keyc = { sizeof(MCONTACT), &ccMeta->contactID }, datac = { sizeof(ccMeta->dbc), &ccMeta->dbc };
+ MDBX_val keyc = { &ccMeta->contactID, sizeof(MCONTACT) }, datac = { &ccMeta->dbc, sizeof(ccMeta->dbc) };
for (;; Remap())
{
txn_ptr trnlck(m_pMdbEnv);
- MDB_CHECK(mdb_put(trnlck, m_dbContacts, &keyc, &datac, 0), 1);
- if (trnlck.commit() == MDB_SUCCESS)
+ MDBX_CHECK(mdbx_put(trnlck, m_dbContacts, &keyc, &datac, 0), 1);
+ if (trnlck.commit() == MDBX_SUCCESS)
break;
}
return 0;
@@ -240,22 +240,22 @@ BOOL CDbxMdb::MetaSplitHistory(DBCachedContact *ccMeta, DBCachedContact *ccSub)
for (;; Remap()) {
txn_ptr trnlck(m_pMdbEnv);
DBEventSortingKey insVal = { EI->eventId, EI->ts, ccMeta->contactID };
- MDB_val key = { sizeof(insVal), &insVal }, data = { 1, (void*)"" };
- mdb_del(trnlck, m_dbEventsSort, &key, &data);
- if (trnlck.commit() == MDB_SUCCESS)
+ MDBX_val key = { &insVal, sizeof(insVal) }, data = { (void*)"", 1 };
+ mdbx_del(trnlck, m_dbEventsSort, &key, &data);
+ if (trnlck.commit() == MDBX_SUCCESS)
break;
}
ccMeta->dbc.dwEventCount--;
delete EI;
}
- MDB_val keyc = { sizeof(MCONTACT), &ccMeta->contactID }, datac = { sizeof(ccMeta->dbc), &ccMeta->dbc };
+ MDBX_val keyc = { &ccMeta->contactID, sizeof(MCONTACT) }, datac = { &ccMeta->dbc, sizeof(ccMeta->dbc) };
for (;; Remap())
{
txn_ptr trnlck(m_pMdbEnv);
- MDB_CHECK(mdb_put(trnlck, m_dbContacts, &keyc, &datac, 0), 1);
- if (trnlck.commit() == MDB_SUCCESS)
+ MDBX_CHECK(mdbx_put(trnlck, m_dbContacts, &keyc, &datac, 0), 1);
+ if (trnlck.commit() == MDBX_SUCCESS)
break;
}
return 0;
@@ -296,11 +296,11 @@ void CDbxMdb::FillContacts()
txn_ptr_ro trnlck(m_txn);
cursor_ptr_ro cursor(m_curContacts);
- MDB_val key, data;
- while (mdb_cursor_get(cursor, &key, &data, MDB_NEXT) == MDB_SUCCESS)
+ MDBX_val key, data;
+ while (mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT) == MDBX_SUCCESS)
{
- DBCachedContact *cc = m_cache->AddContactToCache(*(MCONTACT*)key.mv_data);
- cc->dbc = *(DBContact*)data.mv_data;
+ DBCachedContact *cc = m_cache->AddContactToCache(*(MCONTACT*)key.iov_base);
+ cc->dbc = *(DBContact*)data.iov_base;
CheckProto(cc, "");
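
Every writer in dbcontacts.cpp keeps the plugin's existing retry idiom — begin a write transaction, put, commit, and on failure grow the map via Remap() before trying again — only the identifiers change. A sketch of that loop with plain libmdbx calls; the plugin's txn_ptr/MDBX_CHECK wrappers are defined outside this diff, so they are replaced here, and the header path is an assumption:

```cpp
#include "mdbx.h"   // vendored under src/mdbx/ by this commit (path assumed)

// Write-with-retry pattern mirroring DeleteContact()/AddContact() above.
static bool put_with_retry(MDBX_env *env, MDBX_dbi dbi, MDBX_val key, MDBX_val data)
{
	for (;;) {
		MDBX_txn *txn = nullptr;
		if (mdbx_txn_begin(env, nullptr, 0, &txn) != MDBX_SUCCESS)
			return false;

		int rc = mdbx_put(txn, dbi, &key, &data, 0);
		if (rc == MDBX_SUCCESS)
			rc = mdbx_txn_commit(txn);
		else
			mdbx_txn_abort(txn);
		if (rc == MDBX_SUCCESS)
			return true;

		// CDbxMdb::Remap() grows the map by 1 MiB before the next attempt.
		MDBX_envinfo ei;
		mdbx_env_info(env, &ei, sizeof(ei));
		if (mdbx_env_set_mapsize(env, ei.mi_mapsize + 0x100000) != MDBX_SUCCESS)
			return false;
	}
}
```
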
diff --git a/plugins/Dbx_mdb/src/dbcrypt.cpp b/plugins/Dbx_mdb/src/dbcrypt.cpp
index d7297c7b3e..3699b97e6a 100644
--- a/plugins/Dbx_mdb/src/dbcrypt.cpp
+++ b/plugins/Dbx_mdb/src/dbcrypt.cpp
@@ -53,13 +53,13 @@ CRYPTO_PROVIDER* CDbxMdb::SelectProvider()
{
txn_ptr txn(m_pMdbEnv);
- MDB_val key = { sizeof(DBKey_Crypto_Provider), DBKey_Crypto_Provider }, value = { mir_strlen(pProv->pszName) + 1, pProv->pszName };
- MDB_CHECK(mdb_put(txn, m_dbCrypto, &key, &value, 0), nullptr);
+ MDBX_val key = { DBKey_Crypto_Provider, sizeof(DBKey_Crypto_Provider) }, value = { pProv->pszName, mir_strlen(pProv->pszName) + 1 };
+ MDBX_CHECK(mdbx_put(txn, m_dbCrypto, &key, &value, 0), nullptr);
- key.mv_size = sizeof(DBKey_Crypto_IsEncrypted); key.mv_data = DBKey_Crypto_IsEncrypted; value.mv_size = sizeof(bool); value.mv_data = &bTotalCrypt;
- MDB_CHECK(mdb_put(txn, m_dbCrypto, &key, &value, 0), nullptr);
+ key.iov_len = sizeof(DBKey_Crypto_IsEncrypted); key.iov_base = DBKey_Crypto_IsEncrypted; value.iov_len = sizeof(bool); value.iov_base = &bTotalCrypt;
+ MDBX_CHECK(mdbx_put(txn, m_dbCrypto, &key, &value, 0), nullptr);
- if (txn.commit() == MDB_SUCCESS)
+ if (txn.commit() == MDBX_SUCCESS)
break;
}
@@ -72,10 +72,10 @@ int CDbxMdb::InitCrypt()
txn_ptr_ro txn(m_txn);
- MDB_val key = { sizeof(DBKey_Crypto_Provider), DBKey_Crypto_Provider }, value;
- if (mdb_get(txn, m_dbCrypto, &key, &value) == MDB_SUCCESS)
+ MDBX_val key = { DBKey_Crypto_Provider, sizeof(DBKey_Crypto_Provider) }, value;
+ if (mdbx_get(txn, m_dbCrypto, &key, &value) == MDBX_SUCCESS)
{
- pProvider = Crypto_GetProvider((const char*)value.mv_data);
+ pProvider = Crypto_GetProvider((const char*)value.iov_base);
if (pProvider == nullptr)
pProvider = SelectProvider();
}
@@ -89,10 +89,10 @@ int CDbxMdb::InitCrypt()
if ((m_crypto = pProvider->pFactory()) == nullptr)
return 3;
- key.mv_size = sizeof(DBKey_Crypto_Key); key.mv_data = DBKey_Crypto_Key;
- if (mdb_get(txn, m_dbCrypto, &key, &value) == MDB_SUCCESS && (value.mv_size == m_crypto->getKeyLength()))
+ key.iov_len = sizeof(DBKey_Crypto_Key); key.iov_base = DBKey_Crypto_Key;
+ if (mdbx_get(txn, m_dbCrypto, &key, &value) == MDBX_SUCCESS && (value.iov_len == m_crypto->getKeyLength()))
{
- if (!m_crypto->setKey((const BYTE*)value.mv_data, value.mv_size))
+ if (!m_crypto->setKey((const BYTE*)value.iov_base, value.iov_len))
{
DlgChangePassParam param = { this };
CEnterPasswordDialog dlg(¶m);
@@ -101,7 +101,7 @@ int CDbxMdb::InitCrypt()
if (-128 != dlg.DoModal())
return 4;
m_crypto->setPassword(pass_ptrA(mir_utf8encodeW(param.newPass)));
- if (m_crypto->setKey((const BYTE*)value.mv_data, value.mv_size))
+ if (m_crypto->setKey((const BYTE*)value.iov_base, value.iov_len))
{
m_bUsesPassword = true;
SecureZeroMemory(¶m, sizeof(param));
@@ -118,10 +118,10 @@ int CDbxMdb::InitCrypt()
StoreKey();
}
- key.mv_size = sizeof(DBKey_Crypto_IsEncrypted); key.mv_data = DBKey_Crypto_IsEncrypted;
+ key.iov_len = sizeof(DBKey_Crypto_IsEncrypted); key.iov_base = DBKey_Crypto_IsEncrypted;
- if (mdb_get(txn, m_dbCrypto, &key, &value) == MDB_SUCCESS)
- m_bEncrypted = *(const bool*)value.mv_data;
+ if (mdbx_get(txn, m_dbCrypto, &key, &value) == MDBX_SUCCESS)
+ m_bEncrypted = *(const bool*)value.iov_base;
else
m_bEncrypted = false;
@@ -138,9 +138,9 @@ void CDbxMdb::StoreKey()
for (;; Remap())
{
txn_ptr txn(m_pMdbEnv);
- MDB_val key = { sizeof(DBKey_Crypto_Key), DBKey_Crypto_Key }, value = { iKeyLength, pKey };
- mdb_put(txn, m_dbCrypto, &key, &value, 0);
- if (txn.commit() == MDB_SUCCESS)
+ MDBX_val key = { DBKey_Crypto_Key, sizeof(DBKey_Crypto_Key) }, value = { pKey, iKeyLength };
+ mdbx_put(txn, m_dbCrypto, &key, &value, 0);
+ if (txn.commit() == MDBX_SUCCESS)
break;
}
SecureZeroMemory(pKey, iKeyLength);
@@ -172,28 +172,28 @@ int CDbxMdb::EnableEncryption(bool bEncrypted)
{
txn_ptr_ro txn(m_txn);
- MDB_stat st;
- mdb_stat(txn, m_dbEvents, &st);
+ MDBX_stat st;
+ mdbx_dbi_stat(txn, m_dbEvents, &st, sizeof(st));
std::vector<MEVENT> lstEvents;
lstEvents.reserve(st.ms_entries);
{
cursor_ptr_ro cursor(m_curEvents);
- MDB_val key, data;
- while (mdb_cursor_get(cursor, &key, &data, MDB_NEXT) == MDB_SUCCESS)
+ MDBX_val key, data;
+ while (mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT) == MDBX_SUCCESS)
{
- const MEVENT hDbEvent = *(const MEVENT*)key.mv_data;
+ const MEVENT hDbEvent = *(const MEVENT*)key.iov_base;
lstEvents.push_back(hDbEvent);
}
}
for (auto it = lstEvents.begin(); it != lstEvents.end(); ++it)
{
MEVENT &hDbEvent = *it;
- MDB_val key = { sizeof(MEVENT), &hDbEvent }, data;
- mdb_get(txn, m_dbEvents, &key, &data);
+ MDBX_val key = { &hDbEvent, sizeof(MEVENT) }, data;
+ mdbx_get(txn, m_dbEvents, &key, &data);
- const DBEvent *dbEvent = (const DBEvent*)data.mv_data;
+ const DBEvent *dbEvent = (const DBEvent*)data.iov_base;
const BYTE *pBlob = (BYTE*)(dbEvent + 1);
if (((dbEvent->flags & DBEF_ENCRYPTED) != 0) != bEncrypted)
@@ -216,17 +216,17 @@ int CDbxMdb::EnableEncryption(bool bEncrypted)
for (;; Remap())
{
txn_ptr txn(m_pMdbEnv);
- data.mv_size = sizeof(DBEvent)+nNewBlob;
- MDB_CHECK(mdb_put(txn, m_dbEvents, &key, &data, MDB_RESERVE), 1);
+ data.iov_len = sizeof(DBEvent)+nNewBlob;
+ MDBX_CHECK(mdbx_put(txn, m_dbEvents, &key, &data, MDBX_RESERVE), 1);
- DBEvent *pNewDBEvent = (DBEvent *)data.mv_data;
+ DBEvent *pNewDBEvent = (DBEvent *)data.iov_base;
*pNewDBEvent = *dbEvent;
pNewDBEvent->cbBlob = nNewBlob;
pNewDBEvent->flags = dwNewFlags;
memcpy(pNewDBEvent + 1, pNewBlob, nNewBlob);
- if (txn.commit() == MDB_SUCCESS)
+ if (txn.commit() == MDBX_SUCCESS)
break;
}
}
@@ -236,9 +236,9 @@ int CDbxMdb::EnableEncryption(bool bEncrypted)
for (;; Remap())
{
txn_ptr txn(m_pMdbEnv);
- MDB_val key = { sizeof(DBKey_Crypto_IsEncrypted), DBKey_Crypto_IsEncrypted }, value = { sizeof(bool), &bEncrypted };
- MDB_CHECK(mdb_put(txn, m_dbCrypto, &key, &value, 0), 1);
- if (txn.commit() == MDB_SUCCESS)
+ MDBX_val key = { DBKey_Crypto_IsEncrypted, sizeof(DBKey_Crypto_IsEncrypted) }, value = { &bEncrypted, sizeof(bool) };
+ MDBX_CHECK(mdbx_put(txn, m_dbCrypto, &key, &value, 0), 1);
+ if (txn.commit() == MDBX_SUCCESS)
break;
}
m_bEncrypted = bEncrypted;
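
One smaller API difference visible above: the statistics and info calls now take an explicit output-structure size, so mdb_stat(txn, dbi, &st) becomes mdbx_dbi_stat(txn, dbi, &st, sizeof(st)), and likewise mdbx_env_info(). A short sketch of the count EnableEncryption() takes before scanning all events, using the same field names:

```cpp
// Number of key/data pairs in a DBI, as queried before reserving lstEvents above.
// The extra sizeof() argument is the only change from LMDB's mdb_stat().
static size_t dbi_entry_count(MDBX_txn *txn, MDBX_dbi dbi)
{
	MDBX_stat st;
	if (mdbx_dbi_stat(txn, dbi, &st, sizeof(st)) != MDBX_SUCCESS)
		return 0;
	return (size_t)st.ms_entries;
}
```
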
diff --git a/plugins/Dbx_mdb/src/dbevents.cpp b/plugins/Dbx_mdb/src/dbevents.cpp
index bd83e1fe03..a29f8330a1 100644
--- a/plugins/Dbx_mdb/src/dbevents.cpp
+++ b/plugins/Dbx_mdb/src/dbevents.cpp
@@ -86,35 +86,35 @@ STDMETHODIMP_(MEVENT) CDbxMdb::AddEvent(MCONTACT contactID, DBEVENTINFO *dbei)
for (Snapshot();; Revert(), Remap()) {
txn_ptr txn(m_pMdbEnv);
- MDB_val key = { sizeof(int), &dwEventId }, data = { sizeof(DBEvent) + dbe.cbBlob, NULL };
- MDB_CHECK(mdb_put(txn, m_dbEvents, &key, &data, MDB_RESERVE), 0);
+ MDBX_val key = { &dwEventId, sizeof(MEVENT) }, data = { NULL, sizeof(DBEvent)+dbe.cbBlob };
+ MDBX_CHECK(mdbx_put(txn, m_dbEvents, &key, &data, MDBX_RESERVE), 0);
- DBEvent *pNewEvent = (DBEvent*)data.mv_data;
+ DBEvent *pNewEvent = (DBEvent*)data.iov_base;
*pNewEvent = dbe;
memcpy(pNewEvent + 1, pBlob, dbe.cbBlob);
// add a sorting key
DBEventSortingKey key2 = { contactID, dwEventId, dbe.timestamp };
- key.mv_size = sizeof(key2); key.mv_data = &key2;
- data.mv_size = 1; data.mv_data = (char*)("");
- MDB_CHECK(mdb_put(txn, m_dbEventsSort, &key, &data, 0), 0);
+ key.iov_len = sizeof(key2); key.iov_base = &key2;
+ data.iov_len = 1; data.iov_base = (char*)("");
+ MDBX_CHECK(mdbx_put(txn, m_dbEventsSort, &key, &data, 0), 0);
cc->Advance(dwEventId, dbe);
- MDB_val keyc = { sizeof(MCONTACT), &contactID }, datac = { sizeof(DBContact), &cc->dbc };
- MDB_CHECK(mdb_put(txn, m_dbContacts, &keyc, &datac, 0), 0);
+ MDBX_val keyc = { &contactID, sizeof(MCONTACT) }, datac = { &cc->dbc, sizeof(DBContact) };
+ MDBX_CHECK(mdbx_put(txn, m_dbContacts, &keyc, &datac, 0), 0);
// insert an event into a sub's history too
if (ccSub != NULL) {
key2.hContact = ccSub->contactID;
- MDB_CHECK(mdb_put(txn, m_dbEventsSort, &key, &data, 0), 0);
+ MDBX_CHECK(mdbx_put(txn, m_dbEventsSort, &key, &data, 0), 0);
ccSub->Advance(dwEventId, dbe);
- datac.mv_data = &ccSub->dbc;
- keyc.mv_data = &ccSub->contactID;
- MDB_CHECK(mdb_put(txn, m_dbContacts, &keyc, &datac, 0), 0);
+ datac.iov_base = &ccSub->dbc;
+ keyc.iov_base = &ccSub->contactID;
+ MDBX_CHECK(mdbx_put(txn, m_dbContacts, &keyc, &datac, 0), 0);
}
- if (txn.commit() == MDB_SUCCESS)
+ if (txn.commit() == MDBX_SUCCESS)
break;
}
@@ -134,10 +134,10 @@ STDMETHODIMP_(BOOL) CDbxMdb::DeleteEvent(MCONTACT contactID, MEVENT hDbEvent)
DBEvent dbe;
{
txn_ptr_ro txn(m_txn);
- MDB_val key = { sizeof(MEVENT), &hDbEvent }, data;
- if (mdb_get(txn, m_dbEvents, &key, &data) != MDB_SUCCESS)
+ MDBX_val key = { &hDbEvent, sizeof(MEVENT) }, data;
+ if (mdbx_get(txn, m_dbEvents, &key, &data) != MDBX_SUCCESS)
return 1;
- dbe = *(DBEvent*)data.mv_data;
+ dbe = *(DBEvent*)data.iov_base;
}
if (contactID != dbe.contactID)
@@ -153,40 +153,40 @@ STDMETHODIMP_(BOOL) CDbxMdb::DeleteEvent(MCONTACT contactID, MEVENT hDbEvent)
DBEventSortingKey key2 = { contactID, hDbEvent, dbe.timestamp };
txn_ptr txn(m_pMdbEnv);
- MDB_val key = { sizeof(key2), &key2 }, data;
+ MDBX_val key = { &key2, sizeof(key2) }, data;
- MDB_CHECK(mdb_del(txn, m_dbEventsSort, &key, &data), 1)
+ MDBX_CHECK(mdbx_del(txn, m_dbEventsSort, &key, &data), 1)
{
- key.mv_size = sizeof(MCONTACT); key.mv_data = &contactID;
+ key.iov_len = sizeof(MCONTACT); key.iov_base = &contactID;
cc->dbc.dwEventCount--;
if (cc->dbc.evFirstUnread == hDbEvent)
FindNextUnread(txn, cc, key2);
- data.mv_size = sizeof(DBContact); data.mv_data = &cc->dbc;
- MDB_CHECK(mdb_put(txn, m_dbContacts, &key, &data, 0), 1);
+ data.iov_len = sizeof(DBContact); data.iov_base = &cc->dbc;
+ MDBX_CHECK(mdbx_put(txn, m_dbContacts, &key, &data, 0), 1);
}
if (cc2)
{
key2.hContact = dbe.contactID;
- MDB_CHECK(mdb_del(txn, m_dbEventsSort, &key, &data), 1);
+ MDBX_CHECK(mdbx_del(txn, m_dbEventsSort, &key, &data), 1);
- key.mv_size = sizeof(MCONTACT); key.mv_data = &contactID;
+ key.iov_len = sizeof(MCONTACT); key.iov_base = &contactID;
cc2->dbc.dwEventCount--;
if (cc2->dbc.evFirstUnread == hDbEvent)
FindNextUnread(txn, cc2, key2);
- data.mv_size = sizeof(DBContact); data.mv_data = &cc2->dbc;
- MDB_CHECK(mdb_put(txn, m_dbContacts, &key, &data, 0), 1);
+ data.iov_len = sizeof(DBContact); data.iov_base = &cc2->dbc;
+ MDBX_CHECK(mdbx_put(txn, m_dbContacts, &key, &data, 0), 1);
}
// remove a event
- key.mv_size = sizeof(MEVENT); key.mv_data = &hDbEvent;
- MDB_CHECK(mdb_del(txn, m_dbEvents, &key, &data), 1);
+ key.iov_len = sizeof(MEVENT); key.iov_base = &hDbEvent;
+ MDBX_CHECK(mdbx_del(txn, m_dbEvents, &key, &data), 1);
- if (txn.commit() == MDB_SUCCESS)
+ if (txn.commit() == MDBX_SUCCESS)
break;
}
@@ -199,10 +199,10 @@ STDMETHODIMP_(LONG) CDbxMdb::GetBlobSize(MEVENT hDbEvent)
{
txn_ptr_ro txn(m_txn);
- MDB_val key = { sizeof(MEVENT), &hDbEvent }, data;
- if (mdb_get(txn, m_dbEvents, &key, &data) != MDB_SUCCESS)
+ MDBX_val key = { &hDbEvent, sizeof(MEVENT) }, data;
+ if (mdbx_get(txn, m_dbEvents, &key, &data) != MDBX_SUCCESS)
return -1;
- return ((const DBEvent*)data.mv_data)->cbBlob;
+ return ((const DBEvent*)data.iov_base)->cbBlob;
}
STDMETHODIMP_(BOOL) CDbxMdb::GetEvent(MEVENT hDbEvent, DBEVENTINFO *dbei)
@@ -215,11 +215,11 @@ STDMETHODIMP_(BOOL) CDbxMdb::GetEvent(MEVENT hDbEvent, DBEVENTINFO *dbei)
txn_ptr_ro txn(m_txn);
- MDB_val key = { sizeof(MEVENT), &hDbEvent }, data;
- if (mdb_get(txn, m_dbEvents, &key, &data) != MDB_SUCCESS)
+ MDBX_val key = { &hDbEvent, sizeof(MEVENT) }, data;
+ if (mdbx_get(txn, m_dbEvents, &key, &data) != MDBX_SUCCESS)
return 1;
- const DBEvent *dbe = (const DBEvent*)data.mv_data;
+ const DBEvent *dbe = (const DBEvent*)data.iov_base;
dbei->szModule = GetModuleName(dbe->iModuleId);
dbei->timestamp = dbe->timestamp;
@@ -229,7 +229,7 @@ STDMETHODIMP_(BOOL) CDbxMdb::GetEvent(MEVENT hDbEvent, DBEVENTINFO *dbei)
dbei->cbBlob = dbe->cbBlob;
if (bytesToCopy && dbei->pBlob)
{
- BYTE *pSrc = (BYTE*)data.mv_data + sizeof(DBEvent);
+ BYTE *pSrc = (BYTE*)data.iov_base + sizeof(DBEvent);
if (dbe->flags & DBEF_ENCRYPTED)
{
dbei->flags &= ~DBEF_ENCRYPTED;
@@ -252,11 +252,11 @@ void CDbxMdb::FindNextUnread(const txn_ptr &txn, DBCachedContact *cc, DBEventSor
{
cursor_ptr cursor(txn, m_dbEventsSort);
- MDB_val key = { sizeof(key2), &key2 }, data;
+ MDBX_val key = { &key2, sizeof(key2) }, data;
- for (int res = mdb_cursor_get(cursor, &key, &data, MDB_SET); res == MDB_SUCCESS; res = mdb_cursor_get(cursor, &key, &data, MDB_NEXT))
+ for (int res = mdbx_cursor_get(cursor, &key, &data, MDBX_SET); res == MDBX_SUCCESS; res = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT))
{
- const DBEvent *dbe = (const DBEvent*)data.mv_data;
+ const DBEvent *dbe = (const DBEvent*)data.iov_base;
if (dbe->contactID != cc->contactID)
break;
if (!dbe->markedRead()) {
@@ -283,29 +283,29 @@ STDMETHODIMP_(BOOL) CDbxMdb::MarkEventRead(MCONTACT contactID, MEVENT hDbEvent)
{
txn_ptr txn(m_pMdbEnv);
- MDB_val key = { sizeof(MEVENT), &hDbEvent }, data;
- MDB_CHECK(mdb_get(txn, m_dbEvents, &key, &data), -1);
+ MDBX_val key = { &hDbEvent, sizeof(MEVENT) }, data;
+ MDBX_CHECK(mdbx_get(txn, m_dbEvents, &key, &data), -1);
- const DBEvent *cdbe = (const DBEvent*)data.mv_data;
+ const DBEvent *cdbe = (const DBEvent*)data.iov_base;
if (cdbe->markedRead())
return cdbe->flags;
DBEventSortingKey keyVal = { contactID, hDbEvent, cdbe->timestamp };
- MDB_CHECK(mdb_put(txn, m_dbEvents, &key, &data, MDB_RESERVE), -1);
+ MDBX_CHECK(mdbx_put(txn, m_dbEvents, &key, &data, MDBX_RESERVE), -1);
- DBEvent *pNewEvent = (DBEvent*)data.mv_data;
+ DBEvent *pNewEvent = (DBEvent*)data.iov_base;
*pNewEvent = *cdbe;
wRetVal = (pNewEvent->flags |= DBEF_READ);
FindNextUnread(txn, cc, keyVal);
- key.mv_size = sizeof(MCONTACT); key.mv_data = &contactID;
- data.mv_data = &cc->dbc; data.mv_size = sizeof(cc->dbc);
- MDB_CHECK(mdb_put(txn, m_dbContacts, &key, &data, 0), -1);
+ key.iov_len = sizeof(MCONTACT); key.iov_base = &contactID;
+ data.iov_base = &cc->dbc; data.iov_len = sizeof(cc->dbc);
+ MDBX_CHECK(mdbx_put(txn, m_dbContacts, &key, &data, 0), -1);
- if (txn.commit() == MDB_SUCCESS)
+ if (txn.commit() == MDBX_SUCCESS)
break;
}
@@ -320,11 +320,11 @@ STDMETHODIMP_(MCONTACT) CDbxMdb::GetEventContact(MEVENT hDbEvent)
txn_ptr_ro txn(m_txn);
- MDB_val key = { sizeof(MEVENT), &hDbEvent }, data;
- if (mdb_get(txn, m_dbEvents, &key, &data) != MDB_SUCCESS)
+ MDBX_val key = { &hDbEvent, sizeof(MEVENT) }, data;
+ if (mdbx_get(txn, m_dbEvents, &key, &data) != MDBX_SUCCESS)
return INVALID_CONTACT_ID;
- return ((const DBEvent*)data.mv_data)->contactID;
+ return ((const DBEvent*)data.iov_base)->contactID;
}
thread_local uint64_t t_tsLast = 0;
@@ -333,15 +333,15 @@ thread_local MEVENT t_evLast = 0;
STDMETHODIMP_(MEVENT) CDbxMdb::FindFirstEvent(MCONTACT contactID)
{
DBEventSortingKey keyVal = { contactID, 0, 0 };
- MDB_val key = { sizeof(keyVal), &keyVal }, data;
+ MDBX_val key = { &keyVal, sizeof(keyVal) }, data;
txn_ptr_ro txn(m_txn);
cursor_ptr_ro cursor(m_curEventsSort);
- if (mdb_cursor_get(cursor, &key, &data, MDB_SET_RANGE) != MDB_SUCCESS)
+ if (mdbx_cursor_get(cursor, &key, &data, MDBX_SET_RANGE) != MDBX_SUCCESS)
return t_evLast = 0;
- const DBEventSortingKey *pKey = (const DBEventSortingKey*)key.mv_data;
+ const DBEventSortingKey *pKey = (const DBEventSortingKey*)key.iov_base;
t_tsLast = pKey->ts;
return t_evLast = (pKey->hContact == contactID) ? pKey->hEvent : 0;
}
@@ -355,23 +355,23 @@ STDMETHODIMP_(MEVENT) CDbxMdb::FindFirstUnreadEvent(MCONTACT contactID)
STDMETHODIMP_(MEVENT) CDbxMdb::FindLastEvent(MCONTACT contactID)
{
DBEventSortingKey keyVal = { contactID, 0xFFFFFFFF, 0xFFFFFFFFFFFFFFFF };
- MDB_val key = { sizeof(keyVal), &keyVal }, data;
+ MDBX_val key = { &keyVal, sizeof(keyVal) }, data;
txn_ptr_ro txn(m_txn);
cursor_ptr_ro cursor(m_curEventsSort);
- if (mdb_cursor_get(cursor, &key, &data, MDB_SET_RANGE) != MDB_SUCCESS)
+ if (mdbx_cursor_get(cursor, &key, &data, MDBX_SET_RANGE) != MDBX_SUCCESS)
{
- if (mdb_cursor_get(cursor, &key, &data, MDB_LAST) != MDB_SUCCESS)
+ if (mdbx_cursor_get(cursor, &key, &data, MDBX_LAST) != MDBX_SUCCESS)
return t_evLast = 0;
}
else
{
- if (mdb_cursor_get(cursor, &key, &data, MDB_PREV) != MDB_SUCCESS)
+ if (mdbx_cursor_get(cursor, &key, &data, MDBX_PREV) != MDBX_SUCCESS)
return t_evLast = 0;
}
- const DBEventSortingKey *pKey = (const DBEventSortingKey*)key.mv_data;
+ const DBEventSortingKey *pKey = (const DBEventSortingKey*)key.iov_base;
t_tsLast = pKey->ts;
return t_evLast = (pKey->hContact == contactID) ? pKey->hEvent : 0;
}
@@ -385,23 +385,23 @@ STDMETHODIMP_(MEVENT) CDbxMdb::FindNextEvent(MCONTACT contactID, MEVENT hDbEvent
if (t_evLast != hDbEvent)
{
- MDB_val key = { sizeof(MEVENT), &hDbEvent }, data;
- if (mdb_get(txn, m_dbEvents, &key, &data) != MDB_SUCCESS)
+ MDBX_val key = { &hDbEvent, sizeof(MEVENT) }, data;
+ if (mdbx_get(txn, m_dbEvents, &key, &data) != MDBX_SUCCESS)
return 0;
- t_tsLast = ((DBEvent*)data.mv_data)->timestamp;
+ t_tsLast = ((DBEvent*)data.iov_base)->timestamp;
}
DBEventSortingKey keyVal = { contactID, hDbEvent, t_tsLast };
- MDB_val key = { sizeof(keyVal), &keyVal }, data;
+ MDBX_val key = { &keyVal, sizeof(keyVal) }, data;
cursor_ptr_ro cursor(m_curEventsSort);
- if (mdb_cursor_get(cursor, &key, &data, MDB_SET) != MDB_SUCCESS)
+ if (mdbx_cursor_get(cursor, &key, &data, MDBX_SET) != MDBX_SUCCESS)
return t_evLast = 0;
- if (mdb_cursor_get(cursor, &key, &data, MDB_NEXT) != MDB_SUCCESS)
+ if (mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT) != MDBX_SUCCESS)
return t_evLast = 0;
- const DBEventSortingKey *pKey = (const DBEventSortingKey*)key.mv_data;
+ const DBEventSortingKey *pKey = (const DBEventSortingKey*)key.iov_base;
t_tsLast = pKey->ts;
return t_evLast = (pKey->hContact == contactID) ? pKey->hEvent : 0;
}
@@ -411,29 +411,29 @@ STDMETHODIMP_(MEVENT) CDbxMdb::FindPrevEvent(MCONTACT contactID, MEVENT hDbEvent
if (hDbEvent == 0)
return t_evLast = 0;
- MDB_val data;
+ MDBX_val data;
txn_ptr_ro txn(m_txn);
if (t_evLast != hDbEvent)
{
- MDB_val key = { sizeof(MEVENT), &hDbEvent };
- if (mdb_get(txn, m_dbEvents, &key, &data) != MDB_SUCCESS)
+ MDBX_val key = { &hDbEvent, sizeof(MEVENT) };
+ if (mdbx_get(txn, m_dbEvents, &key, &data) != MDBX_SUCCESS)
return 0;
- t_tsLast = ((DBEvent*)data.mv_data)->timestamp;
+ t_tsLast = ((DBEvent*)data.iov_base)->timestamp;
}
DBEventSortingKey keyVal = { contactID, hDbEvent, t_tsLast };
- MDB_val key = { sizeof(keyVal), &keyVal };
+ MDBX_val key = { &keyVal, sizeof(keyVal) };
cursor_ptr_ro cursor(m_curEventsSort);
- if (mdb_cursor_get(cursor, &key, &data, MDB_SET) != MDB_SUCCESS)
+ if (mdbx_cursor_get(cursor, &key, &data, MDBX_SET) != MDBX_SUCCESS)
return t_evLast = 0;
- if (mdb_cursor_get(cursor, &key, &data, MDB_PREV) != MDB_SUCCESS)
+ if (mdbx_cursor_get(cursor, &key, &data, MDBX_PREV) != MDBX_SUCCESS)
return t_evLast = 0;
- const DBEventSortingKey *pKey = (const DBEventSortingKey*)key.mv_data;
+ const DBEventSortingKey *pKey = (const DBEventSortingKey*)key.iov_base;
t_tsLast = pKey->ts;
return t_evLast = (pKey->hContact == contactID) ? pKey->hEvent : 0;
}
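
The history lookups in dbevents.cpp keep the LMDB idiom of seeding a composite sorting key and walking a cursor range; only the operation names change (MDB_SET_RANGE → MDBX_SET_RANGE and so on). A sketch of that scan with plain calls — the DBEventSortingKey layout and the Miranda handle typedefs are approximated here from the surrounding code, not taken verbatim:

```cpp
#include <stdint.h>
#include "mdbx.h"

typedef uint32_t MCONTACT;   // Miranda handle typedefs, approximated
typedef uint32_t MEVENT;
struct DBEventSortingKey { MCONTACT hContact; MEVENT hEvent; uint64_t ts; };  // layout assumed from dbintf.h below

// Walk all events of one contact in the sorting index, as GatherContactHistory()
// and FindFirstEvent() do above.
static void enum_contact_events(MDBX_cursor *cursor, MCONTACT hContact)
{
	DBEventSortingKey keyVal = { hContact, 0, 0 };
	MDBX_val key = { &keyVal, sizeof(keyVal) }, data;

	for (int rc = mdbx_cursor_get(cursor, &key, &data, MDBX_SET_RANGE);
	     rc == MDBX_SUCCESS;
	     rc = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT))
	{
		const DBEventSortingKey *pKey = (const DBEventSortingKey*)key.iov_base;
		if (pKey->hContact != hContact)
			break;                               // left this contact's key range
		// pKey->hEvent / pKey->ts identify the event here
	}
}
```
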
diff --git a/plugins/Dbx_mdb/src/dbintf.cpp b/plugins/Dbx_mdb/src/dbintf.cpp
index 57f4046c27..2d2d01d7cd 100644
--- a/plugins/Dbx_mdb/src/dbintf.cpp
+++ b/plugins/Dbx_mdb/src/dbintf.cpp
@@ -33,17 +33,17 @@ CDbxMdb::CDbxMdb(const TCHAR *tszFileName, int iMode) :
m_tszProfileName = mir_wstrdup(tszFileName);
InitDbInstance(this);
- mdb_env_create(&m_pMdbEnv);
- mdb_env_set_maxdbs(m_pMdbEnv, 10);
- mdb_env_set_userctx(m_pMdbEnv, this);
-// mdb_env_set_assert(m_pMdbEnv, LMDB_FailAssert);
+ mdbx_env_create(&m_pMdbEnv);
+ mdbx_env_set_maxdbs(m_pMdbEnv, 10);
+ mdbx_env_set_userctx(m_pMdbEnv, this);
+// mdbx_env_set_assert(m_pMdbEnv, LMDBX_FailAssert);
m_codePage = Langpack_GetDefaultCodePage();
}
CDbxMdb::~CDbxMdb()
{
- mdb_env_close(m_pMdbEnv);
+ mdbx_env_close(m_pMdbEnv);
DestroyServiceFunction(hService);
UnhookEvent(hHook);
@@ -66,31 +66,28 @@ CDbxMdb::~CDbxMdb()
int CDbxMdb::Load(bool bSkipInit)
{
- if (Map() != MDB_SUCCESS)
+ if (Map() != MDBX_SUCCESS)
return EGROKPRF_CANTREAD;
if (!bSkipInit) {
txn_ptr trnlck(m_pMdbEnv);
- unsigned int defFlags = MDB_CREATE;
+ unsigned int defFlags = MDBX_CREATE;
- mdb_dbi_open(trnlck, "global", defFlags | MDB_INTEGERKEY, &m_dbGlobal);
- mdb_dbi_open(trnlck, "crypto", defFlags, &m_dbCrypto);
- mdb_dbi_open(trnlck, "contacts", defFlags | MDB_INTEGERKEY, &m_dbContacts);
- mdb_dbi_open(trnlck, "modules", defFlags | MDB_INTEGERKEY, &m_dbModules);
- mdb_dbi_open(trnlck, "events", defFlags | MDB_INTEGERKEY, &m_dbEvents);
+ mdbx_dbi_open(trnlck, "global", defFlags | MDBX_INTEGERKEY, &m_dbGlobal);
+ mdbx_dbi_open(trnlck, "crypto", defFlags, &m_dbCrypto);
+ mdbx_dbi_open(trnlck, "contacts", defFlags | MDBX_INTEGERKEY, &m_dbContacts);
+ mdbx_dbi_open(trnlck, "modules", defFlags | MDBX_INTEGERKEY, &m_dbModules);
+ mdbx_dbi_open(trnlck, "events", defFlags | MDBX_INTEGERKEY, &m_dbEvents);
- mdb_dbi_open(trnlck, "eventsrt", defFlags, &m_dbEventsSort);
- mdb_set_compare(trnlck, m_dbEventsSort, DBEventSortingKey::Compare);
-
- mdb_dbi_open(trnlck, "settings", defFlags, &m_dbSettings);
- mdb_set_compare(trnlck, m_dbSettings, DBSettingKey::Compare);
+ mdbx_dbi_open_ex(trnlck, "eventsrt", defFlags, &m_dbEventsSort, DBEventSortingKey::Compare, nullptr);
+ mdbx_dbi_open_ex(trnlck, "settings", defFlags, &m_dbSettings, DBSettingKey::Compare, nullptr);
uint32_t keyVal = 1;
- MDB_val key = { sizeof(keyVal), &keyVal }, data;
- if (mdb_get(trnlck, m_dbGlobal, &key, &data) == MDB_SUCCESS)
+ MDBX_val key = { &keyVal, sizeof(keyVal) }, data;
+ if (mdbx_get(trnlck, m_dbGlobal, &key, &data) == MDBX_SUCCESS)
{
- const DBHeader *hdr = (const DBHeader*)data.mv_data;
+ const DBHeader *hdr = (const DBHeader*)data.iov_base;
if (hdr->dwSignature != DBHEADER_SIGNATURE)
return EGROKPRF_DAMAGED;
if (hdr->dwVersion != DBHEADER_VERSION)
@@ -102,38 +99,38 @@ int CDbxMdb::Load(bool bSkipInit)
{
m_header.dwSignature = DBHEADER_SIGNATURE;
m_header.dwVersion = DBHEADER_VERSION;
- data.mv_data = &m_header; data.mv_size = sizeof(m_header);
- mdb_put(trnlck, m_dbGlobal, &key, &data, 0);
+ data.iov_base = &m_header; data.iov_len = sizeof(m_header);
+ mdbx_put(trnlck, m_dbGlobal, &key, &data, 0);
keyVal = 0;
DBContact dbc = { 0, 0, 0 };
- data.mv_data = &dbc; data.mv_size = sizeof(dbc);
- mdb_put(trnlck, m_dbContacts, &key, &data, 0);
+ data.iov_base = &dbc; data.iov_len = sizeof(dbc);
+ mdbx_put(trnlck, m_dbContacts, &key, &data, 0);
}
trnlck.commit();
{
- MDB_val key, val;
+ MDBX_val key, val;
- mdb_txn_begin(m_pMdbEnv, nullptr, MDB_RDONLY, &m_txn);
+ mdbx_txn_begin(m_pMdbEnv, nullptr, MDBX_RDONLY, &m_txn);
- mdb_cursor_open(m_txn, m_dbEvents, &m_curEvents);
- if (mdb_cursor_get(m_curEvents, &key, &val, MDB_LAST) == MDB_SUCCESS)
- m_dwMaxEventId = *(MEVENT*)key.mv_data;
+ mdbx_cursor_open(m_txn, m_dbEvents, &m_curEvents);
+ if (mdbx_cursor_get(m_curEvents, &key, &val, MDBX_LAST) == MDBX_SUCCESS)
+ m_dwMaxEventId = *(MEVENT*)key.iov_base;
- mdb_cursor_open(m_txn, m_dbEventsSort, &m_curEventsSort);
- mdb_cursor_open(m_txn, m_dbSettings, &m_curSettings);
- mdb_cursor_open(m_txn, m_dbModules, &m_curModules);
+ mdbx_cursor_open(m_txn, m_dbEventsSort, &m_curEventsSort);
+ mdbx_cursor_open(m_txn, m_dbSettings, &m_curSettings);
+ mdbx_cursor_open(m_txn, m_dbModules, &m_curModules);
- mdb_cursor_open(m_txn, m_dbContacts, &m_curContacts);
- if (mdb_cursor_get(m_curContacts, &key, &val, MDB_LAST) == MDB_SUCCESS)
- m_maxContactId = *(MCONTACT*)key.mv_data;
+ mdbx_cursor_open(m_txn, m_dbContacts, &m_curContacts);
+ if (mdbx_cursor_get(m_curContacts, &key, &val, MDBX_LAST) == MDBX_SUCCESS)
+ m_maxContactId = *(MCONTACT*)key.iov_base;
- MDB_stat st;
- mdb_stat(m_txn, m_dbContacts, &st);
+ MDBX_stat st;
+ mdbx_dbi_stat(m_txn, m_dbContacts, &st, sizeof(st));
m_contactCount = st.ms_entries;
- mdb_txn_reset(m_txn);
+ mdbx_txn_reset(m_txn);
}
@@ -161,11 +158,11 @@ int CDbxMdb::Load(bool bSkipInit)
int CDbxMdb::Create(void)
{
- return (Map() == MDB_SUCCESS) ? 0 : EGROKPRF_CANTREAD;
+ return (Map() == MDBX_SUCCESS) ? 0 : EGROKPRF_CANTREAD;
}
-size_t iDefHeaderOffset = 16;
-BYTE bDefHeader[] = { 0xDE, 0xC0, 0xEF, 0xBE };
+size_t iDefHeaderOffset = 0;
+BYTE bDefHeader[] = { 0 };
int CDbxMdb::Check(void)
{
@@ -196,17 +193,20 @@ STDMETHODIMP_(void) CDbxMdb::SetCacheSafetyMode(BOOL bIsSet)
int CDbxMdb::Map()
{
- unsigned int mode = MDB_NOSYNC | MDB_NOSUBDIR | /*MDB_NOLOCK |*/ MDB_NOTLS | MDB_WRITEMAP;
+ unsigned int mode = MDBX_NOSUBDIR | MDBX_NOTLS | MDBX_MAPASYNC | MDBX_WRITEMAP | MDBX_NOSYNC;
if (m_bReadOnly)
- mode |= MDB_RDONLY;
- return mdb_env_open(m_pMdbEnv, _T2A(m_tszProfileName), mode, 0664);
+ mode |= MDBX_RDONLY;
+ mdbx_env_open(m_pMdbEnv, _T2A(m_tszProfileName), mode, 0664);
+ mdbx_env_set_mapsize(m_pMdbEnv, 0x1000000);
+ return MDBX_SUCCESS;
+
}
bool CDbxMdb::Remap()
{
- MDB_envinfo ei;
- mdb_env_info(m_pMdbEnv, &ei);
- return mdb_env_set_mapsize(m_pMdbEnv, ei.me_mapsize + 0x100000) == MDB_SUCCESS;
+ MDBX_envinfo ei;
+ mdbx_env_info(m_pMdbEnv, &ei, sizeof(ei));
+ return mdbx_env_set_mapsize(m_pMdbEnv, ei.mi_mapsize + 0x100000) == MDBX_SUCCESS;
}
@@ -233,12 +233,12 @@ EXTERN_C void __cdecl dbpanic(void *)
}
-EXTERN_C void LMDB_FailAssert(MDB_env *env, const char *text)
+EXTERN_C void LMDBX_FailAssert(MDBX_env *env, const char *text)
{
- ((CDbxMdb*)mdb_env_get_userctx(env))->DatabaseCorruption(_A2T(text));
+ ((CDbxMdb*)mdbx_env_get_userctx(env))->DatabaseCorruption(_A2T(text));
}
-EXTERN_C void LMDB_Log(const char *fmt, ...)
+EXTERN_C void LMDBX_Log(const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
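
Two notable changes in dbintf.cpp above: mdbx_dbi_open_ex() accepts the key comparator directly, replacing the mdb_dbi_open() + mdb_set_compare() pair, and Map() now opens the environment with MDBX flag names and then sets an explicit map size. A sketch of that bring-up under the same assumptions; the comparator-pointer typedef and the header path are mine, not confirmed library names:

```cpp
#include "mdbx.h"

typedef int (*keycmp_fn)(const MDBX_val*, const MDBX_val*);   // shape expected by mdbx_dbi_open_ex here

// Environment and DBI bring-up mirroring CDbxMdb::Map()/Load() above.
static int open_profile(MDBX_env **penv, const char *path, MDBX_dbi *pdbi, keycmp_fn cmp)
{
	mdbx_env_create(penv);
	mdbx_env_set_maxdbs(*penv, 10);

	unsigned flags = MDBX_NOSUBDIR | MDBX_NOTLS | MDBX_MAPASYNC | MDBX_WRITEMAP | MDBX_NOSYNC;
	int rc = mdbx_env_open(*penv, path, flags, 0664);
	if (rc != MDBX_SUCCESS)
		return rc;
	mdbx_env_set_mapsize(*penv, 0x1000000);        // 16 MiB initial map, as in Map()

	MDBX_txn *txn = nullptr;
	mdbx_txn_begin(*penv, nullptr, 0, &txn);
	// Comparator is supplied at open time; no separate mdb_set_compare() step.
	rc = mdbx_dbi_open_ex(txn, "settings", MDBX_CREATE, pdbi, cmp, nullptr);
	mdbx_txn_commit(txn);
	return rc;
}
```
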
diff --git a/plugins/Dbx_mdb/src/dbintf.h b/plugins/Dbx_mdb/src/dbintf.h
index 4eff8532a6..d97f11855c 100644
--- a/plugins/Dbx_mdb/src/dbintf.h
+++ b/plugins/Dbx_mdb/src/dbintf.h
@@ -74,7 +74,7 @@ struct DBEventSortingKey
MEVENT hEvent;
uint64_t ts;
- static int Compare(const MDB_val* a, const MDB_val* b);
+ static int Compare(const MDBX_val* a, const MDBX_val* b);
};
struct DBSettingKey
@@ -83,7 +83,7 @@ struct DBSettingKey
uint32_t dwModuleId;
char szSettingName[];
- static int Compare(const MDB_val*, const MDB_val*);
+ static int Compare(const MDBX_val*, const MDBX_val*);
};
@@ -214,10 +214,10 @@ public:
MICryptoEngine *m_crypto;
protected:
- MDB_env *m_pMdbEnv;
- CMDB_txn_ro m_txn;
+ MDBX_env *m_pMdbEnv;
+ CMDBX_txn_ro m_txn;
- MDB_dbi m_dbGlobal;
+ MDBX_dbi m_dbGlobal;
DBHeader m_header;
HANDLE hSettingChangeEvent, hContactDeletedEvent, hContactAddedEvent, hEventMarkedRead;
@@ -227,8 +227,8 @@ protected:
////////////////////////////////////////////////////////////////////////////
// settings
- MDB_dbi m_dbSettings;
- MDB_cursor *m_curSettings;
+ MDBX_dbi m_dbSettings;
+ MDBX_cursor *m_curSettings;
int m_codePage;
HANDLE hService, hHook;
@@ -238,8 +238,8 @@ protected:
////////////////////////////////////////////////////////////////////////////
// contacts
- MDB_dbi m_dbContacts;
- MDB_cursor *m_curContacts;
+ MDBX_dbi m_dbContacts;
+ MDBX_cursor *m_curContacts;
uint32_t m_contactCount;
MCONTACT m_maxContactId;
@@ -249,8 +249,8 @@ protected:
////////////////////////////////////////////////////////////////////////////
// events
- MDB_dbi m_dbEvents, m_dbEventsSort;
- MDB_cursor *m_curEvents, *m_curEventsSort;
+ MDBX_dbi m_dbEvents, m_dbEventsSort;
+ MDBX_cursor *m_curEvents, *m_curEventsSort;
MEVENT m_dwMaxEventId;
HANDLE hEventAddedEvent, hEventDeletedEvent, hEventFilterAddedEvent;
@@ -260,8 +260,8 @@ protected:
////////////////////////////////////////////////////////////////////////////
// modules
- MDB_dbi m_dbModules;
- MDB_cursor *m_curModules;
+ MDBX_dbi m_dbModules;
+ MDBX_cursor *m_curModules;
std::map<uint32_t, std::string> m_Modules;
@@ -279,7 +279,7 @@ protected:
////////////////////////////////////////////////////////////////////////////
// encryption
- MDB_dbi m_dbCrypto;
+ MDBX_dbi m_dbCrypto;
int InitCrypt(void);
CRYPTO_PROVIDER* SelectProvider();
diff --git a/plugins/Dbx_mdb/src/dbmodulechain.cpp b/plugins/Dbx_mdb/src/dbmodulechain.cpp
index 59263de623..66e7cf1509 100644
--- a/plugins/Dbx_mdb/src/dbmodulechain.cpp
+++ b/plugins/Dbx_mdb/src/dbmodulechain.cpp
@@ -28,11 +28,11 @@ int CDbxMdb::InitModules()
txn_ptr_ro trnlck(m_txn);
cursor_ptr_ro cursor(m_curModules);
- MDB_val key, data;
- while (mdb_cursor_get(cursor, &key, &data, MDB_NEXT) == MDB_SUCCESS)
+ MDBX_val key, data;
+ while (mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT) == MDBX_SUCCESS)
{
- uint32_t iMod = *(uint32_t*)key.mv_data;
- const char *szMod = (const char*)data.mv_data;
+ uint32_t iMod = *(uint32_t*)key.iov_base;
+ const char *szMod = (const char*)data.iov_base;
m_Modules[iMod] = szMod;
}
return 0;
@@ -44,12 +44,12 @@ uint32_t CDbxMdb::GetModuleID(const char *szName)
uint32_t iHash = mir_hashstr(szName);
if (m_Modules.find(iHash) == m_Modules.end())
{
- MDB_val key = { sizeof(iHash), &iHash }, data = { strlen(szName) + 1, (void*)szName };
+ MDBX_val key = { &iHash, sizeof(iHash) }, data = { (void*)szName, strlen(szName) + 1 };
for (;; Remap()) {
txn_ptr txn(m_pMdbEnv);
- MDB_CHECK(mdb_put(txn, m_dbModules, &key, &data, 0), -1);
- if (txn.commit() == MDB_SUCCESS)
+ MDBX_CHECK(mdbx_put(txn, m_dbModules, &key, &data, 0), -1);
+ if (txn.commit() == MDBX_SUCCESS)
break;
}
m_Modules[iHash] = szName;
diff --git a/plugins/Dbx_mdb/src/dbsettings.cpp b/plugins/Dbx_mdb/src/dbsettings.cpp
index 890525e53c..711b0b45a3 100644
--- a/plugins/Dbx_mdb/src/dbsettings.cpp
+++ b/plugins/Dbx_mdb/src/dbsettings.cpp
@@ -111,8 +111,8 @@ LBL_Seek:
memcpy(&keyVal->szSettingName, szSetting, settingNameLen + 1);
- MDB_val key = { sizeof(DBSettingKey) + settingNameLen + 1, keyVal }, data;
- if (mdb_get(trnlck, m_dbSettings, &key, &data) != MDB_SUCCESS)
+ MDBX_val key = { keyVal, sizeof(DBSettingKey) + settingNameLen + 1 }, data;
+ if (mdbx_get(trnlck, m_dbSettings, &key, &data) != MDBX_SUCCESS)
{
// try to get the missing mc setting from the active sub
if (cc && cc->IsMeta() && ValidLookupName(szModule, szSetting))
@@ -129,7 +129,7 @@ LBL_Seek:
return 1;
}
- const BYTE *pBlob = (const BYTE*)data.mv_data;
+ const BYTE *pBlob = (const BYTE*)data.iov_base;
if (isStatic && (pBlob[0] & DBVTF_VARIABLELENGTH) && VLT(dbv->type) != VLT(pBlob[0]))
return 1;
@@ -460,27 +460,27 @@ STDMETHODIMP_(BOOL) CDbxMdb::WriteContactSetting(MCONTACT contactID, DBCONTACTWR
memcpy(&keyVal->szSettingName, dbcws->szSetting, settingNameLen + 1);
- MDB_val key = { sizeof(DBSettingKey) + settingNameLen + 1, keyVal }, data;
+ MDBX_val key = { keyVal, sizeof(DBSettingKey) + settingNameLen + 1 }, data;
switch (dbcwWork.value.type) {
- case DBVT_BYTE: data.mv_size = 2; break;
- case DBVT_WORD: data.mv_size = 3; break;
- case DBVT_DWORD: data.mv_size = 5; break;
+ case DBVT_BYTE: data.iov_len = 2; break;
+ case DBVT_WORD: data.iov_len = 3; break;
+ case DBVT_DWORD: data.iov_len = 5; break;
case DBVT_ASCIIZ:
case DBVT_UTF8:
- data.mv_size = 3 + dbcwWork.value.cchVal; break;
+ data.iov_len = 3 + dbcwWork.value.cchVal; break;
case DBVT_BLOB:
case DBVT_ENCRYPTED:
- data.mv_size = 3 + dbcwWork.value.cpbVal; break;
+ data.iov_len = 3 + dbcwWork.value.cpbVal; break;
}
for (;; Remap()) {
txn_ptr trnlck(m_pMdbEnv);
- MDB_CHECK(mdb_put(trnlck, m_dbSettings, &key, &data, MDB_RESERVE), 1);
+ MDBX_CHECK(mdbx_put(trnlck, m_dbSettings, &key, &data, MDBX_RESERVE), 1);
- BYTE *pBlob = (BYTE*)data.mv_data;
+ BYTE *pBlob = (BYTE*)data.iov_base;
*pBlob++ = dbcwWork.value.type;
switch (dbcwWork.value.type) {
case DBVT_BYTE: *pBlob = dbcwWork.value.bVal; break;
@@ -489,19 +489,19 @@ STDMETHODIMP_(BOOL) CDbxMdb::WriteContactSetting(MCONTACT contactID, DBCONTACTWR
case DBVT_ASCIIZ:
case DBVT_UTF8:
- data.mv_size = *(WORD*)pBlob = dbcwWork.value.cchVal;
+ data.iov_len = *(WORD*)pBlob = dbcwWork.value.cchVal;
pBlob += 2;
memcpy(pBlob, dbcwWork.value.pszVal, dbcwWork.value.cchVal);
break;
case DBVT_BLOB:
case DBVT_ENCRYPTED:
- data.mv_size = *(WORD*)pBlob = dbcwWork.value.cpbVal;
+ data.iov_len = *(WORD*)pBlob = dbcwWork.value.cpbVal;
pBlob += 2;
memcpy(pBlob, dbcwWork.value.pbVal, dbcwWork.value.cpbVal);
}
- if (trnlck.commit() == MDB_SUCCESS)
+ if (trnlck.commit() == MDBX_SUCCESS)
break;
}
@@ -527,13 +527,13 @@ STDMETHODIMP_(BOOL) CDbxMdb::DeleteContactSetting(MCONTACT contactID, LPCSTR szM
keyVal->dwModuleId = GetModuleID(szModule);
memcpy(&keyVal->szSettingName, szSetting, settingNameLen + 1);
- MDB_val key = { sizeof(DBSettingKey) + settingNameLen + 1, keyVal };
+ MDBX_val key = { keyVal, sizeof(DBSettingKey) + settingNameLen + 1 };
for (;; Remap())
{
txn_ptr trnlck(m_pMdbEnv);
- MDB_CHECK(mdb_del(trnlck, m_dbSettings, &key, nullptr), 1);
- if (trnlck.commit() == MDB_SUCCESS)
+ MDBX_CHECK(mdbx_del(trnlck, m_dbSettings, &key, nullptr), 1);
+ if (trnlck.commit() == MDBX_SUCCESS)
break;
}
}
@@ -557,11 +557,11 @@ STDMETHODIMP_(BOOL) CDbxMdb::EnumContactSettings(MCONTACT hContact, DBSETTINGENU
txn_ptr_ro txn(m_txn);
cursor_ptr_ro cursor(m_curSettings);
- MDB_val key = { sizeof(keyVal), &keyVal }, data;
+ MDBX_val key = { &keyVal, sizeof(keyVal) }, data;
- for (int res = mdb_cursor_get(cursor, &key, &data, MDB_SET_RANGE); res == MDB_SUCCESS; res = mdb_cursor_get(cursor, &key, &data, MDB_NEXT))
+ for (int res = mdbx_cursor_get(cursor, &key, &data, MDBX_SET_RANGE); res == MDBX_SUCCESS; res = mdbx_cursor_get(cursor, &key, &data, MDBX_NEXT))
{
- const DBSettingKey *pKey = (const DBSettingKey*)key.mv_data;
+ const DBSettingKey *pKey = (const DBSettingKey*)key.iov_base;
if (pKey->hContact != hContact || pKey->dwModuleId != keyVal.dwModuleId)
break;
result = pfnEnumProc(pKey->szSettingName, LPARAM(param));
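
WriteContactSetting() above keeps LMDB's reserve-then-fill idiom: the put is issued with MDBX_RESERVE and only a length, and the pointer returned in iov_base is then written in place within the same transaction. A hedged sketch of that pattern with plain calls:

```cpp
#include <string.h>
#include "mdbx.h"

// Reserve-then-fill write, as used by WriteContactSetting() and EnableEncryption().
// The engine allocates the record and returns a pointer to fill before commit.
static int put_blob_reserved(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val key,
                             const void *blob, size_t cb)
{
	MDBX_val data;
	data.iov_len = cb;            // only the final size is supplied up front
	data.iov_base = nullptr;
	int rc = mdbx_put(txn, dbi, &key, &data, MDBX_RESERVE);
	if (rc != MDBX_SUCCESS)
		return rc;
	memcpy(data.iov_base, blob, cb);   // fill the reserved space in place
	return MDBX_SUCCESS;
}
```
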
diff --git a/plugins/Dbx_mdb/src/dbutils.cpp b/plugins/Dbx_mdb/src/dbutils.cpp
index 7f4e33dcd0..27dcf4147a 100644
--- a/plugins/Dbx_mdb/src/dbutils.cpp
+++ b/plugins/Dbx_mdb/src/dbutils.cpp
@@ -25,10 +25,10 @@ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#define CMP_UINT(x, y) { if ((x) != (y)) return (x) < (y) ? -1 : 1; }
-int DBEventSortingKey::Compare(const MDB_val *ax, const MDB_val *bx)
+int DBEventSortingKey::Compare(const MDBX_val *ax, const MDBX_val *bx)
{
- const DBEventSortingKey *a = (DBEventSortingKey *)ax->mv_data;
- const DBEventSortingKey *b = (DBEventSortingKey *)bx->mv_data;
+ const DBEventSortingKey *a = (DBEventSortingKey *)ax->iov_base;
+ const DBEventSortingKey *b = (DBEventSortingKey *)bx->iov_base;
CMP_UINT(a->hContact, b->hContact);
CMP_UINT(a->ts, b->ts);
@@ -36,12 +36,12 @@ int DBEventSortingKey::Compare(const MDB_val *ax, const MDB_val *bx)
return 0;
}
-int DBSettingKey::Compare(const MDB_val *ax, const MDB_val *bx)
+int DBSettingKey::Compare(const MDBX_val *ax, const MDBX_val *bx)
{
- const DBSettingKey *a = (DBSettingKey *)ax->mv_data;
- const DBSettingKey *b = (DBSettingKey *)bx->mv_data;
+ const DBSettingKey *a = (DBSettingKey *)ax->iov_base;
+ const DBSettingKey *b = (DBSettingKey *)bx->iov_base;
CMP_UINT(a->hContact, b->hContact);
CMP_UINT(a->dwModuleId, b->dwModuleId);
- return (min(ax->mv_size, bx->mv_size) > sizeof(DBSettingKey)) ? strcmp(a->szSettingName, b->szSettingName) : 0;
+ return (min(ax->iov_len, bx->iov_len) > sizeof(DBSettingKey)) ? strcmp(a->szSettingName, b->szSettingName) : 0;
}
diff --git a/plugins/Dbx_mdb/src/init.cpp b/plugins/Dbx_mdb/src/init.cpp
index 49991c0529..8567c890e7 100644
--- a/plugins/Dbx_mdb/src/init.cpp
+++ b/plugins/Dbx_mdb/src/init.cpp
@@ -98,8 +98,8 @@ MIDatabaseChecker* CheckDb(const TCHAR *profile, int *error)
static DATABASELINK dblink =
{
sizeof(DATABASELINK),
- "dbx_lmdb",
- L"LMDB database driver",
+ "dbx_mdbx",
+ L"LMDBx database driver",
makeDatabase,
grokHeader,
LoadDatabase,
diff --git a/plugins/Dbx_mdb/src/lmdb/lmdb.h b/plugins/Dbx_mdb/src/lmdb/lmdb.h deleted file mode 100644 index 3cce100537..0000000000 --- a/plugins/Dbx_mdb/src/lmdb/lmdb.h +++ /dev/null @@ -1,1614 +0,0 @@ -/** @file lmdb.h - * @brief Lightning memory-mapped database library - * - * @mainpage Lightning Memory-Mapped Database Manager (LMDB) - * - * @section intro_sec Introduction - * LMDB is a Btree-based database management library modeled loosely on the - * BerkeleyDB API, but much simplified. The entire database is exposed - * in a memory map, and all data fetches return data directly - * from the mapped memory, so no malloc's or memcpy's occur during - * data fetches. As such, the library is extremely simple because it - * requires no page caching layer of its own, and it is extremely high - * performance and memory-efficient. It is also fully transactional with - * full ACID semantics, and when the memory map is read-only, the - * database integrity cannot be corrupted by stray pointer writes from - * application code. - * - * The library is fully thread-aware and supports concurrent read/write - * access from multiple processes and threads. Data pages use a copy-on- - * write strategy so no active data pages are ever overwritten, which - * also provides resistance to corruption and eliminates the need of any - * special recovery procedures after a system crash. Writes are fully - * serialized; only one write transaction may be active at a time, which - * guarantees that writers can never deadlock. The database structure is - * multi-versioned so readers run with no locks; writers cannot block - * readers, and readers don't block writers. - * - * Unlike other well-known database mechanisms which use either write-ahead - * transaction logs or append-only data writes, LMDB requires no maintenance - * during operation. Both write-ahead loggers and append-only databases - * require periodic checkpointing and/or compaction of their log or database - * files otherwise they grow without bound. LMDB tracks free pages within - * the database and re-uses them for new write operations, so the database - * size does not grow without bound in normal use. - * - * The memory map can be used as a read-only or read-write map. It is - * read-only by default as this provides total immunity to corruption. - * Using read-write mode offers much higher write performance, but adds - * the possibility for stray application writes thru pointers to silently - * corrupt the database. Of course if your application code is known to - * be bug-free (...) then this is not an issue. - * - * If this is your first time using a transactional embedded key/value - * store, you may find the \ref starting page to be helpful. - * - * @section caveats_sec Caveats - * Troubleshooting the lock file, plus semaphores on BSD systems: - * - * - A broken lockfile can cause sync issues. - * Stale reader transactions left behind by an aborted program - * cause further writes to grow the database quickly, and - * stale locks can block further operation. - * - * Fix: Check for stale readers periodically, using the - * #mdb_reader_check function or the \ref mdb_stat_1 "mdb_stat" tool. - * Stale writers will be cleared automatically on most systems: - * - Windows - automatic - * - BSD, systems using SysV semaphores - automatic - * - Linux, systems using POSIX mutexes with Robust option - automatic - * Otherwise just make all programs using the database close it; - * the lockfile is always reset on first open of the environment. 
- * - * - On BSD systems or others configured with MDB_USE_SYSV_SEM or - * MDB_USE_POSIX_SEM, - * startup can fail due to semaphores owned by another userid. - * - * Fix: Open and close the database as the user which owns the - * semaphores (likely last user) or as root, while no other - * process is using the database. - * - * Restrictions/caveats (in addition to those listed for some functions): - * - * - Only the database owner should normally use the database on - * BSD systems or when otherwise configured with MDB_USE_POSIX_SEM. - * Multiple users can cause startup to fail later, as noted above. - * - * - There is normally no pure read-only mode, since readers need write - * access to locks and lock file. Exceptions: On read-only filesystems - * or with the #MDB_NOLOCK flag described under #mdb_env_open(). - * - * - An LMDB configuration will often reserve considerable \b unused - * memory address space and maybe file size for future growth. - * This does not use actual memory or disk space, but users may need - * to understand the difference so they won't be scared off. - * - * - By default, in versions before 0.9.10, unused portions of the data - * file might receive garbage data from memory freed by other code. - * (This does not happen when using the #MDB_WRITEMAP flag.) As of - * 0.9.10 the default behavior is to initialize such memory before - * writing to the data file. Since there may be a slight performance - * cost due to this initialization, applications may disable it using - * the #MDB_NOMEMINIT flag. Applications handling sensitive data - * which must not be written should not use this flag. This flag is - * irrelevant when using #MDB_WRITEMAP. - * - * - A thread can only use one transaction at a time, plus any child - * transactions. Each transaction belongs to one thread. See below. - * The #MDB_NOTLS flag changes this for read-only transactions. - * - * - Use an MDB_env* in the process which opened it, without fork()ing. - * - * - Do not have open an LMDB database twice in the same process at - * the same time. Not even from a plain open() call - close()ing it - * breaks flock() advisory locking. - * - * - Avoid long-lived transactions. Read transactions prevent - * reuse of pages freed by newer write transactions, thus the - * database can grow quickly. Write transactions prevent - * other write transactions, since writes are serialized. - * - * - Avoid suspending a process with active transactions. These - * would then be "long-lived" as above. Also read transactions - * suspended when writers commit could sometimes see wrong data. - * - * ...when several processes can use a database concurrently: - * - * - Avoid aborting a process with an active transaction. - * The transaction becomes "long-lived" as above until a check - * for stale readers is performed or the lockfile is reset, - * since the process may not remove it from the lockfile. - * - * This does not apply to write transactions if the system clears - * stale writers, see above. - * - * - If you do that anyway, do a periodic check for stale readers. Or - * close the environment once in a while, so the lockfile can get reset. - * - * - Do not use LMDB databases on remote filesystems, even between - * processes on the same host. This breaks flock() on some OSes, - * possibly memory map sync, and certainly sync between programs - * on different hosts. - * - * - Opening a database can fail if another process is opening or - * closing it at exactly the same time. - * - * @author Howard Chu, Symas Corporation. 
- * - * @copyright Copyright 2011-2016 Howard Chu, Symas Corp. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted only as authorized by the OpenLDAP - * Public License. - * - * A copy of this license is available in the file LICENSE in the - * top-level directory of the distribution or, alternatively, at - * <http://www.OpenLDAP.org/license.html>. - * - * @par Derived From: - * This code is derived from btree.c written by Martin Hedenfalk. - * - * Copyright (c) 2009, 2010 Martin Hedenfalk <martin@bzero.se> - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -#ifndef _LMDB_H_ -#define _LMDB_H_ - -#include <sys/types.h> -#if _MSC_VER < 1800 -# include <msapi/inttypes.h> -#else -# include <inttypes.h> -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -/** Unix permissions for creating files, or dummy definition for Windows */ -#ifdef _MSC_VER -typedef int mdb_mode_t; -#else -typedef mode_t mdb_mode_t; -#endif - -#ifdef MDB_VL32 -typedef uint64_t mdb_size_t; -#define mdb_env_create mdb_env_create_vl32 /**< Prevent mixing with non-VL32 builds */ -#else -typedef size_t mdb_size_t; -#endif - -/** An abstraction for a file handle. - * On POSIX systems file handles are small integers. On Windows - * they're opaque pointers. - */ -#ifdef _WIN32 -typedef void *mdb_filehandle_t; -#else -typedef int mdb_filehandle_t; -#endif - -/** @defgroup mdb LMDB API - * @{ - * @brief OpenLDAP Lightning Memory-Mapped Database Manager - */ -/** @defgroup Version Version Macros - * @{ - */ -/** Library major version */ -#define MDB_VERSION_MAJOR 0 -/** Library minor version */ -#define MDB_VERSION_MINOR 9 -/** Library patch version */ -#define MDB_VERSION_PATCH 70 - -/** Combine args a,b,c into a single integer for easy version comparisons */ -#define MDB_VERINT(a,b,c) (((a) << 24) | ((b) << 16) | (c)) - -/** The full library version as a single integer */ -#define MDB_VERSION_FULL \ - MDB_VERINT(MDB_VERSION_MAJOR,MDB_VERSION_MINOR,MDB_VERSION_PATCH) - -/** The release date of this library version */ -#define MDB_VERSION_DATE "December 19, 2015" - -/** A stringifier for the version info */ -#define MDB_VERSTR(a,b,c,d) "LMDB " #a "." #b "." #c ": (" d ")" - -/** A helper for the stringifier macro */ -#define MDB_VERFOO(a,b,c,d) MDB_VERSTR(a,b,c,d) - -/** The full library version as a C string */ -#define MDB_VERSION_STRING \ - MDB_VERFOO(MDB_VERSION_MAJOR,MDB_VERSION_MINOR,MDB_VERSION_PATCH,MDB_VERSION_DATE) -/** @} */ - -/** @brief Opaque structure for a database environment. - * - * A DB environment supports multiple databases, all residing in the same - * shared-memory map. - */ -typedef struct MDB_env MDB_env; - -/** @brief Opaque structure for a transaction handle. - * - * All database operations require a transaction handle. 
Transactions may be - * read-only or read-write. - */ -typedef struct MDB_txn MDB_txn; - -/** @brief A handle for an individual database in the DB environment. */ -typedef unsigned int MDB_dbi; - -/** @brief Opaque structure for navigating through a database */ -typedef struct MDB_cursor MDB_cursor; - -/** @brief Generic structure used for passing keys and data in and out - * of the database. - * - * Values returned from the database are valid only until a subsequent - * update operation, or the end of the transaction. Do not modify or - * free them, they commonly point into the database itself. - * - * Key sizes must be between 1 and #mdb_env_get_maxkeysize() inclusive. - * The same applies to data sizes in databases with the #MDB_DUPSORT flag. - * Other data items can in theory be from 0 to 0xffffffff bytes long. - */ -typedef struct MDB_val { - size_t mv_size; /**< size of the data item */ - void *mv_data; /**< address of the data item */ -} MDB_val; - -/** @brief A callback function used to compare two keys in a database */ -typedef int (MDB_cmp_func)(const MDB_val *a, const MDB_val *b); - -/** @brief A callback function used to relocate a position-dependent data item - * in a fixed-address database. - * - * The \b newptr gives the item's desired address in - * the memory map, and \b oldptr gives its previous address. The item's actual - * data resides at the address in \b item. This callback is expected to walk - * through the fields of the record in \b item and modify any - * values based at the \b oldptr address to be relative to the \b newptr address. - * @param[in,out] item The item that is to be relocated. - * @param[in] oldptr The previous address. - * @param[in] newptr The new address to relocate to. - * @param[in] relctx An application-provided context, set by #mdb_set_relctx(). - * @todo This feature is currently unimplemented. - */ -typedef void (MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr, void *relctx); - -/** @defgroup mdb_env Environment Flags - * @{ - */ - /** mmap at a fixed address (experimental) */ -#define MDB_FIXEDMAP 0x01 - /** no environment directory */ -#define MDB_NOSUBDIR 0x4000 - /** don't fsync after commit */ -#define MDB_NOSYNC 0x10000 - /** read only */ -#define MDB_RDONLY 0x20000 - /** don't fsync metapage after commit */ -#define MDB_NOMETASYNC 0x40000 - /** use writable mmap */ -#define MDB_WRITEMAP 0x80000 - /** use asynchronous msync when #MDB_WRITEMAP is used */ -#define MDB_MAPASYNC 0x100000 - /** tie reader locktable slots to #MDB_txn objects instead of to threads */ -#define MDB_NOTLS 0x200000 - /** don't do any locking, caller must manage their own locks */ -#define MDB_NOLOCK 0x400000 - /** don't do readahead (no effect on Windows) */ -#define MDB_NORDAHEAD 0x800000 - /** don't initialize malloc'd memory before writing to datafile */ -#define MDB_NOMEMINIT 0x1000000 -/** @} */ - -/** @defgroup mdb_dbi_open Database Flags - * @{ - */ - /** use reverse string keys */ -#define MDB_REVERSEKEY 0x02 - /** use sorted duplicates */ -#define MDB_DUPSORT 0x04 - /** numeric keys in native byte order: either unsigned int or size_t. - * The keys must all be of the same size. 
*/ -#define MDB_INTEGERKEY 0x08 - /** with #MDB_DUPSORT, sorted dup items have fixed size */ -#define MDB_DUPFIXED 0x10 - /** with #MDB_DUPSORT, dups are #MDB_INTEGERKEY-style integers */ -#define MDB_INTEGERDUP 0x20 - /** with #MDB_DUPSORT, use reverse string dups */ -#define MDB_REVERSEDUP 0x40 - /** create DB if not already existing */ -#define MDB_CREATE 0x40000 -/** @} */ - -/** @defgroup mdb_put Write Flags - * @{ - */ -/** For put: Don't write if the key already exists. */ -#define MDB_NOOVERWRITE 0x10 -/** Only for #MDB_DUPSORT<br> - * For put: don't write if the key and data pair already exist.<br> - * For mdb_cursor_del: remove all duplicate data items. - */ -#define MDB_NODUPDATA 0x20 -/** For mdb_cursor_put: overwrite the current key/data pair */ -#define MDB_CURRENT 0x40 -/** For put: Just reserve space for data, don't copy it. Return a - * pointer to the reserved space. - */ -#define MDB_RESERVE 0x10000 -/** Data is being appended, don't split full pages. */ -#define MDB_APPEND 0x20000 -/** Duplicate data is being appended, don't split full pages. */ -#define MDB_APPENDDUP 0x40000 -/** Store multiple data items in one call. Only for #MDB_DUPFIXED. */ -#define MDB_MULTIPLE 0x80000 -/* @} */ - -/** @defgroup mdb_copy Copy Flags - * @{ - */ -/** Compacting copy: Omit free space from copy, and renumber all - * pages sequentially. - */ -#define MDB_CP_COMPACT 0x01 -/* @} */ - -/** @brief Cursor Get operations. - * - * This is the set of all operations for retrieving data - * using a cursor. - */ -typedef enum MDB_cursor_op { - MDB_FIRST, /**< Position at first key/data item */ - MDB_FIRST_DUP, /**< Position at first data item of current key. - Only for #MDB_DUPSORT */ - MDB_GET_BOTH, /**< Position at key/data pair. Only for #MDB_DUPSORT */ - MDB_GET_BOTH_RANGE, /**< position at key, nearest data. Only for #MDB_DUPSORT */ - MDB_GET_CURRENT, /**< Return key/data at current cursor position */ - MDB_GET_MULTIPLE, /**< Return key and up to a page of duplicate data items - from current cursor position. Move cursor to prepare - for #MDB_NEXT_MULTIPLE. Only for #MDB_DUPFIXED */ - MDB_LAST, /**< Position at last key/data item */ - MDB_LAST_DUP, /**< Position at last data item of current key. - Only for #MDB_DUPSORT */ - MDB_NEXT, /**< Position at next data item */ - MDB_NEXT_DUP, /**< Position at next data item of current key. - Only for #MDB_DUPSORT */ - MDB_NEXT_MULTIPLE, /**< Return key and up to a page of duplicate data items - from next cursor position. Move cursor to prepare - for #MDB_NEXT_MULTIPLE. Only for #MDB_DUPFIXED */ - MDB_NEXT_NODUP, /**< Position at first data item of next key */ - MDB_PREV, /**< Position at previous data item */ - MDB_PREV_DUP, /**< Position at previous data item of current key. - Only for #MDB_DUPSORT */ - MDB_PREV_NODUP, /**< Position at last data item of previous key */ - MDB_SET, /**< Position at specified key */ - MDB_SET_KEY, /**< Position at specified key, return key + data */ - MDB_SET_RANGE, /**< Position at first key greater than or equal to specified key. */ - MDB_PREV_MULTIPLE /**< Position at previous page and return key and up to - a page of duplicate data items. 
Only for #MDB_DUPFIXED */ -} MDB_cursor_op; - -/** @defgroup errors Return Codes - * - * BerkeleyDB uses -30800 to -30999, we'll go under them - * @{ - */ - /** Successful result */ -#define MDB_SUCCESS 0 - /** key/data pair already exists */ -#define MDB_KEYEXIST (-30799) - /** key/data pair not found (EOF) */ -#define MDB_NOTFOUND (-30798) - /** Requested page not found - this usually indicates corruption */ -#define MDB_PAGE_NOTFOUND (-30797) - /** Located page was wrong type */ -#define MDB_CORRUPTED (-30796) - /** Update of meta page failed or environment had fatal error */ -#define MDB_PANIC (-30795) - /** Environment version mismatch */ -#define MDB_VERSION_MISMATCH (-30794) - /** File is not a valid LMDB file */ -#define MDB_INVALID (-30793) - /** Environment mapsize reached */ -#define MDB_MAP_FULL (-30792) - /** Environment maxdbs reached */ -#define MDB_DBS_FULL (-30791) - /** Environment maxreaders reached */ -#define MDB_READERS_FULL (-30790) - /** Too many TLS keys in use - Windows only */ -#define MDB_TLS_FULL (-30789) - /** Txn has too many dirty pages */ -#define MDB_TXN_FULL (-30788) - /** Cursor stack too deep - internal error */ -#define MDB_CURSOR_FULL (-30787) - /** Page has not enough space - internal error */ -#define MDB_PAGE_FULL (-30786) - /** Database contents grew beyond environment mapsize */ -#define MDB_MAP_RESIZED (-30785) - /** Operation and DB incompatible, or DB type changed. This can mean: - * <ul> - * <li>The operation expects an #MDB_DUPSORT / #MDB_DUPFIXED database. - * <li>Opening a named DB when the unnamed DB has #MDB_DUPSORT / #MDB_INTEGERKEY. - * <li>Accessing a data record as a database, or vice versa. - * <li>The database was dropped and recreated with different flags. - * </ul> - */ -#define MDB_INCOMPATIBLE (-30784) - /** Invalid reuse of reader locktable slot */ -#define MDB_BAD_RSLOT (-30783) - /** Transaction must abort, has a child, or is invalid */ -#define MDB_BAD_TXN (-30782) - /** Unsupported size of key/DB name/data, or wrong DUPFIXED size */ -#define MDB_BAD_VALSIZE (-30781) - /** The specified DBI was changed unexpectedly */ -#define MDB_BAD_DBI (-30780) - /** The last defined error code */ -#define MDB_LAST_ERRCODE MDB_BAD_DBI -/** @} */ - -/** @brief Statistics for a database in the environment */ -typedef struct MDB_stat { - unsigned int ms_psize; /**< Size of a database page. - This is currently the same for all databases. */ - unsigned int ms_depth; /**< Depth (height) of the B-tree */ - mdb_size_t ms_branch_pages; /**< Number of internal (non-leaf) pages */ - mdb_size_t ms_leaf_pages; /**< Number of leaf pages */ - mdb_size_t ms_overflow_pages; /**< Number of overflow pages */ - mdb_size_t ms_entries; /**< Number of data items */ -} MDB_stat; - -/** @brief Information about the environment */ -typedef struct MDB_envinfo { - void *me_mapaddr; /**< Address of map, if fixed */ - mdb_size_t me_mapsize; /**< Size of the data memory map */ - mdb_size_t me_last_pgno; /**< ID of the last used page */ - mdb_size_t me_last_txnid; /**< ID of the last committed transaction */ - unsigned int me_maxreaders; /**< max reader slots in the environment */ - unsigned int me_numreaders; /**< max reader slots used in the environment */ -} MDB_envinfo; - - /** @brief Return the LMDB library version information. 
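A small sketch of reading the MDB_stat and MDB_envinfo structures defined above. It assumes an already-opened environment and skips error checks; the printed fields are only a subset.

#include <stdio.h>
#include "lmdb.h"

static void print_env_summary(MDB_env *env)
{
	MDB_stat st;
	MDB_envinfo info;

	mdb_env_stat(env, &st);
	mdb_env_info(env, &info);

	printf("page size: %u, tree depth: %u, entries: %llu\n",
	       st.ms_psize, st.ms_depth, (unsigned long long)st.ms_entries);
	printf("map size: %llu bytes\n", (unsigned long long)info.me_mapsize);
}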
- * - * @param[out] major if non-NULL, the library major version number is copied here - * @param[out] minor if non-NULL, the library minor version number is copied here - * @param[out] patch if non-NULL, the library patch version number is copied here - * @retval "version string" The library version as a string - */ -char *mdb_version(int *major, int *minor, int *patch); - - /** @brief Return a string describing a given error code. - * - * This function is a superset of the ANSI C X3.159-1989 (ANSI C) strerror(3) - * function. If the error code is greater than or equal to 0, then the string - * returned by the system function strerror(3) is returned. If the error code - * is less than 0, an error string corresponding to the LMDB library error is - * returned. See @ref errors for a list of LMDB-specific error codes. - * @param[in] err The error code - * @retval "error message" The description of the error - */ -char *mdb_strerror(int err); - - /** @brief Create an LMDB environment handle. - * - * This function allocates memory for a #MDB_env structure. To release - * the allocated memory and discard the handle, call #mdb_env_close(). - * Before the handle may be used, it must be opened using #mdb_env_open(). - * Various other options may also need to be set before opening the handle, - * e.g. #mdb_env_set_mapsize(), #mdb_env_set_maxreaders(), #mdb_env_set_maxdbs(), - * depending on usage requirements. - * @param[out] env The address where the new handle will be stored - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_create(MDB_env **env); - - /** @brief Open an environment handle. - * - * If this function fails, #mdb_env_close() must be called to discard the #MDB_env handle. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] path The directory in which the database files reside. This - * directory must already exist and be writable. - * @param[in] flags Special options for this environment. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - * Flags set by mdb_env_set_flags() are also used. - * <ul> - * <li>#MDB_FIXEDMAP - * use a fixed address for the mmap region. This flag must be specified - * when creating the environment, and is stored persistently in the environment. - * If successful, the memory map will always reside at the same virtual address - * and pointers used to reference data items in the database will be constant - * across multiple invocations. This option may not always work, depending on - * how the operating system has allocated memory to shared libraries and other uses. - * The feature is highly experimental. - * <li>#MDB_NOSUBDIR - * By default, LMDB creates its environment in a directory whose - * pathname is given in \b path, and creates its data and lock files - * under that directory. With this option, \b path is used as-is for - * the database main data file. The database lock file is the \b path - * with "-lock" appended. - * <li>#MDB_RDONLY - * Open the environment in read-only mode. No write operations will be - * allowed. LMDB will still modify the lock file - except on read-only - * filesystems, where LMDB does not use locks. - * <li>#MDB_WRITEMAP - * Use a writeable memory map unless MDB_RDONLY is set. This uses - * fewer mallocs but loses protection from application bugs - * like wild pointer writes and other bad updates into the database. 
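A minimal sketch of the create/open sequence described above. The path and flag choice are examples only, and the 0664 mode is ignored on Windows as noted.

#include "lmdb.h"

static int open_env(MDB_env **penv, const char *path)
{
	int rc = mdb_env_create(penv);
	if (rc != MDB_SUCCESS)
		return rc;

	rc = mdb_env_open(*penv, path, MDB_NOSUBDIR, 0664);
	if (rc != MDB_SUCCESS)
		mdb_env_close(*penv);   /* the handle must be discarded if open fails */
	return rc;
}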
- * This may be slightly faster for DBs that fit entirely in RAM, but - * is slower for DBs larger than RAM. - * Incompatible with nested transactions. - * Do not mix processes with and without MDB_WRITEMAP on the same - * environment. This can defeat durability (#mdb_env_sync etc). - * <li>#MDB_NOMETASYNC - * Flush system buffers to disk only once per transaction, omit the - * metadata flush. Defer that until the system flushes files to disk, - * or next non-MDB_RDONLY commit or #mdb_env_sync(). This optimization - * maintains database integrity, but a system crash may undo the last - * committed transaction. I.e. it preserves the ACI (atomicity, - * consistency, isolation) but not D (durability) database property. - * This flag may be changed at any time using #mdb_env_set_flags(). - * <li>#MDB_NOSYNC - * Don't flush system buffers to disk when committing a transaction. - * This optimization means a system crash can corrupt the database or - * lose the last transactions if buffers are not yet flushed to disk. - * The risk is governed by how often the system flushes dirty buffers - * to disk and how often #mdb_env_sync() is called. However, if the - * filesystem preserves write order and the #MDB_WRITEMAP flag is not - * used, transactions exhibit ACI (atomicity, consistency, isolation) - * properties and only lose D (durability). I.e. database integrity - * is maintained, but a system crash may undo the final transactions. - * Note that (#MDB_NOSYNC | #MDB_WRITEMAP) leaves the system with no - * hint for when to write transactions to disk, unless #mdb_env_sync() - * is called. (#MDB_MAPASYNC | #MDB_WRITEMAP) may be preferable. - * This flag may be changed at any time using #mdb_env_set_flags(). - * <li>#MDB_MAPASYNC - * When using #MDB_WRITEMAP, use asynchronous flushes to disk. - * As with #MDB_NOSYNC, a system crash can then corrupt the - * database or lose the last transactions. Calling #mdb_env_sync() - * ensures on-disk database integrity until next commit. - * This flag may be changed at any time using #mdb_env_set_flags(). - * <li>#MDB_NOTLS - * Don't use Thread-Local Storage. Tie reader locktable slots to - * #MDB_txn objects instead of to threads. I.e. #mdb_txn_reset() keeps - * the slot reseved for the #MDB_txn object. A thread may use parallel - * read-only transactions. A read-only transaction may span threads if - * the user synchronizes its use. Applications that multiplex many - * user threads over individual OS threads need this option. Such an - * application must also serialize the write transactions in an OS - * thread, since LMDB's write locking is unaware of the user threads. - * <li>#MDB_NOLOCK - * Don't do any locking. If concurrent access is anticipated, the - * caller must manage all concurrency itself. For proper operation - * the caller must enforce single-writer semantics, and must ensure - * that no readers are using old transactions while a writer is - * active. The simplest approach is to use an exclusive lock so that - * no readers may be active at all when a writer begins. - * <li>#MDB_NORDAHEAD - * Turn off readahead. Most operating systems perform readahead on - * read requests by default. This option turns it off if the OS - * supports it. Turning it off may help random read performance - * when the DB is larger than RAM and system RAM is full. - * The option is not implemented on Windows. - * <li>#MDB_NOMEMINIT - * Don't initialize malloc'd memory before writing to unused spaces - * in the data file. 
By default, memory for pages written to the data - * file is obtained using malloc. While these pages may be reused in - * subsequent transactions, freshly malloc'd pages will be initialized - * to zeroes before use. This avoids persisting leftover data from other - * code (that used the heap and subsequently freed the memory) into the - * data file. Note that many other system libraries may allocate - * and free memory from the heap for arbitrary uses. E.g., stdio may - * use the heap for file I/O buffers. This initialization step has a - * modest performance cost so some applications may want to disable - * it using this flag. This option can be a problem for applications - * which handle sensitive data like passwords, and it makes memory - * checkers like Valgrind noisy. This flag is not needed with #MDB_WRITEMAP, - * which writes directly to the mmap instead of using malloc for pages. The - * initialization is also skipped if #MDB_RESERVE is used; the - * caller is expected to overwrite all of the memory that was - * reserved in that case. - * This flag may be changed at any time using #mdb_env_set_flags(). - * </ul> - * @param[in] mode The UNIX permissions to set on created files and semaphores. - * This parameter is ignored on Windows. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>#MDB_VERSION_MISMATCH - the version of the LMDB library doesn't match the - * version that created the database environment. - * <li>#MDB_INVALID - the environment file headers are corrupted. - * <li>ENOENT - the directory specified by the path parameter doesn't exist. - * <li>EACCES - the user didn't have permission to access the environment files. - * <li>EAGAIN - the environment was locked by another process. - * </ul> - */ -int mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mdb_mode_t mode); - - /** @brief Copy an LMDB environment to the specified path. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated at need. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] path The directory in which the copy will reside. This - * directory must already exist and be writable but must otherwise be - * empty. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_copy(MDB_env *env, const char *path); - - /** @brief Copy an LMDB environment to the specified file descriptor. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated at need. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] fd The filedescriptor to write the copy to. It must - * have already been opened for Write access. - * @return A non-zero error value on failure and 0 on success. 
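A short sketch of a hot backup using mdb_env_copy(), per the description above. The target directory name is illustrative; it must already exist, be writable, and be empty.

#include <stdio.h>
#include "lmdb.h"

int rc = mdb_env_copy(env, "./backup");   /* env assumed already open */
if (rc != MDB_SUCCESS)
	fprintf(stderr, "backup failed: %s\n", mdb_strerror(rc));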
- */ -int mdb_env_copyfd(MDB_env *env, mdb_filehandle_t fd); - - /** @brief Copy an LMDB environment to the specified path, with options. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated at need. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] path The directory in which the copy will reside. This - * directory must already exist and be writable but must otherwise be - * empty. - * @param[in] flags Special options for this operation. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - * <ul> - * <li>#MDB_CP_COMPACT - Perform compaction while copying: omit free - * pages and sequentially renumber all pages in output. This option - * consumes more CPU and runs more slowly than the default. - * </ul> - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_copy2(MDB_env *env, const char *path, unsigned int flags); - - /** @brief Copy an LMDB environment to the specified file descriptor, - * with options. - * - * This function may be used to make a backup of an existing environment. - * No lockfile is created, since it gets recreated at need. See - * #mdb_env_copy2() for further details. - * @note This call can trigger significant file size growth if run in - * parallel with write transactions, because it employs a read-only - * transaction. See long-lived transactions under @ref caveats_sec. - * @param[in] env An environment handle returned by #mdb_env_create(). It - * must have already been opened successfully. - * @param[in] fd The filedescriptor to write the copy to. It must - * have already been opened for Write access. - * @param[in] flags Special options for this operation. - * See #mdb_env_copy2() for options. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_copyfd2(MDB_env *env, mdb_filehandle_t fd, unsigned int flags); - - /** @brief Return statistics about the LMDB environment. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] stat The address of an #MDB_stat structure - * where the statistics will be copied - */ -int mdb_env_stat(MDB_env *env, MDB_stat *stat); - - /** @brief Return information about the LMDB environment. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] stat The address of an #MDB_envinfo structure - * where the information will be copied - */ -int mdb_env_info(MDB_env *env, MDB_envinfo *stat); - - /** @brief Flush the data buffers to disk. - * - * Data is always written to disk when #mdb_txn_commit() is called, - * but the operating system may keep it buffered. LMDB always flushes - * the OS buffers upon commit as well, unless the environment was - * opened with #MDB_NOSYNC or in part #MDB_NOMETASYNC. This call is - * not valid if the environment was opened with #MDB_RDONLY. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] force If non-zero, force a synchronous flush. Otherwise - * if the environment has the #MDB_NOSYNC flag set the flushes - * will be omitted, and with #MDB_MAPASYNC they will be asynchronous. 
- * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EACCES - the environment is read-only. - * <li>EINVAL - an invalid parameter was specified. - * <li>EIO - an error occurred during synchronization. - * </ul> - */ -int mdb_env_sync(MDB_env *env, int force); - - /** @brief Close the environment and release the memory map. - * - * Only a single thread may call this function. All transactions, databases, - * and cursors must already be closed before calling this function. Attempts to - * use any such handles after calling this function will cause a SIGSEGV. - * The environment handle will be freed and must not be used again after this call. - * @param[in] env An environment handle returned by #mdb_env_create() - */ -void mdb_env_close(MDB_env *env); - - /** @brief Set environment flags. - * - * This may be used to set some flags in addition to those from - * #mdb_env_open(), or to unset these flags. If several threads - * change the flags at the same time, the result is undefined. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] flags The flags to change, bitwise OR'ed together - * @param[in] onoff A non-zero value sets the flags, zero clears them. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff); - - /** @brief Get environment flags. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] flags The address of an integer to store the flags - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_env_get_flags(MDB_env *env, unsigned int *flags); - - /** @brief Return the path that was used in #mdb_env_open(). - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] path Address of a string pointer to contain the path. This - * is the actual string in the environment, not a copy. It should not be - * altered in any way. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_env_get_path(MDB_env *env, const char **path); - - /** @brief Return the filedescriptor for the given environment. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] fd Address of a mdb_filehandle_t to contain the descriptor. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_env_get_fd(MDB_env *env, mdb_filehandle_t *fd); - - /** @brief Set the size of the memory map to use for this environment. - * - * The size should be a multiple of the OS page size. The default is - * 10485760 bytes. The size of the memory map is also the maximum size - * of the database. The value should be chosen as large as possible, - * to accommodate future growth of the database. - * This function should be called after #mdb_env_create() and before #mdb_env_open(). - * It may be called at later times if no transactions are active in - * this process. Note that the library does not check for this condition, - * the caller must ensure it explicitly. 
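A sketch of toggling MDB_NOSYNC at runtime with mdb_env_set_flags() and then forcing a flush with mdb_env_sync(), which the descriptions above explicitly permit. The bulk-load scenario is illustrative.

#include "lmdb.h"

mdb_env_set_flags(env, MDB_NOSYNC, 1);   /* bulk-load phase: skip fsync on commit */
/* ... many write transactions ... */
mdb_env_set_flags(env, MDB_NOSYNC, 0);   /* restore durable commits               */
mdb_env_sync(env, 1);                    /* force one synchronous flush now       */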
- * - * The new size takes effect immediately for the current process but - * will not be persisted to any others until a write transaction has been - * committed by the current process. Also, only mapsize increases are - * persisted into the environment. - * - * If the mapsize is increased by another process, and data has grown - * beyond the range of the current mapsize, #mdb_txn_begin() will - * return #MDB_MAP_RESIZED. This function may be called with a size - * of zero to adopt the new size. - * - * Any attempt to set a size smaller than the space already consumed - * by the environment will be silently changed to the current size of the used space. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] size The size in bytes - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified, or the environment has - * an active write transaction. - * </ul> - */ -int mdb_env_set_mapsize(MDB_env *env, mdb_size_t size); - - /** @brief Set the maximum number of threads/reader slots for the environment. - * - * This defines the number of slots in the lock table that is used to track readers in the - * the environment. The default is 126. - * Starting a read-only transaction normally ties a lock table slot to the - * current thread until the environment closes or the thread exits. If - * MDB_NOTLS is in use, #mdb_txn_begin() instead ties the slot to the - * MDB_txn object until it or the #MDB_env object is destroyed. - * This function may only be called after #mdb_env_create() and before #mdb_env_open(). - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] readers The maximum number of reader lock table slots - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified, or the environment is already open. - * </ul> - */ -int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers); - - /** @brief Get the maximum number of threads/reader slots for the environment. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] readers Address of an integer to store the number of readers - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers); - - /** @brief Set the maximum number of named databases for the environment. - * - * This function is only needed if multiple databases will be used in the - * environment. Simpler applications that use the environment as a single - * unnamed database can ignore this option. - * This function may only be called after #mdb_env_create() and before #mdb_env_open(). - * - * Currently a moderate number of slots are cheap but a huge number gets - * expensive: 7-120 words per transaction, and every #mdb_dbi_open() - * does a linear search of the opened slots. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] dbs The maximum number of databases - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified, or the environment is already open. - * </ul> - */ -int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs); - - /** @brief Get the maximum size of keys and #MDB_DUPSORT data we can write. 
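A sketch of the configuration order described above: mapsize, reader slots, and named-database count are set between mdb_env_create() and mdb_env_open(). All sizes and counts here are illustrative.

#include "lmdb.h"

MDB_env *env;
mdb_env_create(&env);
mdb_env_set_mapsize(env, 256ull * 1024 * 1024);  /* 256 MiB map = max DB size */
mdb_env_set_maxreaders(env, 126);                /* default value noted above */
mdb_env_set_maxdbs(env, 4);                      /* number of named databases */
mdb_env_open(env, "./data", 0, 0664);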
- * - * Depends on the compile-time constant #MDB_MAXKEYSIZE. Default 511. - * See @ref MDB_val. - * @param[in] env An environment handle returned by #mdb_env_create() - * @return The maximum size of a key we can write - */ -int mdb_env_get_maxkeysize(MDB_env *env); - - /** @brief Set application information associated with the #MDB_env. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] ctx An arbitrary pointer for whatever the application needs. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_set_userctx(MDB_env *env, void *ctx); - - /** @brief Get the application information associated with the #MDB_env. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @return The pointer set by #mdb_env_set_userctx(). - */ -void *mdb_env_get_userctx(MDB_env *env); - - /** @brief A callback function for most LMDB assert() failures, - * called before printing the message and aborting. - * - * @param[in] env An environment handle returned by #mdb_env_create(). - * @param[in] msg The assertion message, not including newline. - */ -typedef void MDB_assert_func(MDB_env *env, const char *msg); - - /** Set or reset the assert() callback of the environment. - * Disabled if liblmdb is buillt with NDEBUG. - * @note This hack should become obsolete as lmdb's error handling matures. - * @param[in] env An environment handle returned by #mdb_env_create(). - * @param[in] func An #MDB_assert_func function, or 0. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_env_set_assert(MDB_env *env, MDB_assert_func *func); - - /** @brief Create a transaction for use with the environment. - * - * The transaction handle may be discarded using #mdb_txn_abort() or #mdb_txn_commit(). - * @note A transaction and its cursors must only be used by a single - * thread, and a thread may only have a single transaction at a time. - * If #MDB_NOTLS is in use, this does not apply to read-only transactions. - * @note Cursors may not span transactions. - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] parent If this parameter is non-NULL, the new transaction - * will be a nested transaction, with the transaction indicated by \b parent - * as its parent. Transactions may be nested to any level. A parent - * transaction and its cursors may not issue any other operations than - * mdb_txn_commit and mdb_txn_abort while it has active child transactions. - * @param[in] flags Special options for this transaction. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - * <ul> - * <li>#MDB_RDONLY - * This transaction will not perform any write operations. - * <li>#MDB_NOSYNC - * Don't flush system buffers to disk when committing this transaction. - * <li>#MDB_NOMETASYNC - * Flush system buffers but omit metadata flush when committing this transaction. - * </ul> - * @param[out] txn Address where the new #MDB_txn handle will be stored - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>#MDB_PANIC - a fatal error occurred earlier and the environment - * must be shut down. - * <li>#MDB_MAP_RESIZED - another process wrote data beyond this MDB_env's - * mapsize and this environment's map must be resized as well. - * See #mdb_env_set_mapsize(). - * <li>#MDB_READERS_FULL - a read-only transaction was requested and - * the reader lock table is full. See #mdb_env_set_maxreaders(). 
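A minimal sketch of a read-only transaction under the rules described below for mdb_txn_begin(): one transaction per thread unless MDB_NOTLS is used, and a read-only transaction has nothing to commit.

#include "lmdb.h"

MDB_txn *txn;
int rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
if (rc == MDB_SUCCESS) {
	/* ... reads via mdb_get() or cursors ... */
	mdb_txn_abort(txn);   /* discard the snapshot; no commit needed */
}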
- * <li>ENOMEM - out of memory. - * </ul> - */ -int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **txn); - - /** @brief Returns the transaction's #MDB_env - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - */ -MDB_env *mdb_txn_env(MDB_txn *txn); - - /** @brief Return the transaction's ID. - * - * This returns the identifier associated with this transaction. For a - * read-only transaction, this corresponds to the snapshot being read; - * concurrent readers will frequently have the same transaction ID. - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @return A transaction ID, valid if input is an active transaction. - */ -mdb_size_t mdb_txn_id(MDB_txn *txn); - - /** @brief Commit all the operations of a transaction into the database. - * - * The transaction handle is freed. It and its cursors must not be used - * again after this call, except with #mdb_cursor_renew(). - * @note Earlier documentation incorrectly said all cursors would be freed. - * Only write-transactions free cursors. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * <li>ENOSPC - no more disk space. - * <li>EIO - a low-level I/O error occurred while writing. - * <li>ENOMEM - out of memory. - * </ul> - */ -int mdb_txn_commit(MDB_txn *txn); - - /** @brief Abandon all the operations of the transaction instead of saving them. - * - * The transaction handle is freed. It and its cursors must not be used - * again after this call, except with #mdb_cursor_renew(). - * @note Earlier documentation incorrectly said all cursors would be freed. - * Only write-transactions free cursors. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - */ -void mdb_txn_abort(MDB_txn *txn); - - /** @brief Reset a read-only transaction. - * - * Abort the transaction like #mdb_txn_abort(), but keep the transaction - * handle. #mdb_txn_renew() may reuse the handle. This saves allocation - * overhead if the process will start a new read-only transaction soon, - * and also locking overhead if #MDB_NOTLS is in use. The reader table - * lock is released, but the table slot stays tied to its thread or - * #MDB_txn. Use mdb_txn_abort() to discard a reset handle, and to free - * its lock table slot if MDB_NOTLS is in use. - * Cursors opened within the transaction must not be used - * again after this call, except with #mdb_cursor_renew(). - * Reader locks generally don't interfere with writers, but they keep old - * versions of database pages allocated. Thus they prevent the old pages - * from being reused when writers commit new data, and so under heavy load - * the database size may grow much more rapidly than otherwise. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - */ -void mdb_txn_reset(MDB_txn *txn); - - /** @brief Renew a read-only transaction. - * - * This acquires a new reader lock for a transaction handle that had been - * released by #mdb_txn_reset(). It must be called before a reset transaction - * may be used again. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>#MDB_PANIC - a fatal error occurred earlier and the environment - * must be shut down. - * <li>EINVAL - an invalid parameter was specified. 
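A sketch of the reset/renew pattern described above: keep one read-only transaction handle and reuse it to avoid allocation (and, with MDB_NOTLS, locking) overhead.

#include "lmdb.h"

MDB_txn *rtxn;
mdb_txn_begin(env, NULL, MDB_RDONLY, &rtxn);
/* ... read ... */
mdb_txn_reset(rtxn);     /* release the snapshot, keep the handle     */

/* later, when another read is needed: */
mdb_txn_renew(rtxn);     /* acquire a fresh snapshot on the same handle */
/* ... read ... */
mdb_txn_abort(rtxn);     /* finally discard the handle                */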
- * </ul> - */ -int mdb_txn_renew(MDB_txn *txn); - -/** Compat with version <= 0.9.4, avoid clash with libmdb from MDB Tools project */ -#define mdb_open(txn,name,flags,dbi) mdb_dbi_open(txn,name,flags,dbi) -/** Compat with version <= 0.9.4, avoid clash with libmdb from MDB Tools project */ -#define mdb_close(env,dbi) mdb_dbi_close(env,dbi) - - /** @brief Open a database in the environment. - * - * A database handle denotes the name and parameters of a database, - * independently of whether such a database exists. - * The database handle may be discarded by calling #mdb_dbi_close(). - * The old database handle is returned if the database was already open. - * The handle may only be closed once. - * - * The database handle will be private to the current transaction until - * the transaction is successfully committed. If the transaction is - * aborted the handle will be closed automatically. - * After a successful commit the handle will reside in the shared - * environment, and may be used by other transactions. - * - * This function must not be called from multiple concurrent - * transactions in the same process. A transaction that uses - * this function must finish (either commit or abort) before - * any other transaction in the process may use this function. - * - * To use named databases (with name != NULL), #mdb_env_set_maxdbs() - * must be called before opening the environment. Database names are - * keys in the unnamed database, and may be read but not written. - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] name The name of the database to open. If only a single - * database is needed in the environment, this value may be NULL. - * @param[in] flags Special options for this database. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - * <ul> - * <li>#MDB_REVERSEKEY - * Keys are strings to be compared in reverse order, from the end - * of the strings to the beginning. By default, Keys are treated as strings and - * compared from beginning to end. - * <li>#MDB_DUPSORT - * Duplicate keys may be used in the database. (Or, from another perspective, - * keys may have multiple data items, stored in sorted order.) By default - * keys must be unique and may have only a single data item. - * <li>#MDB_INTEGERKEY - * Keys are binary integers in native byte order, either unsigned int - * or size_t, and will be sorted as such. - * The keys must all be of the same size. - * <li>#MDB_DUPFIXED - * This flag may only be used in combination with #MDB_DUPSORT. This option - * tells the library that the data items for this database are all the same - * size, which allows further optimizations in storage and retrieval. When - * all data items are the same size, the #MDB_GET_MULTIPLE and #MDB_NEXT_MULTIPLE - * cursor operations may be used to retrieve multiple items at once. - * <li>#MDB_INTEGERDUP - * This option specifies that duplicate data items are binary integers, - * similar to #MDB_INTEGERKEY keys. - * <li>#MDB_REVERSEDUP - * This option specifies that duplicate data items should be compared as - * strings in reverse order. - * <li>#MDB_CREATE - * Create the named database if it doesn't exist. This option is not - * allowed in a read-only transaction or a read-only environment. - * </ul> - * @param[out] dbi Address where the new #MDB_dbi handle will be stored - * @return A non-zero error value on failure and 0 on success. 
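A sketch of opening a named database with MDB_CREATE, as described above. The database name is an example; mdb_env_set_maxdbs() must have been called before mdb_env_open(), and the handle becomes shared only after the transaction commits.

#include "lmdb.h"

MDB_txn *txn;
MDB_dbi dbi;
mdb_txn_begin(env, NULL, 0, &txn);
int rc = mdb_dbi_open(txn, "settings", MDB_CREATE, &dbi);
if (rc == MDB_SUCCESS)
	mdb_txn_commit(txn);   /* after commit, dbi may be used by other txns */
else
	mdb_txn_abort(txn);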
Some possible - * errors are: - * <ul> - * <li>#MDB_NOTFOUND - the specified database doesn't exist in the environment - * and #MDB_CREATE was not specified. - * <li>#MDB_DBS_FULL - too many databases have been opened. See #mdb_env_set_maxdbs(). - * </ul> - */ -int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi); - - /** @brief Retrieve statistics for a database. - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[out] stat The address of an #MDB_stat structure - * where the statistics will be copied - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat); - - /** @brief Retrieve the DB flags for a database handle. - * - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[out] flags Address where the flags will be returned. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags); - - /** @brief Close a database handle. Normally unnecessary. Use with care: - * - * This call is not mutex protected. Handles should only be closed by - * a single thread, and only if no other threads are going to reference - * the database handle or one of its cursors any further. Do not close - * a handle if an existing transaction has modified its database. - * Doing so can cause misbehavior from database corruption to errors - * like MDB_BAD_VALSIZE (since the DB name is gone). - * - * Closing a database handle is not necessary, but lets #mdb_dbi_open() - * reuse the handle value. Usually it's better to set a bigger - * #mdb_env_set_maxdbs(), unless that value would be large. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - */ -void mdb_dbi_close(MDB_env *env, MDB_dbi dbi); - - /** @brief Empty or delete+close a database. - * - * See #mdb_dbi_close() for restrictions about closing the DB handle. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] del 0 to empty the DB, 1 to delete it from the - * environment and close the DB handle. - * @return A non-zero error value on failure and 0 on success. - */ -int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del); - - /** @brief Set a custom key comparison function for a database. - * - * The comparison function is called whenever it is necessary to compare a - * key specified by the application with a key currently stored in the database. - * If no comparison function is specified, and no special key flags were specified - * with #mdb_dbi_open(), the keys are compared lexically, with shorter keys collating - * before longer keys. - * @warning This function must be called before any data access functions are used, - * otherwise data corruption may occur. The same comparison function must be used by every - * program accessing the database, every time the database is used. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] cmp A #MDB_cmp_func function - * @return A non-zero error value on failure and 0 on success. 
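A sketch of a custom key comparator, following the mdb_set_compare() warning below: it must be installed before any data access and by every program that ever opens the database. The integer-key comparator here is illustrative.

#include <string.h>
#include "lmdb.h"

static int cmp_u32(const MDB_val *a, const MDB_val *b)
{
	unsigned int x, y;
	memcpy(&x, a->mv_data, sizeof(x));
	memcpy(&y, b->mv_data, sizeof(y));
	return (x > y) - (x < y);
}

/* immediately after mdb_dbi_open(), before any get/put on dbi: */
mdb_set_compare(txn, dbi, cmp_u32);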
Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_set_compare(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp); - - /** @brief Set a custom data comparison function for a #MDB_DUPSORT database. - * - * This comparison function is called whenever it is necessary to compare a data - * item specified by the application with a data item currently stored in the database. - * This function only takes effect if the database was opened with the #MDB_DUPSORT - * flag. - * If no comparison function is specified, and no special key flags were specified - * with #mdb_dbi_open(), the data items are compared lexically, with shorter items collating - * before longer items. - * @warning This function must be called before any data access functions are used, - * otherwise data corruption may occur. The same comparison function must be used by every - * program accessing the database, every time the database is used. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] cmp A #MDB_cmp_func function - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_set_dupsort(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp); - - /** @brief Set a relocation function for a #MDB_FIXEDMAP database. - * - * @todo The relocation function is called whenever it is necessary to move the data - * of an item to a different position in the database (e.g. through tree - * balancing operations, shifts as a result of adds or deletes, etc.). It is - * intended to allow address/position-dependent data items to be stored in - * a database in an environment opened with the #MDB_FIXEDMAP option. - * Currently the relocation feature is unimplemented and setting - * this function has no effect. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] rel A #MDB_rel_func function - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_set_relfunc(MDB_txn *txn, MDB_dbi dbi, MDB_rel_func *rel); - - /** @brief Set a context pointer for a #MDB_FIXEDMAP database's relocation function. - * - * See #mdb_set_relfunc and #MDB_rel_func for more details. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] ctx An arbitrary pointer for whatever the application needs. - * It will be passed to the callback function set by #mdb_set_relfunc - * as its \b relctx parameter whenever the callback is invoked. - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx); - - /** @brief Get items from a database. - * - * This function retrieves key/data pairs from the database. The address - * and length of the data associated with the specified \b key are returned - * in the structure to which \b data refers. - * If the database supports duplicate keys (#MDB_DUPSORT) then the - * first data item for the key will be returned. Retrieval of other - * items requires the use of #mdb_cursor_get(). 
- * - * @note The memory pointed to by the returned values is owned by the - * database. The caller need not dispose of the memory, and may not - * modify it in any way. For values returned in a read-only transaction - * any modification attempts will cause a SIGSEGV. - * @note Values returned from the database are valid only until a - * subsequent update operation, or the end of the transaction. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] key The key to search for in the database - * @param[out] data The data corresponding to the key - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>#MDB_NOTFOUND - the key was not in the database. - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data); - - /** @brief Store items into a database. - * - * This function stores key/data pairs in the database. The default behavior - * is to enter the new key/data pair, replacing any previously existing key - * if duplicates are disallowed, or adding a duplicate data item if - * duplicates are allowed (#MDB_DUPSORT). - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] key The key to store in the database - * @param[in,out] data The data to store - * @param[in] flags Special options for this operation. This parameter - * must be set to 0 or by bitwise OR'ing together one or more of the - * values described here. - * <ul> - * <li>#MDB_NODUPDATA - enter the new key/data pair only if it does not - * already appear in the database. This flag may only be specified - * if the database was opened with #MDB_DUPSORT. The function will - * return #MDB_KEYEXIST if the key/data pair already appears in the - * database. - * <li>#MDB_NOOVERWRITE - enter the new key/data pair only if the key - * does not already appear in the database. The function will return - * #MDB_KEYEXIST if the key already appears in the database, even if - * the database supports duplicates (#MDB_DUPSORT). The \b data - * parameter will be set to point to the existing item. - * <li>#MDB_RESERVE - reserve space for data of the given size, but - * don't copy the given data. Instead, return a pointer to the - * reserved space, which the caller can fill in later - before - * the next update operation or the transaction ends. This saves - * an extra memcpy if the data is being generated later. - * LMDB does nothing else with this memory, the caller is expected - * to modify all of the space requested. This flag must not be - * specified if the database was opened with #MDB_DUPSORT. - * <li>#MDB_APPEND - append the given key/data pair to the end of the - * database. This option allows fast bulk loading when keys are - * already known to be in the correct order. Loading unsorted keys - * with this flag will cause a #MDB_KEYEXIST error. - * <li>#MDB_APPENDDUP - as above, but for sorted dup data. - * </ul> - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>#MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize(). - * <li>#MDB_TXN_FULL - the transaction has too many dirty pages. - * <li>EACCES - an attempt was made to write in a read-only transaction. - * <li>EINVAL - an invalid parameter was specified. 
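A sketch of a put-then-get round trip with the calls described above, including the documented MDB_NOOVERWRITE behaviour (on MDB_KEYEXIST the data parameter is repointed at the existing item). Key and value contents are illustrative; env and dbi are assumed from context.

#include <string.h>
#include "lmdb.h"

MDB_txn *txn;
MDB_val key, data, out;
unsigned int id = 7;
const char *val = "hello";

key.mv_size  = sizeof(id);        key.mv_data  = &id;
data.mv_size = strlen(val) + 1;   data.mv_data = (void *)val;

mdb_txn_begin(env, NULL, 0, &txn);
if (mdb_put(txn, dbi, &key, &data, MDB_NOOVERWRITE) == MDB_KEYEXIST) {
	/* key already present; data now points at the stored item */
}
mdb_get(txn, dbi, &key, &out);    /* out.mv_data points into the map */
mdb_txn_commit(txn);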
- * </ul> - */ -int mdb_put(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data, - unsigned int flags); - - /** @brief Delete items from a database. - * - * This function removes key/data pairs from the database. - * If the database does not support sorted duplicate data items - * (#MDB_DUPSORT) the data parameter is ignored. - * If the database supports sorted duplicates and the data parameter - * is NULL, all of the duplicate data items for the key will be - * deleted. Otherwise, if the data parameter is non-NULL - * only the matching data item will be deleted. - * This function will return #MDB_NOTFOUND if the specified key/data - * pair is not in the database. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] key The key to delete from the database - * @param[in] data The data to delete - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EACCES - an attempt was made to write in a read-only transaction. - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_del(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data); - - /** @brief Create a cursor handle. - * - * A cursor is associated with a specific transaction and database. - * A cursor cannot be used when its database handle is closed. Nor - * when its transaction has ended, except with #mdb_cursor_renew(). - * It can be discarded with #mdb_cursor_close(). - * A cursor in a write-transaction can be closed before its transaction - * ends, and will otherwise be closed when its transaction ends. - * A cursor in a read-only transaction must be closed explicitly, before - * or after its transaction ends. It can be reused with - * #mdb_cursor_renew() before finally closing it. - * @note Earlier documentation said that cursors in every transaction - * were closed when the transaction committed or aborted. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[out] cursor Address where the new #MDB_cursor handle will be stored - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor); - - /** @brief Close a cursor handle. - * - * The cursor handle will be freed and must not be used again after this call. - * Its transaction must still be live if it is a write-transaction. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - */ -void mdb_cursor_close(MDB_cursor *cursor); - - /** @brief Renew a cursor handle. - * - * A cursor is associated with a specific transaction and database. - * Cursors that are only used in read-only - * transactions may be re-used, to avoid unnecessary malloc/free overhead. - * The cursor may be associated with a new read-only transaction, and - * referencing the same database handle as it was created with. - * This may be done whether the previous transaction is live or dead. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - an invalid parameter was specified. 
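A small sketch of mdb_del() as documented above: with a NULL data parameter, all duplicates for the key are removed in an MDB_DUPSORT database, and MDB_NOTFOUND simply means there was nothing to delete.

#include "lmdb.h"

MDB_txn *txn;
mdb_txn_begin(env, NULL, 0, &txn);
int rc = mdb_del(txn, dbi, &key, NULL);
if (rc == MDB_SUCCESS || rc == MDB_NOTFOUND)
	mdb_txn_commit(txn);
else
	mdb_txn_abort(txn);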
- * </ul> - */ -int mdb_cursor_renew(MDB_txn *txn, MDB_cursor *cursor); - - /** @brief Return the cursor's transaction handle. - * - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - */ -MDB_txn *mdb_cursor_txn(MDB_cursor *cursor); - - /** @brief Return the cursor's database handle. - * - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - */ -MDB_dbi mdb_cursor_dbi(MDB_cursor *cursor); - - /** @brief Retrieve by cursor. - * - * This function retrieves key/data pairs from the database. The address and length - * of the key are returned in the object to which \b key refers (except for the - * case of the #MDB_SET option, in which the \b key object is unchanged), and - * the address and length of the data are returned in the object to which \b data - * refers. - * See #mdb_get() for restrictions on using the output values. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[in,out] key The key for a retrieved item - * @param[in,out] data The data of a retrieved item - * @param[in] op A cursor operation #MDB_cursor_op - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>#MDB_NOTFOUND - no matching key found. - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val *data, - MDB_cursor_op op); - - /** @brief Store by cursor. - * - * This function stores key/data pairs into the database. - * The cursor is positioned at the new item, or on failure usually near it. - * @note Earlier documentation incorrectly said errors would leave the - * state of the cursor unchanged. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[in] key The key operated on. - * @param[in] data The data operated on. - * @param[in] flags Options for this operation. This parameter - * must be set to 0 or one of the values described here. - * <ul> - * <li>#MDB_CURRENT - replace the item at the current cursor position. - * The \b key parameter must still be provided, and must match it. - * If using sorted duplicates (#MDB_DUPSORT) the data item must still - * sort into the same place. This is intended to be used when the - * new data is the same size as the old. Otherwise it will simply - * perform a delete of the old record followed by an insert. - * <li>#MDB_NODUPDATA - enter the new key/data pair only if it does not - * already appear in the database. This flag may only be specified - * if the database was opened with #MDB_DUPSORT. The function will - * return #MDB_KEYEXIST if the key/data pair already appears in the - * database. - * <li>#MDB_NOOVERWRITE - enter the new key/data pair only if the key - * does not already appear in the database. The function will return - * #MDB_KEYEXIST if the key already appears in the database, even if - * the database supports duplicates (#MDB_DUPSORT). - * <li>#MDB_RESERVE - reserve space for data of the given size, but - * don't copy the given data. Instead, return a pointer to the - * reserved space, which the caller can fill in later - before - * the next update operation or the transaction ends. This saves - * an extra memcpy if the data is being generated later. This flag - * must not be specified if the database was opened with #MDB_DUPSORT. - * <li>#MDB_APPEND - append the given key/data pair to the end of the - * database. No key comparisons are performed. This option allows - * fast bulk loading when keys are already known to be in the - * correct order. 
Loading unsorted keys with this flag will cause - * a #MDB_KEYEXIST error. - * <li>#MDB_APPENDDUP - as above, but for sorted dup data. - * <li>#MDB_MULTIPLE - store multiple contiguous data elements in a - * single request. This flag may only be specified if the database - * was opened with #MDB_DUPFIXED. The \b data argument must be an - * array of two MDB_vals. The mv_size of the first MDB_val must be - * the size of a single data element. The mv_data of the first MDB_val - * must point to the beginning of the array of contiguous data elements. - * The mv_size of the second MDB_val must be the count of the number - * of data elements to store. On return this field will be set to - * the count of the number of elements actually written. The mv_data - * of the second MDB_val is unused. - * </ul> - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>#MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize(). - * <li>#MDB_TXN_FULL - the transaction has too many dirty pages. - * <li>EACCES - an attempt was made to write in a read-only transaction. - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_cursor_put(MDB_cursor *cursor, MDB_val *key, MDB_val *data, - unsigned int flags); - - /** @brief Delete current key/data pair - * - * This function deletes the key/data pair to which the cursor refers. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[in] flags Options for this operation. This parameter - * must be set to 0 or one of the values described here. - * <ul> - * <li>#MDB_NODUPDATA - delete all of the data items for the current key. - * This flag may only be specified if the database was opened with #MDB_DUPSORT. - * </ul> - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EACCES - an attempt was made to write in a read-only transaction. - * <li>EINVAL - an invalid parameter was specified. - * </ul> - */ -int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags); - - /** @brief Return count of duplicates for current key. - * - * This call is only valid on databases that support sorted duplicate - * data items #MDB_DUPSORT. - * @param[in] cursor A cursor handle returned by #mdb_cursor_open() - * @param[out] countp Address where the count will be stored - * @return A non-zero error value on failure and 0 on success. Some possible - * errors are: - * <ul> - * <li>EINVAL - cursor is not initialized, or an invalid parameter was specified. - * </ul> - */ -int mdb_cursor_count(MDB_cursor *cursor, mdb_size_t *countp); - - /** @brief Compare two data items according to a particular database. - * - * This returns a comparison as if the two data items were keys in the - * specified database. - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] a The first item to compare - * @param[in] b The second item to compare - * @return < 0 if a < b, 0 if a == b, > 0 if a > b - */ -int mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b); - - /** @brief Compare two data items according to a particular database. - * - * This returns a comparison as if the two items were data items of - * the specified database. The database must have the #MDB_DUPSORT flag. 
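A sketch of an MDB_MULTIPLE bulk store, following the two-element MDB_val array layout described above (the database must be MDB_DUPFIXED). The key and values are illustrative and the cursor is assumed to come from mdb_cursor_open() on a write transaction.

#include "lmdb.h"

unsigned int key_id = 10;
unsigned int values[4] = { 1, 2, 3, 4 };
MDB_val key, multi[2];

key.mv_size = sizeof(key_id);
key.mv_data = &key_id;

multi[0].mv_size = sizeof(unsigned int);  /* size of one data element        */
multi[0].mv_data = values;                /* start of the contiguous array   */
multi[1].mv_size = 4;                     /* number of elements to store     */
multi[1].mv_data = NULL;                  /* unused                          */

mdb_cursor_put(cursor, &key, multi, MDB_MULTIPLE);
/* multi[1].mv_size now holds the count of elements actually written */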
- * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - * @param[in] a The first item to compare - * @param[in] b The second item to compare - * @return < 0 if a < b, 0 if a == b, > 0 if a > b - */ -int mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b); - - /** @brief A callback function used to print a message from the library. - * - * @param[in] msg The string to be printed. - * @param[in] ctx An arbitrary context pointer for the callback. - * @return < 0 on failure, >= 0 on success. - */ -typedef int (MDB_msg_func)(const char *msg, void *ctx); - - /** @brief Dump the entries in the reader lock table. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[in] func A #MDB_msg_func function - * @param[in] ctx Anything the message function needs - * @return < 0 on failure, >= 0 on success. - */ -int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx); - - /** @brief Check for stale entries in the reader lock table. - * - * @param[in] env An environment handle returned by #mdb_env_create() - * @param[out] dead Number of stale slots that were cleared - * @return 0 on success, non-zero on failure. - */ -int mdb_reader_check(MDB_env *env, int *dead); -/** @} */ - -#ifdef __cplusplus -} -#endif -/** @page tools LMDB Command Line Tools - The following describes the command line tools that are available for LMDB. - \li \ref mdb_copy_1 - \li \ref mdb_dump_1 - \li \ref mdb_load_1 - \li \ref mdb_stat_1 -*/ - -#endif /* _LMDB_H_ */ diff --git a/plugins/Dbx_mdb/src/lmdb/mdb.c b/plugins/Dbx_mdb/src/lmdb/mdb.c deleted file mode 100644 index 44499e4f1b..0000000000 --- a/plugins/Dbx_mdb/src/lmdb/mdb.c +++ /dev/null @@ -1,10925 +0,0 @@ -/** @file mdb.c - * @brief Lightning memory-mapped database library - * - * A Btree-based database management library modeled loosely on the - * BerkeleyDB API, but much simplified. - */ -/* - * Copyright 2011-2016 Howard Chu, Symas Corp. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted only as authorized by the OpenLDAP - * Public License. - * - * A copy of this license is available in the file LICENSE in the - * top-level directory of the distribution or, alternatively, at - * <http://www.OpenLDAP.org/license.html>. - * - * This code is derived from btree.c written by Martin Hedenfalk. - * - * Copyright (c) 2009, 2010 Martin Hedenfalk <martin@bzero.se> - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
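A short sketch of clearing stale reader slots with mdb_reader_check(), as documented above; this is typically run after a reader process has crashed while holding a slot.

#include <stdio.h>
#include "lmdb.h"

int dead = 0;
if (mdb_reader_check(env, &dead) == MDB_SUCCESS && dead > 0)
	printf("cleared %d stale reader slot(s)\n", dead);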
- */ - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE 1 -#endif -#if defined(MDB_VL32) || defined(__WIN64__) -#define _FILE_OFFSET_BITS 64 -#endif -#ifdef _WIN32 -#include <malloc.h> -#include <windows.h> - -#ifdef _WIN64 -# pragma comment(lib, "lib/x64/ntdll.lib") -#else -# pragma comment(lib, "lib/x86/ntdll.lib") -#endif - -#pragma warning(disable: 4706) - -/* We use native NT APIs to setup the memory map, so that we can - * let the DB file grow incrementally instead of always preallocating - * the full size. These APIs are defined in <wdm.h> and <ntifs.h> - * but those headers are meant for driver-level development and - * conflict with the regular user-level headers, so we explicitly - * declare them here. Using these APIs also means we must link to - * ntdll.dll, which is not linked by default in user code. - */ -NTSTATUS WINAPI -NtCreateSection(OUT PHANDLE sh, IN ACCESS_MASK acc, - IN void * oa OPTIONAL, - IN PLARGE_INTEGER ms OPTIONAL, - IN ULONG pp, IN ULONG aa, IN HANDLE fh OPTIONAL); - -typedef enum _SECTION_INHERIT { - ViewShare = 1, - ViewUnmap = 2 -} SECTION_INHERIT; - -NTSTATUS WINAPI -NtMapViewOfSection(IN PHANDLE sh, IN HANDLE ph, - IN OUT PVOID *addr, IN ULONG_PTR zbits, - IN SIZE_T cs, IN OUT PLARGE_INTEGER off OPTIONAL, - IN OUT PSIZE_T vs, IN SECTION_INHERIT ih, - IN ULONG at, IN ULONG pp); - -NTSTATUS WINAPI -NtClose(HANDLE h); - -/** getpid() returns int; MinGW defines pid_t but MinGW64 typedefs it - * as int64 which is wrong. MSVC doesn't define it at all, so just - * don't use it. - */ -#define MDB_PID_T int -#define MDB_THR_T DWORD -#include <sys/types.h> -#include <sys/stat.h> -#ifdef __GNUC__ -# include <sys/param.h> -#else -# define LITTLE_ENDIAN 1234 -# define BIG_ENDIAN 4321 -# define BYTE_ORDER LITTLE_ENDIAN -# ifndef SSIZE_MAX -# define SSIZE_MAX INT_MAX -# endif -#endif -#else -#include <sys/types.h> -#include <sys/stat.h> -#define MDB_PID_T pid_t -#define MDB_THR_T pthread_t -#include <sys/param.h> -#include <sys/uio.h> -#include <sys/mman.h> -#ifdef HAVE_SYS_FILE_H -#include <sys/file.h> -#endif -#include <fcntl.h> -#endif - -#if defined(__mips) && defined(__linux) -/* MIPS has cache coherency issues, requires explicit cache control */ -#include <asm/cachectl.h> -extern int cacheflush(char *addr, int nbytes, int cache); -#define CACHEFLUSH(addr, bytes, cache) cacheflush(addr, bytes, cache) -#else -#define CACHEFLUSH(addr, bytes, cache) -#endif - -#if defined(__linux) && !defined(MDB_FDATASYNC_WORKS) -/** fdatasync is broken on ext3/ext4fs on older kernels, see - * description in #mdb_env_open2 comments. You can safely - * define MDB_FDATASYNC_WORKS if this code will only be run - * on kernels 3.6 and newer. 
- */ -#define BROKEN_FDATASYNC -#endif - -#include <errno.h> -#include <limits.h> -#include <stddef.h> -#if _MSC_VER < 1800 -# include <msapi/inttypes.h> -#else -# include <inttypes.h> -#endif -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <time.h> - -#ifdef _MSC_VER -#include <io.h> -typedef SSIZE_T ssize_t; -#else -#include <unistd.h> -#endif - -#if defined(__sun) || defined(ANDROID) -/* Most platforms have posix_memalign, older may only have memalign */ -#define HAVE_MEMALIGN 1 -#include <malloc.h> -#endif - -#if !(defined(BYTE_ORDER) || defined(__BYTE_ORDER)) -#include <netinet/in.h> -#include <resolv.h> /* defines BYTE_ORDER on HPUX and Solaris */ -#endif - -#if defined(__APPLE__) || defined (BSD) -# if !(defined(MDB_USE_POSIX_MUTEX) || defined(MDB_USE_POSIX_SEM)) -# define MDB_USE_SYSV_SEM 1 -# endif -# define MDB_FDATASYNC fsync -#elif defined(ANDROID) -# define MDB_FDATASYNC fsync -#endif - -#ifndef _WIN32 -#include <pthread.h> -#ifdef MDB_USE_POSIX_SEM -# define MDB_USE_HASH 1 -#include <semaphore.h> -#elif defined(MDB_USE_SYSV_SEM) -#include <sys/ipc.h> -#include <sys/sem.h> -#ifdef _SEM_SEMUN_UNDEFINED -union semun { - int val; - struct semid_ds *buf; - unsigned short *array; -}; -#endif /* _SEM_SEMUN_UNDEFINED */ -#else -#define MDB_USE_POSIX_MUTEX 1 -#endif /* MDB_USE_POSIX_SEM */ -#endif /* !_WIN32 */ - -#if defined(_WIN32) + defined(MDB_USE_POSIX_SEM) + defined(MDB_USE_SYSV_SEM) \ - + defined(MDB_USE_POSIX_MUTEX) != 1 -# error "Ambiguous shared-lock implementation" -#endif - -#ifdef USE_VALGRIND -#include <valgrind/memcheck.h> -#define VGMEMP_CREATE(h,r,z) VALGRIND_CREATE_MEMPOOL(h,r,z) -#define VGMEMP_ALLOC(h,a,s) VALGRIND_MEMPOOL_ALLOC(h,a,s) -#define VGMEMP_FREE(h,a) VALGRIND_MEMPOOL_FREE(h,a) -#define VGMEMP_DESTROY(h) VALGRIND_DESTROY_MEMPOOL(h) -#define VGMEMP_DEFINED(a,s) VALGRIND_MAKE_MEM_DEFINED(a,s) -#else -#define VGMEMP_CREATE(h,r,z) -#define VGMEMP_ALLOC(h,a,s) -#define VGMEMP_FREE(h,a) -#define VGMEMP_DESTROY(h) -#define VGMEMP_DEFINED(a,s) -#endif - -#ifndef BYTE_ORDER -# if (defined(_LITTLE_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)) -/* Solaris just defines one or the other */ -# define LITTLE_ENDIAN 1234 -# define BIG_ENDIAN 4321 -# ifdef _LITTLE_ENDIAN -# define BYTE_ORDER LITTLE_ENDIAN -# else -# define BYTE_ORDER BIG_ENDIAN -# endif -# else -# define BYTE_ORDER __BYTE_ORDER -# endif -#endif - -#ifndef LITTLE_ENDIAN -#define LITTLE_ENDIAN __LITTLE_ENDIAN -#endif -#ifndef BIG_ENDIAN -#define BIG_ENDIAN __BIG_ENDIAN -#endif - -#if defined(__i386) || defined(__x86_64) || defined(_M_IX86) -#define MISALIGNED_OK 1 -#endif - -#include "lmdb.h" -#include "midl.h" - -#if (BYTE_ORDER == LITTLE_ENDIAN) == (BYTE_ORDER == BIG_ENDIAN) -# error "Unknown or unsupported endianness (BYTE_ORDER)" -#elif (-6 & 5) || CHAR_BIT != 8 || UINT_MAX < 0xffffffff || ULONG_MAX % 0xFFFF -# error "Two's complement, reasonably sized integer types, please" -#endif - -#ifdef __GNUC__ -/** Put infrequently used env functions in separate section */ -# ifdef __APPLE__ -# define ESECT __attribute__ ((section("__TEXT,text_env"))) -# else -# define ESECT __attribute__ ((section("text_env"))) -# endif -#else -#define ESECT -#endif - -#ifdef _WIN32 -#define CALL_CONV WINAPI -#else -#define CALL_CONV -#endif - -/** @defgroup internal LMDB Internals - * @{ - */ -/** @defgroup compat Compatibility Macros - * A bunch of macros to minimize the amount of platform-specific ifdefs - * needed throughout the rest of the code. 
When the features this library - * needs are similar enough to POSIX to be hidden in a one-or-two line - * replacement, this macro approach is used. - * @{ - */ - - /** Features under development */ -#ifndef MDB_DEVEL -#define MDB_DEVEL 0 -#endif - - /** Wrapper around __func__, which is a C99 feature */ -#if __STDC_VERSION__ >= 199901L -# define mdb_func_ __func__ -#elif __GNUC__ >= 2 || _MSC_VER >= 1300 -# define mdb_func_ __FUNCTION__ -#else -/* If a debug message says <mdb_unknown>(), update the #if statements above */ -# define mdb_func_ "<mdb_unknown>" -#endif - -/* Internal error codes, not exposed outside liblmdb */ -#define MDB_NO_ROOT (MDB_LAST_ERRCODE + 10) -#ifdef _WIN32 -#define MDB_OWNERDEAD ((int) WAIT_ABANDONED) -#elif defined MDB_USE_SYSV_SEM -#define MDB_OWNERDEAD (MDB_LAST_ERRCODE + 11) -#elif defined(MDB_USE_POSIX_MUTEX) && defined(EOWNERDEAD) -#define MDB_OWNERDEAD EOWNERDEAD /**< #LOCK_MUTEX0() result if dead owner */ -#endif - -#ifdef __GLIBC__ -#define GLIBC_VER ((__GLIBC__ << 16 )| __GLIBC_MINOR__) -#endif -/** Some platforms define the EOWNERDEAD error code - * even though they don't support Robust Mutexes. - * Compile with -DMDB_USE_ROBUST=0, or use some other - * mechanism like -DMDB_USE_SYSV_SEM instead of - * -DMDB_USE_POSIX_MUTEX. (SysV semaphores are - * also Robust, but some systems don't support them - * either.) - */ -#ifndef MDB_USE_ROBUST -/* Android currently lacks Robust Mutex support. So does glibc < 2.4. */ -# if defined(MDB_USE_POSIX_MUTEX) && (defined(ANDROID) || \ - (defined(__GLIBC__) && GLIBC_VER < 0x020004)) -# define MDB_USE_ROBUST 0 -# else -# define MDB_USE_ROBUST 1 -/* glibc < 2.12 only provided _np API */ -# if (defined(__GLIBC__) && GLIBC_VER < 0x02000c) || \ - (defined(PTHREAD_MUTEX_ROBUST_NP) && !defined(PTHREAD_MUTEX_ROBUST)) -# define PTHREAD_MUTEX_ROBUST PTHREAD_MUTEX_ROBUST_NP -# define pthread_mutexattr_setrobust(attr, flag) pthread_mutexattr_setrobust_np(attr, flag) -# define pthread_mutex_consistent(mutex) pthread_mutex_consistent_np(mutex) -# endif -# endif -#endif /* MDB_USE_ROBUST */ - -#if defined(MDB_OWNERDEAD) && MDB_USE_ROBUST -#define MDB_ROBUST_SUPPORTED 1 -#endif - -#ifdef _WIN32 -#define MDB_USE_HASH 1 -#define MDB_PIDLOCK 0 -#define THREAD_RET DWORD -#define pthread_t HANDLE -#define pthread_mutex_t HANDLE -#define pthread_cond_t HANDLE -typedef HANDLE mdb_mutex_t, mdb_mutexref_t; -#define pthread_key_t DWORD -#define pthread_self() GetCurrentThreadId() -#define pthread_key_create(x,y) \ - ((*(x) = TlsAlloc()) == TLS_OUT_OF_INDEXES ? ErrCode() : 0) -#define pthread_key_delete(x) TlsFree(x) -#define pthread_getspecific(x) TlsGetValue(x) -#define pthread_setspecific(x,y) (TlsSetValue(x,y) ? 
0 : ErrCode()) -#define pthread_mutex_unlock(x) ReleaseMutex(*x) -#define pthread_mutex_lock(x) WaitForSingleObject(*x, INFINITE) -#define pthread_cond_signal(x) SetEvent(*x) -#define pthread_cond_wait(cond,mutex) do{SignalObjectAndWait(*mutex, *cond, INFINITE, FALSE); WaitForSingleObject(*mutex, INFINITE);}while(0) -#define THREAD_CREATE(thr,start,arg) thr=CreateThread(NULL,0,start,arg,0,NULL) -#define THREAD_FINISH(thr) WaitForSingleObject(thr, INFINITE) -#define LOCK_MUTEX0(mutex) WaitForSingleObject(mutex, INFINITE) -#define UNLOCK_MUTEX(mutex) ReleaseMutex(mutex) -#define mdb_mutex_consistent(mutex) 0 -#define getpid() GetCurrentProcessId() -#define MDB_FDATASYNC(fd) (!FlushFileBuffers(fd)) -#define MDB_MSYNC(addr,len,flags) (!FlushViewOfFile(addr,len)) -#define ErrCode() GetLastError() -#define GET_PAGESIZE(x) {SYSTEM_INFO si; GetSystemInfo(&si); (x) = si.dwPageSize;} -#define close(fd) (CloseHandle(fd) ? 0 : -1) -#define munmap(ptr,len) UnmapViewOfFile(ptr) -#ifdef PROCESS_QUERY_LIMITED_INFORMATION -#define MDB_PROCESS_QUERY_LIMITED_INFORMATION PROCESS_QUERY_LIMITED_INFORMATION -#else -#define MDB_PROCESS_QUERY_LIMITED_INFORMATION 0x1000 -#endif -#define Z "I" -#else -#define THREAD_RET void * -#define THREAD_CREATE(thr,start,arg) pthread_create(&thr,NULL,start,arg) -#define THREAD_FINISH(thr) pthread_join(thr,NULL) -#define Z "z" /**< printf format modifier for size_t */ - - /** For MDB_LOCK_FORMAT: True if readers take a pid lock in the lockfile */ -#define MDB_PIDLOCK 1 - -#ifdef MDB_USE_POSIX_SEM - -typedef sem_t *mdb_mutex_t, *mdb_mutexref_t; -#define LOCK_MUTEX0(mutex) mdb_sem_wait(mutex) -#define UNLOCK_MUTEX(mutex) sem_post(mutex) - -static int -mdb_sem_wait(sem_t *sem) -{ - int rc; - while ((rc = sem_wait(sem)) && (rc = errno) == EINTR) ; - return rc; -} - -#elif defined MDB_USE_SYSV_SEM - -typedef struct mdb_mutex { - int semid; - int semnum; - int *locked; -} mdb_mutex_t[1], *mdb_mutexref_t; - -#define LOCK_MUTEX0(mutex) mdb_sem_wait(mutex) -#define UNLOCK_MUTEX(mutex) do { \ - struct sembuf sb = { 0, 1, SEM_UNDO }; \ - sb.sem_num = (mutex)->semnum; \ - *(mutex)->locked = 0; \ - semop((mutex)->semid, &sb, 1); \ -} while(0) - -static int -mdb_sem_wait(mdb_mutexref_t sem) -{ - int rc, *locked = sem->locked; - struct sembuf sb = { 0, -1, SEM_UNDO }; - sb.sem_num = sem->semnum; - do { - if (!semop(sem->semid, &sb, 1)) { - rc = *locked ? MDB_OWNERDEAD : MDB_SUCCESS; - *locked = 1; - break; - } - } while ((rc = errno) == EINTR); - return rc; -} - -#define mdb_mutex_consistent(mutex) 0 - -#else /* MDB_USE_POSIX_MUTEX: */ - /** Shared mutex/semaphore as it is stored (mdb_mutex_t), and as - * local variables keep it (mdb_mutexref_t). - * - * An mdb_mutex_t can be assigned to an mdb_mutexref_t. They can - * be the same, or an array[size 1] and a pointer. - * @{ - */ -typedef pthread_mutex_t mdb_mutex_t[1], *mdb_mutexref_t; - /* @} */ - /** Lock the reader or writer mutex. - * Returns 0 or a code to give #mdb_mutex_failed(), as in #LOCK_MUTEX(). - */ -#define LOCK_MUTEX0(mutex) pthread_mutex_lock(mutex) - /** Unlock the reader or writer mutex. - */ -#define UNLOCK_MUTEX(mutex) pthread_mutex_unlock(mutex) - /** Mark mutex-protected data as repaired, after death of previous owner. - */ -#define mdb_mutex_consistent(mutex) pthread_mutex_consistent(mutex) -#endif /* MDB_USE_POSIX_SEM || MDB_USE_SYSV_SEM */ - - /** Get the error code for the last failed system function. - */ -#define ErrCode() errno - - /** An abstraction for a file handle. 
- * On POSIX systems file handles are small integers. On Windows - * they're opaque pointers. - */ -#define HANDLE int - - /** A value for an invalid file handle. - * Mainly used to initialize file variables and signify that they are - * unused. - */ -#define INVALID_HANDLE_VALUE (-1) - - /** Get the size of a memory page for the system. - * This is the basic size that the platform's memory manager uses, and is - * fundamental to the use of memory-mapped files. - */ -#define GET_PAGESIZE(x) ((x) = sysconf(_SC_PAGE_SIZE)) -#endif - -#ifdef MDB_VL32 -#ifdef _WIN32 -#define Y "I64" -#else -#define Y "ll" -#endif -#else -#define Y Z -#endif - -#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM) -#define MNAME_LEN 32 -#elif defined(MDB_USE_SYSV_SEM) -#define MNAME_LEN (sizeof(int)) -#else -#define MNAME_LEN (sizeof(pthread_mutex_t)) -#endif - -#ifdef MDB_USE_SYSV_SEM -#define SYSV_SEM_FLAG 1 /**< SysV sems in lockfile format */ -#else -#define SYSV_SEM_FLAG 0 -#endif - -/** @} */ - -#ifdef MDB_ROBUST_SUPPORTED - /** Lock mutex, handle any error, set rc = result. - * Return 0 on success, nonzero (not rc) on error. - */ -#define LOCK_MUTEX(rc, env, mutex) \ - (((rc) = LOCK_MUTEX0(mutex)) && \ - ((rc) = mdb_mutex_failed(env, mutex, rc))) -static int mdb_mutex_failed(MDB_env *env, mdb_mutexref_t mutex, int rc); -#else -#define LOCK_MUTEX(rc, env, mutex) ((rc) = LOCK_MUTEX0(mutex)) -#define mdb_mutex_failed(env, mutex, rc) (rc) -#endif - -#ifndef _WIN32 -/** A flag for opening a file and requesting synchronous data writes. - * This is only used when writing a meta page. It's not strictly needed; - * we could just do a normal write and then immediately perform a flush. - * But if this flag is available it saves us an extra system call. - * - * @note If O_DSYNC is undefined but exists in /usr/include, - * preferably set some compiler flag to get the definition. - */ -#ifndef MDB_DSYNC -# ifdef O_DSYNC -# define MDB_DSYNC O_DSYNC -# else -# define MDB_DSYNC O_SYNC -# endif -#endif -#endif - -/** Function for flushing the data of a file. Define this to fsync - * if fdatasync() is not supported. - */ -#ifndef MDB_FDATASYNC -# define MDB_FDATASYNC fdatasync -#endif - -#ifndef MDB_MSYNC -# define MDB_MSYNC(addr,len,flags) msync(addr,len,flags) -#endif - -#ifndef MS_SYNC -#define MS_SYNC 1 -#endif - -#ifndef MS_ASYNC -#define MS_ASYNC 0 -#endif - - /** A page number in the database. - * Note that 64 bit page numbers are overkill, since pages themselves - * already represent 12-13 bits of addressable memory, and the OS will - * always limit applications to a maximum of 63 bits of address space. - * - * @note In the #MDB_node structure, we only store 48 bits of this value, - * which thus limits us to only 60 bits of addressable data. - */ -typedef MDB_ID pgno_t; - - /** A transaction ID. - * See struct MDB_txn.mt_txnid for details. - */ -typedef MDB_ID txnid_t; - -/** @defgroup debug Debug Macros - * @{ - */ -#ifndef MDB_DEBUG - /** Enable debug output. Needs variable argument macros (a C99 feature). - * Set this to 1 for copious tracing. Set to 2 to add dumps of all IDLs - * read from and written to the database (used for free space management). - */ -#define MDB_DEBUG 0 -#endif - -#if MDB_DEBUG -static int mdb_debug; -static txnid_t mdb_debug_start; - - /** Print a debug message with printf formatting. - * Requires double parenthesis around 2 or more args. - */ -# define DPRINTF(args) ((void) ((mdb_debug) && DPRINTF0 args)) -# define DPRINTF0(fmt, ...) 
\ - LMDB_Log("%s:%d " fmt "\n", mdb_func_, __LINE__, __VA_ARGS__) -#else -# define DPRINTF(args) ((void) 0) -#endif - /** Print a debug string. - * The string is printed literally, with no format processing. - */ -#define DPUTS(arg) DPRINTF(("%s", arg)) - /** Debuging output value of a cursor DBI: Negative in a sub-cursor. */ -#define DDBI(mc) \ - (((mc)->mc_flags & C_SUB) ? -(int)(mc)->mc_dbi : (int)(mc)->mc_dbi) -/** @} */ - - /** @brief The maximum size of a database page. - * - * It is 32k or 64k, since value-PAGEBASE must fit in - * #MDB_page.%mp_upper. - * - * LMDB will use database pages < OS pages if needed. - * That causes more I/O in write transactions: The OS must - * know (read) the whole page before writing a partial page. - * - * Note that we don't currently support Huge pages. On Linux, - * regular data files cannot use Huge pages, and in general - * Huge pages aren't actually pageable. We rely on the OS - * demand-pager to read our data and page it out when memory - * pressure from other processes is high. So until OSs have - * actual paging support for Huge pages, they're not viable. - */ -#define MAX_PAGESIZE (PAGEBASE ? 0x10000 : 0x8000) - - /** The minimum number of keys required in a database page. - * Setting this to a larger value will place a smaller bound on the - * maximum size of a data item. Data items larger than this size will - * be pushed into overflow pages instead of being stored directly in - * the B-tree node. This value used to default to 4. With a page size - * of 4096 bytes that meant that any item larger than 1024 bytes would - * go into an overflow page. That also meant that on average 2-3KB of - * each overflow page was wasted space. The value cannot be lower than - * 2 because then there would no longer be a tree structure. With this - * value, items larger than 2KB will go into overflow pages, and on - * average only 1KB will be wasted. - */ -#define MDB_MINKEYS 2 - - /** A stamp that identifies a file as an LMDB file. - * There's nothing special about this value other than that it is easily - * recognizable, and it will reflect any byte order mismatches. - */ -#define MDB_MAGIC 0xBEEFC0DE - - /** The version number for a database's datafile format. */ -#define MDB_DATA_VERSION ((MDB_DEVEL) ? 999 : 1) - /** The version number for a database's lockfile format. */ -#define MDB_LOCK_VERSION ((MDB_DEVEL) ? 999 : 1) - - /** @brief The max size of a key we can write, or 0 for computed max. - * - * This macro should normally be left alone or set to 0. - * Note that a database with big keys or dupsort data cannot be - * reliably modified by a liblmdb which uses a smaller max. - * The default is 511 for backwards compat, or 0 when #MDB_DEVEL. - * - * Other values are allowed, for backwards compat. However: - * A value bigger than the computed max can break if you do not - * know what you are doing, and liblmdb <= 0.9.10 can break when - * modifying a DB with keys/dupsort data bigger than its max. - * - * Data items in an #MDB_DUPSORT database are also limited to - * this size, since they're actually keys of a sub-DB. Keys and - * #MDB_DUPSORT data items must fit on a node in a regular page. - */ -#ifndef MDB_MAXKEYSIZE -#define MDB_MAXKEYSIZE ((MDB_DEVEL) ? 0 : 511) -#endif - - /** The maximum size of a key we can write to the environment. */ -#if MDB_MAXKEYSIZE -#define ENV_MAXKEY(env) (MDB_MAXKEYSIZE) -#else -#define ENV_MAXKEY(env) ((env)->me_maxkey) -#endif - - /** @brief The maximum size of a data item. 
- * - * We only store a 32 bit value for node sizes. - */ -#define MAXDATASIZE 0xffffffffUL - -#if MDB_DEBUG - /** Key size which fits in a #DKBUF. - * @ingroup debug - */ -#define DKBUF_MAXKEYSIZE ((MDB_MAXKEYSIZE) > 0 ? (MDB_MAXKEYSIZE) : 511) - /** A key buffer. - * @ingroup debug - * This is used for printing a hex dump of a key's contents. - */ -#define DKBUF char kbuf[DKBUF_MAXKEYSIZE*2+1] - /** Display a key in hex. - * @ingroup debug - * Invoke a function to display a key in hex. - */ -#define DKEY(x) mdb_dkey(x, kbuf) -#else -#define DKBUF -#define DKEY(x) 0 -#endif - - /** An invalid page number. - * Mainly used to denote an empty tree. - */ -#define P_INVALID (~(pgno_t)0) - - /** Test if the flags \b f are set in a flag word \b w. */ -#define F_ISSET(w, f) (((w) & (f)) == (f)) - - /** Round \b n up to an even number. */ -#define EVEN(n) (((n) + 1U) & -2) /* sign-extending -2 to match n+1U */ - - /** Used for offsets within a single page. - * Since memory pages are typically 4 or 8KB in size, 12-13 bits, - * this is plenty. - */ -typedef uint16_t indx_t; - - /** Default size of memory map. - * This is certainly too small for any actual applications. Apps should always set - * the size explicitly using #mdb_env_set_mapsize(). - */ -#define DEFAULT_MAPSIZE 1048576 - -/** @defgroup readers Reader Lock Table - * Readers don't acquire any locks for their data access. Instead, they - * simply record their transaction ID in the reader table. The reader - * mutex is needed just to find an empty slot in the reader table. The - * slot's address is saved in thread-specific data so that subsequent read - * transactions started by the same thread need no further locking to proceed. - * - * If #MDB_NOTLS is set, the slot address is not saved in thread-specific data. - * - * No reader table is used if the database is on a read-only filesystem, or - * if #MDB_NOLOCK is set. - * - * Since the database uses multi-version concurrency control, readers don't - * actually need any locking. This table is used to keep track of which - * readers are using data from which old transactions, so that we'll know - * when a particular old transaction is no longer in use. Old transactions - * that have discarded any data pages can then have those pages reclaimed - * for use by a later write transaction. - * - * The lock table is constructed such that reader slots are aligned with the - * processor's cache line size. Any slot is only ever used by one thread. - * This alignment guarantees that there will be no contention or cache - * thrashing as threads update their own slot info, and also eliminates - * any need for locking when accessing a slot. - * - * A writer thread will scan every slot in the table to determine the oldest - * outstanding reader transaction. Any freed pages older than this will be - * reclaimed by the writer. The writer doesn't use any locks when scanning - * this table. This means that there's no guarantee that the writer will - * see the most up-to-date reader info, but that's not required for correct - * operation - all we need is to know the upper bound on the oldest reader, - * we don't care at all about the newest reader. So the only consequence of - * reading stale information here is that old pages might hang around a - * while longer before being reclaimed. That's actually good anyway, because - * the longer we delay reclaiming old pages, the more likely it is that a - * string of contiguous pages can be found after coalescing old pages from - * many old transactions together. 
- * @{ - */ - /** Number of slots in the reader table. - * This value was chosen somewhat arbitrarily. 126 readers plus a - * couple mutexes fit exactly into 8KB on my development machine. - * Applications should set the table size using #mdb_env_set_maxreaders(). - */ -#define DEFAULT_READERS 126 - - /** The size of a CPU cache line in bytes. We want our lock structures - * aligned to this size to avoid false cache line sharing in the - * lock table. - * This value works for most CPUs. For Itanium this should be 128. - */ -#ifndef CACHELINE -#define CACHELINE 64 -#endif - - /** The information we store in a single slot of the reader table. - * In addition to a transaction ID, we also record the process and - * thread ID that owns a slot, so that we can detect stale information, - * e.g. threads or processes that went away without cleaning up. - * @note We currently don't check for stale records. We simply re-init - * the table when we know that we're the only process opening the - * lock file. - */ -typedef struct MDB_rxbody { - /** Current Transaction ID when this transaction began, or (txnid_t)-1. - * Multiple readers that start at the same time will probably have the - * same ID here. Again, it's not important to exclude them from - * anything; all we need to know is which version of the DB they - * started from so we can avoid overwriting any data used in that - * particular version. - */ - volatile txnid_t mrb_txnid; - /** The process ID of the process owning this reader txn. */ - volatile MDB_PID_T mrb_pid; - /** The thread ID of the thread owning this txn. */ - volatile MDB_THR_T mrb_tid; -} MDB_rxbody; - - /** The actual reader record, with cacheline padding. */ -typedef struct MDB_reader { - union { - MDB_rxbody mrx; - /** shorthand for mrb_txnid */ -#define mr_txnid mru.mrx.mrb_txnid -#define mr_pid mru.mrx.mrb_pid -#define mr_tid mru.mrx.mrb_tid - /** cache line alignment */ - char pad[(sizeof(MDB_rxbody)+CACHELINE-1) & ~(CACHELINE-1)]; - } mru; -} MDB_reader; - - /** The header for the reader table. - * The table resides in a memory-mapped file. (This is a different file - * than is used for the main database.) - * - * For POSIX the actual mutexes reside in the shared memory of this - * mapped file. On Windows, mutexes are named objects allocated by the - * kernel; we store the mutex names in this mapped file so that other - * processes can grab them. This same approach is also used on - * MacOSX/Darwin (using named semaphores) since MacOSX doesn't support - * process-shared POSIX mutexes. For these cases where a named object - * is used, the object name is derived from a 64 bit FNV hash of the - * environment pathname. As such, naming collisions are extremely - * unlikely. If a collision occurs, the results are unpredictable. - */ -typedef struct MDB_txbody { - /** Stamp identifying this as an LMDB file. It must be set - * to #MDB_MAGIC. */ - uint32_t mtb_magic; - /** Format of this lock file. Must be set to #MDB_LOCK_FORMAT. */ - uint32_t mtb_format; -#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM) - char mtb_rmname[MNAME_LEN]; -#elif defined(MDB_USE_SYSV_SEM) - int mtb_semid; - int mtb_rlocked; -#else - /** Mutex protecting access to this table. - * This is the reader table lock used with LOCK_MUTEX(). - */ - mdb_mutex_t mtb_rmutex; -#endif - /** The ID of the last transaction committed to the database. - * This is recorded here only for convenience; the value can always - * be determined by reading the main database meta pages. 
- */ - volatile txnid_t mtb_txnid; - /** The number of slots that have been used in the reader table. - * This always records the maximum count, it is not decremented - * when readers release their slots. - */ - volatile unsigned mtb_numreaders; -} MDB_txbody; - - /** The actual reader table definition. */ -typedef struct MDB_txninfo { - union { - MDB_txbody mtb; -#define mti_magic mt1.mtb.mtb_magic -#define mti_format mt1.mtb.mtb_format -#define mti_rmutex mt1.mtb.mtb_rmutex -#define mti_rmname mt1.mtb.mtb_rmname -#define mti_txnid mt1.mtb.mtb_txnid -#define mti_numreaders mt1.mtb.mtb_numreaders -#ifdef MDB_USE_SYSV_SEM -#define mti_semid mt1.mtb.mtb_semid -#define mti_rlocked mt1.mtb.mtb_rlocked -#endif - char pad[(sizeof(MDB_txbody)+CACHELINE-1) & ~(CACHELINE-1)]; - } mt1; - union { -#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM) - char mt2_wmname[MNAME_LEN]; -#define mti_wmname mt2.mt2_wmname -#elif defined MDB_USE_SYSV_SEM - int mt2_wlocked; -#define mti_wlocked mt2.mt2_wlocked -#else - mdb_mutex_t mt2_wmutex; -#define mti_wmutex mt2.mt2_wmutex -#endif - char pad[(MNAME_LEN+CACHELINE-1) & ~(CACHELINE-1)]; - } mt2; - MDB_reader mti_readers[1]; -} MDB_txninfo; - - /** Lockfile format signature: version, features and field layout */ -#define MDB_LOCK_FORMAT \ - ((uint32_t) \ - ((MDB_LOCK_VERSION) \ - /* Flags which describe functionality */ \ - + (SYSV_SEM_FLAG << 18) \ - + (((MDB_PIDLOCK) != 0) << 16))) -/** @} */ - -/** Common header for all page types. - * Overflow records occupy a number of contiguous pages with no - * headers on any page after the first. - */ -typedef struct MDB_page { -#define mp_pgno mp_p.p_pgno -#define mp_next mp_p.p_next - union { - pgno_t p_pgno; /**< page number */ - struct MDB_page *p_next; /**< for in-memory list of freed pages */ - } mp_p; - uint16_t mp_pad; -/** @defgroup mdb_page Page Flags - * @ingroup internal - * Flags for the page headers. - * @{ - */ -#define P_BRANCH 0x01 /**< branch page */ -#define P_LEAF 0x02 /**< leaf page */ -#define P_OVERFLOW 0x04 /**< overflow page */ -#define P_META 0x08 /**< meta page */ -#define P_DIRTY 0x10 /**< dirty page, also set for #P_SUBP pages */ -#define P_LEAF2 0x20 /**< for #MDB_DUPFIXED records */ -#define P_SUBP 0x40 /**< for #MDB_DUPSORT sub-pages */ -#define P_LOOSE 0x4000 /**< page was dirtied then freed, can be reused */ -#define P_KEEP 0x8000 /**< leave this page alone during spill */ -/** @} */ - uint16_t mp_flags; /**< @ref mdb_page */ -#define mp_lower mp_pb.pb.pb_lower -#define mp_upper mp_pb.pb.pb_upper -#define mp_pages mp_pb.pb_pages - union { - struct { - indx_t pb_lower; /**< lower bound of free space */ - indx_t pb_upper; /**< upper bound of free space */ - } pb; - uint32_t pb_pages; /**< number of overflow pages */ - } mp_pb; - indx_t mp_ptrs[1]; /**< dynamic size */ -} MDB_page; - - /** Size of the page header, excluding dynamic data at the end */ -#define PAGEHDRSZ ((unsigned) offsetof(MDB_page, mp_ptrs)) - - /** Address of first usable data byte in a page, after the header */ -#define METADATA(p) ((void *)((char *)(p) + PAGEHDRSZ)) - - /** ITS#7713, change PAGEBASE to handle 65536 byte pages */ -#define PAGEBASE ((MDB_DEVEL) ? PAGEHDRSZ : 0) - - /** Number of nodes on a page */ -#define NUMKEYS(p) (((p)->mp_lower - (PAGEHDRSZ-PAGEBASE)) >> 1) - - /** The amount of space remaining in the page */ -#define SIZELEFT(p) (indx_t)((p)->mp_upper - (p)->mp_lower) - - /** The percentage of space used in the page, in tenths of a percent. 
*/ -#define PAGEFILL(env, p) (1000L * ((env)->me_psize - PAGEHDRSZ - SIZELEFT(p)) / \ - ((env)->me_psize - PAGEHDRSZ)) - /** The minimum page fill factor, in tenths of a percent. - * Pages emptier than this are candidates for merging. - */ -#define FILL_THRESHOLD 250 - - /** Test if a page is a leaf page */ -#define IS_LEAF(p) F_ISSET((p)->mp_flags, P_LEAF) - /** Test if a page is a LEAF2 page */ -#define IS_LEAF2(p) F_ISSET((p)->mp_flags, P_LEAF2) - /** Test if a page is a branch page */ -#define IS_BRANCH(p) F_ISSET((p)->mp_flags, P_BRANCH) - /** Test if a page is an overflow page */ -#define IS_OVERFLOW(p) F_ISSET((p)->mp_flags, P_OVERFLOW) - /** Test if a page is a sub page */ -#define IS_SUBP(p) F_ISSET((p)->mp_flags, P_SUBP) - - /** The number of overflow pages needed to store the given size. */ -#define OVPAGES(size, psize) ((PAGEHDRSZ-1 + (size)) / (psize) + 1) - - /** Link in #MDB_txn.%mt_loose_pgs list */ -#define NEXT_LOOSE_PAGE(p) (*(MDB_page **)((p) + 2)) - - /** Header for a single key/data pair within a page. - * Used in pages of type #P_BRANCH and #P_LEAF without #P_LEAF2. - * We guarantee 2-byte alignment for 'MDB_node's. - */ -typedef struct MDB_node { - /** lo and hi are used for data size on leaf nodes and for - * child pgno on branch nodes. On 64 bit platforms, flags - * is also used for pgno. (Branch nodes have no flags). - * They are in host byte order in case that lets some - * accesses be optimized into a 32-bit word access. - */ -#if BYTE_ORDER == LITTLE_ENDIAN - unsigned short mn_lo, mn_hi; /**< part of data size or pgno */ -#else - unsigned short mn_hi, mn_lo; -#endif -/** @defgroup mdb_node Node Flags - * @ingroup internal - * Flags for node headers. - * @{ - */ -#define F_BIGDATA 0x01 /**< data put on overflow page */ -#define F_SUBDATA 0x02 /**< data is a sub-database */ -#define F_DUPDATA 0x04 /**< data has duplicates */ - -/** valid flags for #mdb_node_add() */ -#define NODE_ADD_FLAGS (F_DUPDATA|F_SUBDATA|MDB_RESERVE|MDB_APPEND) - -/** @} */ - unsigned short mn_flags; /**< @ref mdb_node */ - unsigned short mn_ksize; /**< key size */ - char mn_data[1]; /**< key and data are appended here */ -} MDB_node; - - /** Size of the node header, excluding dynamic data at the end */ -#define NODESIZE offsetof(MDB_node, mn_data) - - /** Bit position of top word in page number, for shifting mn_flags */ -#define PGNO_TOPWORD ((pgno_t)-1 > 0xffffffffu ? 32 : 0) - - /** Size of a node in a branch page with a given key. - * This is just the node header plus the key, there is no data. - */ -#define INDXSIZE(k) (NODESIZE + ((k) == NULL ? 0 : (k)->mv_size)) - - /** Size of a node in a leaf page with a given key and data. - * This is node header plus key plus data size. - */ -#define LEAFSIZE(k, d) (NODESIZE + (k)->mv_size + (d)->mv_size) - - /** Address of node \b i in page \b p */ -#define NODEPTR(p, i) ((MDB_node *)((char *)(p) + (p)->mp_ptrs[i] + PAGEBASE)) - - /** Address of the key for the node */ -#define NODEKEY(node) (void *)((node)->mn_data) - - /** Address of the data for a node */ -#define NODEDATA(node) (void *)((char *)(node)->mn_data + (node)->mn_ksize) - - /** Get the page number pointed to by a branch node */ -#define NODEPGNO(node) \ - ((node)->mn_lo | ((pgno_t) (node)->mn_hi << 16) | \ - (PGNO_TOPWORD ? 
((pgno_t) (node)->mn_flags << PGNO_TOPWORD) : 0)) - /** Set the page number in a branch node */ -#define SETPGNO(node,pgno) do { \ - (node)->mn_lo = (pgno) & 0xffff; (node)->mn_hi = (pgno) >> 16; \ - if (PGNO_TOPWORD) (node)->mn_flags = (pgno) >> PGNO_TOPWORD; } while(0) - - /** Get the size of the data in a leaf node */ -#define NODEDSZ(node) ((node)->mn_lo | ((unsigned)(node)->mn_hi << 16)) - /** Set the size of the data for a leaf node */ -#define SETDSZ(node,size) do { \ - (node)->mn_lo = (size) & 0xffff; (node)->mn_hi = (size) >> 16;} while(0) - /** The size of a key in a node */ -#define NODEKSZ(node) ((node)->mn_ksize) - - /** Copy a page number from src to dst */ -#ifdef MISALIGNED_OK -#define COPY_PGNO(dst,src) dst = src -#else -#if SIZE_MAX > 4294967295UL -#define COPY_PGNO(dst,src) do { \ - unsigned short *s, *d; \ - s = (unsigned short *)&(src); \ - d = (unsigned short *)&(dst); \ - *d++ = *s++; \ - *d++ = *s++; \ - *d++ = *s++; \ - *d = *s; \ -} while (0) -#else -#define COPY_PGNO(dst,src) do { \ - unsigned short *s, *d; \ - s = (unsigned short *)&(src); \ - d = (unsigned short *)&(dst); \ - *d++ = *s++; \ - *d = *s; \ -} while (0) -#endif -#endif - /** The address of a key in a LEAF2 page. - * LEAF2 pages are used for #MDB_DUPFIXED sorted-duplicate sub-DBs. - * There are no node headers, keys are stored contiguously. - */ -#define LEAF2KEY(p, i, ks) ((char *)(p) + PAGEHDRSZ + ((i)*(ks))) - - /** Set the \b node's key into \b keyptr, if requested. */ -#define MDB_GET_KEY(node, keyptr) { if ((keyptr) != NULL) { \ - (keyptr)->mv_size = NODEKSZ(node); (keyptr)->mv_data = NODEKEY(node); } } - - /** Set the \b node's key into \b key. */ -#define MDB_GET_KEY2(node, key) { key.mv_size = NODEKSZ(node); key.mv_data = NODEKEY(node); } - - /** Information about a single database in the environment. */ -typedef struct MDB_db { - uint32_t md_pad; /**< also ksize for LEAF2 pages */ - uint16_t md_flags; /**< @ref mdb_dbi_open */ - uint16_t md_depth; /**< depth of this tree */ - pgno_t md_branch_pages; /**< number of internal pages */ - pgno_t md_leaf_pages; /**< number of leaf pages */ - pgno_t md_overflow_pages; /**< number of overflow pages */ - mdb_size_t md_entries; /**< number of data items */ - pgno_t md_root; /**< the root page of this tree */ -} MDB_db; - - /** mdb_dbi_open flags */ -#define MDB_VALID 0x8000 /**< DB handle is valid, for me_dbflags */ -#define PERSISTENT_FLAGS (0xffff & ~(MDB_VALID)) -#define VALID_FLAGS (MDB_REVERSEKEY|MDB_DUPSORT|MDB_INTEGERKEY|MDB_DUPFIXED|\ - MDB_INTEGERDUP|MDB_REVERSEDUP|MDB_CREATE) - - /** Handle for the DB used to track free pages. */ -#define FREE_DBI 0 - /** Handle for the default DB. */ -#define MAIN_DBI 1 - /** Number of DBs in metapage (free and main) - also hardcoded elsewhere */ -#define CORE_DBS 2 - - /** Number of meta pages - also hardcoded elsewhere */ -#define NUM_METAS 2 - - /** Meta page content. - * A meta page is the start point for accessing a database snapshot. - * Pages 0-1 are meta pages. Transaction N writes meta page #(N % 2). - */ -typedef struct MDB_meta { - /** Stamp identifying this as an LMDB file. It must be set - * to #MDB_MAGIC. */ - uint32_t mm_magic; - /** Version number of this file. Must be set to #MDB_DATA_VERSION. 
*/ - uint32_t mm_version; -#ifdef MDB_VL32 - union { /* always zero since we don't support fixed mapping in MDB_VL32 */ - MDB_ID mmun_ull; - void *mmun_address; - } mm_un; -#define mm_address mm_un.mmun_address -#else - void *mm_address; /**< address for fixed mapping */ -#endif - pgno_t mm_mapsize; /**< size of mmap region */ - MDB_db mm_dbs[CORE_DBS]; /**< first is free space, 2nd is main db */ - /** The size of pages used in this DB */ -#define mm_psize mm_dbs[FREE_DBI].md_pad - /** Any persistent environment flags. @ref mdb_env */ -#define mm_flags mm_dbs[FREE_DBI].md_flags - pgno_t mm_last_pg; /**< last used page in file */ - volatile txnid_t mm_txnid; /**< txnid that committed this page */ -} MDB_meta; - - /** Buffer for a stack-allocated meta page. - * The members define size and alignment, and silence type - * aliasing warnings. They are not used directly; that could - * mean incorrectly using several union members in parallel. - */ -typedef union MDB_metabuf { - MDB_page mb_page; - struct { - char mm_pad[PAGEHDRSZ]; - MDB_meta mm_meta; - } mb_metabuf; -} MDB_metabuf; - - /** Auxiliary DB info. - * The information here is mostly static/read-only. There is - * only a single copy of this record in the environment. - */ -typedef struct MDB_dbx { - MDB_val md_name; /**< name of the database */ - MDB_cmp_func *md_cmp; /**< function for comparing keys */ - MDB_cmp_func *md_dcmp; /**< function for comparing data items */ - MDB_rel_func *md_rel; /**< user relocate function */ - void *md_relctx; /**< user-provided context for md_rel */ -} MDB_dbx; - - /** A database transaction. - * Every operation requires a transaction handle. - */ -struct MDB_txn { - MDB_txn *mt_parent; /**< parent of a nested txn */ - /** Nested txn under this txn, set together with flag #MDB_TXN_HAS_CHILD */ - MDB_txn *mt_child; - pgno_t mt_next_pgno; /**< next unallocated page */ -#ifdef MDB_VL32 - pgno_t mt_last_pgno; /**< last written page */ -#endif - /** The ID of this transaction. IDs are integers incrementing from 1. - * Only committed write transactions increment the ID. If a transaction - * aborts, the ID may be re-used by the next writer. - */ - txnid_t mt_txnid; - MDB_env *mt_env; /**< the DB environment */ - /** The list of pages that became unused during this transaction. - */ - MDB_IDL mt_free_pgs; - /** The list of loose pages that became unused and may be reused - * in this transaction, linked through #NEXT_LOOSE_PAGE(page). - */ - MDB_page *mt_loose_pgs; - /* #Number of loose pages (#mt_loose_pgs) */ - int mt_loose_count; - /** The sorted list of dirty pages we temporarily wrote to disk - * because the dirty list was full. page numbers in here are - * shifted left by 1, deleted slots have the LSB set. - */ - MDB_IDL mt_spill_pgs; - union { - /** For write txns: Modified pages. Sorted when not MDB_WRITEMAP. */ - MDB_ID2L dirty_list; - /** For read txns: This thread/txn's reader table slot, or NULL. */ - MDB_reader *reader; - } mt_u; - /** Array of records for each DB known in the environment. 
*/ - MDB_dbx *mt_dbxs; - /** Array of MDB_db records for each known DB */ - MDB_db *mt_dbs; - /** Array of sequence numbers for each DB handle */ - unsigned int *mt_dbiseqs; -/** @defgroup mt_dbflag Transaction DB Flags - * @ingroup internal - * @{ - */ -#define DB_DIRTY 0x01 /**< DB was modified or is DUPSORT data */ -#define DB_STALE 0x02 /**< Named-DB record is older than txnID */ -#define DB_NEW 0x04 /**< Named-DB handle opened in this txn */ -#define DB_VALID 0x08 /**< DB handle is valid, see also #MDB_VALID */ -#define DB_USRVALID 0x10 /**< As #DB_VALID, but not set for #FREE_DBI */ -/** @} */ - /** In write txns, array of cursors for each DB */ - MDB_cursor **mt_cursors; - /** Array of flags for each DB */ - unsigned char *mt_dbflags; -#ifdef MDB_VL32 - /** List of read-only pages (actually chunks) */ - MDB_ID3L mt_rpages; - /** We map chunks of 16 pages. Even though Windows uses 4KB pages, all - * mappings must begin on 64KB boundaries. So we round off all pgnos to - * a chunk boundary. We do the same on Linux for symmetry, and also to - * reduce the frequency of mmap/munmap calls. - */ -#define MDB_RPAGE_CHUNK 16 -#define MDB_TRPAGE_SIZE 4096 /**< size of #mt_rpages array of chunks */ -#define MDB_TRPAGE_MAX (MDB_TRPAGE_SIZE-1) /**< maximum chunk index */ - unsigned int mt_rpcheck; /**< threshold for reclaiming unref'd chunks */ -#endif - /** Number of DB records in use, or 0 when the txn is finished. - * This number only ever increments until the txn finishes; we - * don't decrement it when individual DB handles are closed. - */ - MDB_dbi mt_numdbs; - -/** @defgroup mdb_txn Transaction Flags - * @ingroup internal - * @{ - */ - /** #mdb_txn_begin() flags */ -#define MDB_TXN_BEGIN_FLAGS (MDB_NOMETASYNC|MDB_NOSYNC|MDB_RDONLY) -#define MDB_TXN_NOMETASYNC MDB_NOMETASYNC /**< don't sync meta for this txn on commit */ -#define MDB_TXN_NOSYNC MDB_NOSYNC /**< don't sync this txn on commit */ -#define MDB_TXN_RDONLY MDB_RDONLY /**< read-only transaction */ - /* internal txn flags */ -#define MDB_TXN_WRITEMAP MDB_WRITEMAP /**< copy of #MDB_env flag in writers */ -#define MDB_TXN_FINISHED 0x01 /**< txn is finished or never began */ -#define MDB_TXN_ERROR 0x02 /**< txn is unusable after an error */ -#define MDB_TXN_DIRTY 0x04 /**< must write, even if dirty list is empty */ -#define MDB_TXN_SPILLS 0x08 /**< txn or a parent has spilled pages */ -#define MDB_TXN_HAS_CHILD 0x10 /**< txn has an #MDB_txn.%mt_child */ - /** most operations on the txn are currently illegal */ -#define MDB_TXN_BLOCKED (MDB_TXN_FINISHED|MDB_TXN_ERROR|MDB_TXN_HAS_CHILD) -/** @} */ - unsigned int mt_flags; /**< @ref mdb_txn */ - /** #dirty_list room: Array size - \#dirty pages visible to this txn. - * Includes ancestor txns' dirty pages not hidden by other txns' - * dirty/spilled pages. Thus commit(nested txn) has room to merge - * dirty_list into mt_parent after freeing hidden mt_parent pages. - */ - unsigned int mt_dirty_room; -}; - -/** Enough space for 2^32 nodes with minimum of 2 keys per node. I.e., plenty. - * At 4 keys per node, enough for 2^64 nodes, so there's probably no need to - * raise this on a 64 bit machine. - */ -#define CURSOR_STACK 32 - -struct MDB_xcursor; - - /** Cursors are used for all DB operations. - * A cursor holds a path of (page pointer, key index) from the DB - * root to a position in the DB, plus other state. #MDB_DUPSORT - * cursors include an xcursor to the current data item. Write txns - * track their cursors and keep them up to date when data moves. 
- * Exception: An xcursor's pointer to a #P_SUBP page can be stale. - * (A node with #F_DUPDATA but no #F_SUBDATA contains a subpage). - */ -struct MDB_cursor { - /** Next cursor on this DB in this txn */ - MDB_cursor *mc_next; - /** Backup of the original cursor if this cursor is a shadow */ - MDB_cursor *mc_backup; - /** Context used for databases with #MDB_DUPSORT, otherwise NULL */ - struct MDB_xcursor *mc_xcursor; - /** The transaction that owns this cursor */ - MDB_txn *mc_txn; - /** The database handle this cursor operates on */ - MDB_dbi mc_dbi; - /** The database record for this cursor */ - MDB_db *mc_db; - /** The database auxiliary record for this cursor */ - MDB_dbx *mc_dbx; - /** The @ref mt_dbflag for this database */ - unsigned char *mc_dbflag; - unsigned short mc_snum; /**< number of pushed pages */ - unsigned short mc_top; /**< index of top page, normally mc_snum-1 */ -/** @defgroup mdb_cursor Cursor Flags - * @ingroup internal - * Cursor state flags. - * @{ - */ -#define C_INITIALIZED 0x01 /**< cursor has been initialized and is valid */ -#define C_EOF 0x02 /**< No more data */ -#define C_SUB 0x04 /**< Cursor is a sub-cursor */ -#define C_DEL 0x08 /**< last op was a cursor_del */ -#define C_UNTRACK 0x40 /**< Un-track cursor when closing */ -#define C_WRITEMAP MDB_TXN_WRITEMAP /**< Copy of txn flag */ -/** Read-only cursor into the txn's original snapshot in the map. - * Set for read-only txns, and in #mdb_page_alloc() for #FREE_DBI when - * #MDB_DEVEL & 2. Only implements code which is necessary for this. - */ -#define C_ORIG_RDONLY MDB_TXN_RDONLY -/** @} */ - unsigned int mc_flags; /**< @ref mdb_cursor */ - MDB_page *mc_pg[CURSOR_STACK]; /**< stack of pushed pages */ - indx_t mc_ki[CURSOR_STACK]; /**< stack of page indices */ -#ifdef MDB_VL32 - MDB_page *mc_ovpg; /**< a referenced overflow page */ -#endif -}; - - /** Context for sorted-dup records. - * We could have gone to a fully recursive design, with arbitrarily - * deep nesting of sub-databases. But for now we only handle these - * levels - main DB, optional sub-DB, sorted-duplicate DB. - */ -typedef struct MDB_xcursor { - /** A sub-cursor for traversing the Dup DB */ - MDB_cursor mx_cursor; - /** The database record for this Dup DB */ - MDB_db mx_db; - /** The auxiliary DB record for this Dup DB */ - MDB_dbx mx_dbx; - /** The @ref mt_dbflag for this Dup DB */ - unsigned char mx_dbflag; -} MDB_xcursor; - - /** State of FreeDB old pages, stored in the MDB_env */ -typedef struct MDB_pgstate { - pgno_t *mf_pghead; /**< Reclaimed freeDB pages, or NULL before use */ - txnid_t mf_pglast; /**< ID of last used record, or 0 if !mf_pghead */ -} MDB_pgstate; - - /** The database environment. */ -struct MDB_env { - HANDLE me_fd; /**< The main data file */ - HANDLE me_lfd; /**< The lock file */ - HANDLE me_mfd; /**< just for writing the meta pages */ -#if defined(MDB_VL32) && defined(_WIN32) - HANDLE me_fmh; /**< File Mapping handle */ -#endif - /** Failed to update the meta page. Probably an I/O error. */ -#define MDB_FATAL_ERROR 0x80000000U - /** Some fields are initialized. 
*/ -#define MDB_ENV_ACTIVE 0x20000000U - /** me_txkey is set */ -#define MDB_ENV_TXKEY 0x10000000U - /** fdatasync is unreliable */ -#define MDB_FSYNCONLY 0x08000000U - uint32_t me_flags; /**< @ref mdb_env */ - unsigned int me_psize; /**< DB page size, inited from me_os_psize */ - unsigned int me_os_psize; /**< OS page size, from #GET_PAGESIZE */ - unsigned int me_maxreaders; /**< size of the reader table */ - /** Max #MDB_txninfo.%mti_numreaders of interest to #mdb_env_close() */ - volatile int me_close_readers; - MDB_dbi me_numdbs; /**< number of DBs opened */ - MDB_dbi me_maxdbs; /**< size of the DB table */ - MDB_PID_T me_pid; /**< process ID of this env */ - char *me_path; /**< path to the DB files */ - char *me_map; /**< the memory map of the data file */ - MDB_txninfo *me_txns; /**< the memory map of the lock file or NULL */ - MDB_meta *me_metas[NUM_METAS]; /**< pointers to the two meta pages */ - void *me_pbuf; /**< scratch area for DUPSORT put() */ - MDB_txn *me_txn; /**< current write transaction */ - MDB_txn *me_txn0; /**< prealloc'd write transaction */ - mdb_size_t me_mapsize; /**< size of the data memory map */ - off_t me_size; /**< current file size */ - pgno_t me_maxpg; /**< me_mapsize / me_psize */ - MDB_dbx *me_dbxs; /**< array of static DB info */ - uint16_t *me_dbflags; /**< array of flags from MDB_db.md_flags */ - unsigned int *me_dbiseqs; /**< array of dbi sequence numbers */ - pthread_key_t me_txkey; /**< thread-key for readers */ - txnid_t me_pgoldest; /**< ID of oldest reader last time we looked */ - MDB_pgstate me_pgstate; /**< state of old pages from freeDB */ -# define me_pglast me_pgstate.mf_pglast -# define me_pghead me_pgstate.mf_pghead - MDB_page *me_dpages; /**< list of malloc'd blocks for re-use */ - /** IDL of pages that became unused in a write txn */ - MDB_IDL me_free_pgs; - /** ID2L of pages written during a write txn. Length MDB_IDL_UM_SIZE. 
*/ - MDB_ID2L me_dirty_list; - /** Max number of freelist items that can fit in a single overflow page */ - int me_maxfree_1pg; - /** Max size of a node on a page */ - unsigned int me_nodemax; -#if !(MDB_MAXKEYSIZE) - unsigned int me_maxkey; /**< max size of a key */ -#endif - int me_live_reader; /**< have liveness lock in reader table */ -#ifdef _WIN32 - int me_pidquery; /**< Used in OpenProcess */ -#endif -#ifdef MDB_USE_POSIX_MUTEX /* Posix mutexes reside in shared mem */ -# define me_rmutex me_txns->mti_rmutex /**< Shared reader lock */ -# define me_wmutex me_txns->mti_wmutex /**< Shared writer lock */ -#else - mdb_mutex_t me_rmutex; - mdb_mutex_t me_wmutex; -#endif -#ifdef MDB_VL32 - MDB_ID3L me_rpages; /**< like #mt_rpages, but global to env */ - pthread_mutex_t me_rpmutex; /**< control access to #me_rpages */ -#define MDB_ERPAGE_SIZE 16384 -#define MDB_ERPAGE_MAX (MDB_ERPAGE_SIZE-1) - unsigned int me_rpcheck; -#endif - void *me_userctx; /**< User-settable context */ - MDB_assert_func *me_assert_func; /**< Callback for assertion failures */ -}; - - /** Nested transaction */ -typedef struct MDB_ntxn { - MDB_txn mnt_txn; /**< the transaction */ - MDB_pgstate mnt_pgstate; /**< parent transaction's saved freestate */ -} MDB_ntxn; - - /** max number of pages to commit in one writev() call */ -#define MDB_COMMIT_PAGES 64 -#if defined(IOV_MAX) && IOV_MAX < MDB_COMMIT_PAGES -#undef MDB_COMMIT_PAGES -#define MDB_COMMIT_PAGES IOV_MAX -#endif - - /** max bytes to write in one call */ -#define MAX_WRITE (0x40000000U >> (sizeof(ssize_t) == 4)) - - /** Check \b txn and \b dbi arguments to a function */ -#define TXN_DBI_EXIST(txn, dbi, validity) \ - ((txn) && (dbi)<(txn)->mt_numdbs && ((txn)->mt_dbflags[dbi] & (validity))) - - /** Check for misused \b dbi handles */ -#define TXN_DBI_CHANGED(txn, dbi) \ - ((txn)->mt_dbiseqs[dbi] != (txn)->mt_env->me_dbiseqs[dbi]) - -static int mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp); -static int mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp); -static int mdb_page_touch(MDB_cursor *mc); - -#define MDB_END_NAMES {"committed", "empty-commit", "abort", "reset", \ - "reset-tmp", "fail-begin", "fail-beginchild"} -enum { - /* mdb_txn_end operation number, for logging */ - MDB_END_COMMITTED, MDB_END_EMPTY_COMMIT, MDB_END_ABORT, MDB_END_RESET, - MDB_END_RESET_TMP, MDB_END_FAIL_BEGIN, MDB_END_FAIL_BEGINCHILD -}; -#define MDB_END_OPMASK 0x0F /**< mask for #mdb_txn_end() operation number */ -#define MDB_END_UPDATE 0x10 /**< update env state (DBIs) */ -#define MDB_END_FREE 0x20 /**< free txn unless it is #MDB_env.%me_txn0 */ -#define MDB_END_SLOT MDB_NOTLS /**< release any reader slot if #MDB_NOTLS */ -static void mdb_txn_end(MDB_txn *txn, unsigned mode); - -static int mdb_page_get(MDB_cursor *mc, pgno_t pgno, MDB_page **mp, int *lvl); -static int mdb_page_search_root(MDB_cursor *mc, - MDB_val *key, int modify); -#define MDB_PS_MODIFY 1 -#define MDB_PS_ROOTONLY 2 -#define MDB_PS_FIRST 4 -#define MDB_PS_LAST 8 -static int mdb_page_search(MDB_cursor *mc, - MDB_val *key, int flags); -static int mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst); - -#define MDB_SPLIT_REPLACE MDB_APPENDDUP /**< newkey is not new */ -static int mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata, - pgno_t newpgno, unsigned int nflags); - -static int mdb_env_read_header(MDB_env *env, MDB_meta *meta); -static MDB_meta *mdb_env_pick_meta(const MDB_env *env); -static int mdb_env_write_meta(MDB_txn *txn); -#ifdef MDB_USE_POSIX_MUTEX /* Drop unused excl arg 
*/ -# define mdb_env_close0(env, excl) mdb_env_close1(env) -#endif -static void mdb_env_close0(MDB_env *env, int excl); - -static MDB_node *mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp); -static int mdb_node_add(MDB_cursor *mc, indx_t indx, - MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags); -static void mdb_node_del(MDB_cursor *mc, int ksize); -static void mdb_node_shrink(MDB_page *mp, indx_t indx); -static int mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst, int fromleft); -static int mdb_node_read(MDB_cursor *mc, MDB_node *leaf, MDB_val *data); -static size_t mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data); -static size_t mdb_branch_size(MDB_env *env, MDB_val *key); - -static int mdb_rebalance(MDB_cursor *mc); -static int mdb_update_key(MDB_cursor *mc, MDB_val *key); - -static void mdb_cursor_pop(MDB_cursor *mc); -static int mdb_cursor_push(MDB_cursor *mc, MDB_page *mp); - -static int mdb_cursor_del0(MDB_cursor *mc); -static int mdb_del0(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data, unsigned flags); -static int mdb_cursor_sibling(MDB_cursor *mc, int move_right); -static int mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op); -static int mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op); -static int mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op, - int *exactp); -static int mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data); -static int mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data); - -static void mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx); -static void mdb_xcursor_init0(MDB_cursor *mc); -static void mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node); -static void mdb_xcursor_init2(MDB_cursor *mc, MDB_xcursor *src_mx, int force); - -static int mdb_drop0(MDB_cursor *mc, int subs); -static void mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi); -static int mdb_reader_check0(MDB_env *env, int rlocked, int *dead); - -/** @cond */ -static MDB_cmp_func mdb_cmp_memn, mdb_cmp_memnr, mdb_cmp_int, mdb_cmp_cint, mdb_cmp_long; -/** @endcond */ - -/** Compare two items pointing at size_t's of unknown alignment. */ -#ifdef MISALIGNED_OK -# define mdb_cmp_clong mdb_cmp_long -#else -# define mdb_cmp_clong mdb_cmp_cint -#endif - -#ifdef _WIN32 -static SECURITY_DESCRIPTOR mdb_null_sd; -static SECURITY_ATTRIBUTES mdb_all_sa; -static int mdb_sec_inited; - -static int utf8_to_utf16(const char *src, int srcsize, wchar_t **dst, int *dstsize); -#endif - -/** Return the library version info. 
*/ -char * ESECT -mdb_version(int *major, int *minor, int *patch) -{ - if (major) *major = MDB_VERSION_MAJOR; - if (minor) *minor = MDB_VERSION_MINOR; - if (patch) *patch = MDB_VERSION_PATCH; - return MDB_VERSION_STRING; -} - -/** Table of descriptions for LMDB @ref errors */ -static char *const mdb_errstr[] = { - "MDB_KEYEXIST: Key/data pair already exists", - "MDB_NOTFOUND: No matching key/data pair found", - "MDB_PAGE_NOTFOUND: Requested page not found", - "MDB_CORRUPTED: Located page was wrong type", - "MDB_PANIC: Update of meta page failed or environment had fatal error", - "MDB_VERSION_MISMATCH: Database environment version mismatch", - "MDB_INVALID: File is not an LMDB file", - "MDB_MAP_FULL: Environment mapsize limit reached", - "MDB_DBS_FULL: Environment maxdbs limit reached", - "MDB_READERS_FULL: Environment maxreaders limit reached", - "MDB_TLS_FULL: Thread-local storage keys full - too many environments open", - "MDB_TXN_FULL: Transaction has too many dirty pages - transaction too big", - "MDB_CURSOR_FULL: Internal error - cursor stack limit reached", - "MDB_PAGE_FULL: Internal error - page has no more space", - "MDB_MAP_RESIZED: Database contents grew beyond environment mapsize", - "MDB_INCOMPATIBLE: Operation and DB incompatible, or DB flags changed", - "MDB_BAD_RSLOT: Invalid reuse of reader locktable slot", - "MDB_BAD_TXN: Transaction must abort, has a child, or is invalid", - "MDB_BAD_VALSIZE: Unsupported size of key/DB name/data, or wrong DUPFIXED size", - "MDB_BAD_DBI: The specified DBI handle was closed/changed unexpectedly", -}; - -char * -mdb_strerror(int err) -{ -#ifdef _WIN32 - /** HACK: pad 4KB on stack over the buf. Return system msgs in buf. - * This works as long as no function between the call to mdb_strerror - * and the actual use of the message uses more than 4K of stack. - */ -#define MSGSIZE 1024 -#define PADSIZE 4096 - char buf[MSGSIZE+PADSIZE], *ptr = buf; -#endif - int i; - if (!err) - return ("Successful return: 0"); - - if (err >= MDB_KEYEXIST && err <= MDB_LAST_ERRCODE) { - i = err - MDB_KEYEXIST; - return mdb_errstr[i]; - } - -#ifdef _WIN32 - /* These are the C-runtime error codes we use. The comment indicates - * their numeric value, and the Win32 error they would correspond to - * if the error actually came from a Win32 API. A major mess, we should - * have used LMDB-specific error codes for everything. - */ - switch(err) { - case ENOENT: /* 2, FILE_NOT_FOUND */ - case EIO: /* 5, ACCESS_DENIED */ - case ENOMEM: /* 12, INVALID_ACCESS */ - case EACCES: /* 13, INVALID_DATA */ - case EBUSY: /* 16, CURRENT_DIRECTORY */ - case EINVAL: /* 22, BAD_COMMAND */ - case ENOSPC: /* 28, OUT_OF_PAPER */ - return strerror(err); - default: - ; - } - buf[0] = 0; - FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, err, 0, ptr, MSGSIZE, (va_list *)buf+MSGSIZE); - return ptr; -#else - return strerror(err); -#endif -} - -/** assert(3) variant in cursor context */ -#define mdb_cassert(mc, expr) mdb_assert0((mc)->mc_txn->mt_env, expr, #expr) -/** assert(3) variant in transaction context */ -#define mdb_tassert(txn, expr) mdb_assert0((txn)->mt_env, expr, #expr) -/** assert(3) variant in environment context */ -#define mdb_eassert(env, expr) mdb_assert0(env, expr, #expr) - -#ifndef NDEBUG -# define mdb_assert0(env, expr, expr_txt) ((expr) ? 
(void)0 : \ - mdb_assert_fail(env, expr_txt, mdb_func_, __FILE__, __LINE__)) - -static void ESECT -mdb_assert_fail(MDB_env *env, const char *expr_txt, - const char *func, const char *file, int line) -{ - char buf[400]; - sprintf(buf, "%.100s:%d: Assertion '%.200s' failed in %.40s()", - file, line, expr_txt, func); - if (env->me_assert_func) - env->me_assert_func(env, buf); - fprintf(stderr, "%s\n", buf); - abort(); -} -#else -# define mdb_assert0(env, expr, expr_txt) ((void) 0) -#endif /* NDEBUG */ - -#if MDB_DEBUG -/** Return the page number of \b mp which may be sub-page, for debug output */ -static pgno_t -mdb_dbg_pgno(MDB_page *mp) -{ - pgno_t ret; - COPY_PGNO(ret, mp->mp_pgno); - return ret; -} - -/** Display a key in hexadecimal and return the address of the result. - * @param[in] key the key to display - * @param[in] buf the buffer to write into. Should always be #DKBUF. - * @return The key in hexadecimal form. - */ -char * -mdb_dkey(MDB_val *key, char *buf) -{ - char *ptr = buf; - unsigned char *c = key->mv_data; - unsigned int i; - - if (!key) - return ""; - - if (key->mv_size > DKBUF_MAXKEYSIZE) - return "MDB_MAXKEYSIZE"; - /* may want to make this a dynamic check: if the key is mostly - * printable characters, print it as-is instead of converting to hex. - */ -#if 1 - buf[0] = '\0'; - for (i=0; i<key->mv_size; i++) - ptr += sprintf(ptr, "%02x", *c++); -#else - sprintf(buf, "%.*s", key->mv_size, key->mv_data); -#endif - return buf; -} - -static const char * -mdb_leafnode_type(MDB_node *n) -{ - static char *const tp[2][2] = {{"", ": DB"}, {": sub-page", ": sub-DB"}}; - return F_ISSET(n->mn_flags, F_BIGDATA) ? ": overflow page" : - tp[F_ISSET(n->mn_flags, F_DUPDATA)][F_ISSET(n->mn_flags, F_SUBDATA)]; -} - -/** Display all the keys in the page. */ -void -mdb_page_list(MDB_page *mp) -{ - pgno_t pgno = mdb_dbg_pgno(mp); - const char *type, *state = (mp->mp_flags & P_DIRTY) ? 
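mdb_dkey() above renders a key as hexadecimal for debug output. A standalone equivalent for application-side logging might look like the sketch below; dump_key and the caller-supplied buffer are illustrative, not part of the library:

#include <stdio.h>
#include <lmdb.h>

/* Hex-print an MDB_val into a caller-supplied buffer; stops before overflowing it. */
static const char *dump_key(const MDB_val *key, char *buf, size_t buflen)
{
	const unsigned char *c = key->mv_data;
	size_t i, n = 0;

	buf[0] = '\0';
	for (i = 0; i < key->mv_size && n + 3 < buflen; i++)
		n += sprintf(buf + n, "%02x", c[i]);
	return buf;
}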
", dirty" : ""; - MDB_node *node; - unsigned int i, nkeys, nsize, total = 0; - MDB_val key; - DKBUF; - - switch (mp->mp_flags & (P_BRANCH|P_LEAF|P_LEAF2|P_META|P_OVERFLOW|P_SUBP)) { - case P_BRANCH: type = "Branch page"; break; - case P_LEAF: type = "Leaf page"; break; - case P_LEAF|P_SUBP: type = "Sub-page"; break; - case P_LEAF|P_LEAF2: type = "LEAF2 page"; break; - case P_LEAF|P_LEAF2|P_SUBP: type = "LEAF2 sub-page"; break; - case P_OVERFLOW: - fprintf(stderr, "Overflow page %"Y"u pages %u%s\n", - pgno, mp->mp_pages, state); - return; - case P_META: - fprintf(stderr, "Meta-page %"Y"u txnid %"Y"u\n", - pgno, ((MDB_meta *)METADATA(mp))->mm_txnid); - return; - default: - fprintf(stderr, "Bad page %"Y"u flags 0x%u\n", pgno, mp->mp_flags); - return; - } - - nkeys = NUMKEYS(mp); - fprintf(stderr, "%s %"Y"u numkeys %d%s\n", type, pgno, nkeys, state); - - for (i=0; i<nkeys; i++) { - if (IS_LEAF2(mp)) { /* LEAF2 pages have no mp_ptrs[] or node headers */ - key.mv_size = nsize = mp->mp_pad; - key.mv_data = LEAF2KEY(mp, i, nsize); - total += nsize; - fprintf(stderr, "key %d: nsize %d, %s\n", i, nsize, DKEY(&key)); - continue; - } - node = NODEPTR(mp, i); - key.mv_size = node->mn_ksize; - key.mv_data = node->mn_data; - nsize = NODESIZE + key.mv_size; - if (IS_BRANCH(mp)) { - fprintf(stderr, "key %d: page %"Y"u, %s\n", i, NODEPGNO(node), - DKEY(&key)); - total += nsize; - } else { - if (F_ISSET(node->mn_flags, F_BIGDATA)) - nsize += sizeof(pgno_t); - else - nsize += NODEDSZ(node); - total += nsize; - nsize += sizeof(indx_t); - fprintf(stderr, "key %d: nsize %d, %s%s\n", - i, nsize, DKEY(&key), mdb_leafnode_type(node)); - } - total = EVEN(total); - } - fprintf(stderr, "Total: header %d + contents %d + unused %d\n", - IS_LEAF2(mp) ? PAGEHDRSZ : PAGEBASE + mp->mp_lower, total, SIZELEFT(mp)); -} - -void -mdb_cursor_chk(MDB_cursor *mc) -{ - unsigned int i; - MDB_node *node; - MDB_page *mp; - - if (!mc->mc_snum || !(mc->mc_flags & C_INITIALIZED)) return; - for (i=0; i<mc->mc_top; i++) { - mp = mc->mc_pg[i]; - node = NODEPTR(mp, mc->mc_ki[i]); - if (NODEPGNO(node) != mc->mc_pg[i+1]->mp_pgno) - printf("oops!\n"); - } - if (mc->mc_ki[i] >= NUMKEYS(mc->mc_pg[i])) - printf("ack!\n"); - if (mc->mc_xcursor && (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) { - node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) && - mc->mc_xcursor->mx_cursor.mc_pg[0] != NODEDATA(node)) { - printf("blah!\n"); - } - } -} -#endif - -#if (MDB_DEBUG) > 2 -/** Count all the pages in each DB and in the freelist - * and make sure it matches the actual number of pages - * being used. - * All named DBs must be open for a correct count. 
- */ -static void mdb_audit(MDB_txn *txn) -{ - MDB_cursor mc; - MDB_val key, data; - MDB_ID freecount, count; - MDB_dbi i; - int rc; - - freecount = 0; - mdb_cursor_init(&mc, txn, FREE_DBI, NULL); - while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0) - freecount += *(MDB_ID *)data.mv_data; - mdb_tassert(txn, rc == MDB_NOTFOUND); - - count = 0; - for (i = 0; i<txn->mt_numdbs; i++) { - MDB_xcursor mx; - if (!(txn->mt_dbflags[i] & DB_VALID)) - continue; - mdb_cursor_init(&mc, txn, i, &mx); - if (txn->mt_dbs[i].md_root == P_INVALID) - continue; - count += txn->mt_dbs[i].md_branch_pages + - txn->mt_dbs[i].md_leaf_pages + - txn->mt_dbs[i].md_overflow_pages; - if (txn->mt_dbs[i].md_flags & MDB_DUPSORT) { - rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST); - for (; rc == MDB_SUCCESS; rc = mdb_cursor_sibling(&mc, 1)) { - unsigned j; - MDB_page *mp; - mp = mc.mc_pg[mc.mc_top]; - for (j=0; j<NUMKEYS(mp); j++) { - MDB_node *leaf = NODEPTR(mp, j); - if (leaf->mn_flags & F_SUBDATA) { - MDB_db db; - memcpy(&db, NODEDATA(leaf), sizeof(db)); - count += db.md_branch_pages + db.md_leaf_pages + - db.md_overflow_pages; - } - } - } - mdb_tassert(txn, rc == MDB_NOTFOUND); - } - } - if (freecount + count + NUM_METAS != txn->mt_next_pgno) { - fprintf(stderr, "audit: %"Y"u freecount: %"Y"u count: %"Y"u total: %"Y"u next_pgno: %"Y"u\n", - txn->mt_txnid, freecount, count+NUM_METAS, - freecount+count+NUM_METAS, txn->mt_next_pgno); - } -} -#endif - -int -mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b) -{ - return txn->mt_dbxs[dbi].md_cmp(a, b); -} - -int -mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b) -{ - MDB_cmp_func *dcmp = txn->mt_dbxs[dbi].md_dcmp; -#if UINT_MAX < SIZE_MAX || defined(MDB_VL32) - if (dcmp == mdb_cmp_int && a->mv_size == sizeof(mdb_size_t)) - dcmp = mdb_cmp_clong; -#endif - return dcmp(a, b); -} - -/** Allocate memory for a page. - * Re-use old malloc'd pages first for singletons, otherwise just malloc. - */ -static MDB_page * -mdb_page_malloc(MDB_txn *txn, unsigned num) -{ - MDB_env *env = txn->mt_env; - MDB_page *ret = env->me_dpages; - size_t psize = env->me_psize, sz = psize, off; - /* For ! #MDB_NOMEMINIT, psize counts how much to init. - * For a single page alloc, we init everything after the page header. - * For multi-page, we init the final page; if the caller needed that - * many pages they will be filling in at least up to the last page. - */ - if (num == 1) { - if (ret) { - VGMEMP_ALLOC(env, ret, sz); - VGMEMP_DEFINED(ret, sizeof(ret->mp_next)); - env->me_dpages = ret->mp_next; - return ret; - } - psize -= off = PAGEHDRSZ; - } else { - sz *= num; - off = sz - psize; - } - if ((ret = malloc(sz)) != NULL) { - VGMEMP_ALLOC(env, ret, sz); - if (!(env->me_flags & MDB_NOMEMINIT)) { - memset((char *)ret + off, 0, psize); - ret->mp_pad = 0; - } - } else { - txn->mt_flags |= MDB_TXN_ERROR; - } - return ret; -} -/** Free a single page. - * Saves single pages to a list, for future reuse. - * (This is not used for multi-page overflow pages.) 
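mdb_audit() above walks databases with a cursor to cross-check page counts. From the public API the same pattern — open a cursor, step with MDB_NEXT until MDB_NOTFOUND — is how a caller would count entries in a named database. A minimal sketch, assuming an already-opened environment and database handle:

#include <lmdb.h>

/* Count key/data pairs in dbi using a short-lived read-only transaction. */
static int count_entries(MDB_env *env, MDB_dbi dbi, size_t *out)
{
	MDB_txn *txn;
	MDB_cursor *cur;
	MDB_val key, data;
	size_t n = 0;
	int rc;

	if ((rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)) != MDB_SUCCESS)
		return rc;
	if ((rc = mdb_cursor_open(txn, dbi, &cur)) != MDB_SUCCESS) {
		mdb_txn_abort(txn);
		return rc;
	}
	while ((rc = mdb_cursor_get(cur, &key, &data, MDB_NEXT)) == MDB_SUCCESS)
		n++;
	mdb_cursor_close(cur);
	mdb_txn_abort(txn);
	*out = n;
	return rc == MDB_NOTFOUND ? MDB_SUCCESS : rc;
}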
- */ -static void -mdb_page_free(MDB_env *env, MDB_page *mp) -{ - mp->mp_next = env->me_dpages; - VGMEMP_FREE(env, mp); - env->me_dpages = mp; -} - -/** Free a dirty page */ -static void -mdb_dpage_free(MDB_env *env, MDB_page *dp) -{ - if (!IS_OVERFLOW(dp) || dp->mp_pages == 1) { - mdb_page_free(env, dp); - } else { - /* large pages just get freed directly */ - VGMEMP_FREE(env, dp); - free(dp); - } -} - -/** Return all dirty pages to dpage list */ -static void -mdb_dlist_free(MDB_txn *txn) -{ - MDB_env *env = txn->mt_env; - MDB_ID2L dl = txn->mt_u.dirty_list; - unsigned i, n = dl[0].mid; - - for (i = 1; i <= n; i++) { - mdb_dpage_free(env, dl[i].mptr); - } - dl[0].mid = 0; -} - -#ifdef MDB_VL32 -static void -mdb_page_unref(MDB_txn *txn, MDB_page *mp) -{ - pgno_t pgno; - MDB_ID3L tl = txn->mt_rpages; - unsigned x, rem; - if (mp->mp_flags & (P_SUBP|P_DIRTY)) - return; - rem = mp->mp_pgno & (MDB_RPAGE_CHUNK-1); - pgno = mp->mp_pgno ^ rem; - x = mdb_mid3l_search(tl, pgno); - if (x != tl[0].mid && tl[x+1].mid == mp->mp_pgno) - x++; - if (tl[x].mref) - tl[x].mref--; -} -#define MDB_PAGE_UNREF(txn, mp) mdb_page_unref(txn, mp) - -static void -mdb_cursor_unref(MDB_cursor *mc) -{ - int i; - if (!mc->mc_snum || !mc->mc_pg[0] || IS_SUBP(mc->mc_pg[0])) - return; - for (i=0; i<mc->mc_snum; i++) - mdb_page_unref(mc->mc_txn, mc->mc_pg[i]); - if (mc->mc_ovpg) { - mdb_page_unref(mc->mc_txn, mc->mc_ovpg); - mc->mc_ovpg = 0; - } - mc->mc_snum = mc->mc_top = 0; - mc->mc_pg[0] = NULL; - mc->mc_flags &= ~C_INITIALIZED; -} -#else -#define MDB_PAGE_UNREF(txn, mp) -#endif /* MDB_VL32 */ - -/** Loosen or free a single page. - * Saves single pages to a list for future reuse - * in this same txn. It has been pulled from the freeDB - * and already resides on the dirty list, but has been - * deleted. Use these pages first before pulling again - * from the freeDB. - * - * If the page wasn't dirtied in this txn, just add it - * to this txn's free list. - */ -static int -mdb_page_loose(MDB_cursor *mc, MDB_page *mp) -{ - int loose = 0; - pgno_t pgno = mp->mp_pgno; - MDB_txn *txn = mc->mc_txn; - - if ((mp->mp_flags & P_DIRTY) && mc->mc_dbi != FREE_DBI) { - if (txn->mt_parent) { - MDB_ID2 *dl = txn->mt_u.dirty_list; - /* If txn has a parent, make sure the page is in our - * dirty list. - */ - if (dl[0].mid) { - unsigned x = mdb_mid2l_search(dl, pgno); - if (x <= dl[0].mid && dl[x].mid == pgno) { - if (mp != dl[x].mptr) { /* bad cursor? */ - mc->mc_flags &= ~(C_INITIALIZED|C_EOF); - txn->mt_flags |= MDB_TXN_ERROR; - return MDB_CORRUPTED; - } - /* ok, it's ours */ - loose = 1; - } - } - } else { - /* no parent txn, so it's just ours */ - loose = 1; - } - } - if (loose) { - DPRINTF(("loosen db %d page %"Y"u", DDBI(mc), - mp->mp_pgno)); - NEXT_LOOSE_PAGE(mp) = txn->mt_loose_pgs; - txn->mt_loose_pgs = mp; - txn->mt_loose_count++; - mp->mp_flags |= P_LOOSE; - } else { - int rc = mdb_midl_append(&txn->mt_free_pgs, pgno); - if (rc) - return rc; - } - - return MDB_SUCCESS; -} - -/** Set or clear P_KEEP in dirty, non-overflow, non-sub pages watched by txn. - * @param[in] mc A cursor handle for the current operation. - * @param[in] pflags Flags of the pages to update: - * P_DIRTY to set P_KEEP, P_DIRTY|P_KEEP to clear it. - * @param[in] all No shortcuts. Needed except after a full #mdb_page_flush(). - * @return 0 on success, non-zero on failure. 
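mdb_page_malloc()/mdb_page_free() above keep an intrusive singly-linked list of freed single pages (me_dpages) so the next single-page allocation can skip malloc(). A reduced sketch of that reuse pattern with hypothetical names, leaving out the Valgrind hooks and MDB_NOMEMINIT handling:

#include <stdlib.h>

/* Illustrative only: a page whose first field doubles as a free-list link. */
struct page { struct page *next; char payload[4096 - sizeof(struct page *)]; };

static struct page *free_list;            /* analogue of env->me_dpages */

static struct page *page_alloc_single(void)
{
	struct page *p = free_list;
	if (p) {                          /* reuse a previously released page */
		free_list = p->next;
		return p;
	}
	return malloc(sizeof(struct page));
}

static void page_release_single(struct page *p)
{
	p->next = free_list;              /* push back for future reuse */
	free_list = p;
}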
- */ -static int -mdb_pages_xkeep(MDB_cursor *mc, unsigned pflags, int all) -{ - enum { Mask = P_SUBP|P_DIRTY|P_LOOSE|P_KEEP }; - MDB_txn *txn = mc->mc_txn; - MDB_cursor *m3, *m0 = mc; - MDB_xcursor *mx; - MDB_page *dp, *mp; - MDB_node *leaf; - unsigned i, j; - int rc = MDB_SUCCESS, level; - - /* Mark pages seen by cursors */ - if (mc->mc_flags & C_UNTRACK) - mc = NULL; /* will find mc in mt_cursors */ - for (i = txn->mt_numdbs;; mc = txn->mt_cursors[--i]) { - for (; mc; mc=mc->mc_next) { - if (!(mc->mc_flags & C_INITIALIZED)) - continue; - for (m3 = mc;; m3 = &mx->mx_cursor) { - mp = NULL; - for (j=0; j<m3->mc_snum; j++) { - mp = m3->mc_pg[j]; - if ((mp->mp_flags & Mask) == pflags) - mp->mp_flags ^= P_KEEP; - } - mx = m3->mc_xcursor; - /* Proceed to mx if it is at a sub-database */ - if (! (mx && (mx->mx_cursor.mc_flags & C_INITIALIZED))) - break; - if (! (mp && (mp->mp_flags & P_LEAF))) - break; - leaf = NODEPTR(mp, m3->mc_ki[j-1]); - if (!(leaf->mn_flags & F_SUBDATA)) - break; - } - } - if (i == 0) - break; - } - - if (all) { - /* Mark dirty root pages */ - for (i=0; i<txn->mt_numdbs; i++) { - if (txn->mt_dbflags[i] & DB_DIRTY) { - pgno_t pgno = txn->mt_dbs[i].md_root; - if (pgno == P_INVALID) - continue; - if ((rc = mdb_page_get(m0, pgno, &dp, &level)) != MDB_SUCCESS) - break; - if ((dp->mp_flags & Mask) == pflags && level <= 1) - dp->mp_flags ^= P_KEEP; - } - } - } - - return rc; -} - -static int mdb_page_flush(MDB_txn *txn, int keep); - -/** Spill pages from the dirty list back to disk. - * This is intended to prevent running into #MDB_TXN_FULL situations, - * but note that they may still occur in a few cases: - * 1) our estimate of the txn size could be too small. Currently this - * seems unlikely, except with a large number of #MDB_MULTIPLE items. - * 2) child txns may run out of space if their parents dirtied a - * lot of pages and never spilled them. TODO: we probably should do - * a preemptive spill during #mdb_txn_begin() of a child txn, if - * the parent's dirty_room is below a given threshold. - * - * Otherwise, if not using nested txns, it is expected that apps will - * not run into #MDB_TXN_FULL any more. The pages are flushed to disk - * the same way as for a txn commit, e.g. their P_DIRTY flag is cleared. - * If the txn never references them again, they can be left alone. - * If the txn only reads them, they can be used without any fuss. - * If the txn writes them again, they can be dirtied immediately without - * going thru all of the work of #mdb_page_touch(). Such references are - * handled by #mdb_page_unspill(). - * - * Also note, we never spill DB root pages, nor pages of active cursors, - * because we'll need these back again soon anyway. And in nested txns, - * we can't spill a page in a child txn if it was already spilled in a - * parent txn. That would alter the parent txns' data even though - * the child hasn't committed yet, and we'd have no way to undo it if - * the child aborted. - * - * @param[in] m0 cursor A cursor handle identifying the transaction and - * database for which we are checking space. - * @param[in] key For a put operation, the key being stored. - * @param[in] data For a put operation, the data being stored. - * @return 0 on success, non-zero on failure. 
- */ -static int -mdb_page_spill(MDB_cursor *m0, MDB_val *key, MDB_val *data) -{ - MDB_txn *txn = m0->mc_txn; - MDB_page *dp; - MDB_ID2L dl = txn->mt_u.dirty_list; - unsigned int i, j, need; - int rc; - - if (m0->mc_flags & C_SUB) - return MDB_SUCCESS; - - /* Estimate how much space this op will take */ - i = m0->mc_db->md_depth; - /* Named DBs also dirty the main DB */ - if (m0->mc_dbi >= CORE_DBS) - i += txn->mt_dbs[MAIN_DBI].md_depth; - /* For puts, roughly factor in the key+data size */ - if (key) - i += (LEAFSIZE(key, data) + txn->mt_env->me_psize) / txn->mt_env->me_psize; - i += i; /* double it for good measure */ - need = i; - - if (txn->mt_dirty_room > i) - return MDB_SUCCESS; - - if (!txn->mt_spill_pgs) { - txn->mt_spill_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX); - if (!txn->mt_spill_pgs) - return ENOMEM; - } else { - /* purge deleted slots */ - MDB_IDL sl = txn->mt_spill_pgs; - unsigned int num = sl[0]; - j=0; - for (i=1; i<=num; i++) { - if (!(sl[i] & 1)) - sl[++j] = sl[i]; - } - sl[0] = j; - } - - /* Preserve pages which may soon be dirtied again */ - if ((rc = mdb_pages_xkeep(m0, P_DIRTY, 1)) != MDB_SUCCESS) - goto done; - - /* Less aggressive spill - we originally spilled the entire dirty list, - * with a few exceptions for cursor pages and DB root pages. But this - * turns out to be a lot of wasted effort because in a large txn many - * of those pages will need to be used again. So now we spill only 1/8th - * of the dirty pages. Testing revealed this to be a good tradeoff, - * better than 1/2, 1/4, or 1/10. - */ - if (need < MDB_IDL_UM_MAX / 8) - need = MDB_IDL_UM_MAX / 8; - - /* Save the page IDs of all the pages we're flushing */ - /* flush from the tail forward, this saves a lot of shifting later on. */ - for (i=dl[0].mid; i && need; i--) { - MDB_ID pn = dl[i].mid << 1; - dp = dl[i].mptr; - if (dp->mp_flags & (P_LOOSE|P_KEEP)) - continue; - /* Can't spill twice, make sure it's not already in a parent's - * spill list. - */ - if (txn->mt_parent) { - MDB_txn *tx2; - for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) { - if (tx2->mt_spill_pgs) { - j = mdb_midl_search(tx2->mt_spill_pgs, pn); - if (j <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[j] == pn) { - dp->mp_flags |= P_KEEP; - break; - } - } - } - if (tx2) - continue; - } - if ((rc = mdb_midl_append(&txn->mt_spill_pgs, pn))) - goto done; - need--; - } - mdb_midl_sort(txn->mt_spill_pgs); - - /* Flush the spilled part of dirty list */ - if ((rc = mdb_page_flush(txn, i)) != MDB_SUCCESS) - goto done; - - /* Reset any dirty pages we kept that page_flush didn't see */ - rc = mdb_pages_xkeep(m0, P_DIRTY|P_KEEP, i); - -done: - txn->mt_flags |= rc ? MDB_TXN_ERROR : MDB_TXN_SPILLS; - return rc; -} - -/** Find oldest txnid still referenced. Expects txn->mt_txnid > 0. 
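The spill path above starts by estimating how many pages the current operation may dirty before deciding whether to flush part of the dirty list. Restated as a standalone helper with illustrative names (LEAFSIZE in the original is roughly key size plus data size plus node overhead):

#include <stddef.h>

/* Rough worst-case dirty-page estimate for one put, mirroring the logic above:
 * tree depth (plus main DB depth for named DBs), plus pages needed for the
 * key/data payload, then doubled for good measure. */
static size_t estimate_dirty_pages(size_t db_depth, size_t main_db_depth,
                                   size_t key_size, size_t data_size, size_t page_size)
{
	size_t need = db_depth + main_db_depth;
	need += (key_size + data_size + page_size) / page_size;
	return need * 2;
}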
*/ -static txnid_t -mdb_find_oldest(MDB_txn *txn) -{ - int i; - txnid_t mr, oldest = txn->mt_txnid - 1; - if (txn->mt_env->me_txns) { - MDB_reader *r = txn->mt_env->me_txns->mti_readers; - for (i = txn->mt_env->me_txns->mti_numreaders; --i >= 0; ) { - if (r[i].mr_pid) { - mr = r[i].mr_txnid; - if (oldest > mr) - oldest = mr; - } - } - } - return oldest; -} - -/** Add a page to the txn's dirty list */ -static void -mdb_page_dirty(MDB_txn *txn, MDB_page *mp) -{ - MDB_ID2 mid; - int rc, (*insert)(MDB_ID2L, MDB_ID2 *); - - if (txn->mt_flags & MDB_TXN_WRITEMAP) { - insert = mdb_mid2l_append; - } else { - insert = mdb_mid2l_insert; - } - mid.mid = mp->mp_pgno; - mid.mptr = mp; - rc = insert(txn->mt_u.dirty_list, &mid); - mdb_tassert(txn, rc == 0); - txn->mt_dirty_room--; -} - -/** Allocate page numbers and memory for writing. Maintain me_pglast, - * me_pghead and mt_next_pgno. - * - * If there are free pages available from older transactions, they - * are re-used first. Otherwise allocate a new page at mt_next_pgno. - * Do not modify the freedB, just merge freeDB records into me_pghead[] - * and move me_pglast to say which records were consumed. Only this - * function can create me_pghead and move me_pglast/mt_next_pgno. - * When #MDB_DEVEL & 2, it is not affected by #mdb_freelist_save(): it - * then uses the transaction's original snapshot of the freeDB. - * @param[in] mc cursor A cursor handle identifying the transaction and - * database for which we are allocating. - * @param[in] num the number of pages to allocate. - * @param[out] mp Address of the allocated page(s). Requests for multiple pages - * will always be satisfied by a single contiguous chunk of memory. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp) -{ -#ifdef MDB_PARANOID /* Seems like we can ignore this now */ - /* Get at most <Max_retries> more freeDB records once me_pghead - * has enough pages. If not enough, use new pages from the map. - * If <Paranoid> and mc is updating the freeDB, only get new - * records if me_pghead is empty. Then the freelist cannot play - * catch-up with itself by growing while trying to save it. - */ - enum { Paranoid = 1, Max_retries = 500 }; -#else - enum { Paranoid = 0, Max_retries = INT_MAX /*infinite*/ }; -#endif - int rc, retry = num * 60; - MDB_txn *txn = mc->mc_txn; - MDB_env *env = txn->mt_env; - pgno_t pgno, *mop = env->me_pghead; - unsigned i, j, mop_len = mop ? mop[0] : 0, n2 = num-1; - MDB_page *np; - txnid_t oldest = 0, last; - MDB_cursor_op op; - MDB_cursor m2; - int found_old = 0; - - /* If there are any loose pages, just use them */ - if (num == 1 && txn->mt_loose_pgs) { - np = txn->mt_loose_pgs; - txn->mt_loose_pgs = NEXT_LOOSE_PAGE(np); - txn->mt_loose_count--; - DPRINTF(("db %d use loose page %"Y"u", DDBI(mc), - np->mp_pgno)); - *mp = np; - return MDB_SUCCESS; - } - - *mp = NULL; - - /* If our dirty list is already full, we can't do anything */ - if (txn->mt_dirty_room == 0) { - rc = MDB_TXN_FULL; - goto fail; - } - - for (op = MDB_FIRST;; op = MDB_NEXT) { - MDB_val key, data; - MDB_node *leaf; - pgno_t *idl; - - /* Seek a big enough contiguous page range. Prefer - * pages at the tail, just truncating the list. 
- */ - if (mop_len > n2) { - i = mop_len; - do { - pgno = mop[i]; - if (mop[i-n2] == pgno+n2) - goto search_done; - } while (--i > n2); - if (--retry < 0) - break; - } - - if (op == MDB_FIRST) { /* 1st iteration */ - /* Prepare to fetch more and coalesce */ - last = env->me_pglast; - oldest = env->me_pgoldest; - mdb_cursor_init(&m2, txn, FREE_DBI, NULL); -#if (MDB_DEVEL) & 2 /* "& 2" so MDB_DEVEL=1 won't hide bugs breaking freeDB */ - /* Use original snapshot. TODO: Should need less care in code - * which modifies the database. Maybe we can delete some code? - */ - m2.mc_flags |= C_ORIG_RDONLY; - m2.mc_db = &env->me_metas[(txn->mt_txnid-1) & 1]->mm_dbs[FREE_DBI]; - m2.mc_dbflag = (unsigned char *)""; /* probably unnecessary */ -#endif - if (last) { - op = MDB_SET_RANGE; - key.mv_data = &last; /* will look up last+1 */ - key.mv_size = sizeof(last); - } - if (Paranoid && mc->mc_dbi == FREE_DBI) - retry = -1; - } - if (Paranoid && retry < 0 && mop_len) - break; - - last++; - /* Do not fetch more if the record will be too recent */ - if (oldest <= last) { - if (!found_old) { - oldest = mdb_find_oldest(txn); - env->me_pgoldest = oldest; - found_old = 1; - } - if (oldest <= last) - break; - } - rc = mdb_cursor_get(&m2, &key, NULL, op); - if (rc) { - if (rc == MDB_NOTFOUND) - break; - goto fail; - } - last = *(txnid_t*)key.mv_data; - if (oldest <= last) { - if (!found_old) { - oldest = mdb_find_oldest(txn); - env->me_pgoldest = oldest; - found_old = 1; - } - if (oldest <= last) - break; - } - np = m2.mc_pg[m2.mc_top]; - leaf = NODEPTR(np, m2.mc_ki[m2.mc_top]); - if ((rc = mdb_node_read(&m2, leaf, &data)) != MDB_SUCCESS) - return rc; - - idl = (MDB_ID *) data.mv_data; - i = idl[0]; - if (!mop) { - if (!(env->me_pghead = mop = mdb_midl_alloc(i))) { - rc = ENOMEM; - goto fail; - } - } else { - if ((rc = mdb_midl_need(&env->me_pghead, i)) != 0) - goto fail; - mop = env->me_pghead; - } - env->me_pglast = last; -#if (MDB_DEBUG) > 1 - DPRINTF(("IDL read txn %"Y"u root %"Y"u num %u", - last, txn->mt_dbs[FREE_DBI].md_root, i)); - for (j = i; j; j--) - DPRINTF(("IDL %"Y"u", idl[j])); -#endif - /* Merge in descending sorted order */ - mdb_midl_xmerge(mop, idl); - mop_len = mop[0]; - } - - /* Use new pages from the map when nothing suitable in the freeDB */ - i = 0; - pgno = txn->mt_next_pgno; - if (pgno + num >= env->me_maxpg) { - DPUTS("DB size maxed out"); - rc = MDB_MAP_FULL; - goto fail; - } -#if defined(_WIN32) && !defined(MDB_VL32) - if (!(env->me_flags & MDB_RDONLY)) { - void *p; - p = (MDB_page *)(env->me_map + env->me_psize * pgno); - p = VirtualAlloc(p, env->me_psize * num, MEM_COMMIT, - (env->me_flags & MDB_WRITEMAP) ? PAGE_READWRITE: - PAGE_READONLY); - if (!p) { - DPUTS("VirtualAlloc failed"); - rc = ErrCode(); - goto fail; - } - } -#endif - -search_done: - if (env->me_flags & MDB_WRITEMAP) { - np = (MDB_page *)(env->me_map + env->me_psize * pgno); - } else { - if (!(np = mdb_page_malloc(txn, num))) { - rc = ENOMEM; - goto fail; - } - } - if (i) { - mop[0] = mop_len -= num; - /* Move any stragglers down */ - for (j = i-num; j < mop_len; ) - mop[++j] = mop[++i]; - } else { - txn->mt_next_pgno = pgno + num; - } - np->mp_pgno = pgno; - mdb_page_dirty(txn, np); - *mp = np; - - return MDB_SUCCESS; - -fail: - txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -/** Copy the used portions of a non-overflow page. 
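The allocator above scans the freelist — an ID list sorted in descending order with the element count stored at index 0 — for a run of num consecutive page numbers, preferring entries near the tail so the list can simply be truncated. The same search in isolation, with hypothetical types; because the entries are distinct and strictly descending, matching the two endpoints is enough to prove the whole run is present:

#include <stddef.h>

typedef size_t pgno_t;

/* idl[0] holds the count; idl[1..count] are page numbers sorted descending.
 * Returns the index of the lowest page of a run of `num` consecutive pages,
 * or 0 if no such run exists. */
static size_t find_contiguous(const pgno_t *idl, size_t num)
{
	size_t n2 = num - 1, i = idl[0];

	if (i < num)
		return 0;
	do {
		if (idl[i - n2] == idl[i] + n2)
			return i;          /* idl[i] .. idl[i]+n2 are all present */
	} while (--i > n2);
	return 0;
}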
- * @param[in] dst page to copy into - * @param[in] src page to copy from - * @param[in] psize size of a page - */ -static void -mdb_page_copy(MDB_page *dst, MDB_page *src, unsigned int psize) -{ - enum { Align = sizeof(pgno_t) }; - indx_t upper = src->mp_upper, lower = src->mp_lower, unused = upper-lower; - - /* If page isn't full, just copy the used portion. Adjust - * alignment so memcpy may copy words instead of bytes. - */ - if ((unused &= -Align) && !IS_LEAF2(src)) { - upper = (upper + PAGEBASE) & -Align; - memcpy(dst, src, (lower + PAGEBASE + (Align-1)) & -Align); - memcpy((pgno_t *)((char *)dst+upper), (pgno_t *)((char *)src+upper), - psize - upper); - } else { - memcpy(dst, src, psize - unused); - } -} - -/** Pull a page off the txn's spill list, if present. - * If a page being referenced was spilled to disk in this txn, bring - * it back and make it dirty/writable again. - * @param[in] txn the transaction handle. - * @param[in] mp the page being referenced. It must not be dirty. - * @param[out] ret the writable page, if any. ret is unchanged if - * mp wasn't spilled. - */ -static int -mdb_page_unspill(MDB_txn *txn, MDB_page *mp, MDB_page **ret) -{ - MDB_env *env = txn->mt_env; - const MDB_txn *tx2; - unsigned x; - pgno_t pgno = mp->mp_pgno, pn = pgno << 1; - - for (tx2 = txn; tx2; tx2=tx2->mt_parent) { - if (!tx2->mt_spill_pgs) - continue; - x = mdb_midl_search(tx2->mt_spill_pgs, pn); - if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) { - MDB_page *np; - int num; - if (txn->mt_dirty_room == 0) - return MDB_TXN_FULL; - if (IS_OVERFLOW(mp)) - num = mp->mp_pages; - else - num = 1; - if (env->me_flags & MDB_WRITEMAP) { - np = mp; - } else { - np = mdb_page_malloc(txn, num); - if (!np) - return ENOMEM; - if (num > 1) - memcpy(np, mp, num * env->me_psize); - else - mdb_page_copy(np, mp, env->me_psize); - } - if (tx2 == txn) { - /* If in current txn, this page is no longer spilled. - * If it happens to be the last page, truncate the spill list. - * Otherwise mark it as deleted by setting the LSB. - */ - if (x == txn->mt_spill_pgs[0]) - txn->mt_spill_pgs[0]--; - else - txn->mt_spill_pgs[x] |= 1; - } /* otherwise, if belonging to a parent txn, the - * page remains spilled until child commits - */ - - mdb_page_dirty(txn, np); - np->mp_flags |= P_DIRTY; - *ret = np; - break; - } - } - return MDB_SUCCESS; -} - -/** Touch a page: make it dirty and re-insert into tree with updated pgno. - * @param[in] mc cursor pointing to the page to be touched - * @return 0 on success, non-zero on failure. 
- */ -static int -mdb_page_touch(MDB_cursor *mc) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top], *np; - MDB_txn *txn = mc->mc_txn; - MDB_cursor *m2, *m3; - pgno_t pgno; - int rc; - - if (!F_ISSET(mp->mp_flags, P_DIRTY)) { - if (txn->mt_flags & MDB_TXN_SPILLS) { - np = NULL; - rc = mdb_page_unspill(txn, mp, &np); - if (rc) - goto fail; - if (np) - goto done; - } - if ((rc = mdb_midl_need(&txn->mt_free_pgs, 1)) || - (rc = mdb_page_alloc(mc, 1, &np))) - goto fail; - pgno = np->mp_pgno; - DPRINTF(("touched db %d page %"Y"u -> %"Y"u", DDBI(mc), - mp->mp_pgno, pgno)); - mdb_cassert(mc, mp->mp_pgno != pgno); - mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno); - /* Update the parent page, if any, to point to the new page */ - if (mc->mc_top) { - MDB_page *parent = mc->mc_pg[mc->mc_top-1]; - MDB_node *node = NODEPTR(parent, mc->mc_ki[mc->mc_top-1]); - SETPGNO(node, pgno); - } else { - mc->mc_db->md_root = pgno; - } - } else if (txn->mt_parent && !IS_SUBP(mp)) { - MDB_ID2 mid, *dl = txn->mt_u.dirty_list; - pgno = mp->mp_pgno; - /* If txn has a parent, make sure the page is in our - * dirty list. - */ - if (dl[0].mid) { - unsigned x = mdb_mid2l_search(dl, pgno); - if (x <= dl[0].mid && dl[x].mid == pgno) { - if (mp != dl[x].mptr) { /* bad cursor? */ - mc->mc_flags &= ~(C_INITIALIZED|C_EOF); - txn->mt_flags |= MDB_TXN_ERROR; - return MDB_CORRUPTED; - } - return 0; - } - } - mdb_cassert(mc, dl[0].mid < MDB_IDL_UM_MAX); - /* No - copy it */ - np = mdb_page_malloc(txn, 1); - if (!np) - return ENOMEM; - mid.mid = pgno; - mid.mptr = np; - rc = mdb_mid2l_insert(dl, &mid); - mdb_cassert(mc, rc == 0); - } else { - return 0; - } - - mdb_page_copy(np, mp, txn->mt_env->me_psize); - np->mp_pgno = pgno; - np->mp_flags |= P_DIRTY; - -done: - /* Adjust cursors pointing to mp */ - mc->mc_pg[mc->mc_top] = np; - m2 = txn->mt_cursors[mc->mc_dbi]; - if (mc->mc_flags & C_SUB) { - for (; m2; m2=m2->mc_next) { - m3 = &m2->mc_xcursor->mx_cursor; - if (m3->mc_snum < mc->mc_snum) continue; - if (m3->mc_pg[mc->mc_top] == mp) - m3->mc_pg[mc->mc_top] = np; - } - } else { - for (; m2; m2=m2->mc_next) { - if (m2->mc_snum < mc->mc_snum) continue; - if (m2 == mc) continue; - if (m2->mc_pg[mc->mc_top] == mp) { - m2->mc_pg[mc->mc_top] = np; - if ((mc->mc_db->md_flags & MDB_DUPSORT) && - IS_LEAF(np) && - (m2->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) - { - MDB_node *leaf = NODEPTR(np, m2->mc_ki[mc->mc_top]); - if ((leaf->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) - m2->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); - } - } - } - } - MDB_PAGE_UNREF(mc->mc_txn, mp); - return 0; - -fail: - txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -int -mdb_env_sync0(MDB_env *env, int force, pgno_t numpgs) -{ - int rc = 0; - if (env->me_flags & MDB_RDONLY) - return EACCES; - if (force || !F_ISSET(env->me_flags, MDB_NOSYNC)) { - if (env->me_flags & MDB_WRITEMAP) { - int flags = ((env->me_flags & MDB_MAPASYNC) && !force) - ? 
MS_ASYNC : MS_SYNC; - if (MDB_MSYNC(env->me_map, env->me_psize * numpgs, flags)) - rc = ErrCode(); -#ifdef _WIN32 - else if (flags == MS_SYNC && MDB_FDATASYNC(env->me_fd)) - rc = ErrCode(); -#endif - } else { -#ifdef BROKEN_FDATASYNC - if (env->me_flags & MDB_FSYNCONLY) { - if (fsync(env->me_fd)) - rc = ErrCode(); - } else -#endif - if (MDB_FDATASYNC(env->me_fd)) - rc = ErrCode(); - } - } - return rc; -} - -int -mdb_env_sync(MDB_env *env, int force) -{ - MDB_meta *m = mdb_env_pick_meta(env); - return mdb_env_sync0(env, force, m->mm_last_pg+1); -} - -/** Back up parent txn's cursors, then grab the originals for tracking */ -static int -mdb_cursor_shadow(MDB_txn *src, MDB_txn *dst) -{ - MDB_cursor *mc, *bk; - MDB_xcursor *mx; - size_t size; - int i; - - for (i = src->mt_numdbs; --i >= 0; ) { - if ((mc = src->mt_cursors[i]) != NULL) { - size = sizeof(MDB_cursor); - if (mc->mc_xcursor) - size += sizeof(MDB_xcursor); - for (; mc; mc = bk->mc_next) { - bk = malloc(size); - if (!bk) - return ENOMEM; - *bk = *mc; - mc->mc_backup = bk; - mc->mc_db = &dst->mt_dbs[i]; - /* Kill pointers into src to reduce abuse: The - * user may not use mc until dst ends. But we need a valid - * txn pointer here for cursor fixups to keep working. - */ - mc->mc_txn = dst; - mc->mc_dbflag = &dst->mt_dbflags[i]; - if ((mx = mc->mc_xcursor) != NULL) { - *(MDB_xcursor *)(bk+1) = *mx; - mx->mx_cursor.mc_txn = dst; - } - mc->mc_next = dst->mt_cursors[i]; - dst->mt_cursors[i] = mc; - } - } - } - return MDB_SUCCESS; -} - -/** Close this write txn's cursors, give parent txn's cursors back to parent. - * @param[in] txn the transaction handle. - * @param[in] merge true to keep changes to parent cursors, false to revert. - * @return 0 on success, non-zero on failure. - */ -static void -mdb_cursors_close(MDB_txn *txn, unsigned merge) -{ - MDB_cursor **cursors = txn->mt_cursors, *mc, *next, *bk; - MDB_xcursor *mx; - int i; - - for (i = txn->mt_numdbs; --i >= 0; ) { - for (mc = cursors[i]; mc; mc = next) { - next = mc->mc_next; - if ((bk = mc->mc_backup) != NULL) { - if (merge) { - /* Commit changes to parent txn */ - mc->mc_next = bk->mc_next; - mc->mc_backup = bk->mc_backup; - mc->mc_txn = bk->mc_txn; - mc->mc_db = bk->mc_db; - mc->mc_dbflag = bk->mc_dbflag; - if ((mx = mc->mc_xcursor) != NULL) - mx->mx_cursor.mc_txn = bk->mc_txn; - } else { - /* Abort nested txn */ - *mc = *bk; - if ((mx = mc->mc_xcursor) != NULL) - *mx = *(MDB_xcursor *)(bk+1); - } - mc = bk; - } - /* Only malloced cursors are permanently tracked. */ - free(mc); - } - cursors[i] = NULL; - } -} - -#if !(MDB_PIDLOCK) /* Currently the same as defined(_WIN32) */ -enum Pidlock_op { - Pidset, Pidcheck -}; -#else -enum Pidlock_op { - Pidset = F_SETLK, Pidcheck = F_GETLK -}; -#endif - -/** Set or check a pid lock. Set returns 0 on success. - * Check returns 0 if the process is certainly dead, nonzero if it may - * be alive (the lock exists or an error happened so we do not know). - * - * On Windows Pidset is a no-op, we merely check for the existence - * of the process with the given pid. On POSIX we use a single byte - * lock on the lockfile, set at an offset equal to the pid. 
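mdb_env_sync() above is the public entry point for forcing buffered data to disk when the environment was opened with MDB_NOSYNC or MDB_MAPASYNC. A caller-side sketch; the path and file mode are placeholders:

#include <stdio.h>
#include <lmdb.h>

static int open_nosync_env(const char *path, MDB_env **out)
{
	MDB_env *env;
	int rc;

	if ((rc = mdb_env_create(&env)) != MDB_SUCCESS)
		return rc;
	/* Trade durability for speed; the application syncs explicitly. */
	rc = mdb_env_open(env, path, MDB_NOSYNC, 0664);
	if (rc != MDB_SUCCESS) {
		mdb_env_close(env);
		return rc;
	}
	*out = env;
	return MDB_SUCCESS;
}

static int checkpoint(MDB_env *env)
{
	int rc = mdb_env_sync(env, 1);   /* force = 1: flush even with MDB_NOSYNC */
	if (rc)
		fprintf(stderr, "sync failed: %s\n", mdb_strerror(rc));
	return rc;
}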
- */ -static int -mdb_reader_pid(MDB_env *env, enum Pidlock_op op, MDB_PID_T pid) -{ -#if !(MDB_PIDLOCK) /* Currently the same as defined(_WIN32) */ - int ret = 0; - HANDLE h; - if (op == Pidcheck) { - h = OpenProcess(env->me_pidquery, FALSE, pid); - /* No documented "no such process" code, but other program use this: */ - if (!h) - return ErrCode() != ERROR_INVALID_PARAMETER; - /* A process exists until all handles to it close. Has it exited? */ - ret = WaitForSingleObject(h, 0) != 0; - CloseHandle(h); - } - return ret; -#else - for (;;) { - int rc; - struct flock lock_info; - memset(&lock_info, 0, sizeof(lock_info)); - lock_info.l_type = F_WRLCK; - lock_info.l_whence = SEEK_SET; - lock_info.l_start = pid; - lock_info.l_len = 1; - if ((rc = fcntl(env->me_lfd, op, &lock_info)) == 0) { - if (op == F_GETLK && lock_info.l_type != F_UNLCK) - rc = -1; - } else if ((rc = ErrCode()) == EINTR) { - continue; - } - return rc; - } -#endif -} - -/** Common code for #mdb_txn_begin() and #mdb_txn_renew(). - * @param[in] txn the transaction handle to initialize - * @return 0 on success, non-zero on failure. - */ -static int -mdb_txn_renew0(MDB_txn *txn) -{ - MDB_env *env = txn->mt_env; - MDB_txninfo *ti = env->me_txns; - MDB_meta *meta; - unsigned int i, nr, flags = txn->mt_flags; - uint16_t x; - int rc, new_notls = 0; - - if ((flags &= MDB_TXN_RDONLY) != 0) { - if (!ti) { - meta = mdb_env_pick_meta(env); - txn->mt_txnid = meta->mm_txnid; - txn->mt_u.reader = NULL; - } else { - MDB_reader *r = (env->me_flags & MDB_NOTLS) ? txn->mt_u.reader : - pthread_getspecific(env->me_txkey); - if (r) { - if (r->mr_pid != env->me_pid || r->mr_txnid != (txnid_t)-1) - return MDB_BAD_RSLOT; - } else { - MDB_PID_T pid = env->me_pid; - MDB_THR_T tid = pthread_self(); - mdb_mutexref_t rmutex = env->me_rmutex; - - if (!env->me_live_reader) { - rc = mdb_reader_pid(env, Pidset, pid); - if (rc) - return rc; - env->me_live_reader = 1; - } - - if (LOCK_MUTEX(rc, env, rmutex)) - return rc; - nr = ti->mti_numreaders; - for (i=0; i<nr; i++) - if (ti->mti_readers[i].mr_pid == 0) - break; - if (i == env->me_maxreaders) { - UNLOCK_MUTEX(rmutex); - return MDB_READERS_FULL; - } - r = &ti->mti_readers[i]; - /* Claim the reader slot, carefully since other code - * uses the reader table un-mutexed: First reset the - * slot, next publish it in mti_numreaders. After - * that, it is safe for mdb_env_close() to touch it. - * When it will be closed, we can finally claim it. - */ - r->mr_pid = 0; - r->mr_txnid = (txnid_t)-1; - r->mr_tid = tid; - if (i == nr) - ti->mti_numreaders = ++nr; - env->me_close_readers = nr; - r->mr_pid = pid; - UNLOCK_MUTEX(rmutex); - - new_notls = (env->me_flags & MDB_NOTLS); - if (!new_notls && (rc=pthread_setspecific(env->me_txkey, r))) { - r->mr_pid = 0; - return rc; - } - } - do /* LY: Retry on a race, ITS#7970. 
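On POSIX the reader-liveness check above relies on a one-byte fcntl() lock in the lock file at an offset equal to the pid: a live process holds its byte, and F_GETLK on that byte tells other processes whether the owner is still alive. The same idea in isolation, with a hypothetical lock-file descriptor supplied by the caller:

#include <sys/types.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Take (op = F_SETLK) or probe (op = F_GETLK) the one-byte slot for `pid`.
 * Returns 0 when the slot is free or acquired, 1 when it is still held,
 * -1 on error. */
static int pid_byte_lock(int lockfd, int op, pid_t pid)
{
	struct flock lk;

	memset(&lk, 0, sizeof(lk));
	lk.l_type = F_WRLCK;
	lk.l_whence = SEEK_SET;
	lk.l_start = pid;          /* each process locks the byte at its own pid */
	lk.l_len = 1;
	if (fcntl(lockfd, op, &lk) != 0)
		return -1;
	if (op == F_GETLK && lk.l_type != F_UNLCK)
		return 1;              /* the owning process still holds its byte */
	return 0;
}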
*/ - r->mr_txnid = ti->mti_txnid; - while(r->mr_txnid != ti->mti_txnid); - txn->mt_txnid = r->mr_txnid; - txn->mt_u.reader = r; - meta = env->me_metas[txn->mt_txnid & 1]; - } - - } else { - /* Not yet touching txn == env->me_txn0, it may be active */ - if (ti) { - if (LOCK_MUTEX(rc, env, env->me_wmutex)) - return rc; - txn->mt_txnid = ti->mti_txnid; - meta = env->me_metas[txn->mt_txnid & 1]; - } else { - meta = mdb_env_pick_meta(env); - txn->mt_txnid = meta->mm_txnid; - } - txn->mt_txnid++; -#if MDB_DEBUG - if (txn->mt_txnid == mdb_debug_start) - mdb_debug = 1; -#endif - txn->mt_child = NULL; - txn->mt_loose_pgs = NULL; - txn->mt_loose_count = 0; - txn->mt_dirty_room = MDB_IDL_UM_MAX; - txn->mt_u.dirty_list = env->me_dirty_list; - txn->mt_u.dirty_list[0].mid = 0; - txn->mt_free_pgs = env->me_free_pgs; - txn->mt_free_pgs[0] = 0; - txn->mt_spill_pgs = NULL; - env->me_txn = txn; - memcpy(txn->mt_dbiseqs, env->me_dbiseqs, env->me_maxdbs * sizeof(unsigned int)); - } - - /* Copy the DB info and flags */ - memcpy(txn->mt_dbs, meta->mm_dbs, CORE_DBS * sizeof(MDB_db)); - - /* Moved to here to avoid a data race in read TXNs */ - txn->mt_next_pgno = meta->mm_last_pg+1; -#ifdef MDB_VL32 - txn->mt_last_pgno = txn->mt_next_pgno - 1; -#endif - - txn->mt_flags = flags; - - /* Setup db info */ - txn->mt_numdbs = env->me_numdbs; - for (i=CORE_DBS; i<txn->mt_numdbs; i++) { - x = env->me_dbflags[i]; - txn->mt_dbs[i].md_flags = x & PERSISTENT_FLAGS; - txn->mt_dbflags[i] = (x & MDB_VALID) ? DB_VALID|DB_USRVALID|DB_STALE : 0; - } - txn->mt_dbflags[MAIN_DBI] = DB_VALID|DB_USRVALID; - txn->mt_dbflags[FREE_DBI] = DB_VALID; - - if (env->me_flags & MDB_FATAL_ERROR) { - DPUTS("environment had fatal error, must shutdown!"); - rc = MDB_PANIC; - } else if (env->me_maxpg < txn->mt_next_pgno) { - rc = MDB_MAP_RESIZED; - } else { - return MDB_SUCCESS; - } - mdb_txn_end(txn, new_notls /*0 or MDB_END_SLOT*/ | MDB_END_FAIL_BEGIN); - return rc; -} - -int -mdb_txn_renew(MDB_txn *txn) -{ - int rc; - - if (!txn || !F_ISSET(txn->mt_flags, MDB_TXN_RDONLY|MDB_TXN_FINISHED)) - return EINVAL; - - rc = mdb_txn_renew0(txn); - if (rc == MDB_SUCCESS) { - DPRINTF(("renew txn %"Y"u%c %p on mdbenv %p, root page %"Y"u", - txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w', - (void *)txn, (void *)txn->mt_env, txn->mt_dbs[MAIN_DBI].md_root)); - } - return rc; -} - -int -mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **ret) -{ - MDB_txn *txn; - MDB_ntxn *ntxn; - int rc, size, tsize; - - flags &= MDB_TXN_BEGIN_FLAGS; - flags |= env->me_flags & MDB_WRITEMAP; - - if (env->me_flags & MDB_RDONLY & ~flags) /* write txn in RDONLY env */ - return EACCES; - - if (parent) { - /* Nested transactions: Max 1 child, write txns only, no writemap */ - flags |= parent->mt_flags; - if (flags & (MDB_RDONLY|MDB_WRITEMAP|MDB_TXN_BLOCKED)) { - return (parent->mt_flags & MDB_TXN_RDONLY) ? EINVAL : MDB_BAD_TXN; - } - /* Child txns save MDB_pgstate and use own copy of cursors */ - size = env->me_maxdbs * (sizeof(MDB_db)+sizeof(MDB_cursor *)+1); - size += tsize = sizeof(MDB_ntxn); - } else if (flags & MDB_RDONLY) { - size = env->me_maxdbs * (sizeof(MDB_db)+1); - size += tsize = sizeof(MDB_txn); - } else { - /* Reuse preallocated write txn. However, do not touch it until - * mdb_txn_renew0() succeeds, since it currently may be active. 
- */ - txn = env->me_txn0; - goto renew; - } - if ((txn = calloc(1, size)) == NULL) { - DPRINTF(("calloc: %s", strerror(errno))); - return ENOMEM; - } -#ifdef MDB_VL32 - if (!parent) { - txn->mt_rpages = malloc(MDB_TRPAGE_SIZE * sizeof(MDB_ID3)); - if (!txn->mt_rpages) { - free(txn); - return ENOMEM; - } - txn->mt_rpages[0].mid = 0; - txn->mt_rpcheck = MDB_TRPAGE_SIZE/2; - } -#endif - txn->mt_dbxs = env->me_dbxs; /* static */ - txn->mt_dbs = (MDB_db *) ((char *)txn + tsize); - txn->mt_dbflags = (unsigned char *)txn + size - env->me_maxdbs; - txn->mt_flags = flags; - txn->mt_env = env; - - if (parent) { - unsigned int i; - txn->mt_cursors = (MDB_cursor **)(txn->mt_dbs + env->me_maxdbs); - txn->mt_dbiseqs = parent->mt_dbiseqs; - txn->mt_u.dirty_list = malloc(sizeof(MDB_ID2)*MDB_IDL_UM_SIZE); - if (!txn->mt_u.dirty_list || - !(txn->mt_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX))) - { - free(txn->mt_u.dirty_list); - free(txn); - return ENOMEM; - } - txn->mt_txnid = parent->mt_txnid; - txn->mt_dirty_room = parent->mt_dirty_room; - txn->mt_u.dirty_list[0].mid = 0; - txn->mt_spill_pgs = NULL; - txn->mt_next_pgno = parent->mt_next_pgno; - parent->mt_flags |= MDB_TXN_HAS_CHILD; - parent->mt_child = txn; - txn->mt_parent = parent; - txn->mt_numdbs = parent->mt_numdbs; -#ifdef MDB_VL32 - txn->mt_rpages = parent->mt_rpages; -#endif - memcpy(txn->mt_dbs, parent->mt_dbs, txn->mt_numdbs * sizeof(MDB_db)); - /* Copy parent's mt_dbflags, but clear DB_NEW */ - for (i=0; i<txn->mt_numdbs; i++) - txn->mt_dbflags[i] = parent->mt_dbflags[i] & ~DB_NEW; - rc = 0; - ntxn = (MDB_ntxn *)txn; - ntxn->mnt_pgstate = env->me_pgstate; /* save parent me_pghead & co */ - if (env->me_pghead) { - size = MDB_IDL_SIZEOF(env->me_pghead); - env->me_pghead = mdb_midl_alloc(env->me_pghead[0]); - if (env->me_pghead) - memcpy(env->me_pghead, ntxn->mnt_pgstate.mf_pghead, size); - else - rc = ENOMEM; - } - if (!rc) - rc = mdb_cursor_shadow(parent, txn); - if (rc) - mdb_txn_end(txn, MDB_END_FAIL_BEGINCHILD); - } else { /* MDB_RDONLY */ - txn->mt_dbiseqs = env->me_dbiseqs; -renew: - rc = mdb_txn_renew0(txn); - } - if (rc) { - if (txn != env->me_txn0) { -#ifdef MDB_VL32 - free(txn->mt_rpages); -#endif - free(txn); - } - } else { - txn->mt_flags |= flags; /* could not change txn=me_txn0 earlier */ - *ret = txn; - DPRINTF(("begin txn %"Y"u%c %p on mdbenv %p, root page %"Y"u", - txn->mt_txnid, (flags & MDB_RDONLY) ? 'r' : 'w', - (void *) txn, (void *) env, txn->mt_dbs[MAIN_DBI].md_root)); - } - - return rc; -} - -MDB_env * -mdb_txn_env(MDB_txn *txn) -{ - if(!txn) return NULL; - return txn->mt_env; -} - -mdb_size_t -mdb_txn_id(MDB_txn *txn) -{ - if(!txn) return 0; - return txn->mt_txnid; -} - -/** Export or close DBI handles opened in this txn. */ -static void -mdb_dbis_update(MDB_txn *txn, int keep) -{ - int i; - MDB_dbi n = txn->mt_numdbs; - MDB_env *env = txn->mt_env; - unsigned char *tdbflags = txn->mt_dbflags; - - for (i = n; --i >= CORE_DBS;) { - if (tdbflags[i] & DB_NEW) { - if (keep) { - env->me_dbflags[i] = txn->mt_dbs[i].md_flags | MDB_VALID; - } else { - char *ptr = env->me_dbxs[i].md_name.mv_data; - if (ptr) { - env->me_dbxs[i].md_name.mv_data = NULL; - env->me_dbxs[i].md_name.mv_size = 0; - env->me_dbflags[i] = 0; - env->me_dbiseqs[i]++; - free(ptr); - } - } - } - } - if (keep && env->me_numdbs < n) - env->me_numdbs = n; -} - -/** End a transaction, except successful commit of a nested transaction. - * May be called twice for readonly txns: First reset it, then abort. 
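mdb_txn_begin() above also implements nested transactions: a write transaction may have a single child, which can be committed or aborted on its own before the parent commits, as long as the environment was not opened with MDB_WRITEMAP. A caller-side sketch with placeholder key/value content:

#include <lmdb.h>

/* Write one record in a child txn; the change only reaches disk when the
 * parent later commits. */
static int put_in_child(MDB_env *env, MDB_dbi dbi)
{
	MDB_txn *parent, *child;
	MDB_val key, data;
	int rc;

	if ((rc = mdb_txn_begin(env, NULL, 0, &parent)) != MDB_SUCCESS)
		return rc;
	if ((rc = mdb_txn_begin(env, parent, 0, &child)) != MDB_SUCCESS) {
		mdb_txn_abort(parent);
		return rc;
	}

	key.mv_size = 5;  key.mv_data = "hello";
	data.mv_size = 5; data.mv_data = "world";
	rc = mdb_put(child, dbi, &key, &data, 0);
	if (rc == MDB_SUCCESS)
		rc = mdb_txn_commit(child);   /* changes become visible to the parent */
	else
		mdb_txn_abort(child);

	if (rc == MDB_SUCCESS)
		return mdb_txn_commit(parent);
	mdb_txn_abort(parent);
	return rc;
}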
- * @param[in] txn the transaction handle to end - * @param[in] mode why and how to end the transaction - */ -static void -mdb_txn_end(MDB_txn *txn, unsigned mode) -{ - MDB_env *env = txn->mt_env; -#if MDB_DEBUG - static const char *const names[] = MDB_END_NAMES; -#endif - - /* Export or close DBI handles opened in this txn */ - mdb_dbis_update(txn, mode & MDB_END_UPDATE); - - DPRINTF(("%s txn %"Y"u%c %p on mdbenv %p, root page %"Y"u", - names[mode & MDB_END_OPMASK], - txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w', - (void *) txn, (void *)env, txn->mt_dbs[MAIN_DBI].md_root)); - - if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) { - if (txn->mt_u.reader) { - txn->mt_u.reader->mr_txnid = (txnid_t)-1; - if (!(env->me_flags & MDB_NOTLS)) { - txn->mt_u.reader = NULL; /* txn does not own reader */ - } else if (mode & MDB_END_SLOT) { - txn->mt_u.reader->mr_pid = 0; - txn->mt_u.reader = NULL; - } /* else txn owns the slot until it does MDB_END_SLOT */ - } - txn->mt_numdbs = 0; /* prevent further DBI activity */ - txn->mt_flags |= MDB_TXN_FINISHED; - - } else if (!F_ISSET(txn->mt_flags, MDB_TXN_FINISHED)) { - pgno_t *pghead = env->me_pghead; - - if (!(mode & MDB_END_UPDATE)) /* !(already closed cursors) */ - mdb_cursors_close(txn, 0); - if (!(env->me_flags & MDB_WRITEMAP)) { - mdb_dlist_free(txn); - } - - txn->mt_numdbs = 0; - txn->mt_flags = MDB_TXN_FINISHED; - - if (!txn->mt_parent) { - mdb_midl_shrink(&txn->mt_free_pgs); - env->me_free_pgs = txn->mt_free_pgs; - /* me_pgstate: */ - env->me_pghead = NULL; - env->me_pglast = 0; - - env->me_txn = NULL; - mode = 0; /* txn == env->me_txn0, do not free() it */ - - /* The writer mutex was locked in mdb_txn_begin. */ - if (env->me_txns) - UNLOCK_MUTEX(env->me_wmutex); - } else { - txn->mt_parent->mt_child = NULL; - txn->mt_parent->mt_flags &= ~MDB_TXN_HAS_CHILD; - env->me_pgstate = ((MDB_ntxn *)txn)->mnt_pgstate; - mdb_midl_free(txn->mt_free_pgs); - mdb_midl_free(txn->mt_spill_pgs); - free(txn->mt_u.dirty_list); - } - - mdb_midl_free(pghead); - } -#ifdef MDB_VL32 - if (!txn->mt_parent) { - MDB_ID3L el = env->me_rpages, tl = txn->mt_rpages; - unsigned i, x, n = tl[0].mid; - pthread_mutex_lock(&env->me_rpmutex); - for (i = 1; i <= n; i++) { - if (tl[i].mid & (MDB_RPAGE_CHUNK-1)) { - /* tmp overflow pages that we didn't share in env */ - munmap(tl[i].mptr, tl[i].mcnt * env->me_psize); - } else { - x = mdb_mid3l_search(el, tl[i].mid); - if (tl[i].mptr == el[x].mptr) { - el[x].mref--; - } else { - /* another tmp overflow page */ - munmap(tl[i].mptr, tl[i].mcnt * env->me_psize); - } - } - } - pthread_mutex_unlock(&env->me_rpmutex); - tl[0].mid = 0; - if (mode & MDB_END_FREE) - free(tl); - } -#endif - if (mode & MDB_END_FREE) - free(txn); -} - -void -mdb_txn_reset(MDB_txn *txn) -{ - if (txn == NULL) - return; - - /* This call is only valid for read-only txns */ - if (!(txn->mt_flags & MDB_TXN_RDONLY)) - return; - - mdb_txn_end(txn, MDB_END_RESET); -} - -void -mdb_txn_abort(MDB_txn *txn) -{ - if (txn == NULL) - return; - - if (txn->mt_child) - mdb_txn_abort(txn->mt_child); - - mdb_txn_end(txn, MDB_END_ABORT|MDB_END_SLOT|MDB_END_FREE); -} - -/** Save the freelist as of this transaction to the freeDB. - * This changes the freelist. Keep trying until it stabilizes. - * - * When (MDB_DEVEL) & 2, the changes do not affect #mdb_page_alloc(), - * it then uses the transaction's original snapshot of the freeDB. - */ -static int -mdb_freelist_save(MDB_txn *txn) -{ - /* env->me_pghead[] can grow and shrink during this call. 
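For read-only transactions the code above keeps the reader slot alive across mdb_txn_reset()/mdb_txn_renew(), which is cheaper than a full abort-and-begin when a thread reads repeatedly. A caller-side sketch using an integer loop counter as a placeholder key:

#include <lmdb.h>

/* Reuse one read-only txn for many lookups. */
static int read_loop(MDB_env *env, MDB_dbi dbi, int iterations)
{
	MDB_txn *txn;
	MDB_val key, data;
	int i, rc;

	if ((rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)) != MDB_SUCCESS)
		return rc;
	for (i = 0; i < iterations; i++) {
		key.mv_size = sizeof(i); key.mv_data = &i;
		rc = mdb_get(txn, dbi, &key, &data);
		if (rc != MDB_SUCCESS && rc != MDB_NOTFOUND)
			break;
		mdb_txn_reset(txn);                /* release the snapshot... */
		if ((rc = mdb_txn_renew(txn)) != MDB_SUCCESS)
			break;                         /* ...and re-arm it for the next read */
	}
	mdb_txn_abort(txn);                        /* finally frees the handle */
	return rc;
}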
- * env->me_pglast and txn->mt_free_pgs[] can only grow. - * Page numbers cannot disappear from txn->mt_free_pgs[]. - */ - MDB_cursor mc; - MDB_env *env = txn->mt_env; - int rc, maxfree_1pg = env->me_maxfree_1pg, more = 1; - txnid_t pglast = 0, head_id = 0; - pgno_t freecnt = 0, *free_pgs, *mop; - ssize_t head_room = 0, total_room = 0, mop_len, clean_limit; - - mdb_cursor_init(&mc, txn, FREE_DBI, NULL); - - if (env->me_pghead) { - /* Make sure first page of freeDB is touched and on freelist */ - rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST|MDB_PS_MODIFY); - if (rc && rc != MDB_NOTFOUND) - return rc; - } - - if (!env->me_pghead && txn->mt_loose_pgs) { - /* Put loose page numbers in mt_free_pgs, since - * we may be unable to return them to me_pghead. - */ - MDB_page *mp = txn->mt_loose_pgs; - if ((rc = mdb_midl_need(&txn->mt_free_pgs, txn->mt_loose_count)) != 0) - return rc; - for (; mp; mp = NEXT_LOOSE_PAGE(mp)) - mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno); - txn->mt_loose_pgs = NULL; - txn->mt_loose_count = 0; - } - - /* MDB_RESERVE cancels meminit in ovpage malloc (when no WRITEMAP) */ - clean_limit = (env->me_flags & (MDB_NOMEMINIT|MDB_WRITEMAP)) - ? SSIZE_MAX : maxfree_1pg; - - for (;;) { - /* Come back here after each Put() in case freelist changed */ - MDB_val key, data; - pgno_t *pgs; - ssize_t j; - - /* If using records from freeDB which we have not yet - * deleted, delete them and any we reserved for me_pghead. - */ - while (pglast < env->me_pglast) { - rc = mdb_cursor_first(&mc, &key, NULL); - if (rc) - return rc; - pglast = head_id = *(txnid_t *)key.mv_data; - total_room = head_room = 0; - mdb_tassert(txn, pglast <= env->me_pglast); - rc = mdb_cursor_del(&mc, 0); - if (rc) - return rc; - } - - /* Save the IDL of pages freed by this txn, to a single record */ - if (freecnt < txn->mt_free_pgs[0]) { - if (!freecnt) { - /* Make sure last page of freeDB is touched and on freelist */ - rc = mdb_page_search(&mc, NULL, MDB_PS_LAST|MDB_PS_MODIFY); - if (rc && rc != MDB_NOTFOUND) - return rc; - } - free_pgs = txn->mt_free_pgs; - /* Write to last page of freeDB */ - key.mv_size = sizeof(txn->mt_txnid); - key.mv_data = &txn->mt_txnid; - do { - freecnt = free_pgs[0]; - data.mv_size = MDB_IDL_SIZEOF(free_pgs); - rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE); - if (rc) - return rc; - /* Retry if mt_free_pgs[] grew during the Put() */ - free_pgs = txn->mt_free_pgs; - } while (freecnt < free_pgs[0]); - mdb_midl_sort(free_pgs); - memcpy(data.mv_data, free_pgs, data.mv_size); -#if (MDB_DEBUG) > 1 - { - unsigned int i = free_pgs[0]; - DPRINTF(("IDL write txn %"Y"u root %"Y"u num %u", - txn->mt_txnid, txn->mt_dbs[FREE_DBI].md_root, i)); - for (; i; i--) - DPRINTF(("IDL %"Y"u", free_pgs[i])); - } -#endif - continue; - } - - mop = env->me_pghead; - mop_len = (mop ? mop[0] : 0) + txn->mt_loose_count; - - /* Reserve records for me_pghead[]. Split it if multi-page, - * to avoid searching freeDB for a page range. Use keys in - * range [1,me_pglast]: Smaller than txnid of oldest reader. 
- */ - if (total_room >= mop_len) { - if (total_room == mop_len || --more < 0) - break; - } else if (head_room >= maxfree_1pg && head_id > 1) { - /* Keep current record (overflow page), add a new one */ - head_id--; - head_room = 0; - } - /* (Re)write {key = head_id, IDL length = head_room} */ - total_room -= head_room; - head_room = mop_len - total_room; - if (head_room > maxfree_1pg && head_id > 1) { - /* Overflow multi-page for part of me_pghead */ - head_room /= head_id; /* amortize page sizes */ - head_room += maxfree_1pg - head_room % (maxfree_1pg + 1); - } else if (head_room < 0) { - /* Rare case, not bothering to delete this record */ - head_room = 0; - } - key.mv_size = sizeof(head_id); - key.mv_data = &head_id; - data.mv_size = (head_room + 1) * sizeof(pgno_t); - rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE); - if (rc) - return rc; - /* IDL is initially empty, zero out at least the length */ - pgs = (pgno_t *)data.mv_data; - j = head_room > clean_limit ? head_room : 0; - do { - pgs[j] = 0; - } while (--j >= 0); - total_room += head_room; - } - - /* Return loose page numbers to me_pghead, though usually none are - * left at this point. The pages themselves remain in dirty_list. - */ - if (txn->mt_loose_pgs) { - MDB_page *mp = txn->mt_loose_pgs; - unsigned count = txn->mt_loose_count; - MDB_IDL loose; - /* Room for loose pages + temp IDL with same */ - if ((rc = mdb_midl_need(&env->me_pghead, 2*count+1)) != 0) - return rc; - mop = env->me_pghead; - loose = mop + MDB_IDL_ALLOCLEN(mop) - count; - for (count = 0; mp; mp = NEXT_LOOSE_PAGE(mp)) - loose[ ++count ] = mp->mp_pgno; - loose[0] = count; - mdb_midl_sort(loose); - mdb_midl_xmerge(mop, loose); - txn->mt_loose_pgs = NULL; - txn->mt_loose_count = 0; - mop_len = mop[0]; - } - - /* Fill in the reserved me_pghead records */ - rc = MDB_SUCCESS; - if (mop_len) { - MDB_val key, data; - - mop += mop_len; - rc = mdb_cursor_first(&mc, &key, &data); - for (; !rc; rc = mdb_cursor_next(&mc, &key, &data, MDB_NEXT)) { - txnid_t id = *(txnid_t *)key.mv_data; - ssize_t len = (ssize_t)(data.mv_size / sizeof(MDB_ID)) - 1; - MDB_ID save; - - mdb_tassert(txn, len >= 0 && id <= env->me_pglast); - key.mv_data = &id; - if (len > mop_len) { - len = mop_len; - data.mv_size = (len + 1) * sizeof(MDB_ID); - } - data.mv_data = mop -= len; - save = mop[0]; - mop[0] = len; - rc = mdb_cursor_put(&mc, &key, &data, MDB_CURRENT); - mop[0] = save; - if (rc || !(mop_len -= len)) - break; - } - } - return rc; -} - -/** Flush (some) dirty pages to the map, after clearing their dirty flag. - * @param[in] txn the transaction that's being committed - * @param[in] keep number of initial pages in dirty_list to keep dirty. - * @return 0 on success, non-zero on failure. 
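The freelist records written above have a simple shape: the key is the id of the transaction that freed the pages, and the data is an ID list — an array whose element 0 is the count, followed by the page numbers in descending order. A sketch of decoding one such record from an MDB_val, assuming the default build where txnid_t and pgno_t are size_t; applications normally never read the free DB directly, this is purely illustrative:

#include <stdio.h>
#include <lmdb.h>

typedef size_t pgno_t;   /* matches the default (non-VL32) build above */

static void print_freelist_record(const MDB_val *key, const MDB_val *data)
{
	size_t txnid = *(size_t *)key->mv_data;
	const pgno_t *idl = (const pgno_t *)data->mv_data;
	pgno_t count = idl[0], i;

	printf("txn %zu freed %zu pages:", txnid, (size_t)count);
	for (i = 1; i <= count; i++)
		printf(" %zu", (size_t)idl[i]);
	printf("\n");
}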
- */ -static int -mdb_page_flush(MDB_txn *txn, int keep) -{ - MDB_env *env = txn->mt_env; - MDB_ID2L dl = txn->mt_u.dirty_list; - unsigned psize = env->me_psize, j; - int i, pagecount = dl[0].mid, rc; - size_t size = 0; - off_t pos = 0; - pgno_t pgno = 0; - MDB_page *dp = NULL; -#ifdef _WIN32 - OVERLAPPED ov; -#else - struct iovec iov[MDB_COMMIT_PAGES]; - ssize_t wsize = 0, wres; - off_t wpos = 0, next_pos = 1; /* impossible pos, so pos != next_pos */ - int n = 0; -#endif - - j = i = keep; - - if (env->me_flags & MDB_WRITEMAP) { - /* Clear dirty flags */ - while (++i <= pagecount) { - dp = dl[i].mptr; - /* Don't flush this page yet */ - if (dp->mp_flags & (P_LOOSE|P_KEEP)) { - dp->mp_flags &= ~P_KEEP; - dl[++j] = dl[i]; - continue; - } - dp->mp_flags &= ~P_DIRTY; - } - goto done; - } - - /* Write the pages */ - for (;;) { - if (++i <= pagecount) { - dp = dl[i].mptr; - /* Don't flush this page yet */ - if (dp->mp_flags & (P_LOOSE|P_KEEP)) { - dp->mp_flags &= ~P_KEEP; - dl[i].mid = 0; - continue; - } - pgno = dl[i].mid; - /* clear dirty flag */ - dp->mp_flags &= ~P_DIRTY; - pos = pgno * psize; - size = psize; - if (IS_OVERFLOW(dp)) size *= dp->mp_pages; - } -#ifdef _WIN32 - else break; - - /* Windows actually supports scatter/gather I/O, but only on - * unbuffered file handles. Since we're relying on the OS page - * cache for all our data, that's self-defeating. So we just - * write pages one at a time. We use the ov structure to set - * the write offset, to at least save the overhead of a Seek - * system call. - */ - DPRINTF(("committing page %"Z"u", pgno)); - memset(&ov, 0, sizeof(ov)); - ov.Offset = pos & 0xffffffff; - ov.OffsetHigh = pos >> 16 >> 16; - if (!WriteFile(env->me_fd, dp, size, NULL, &ov)) { - rc = ErrCode(); - DPRINTF(("WriteFile: %d", rc)); - return rc; - } -#else - /* Write up to MDB_COMMIT_PAGES dirty pages at a time. */ - if (pos!=next_pos || n==MDB_COMMIT_PAGES || wsize+size>MAX_WRITE) { - if (n) { -retry_write: - /* Write previous page(s) */ -#ifdef MDB_USE_PWRITEV - wres = pwritev(env->me_fd, iov, n, wpos); -#else - if (n == 1) { - wres = pwrite(env->me_fd, iov[0].iov_base, wsize, wpos); - } else { -retry_seek: - if (lseek(env->me_fd, wpos, SEEK_SET) == -1) { - rc = ErrCode(); - if (rc == EINTR) - goto retry_seek; - DPRINTF(("lseek: %s", strerror(rc))); - return rc; - } - wres = writev(env->me_fd, iov, n); - } -#endif - if (wres != wsize) { - if (wres < 0) { - rc = ErrCode(); - if (rc == EINTR) - goto retry_write; - DPRINTF(("Write error: %s", strerror(rc))); - } else { - rc = EIO; /* TODO: Use which error code? */ - DPUTS("short write, filesystem full?"); - } - return rc; - } - n = 0; - } - if (i > pagecount) - break; - wpos = pos; - wsize = 0; - } - DPRINTF(("committing page %"Y"u", pgno)); - next_pos = pos + size; - iov[n].iov_len = size; - iov[n].iov_base = (char *)dp; - wsize += size; - n++; -#endif /* _WIN32 */ - } -#ifdef MDB_VL32 - if (pgno > txn->mt_last_pgno) - txn->mt_last_pgno = pgno; -#endif - - /* MIPS has cache coherency issues, this is a no-op everywhere else - * Note: for any size >= on-chip cache size, entire on-chip cache is - * flushed. 
- */ - CACHEFLUSH(env->me_map, txn->mt_next_pgno * env->me_psize, DCACHE); - - for (i = keep; ++i <= pagecount; ) { - dp = dl[i].mptr; - /* This is a page we skipped above */ - if (!dl[i].mid) { - dl[++j] = dl[i]; - dl[j].mid = dp->mp_pgno; - continue; - } - mdb_dpage_free(env, dp); - } - -done: - i--; - txn->mt_dirty_room += i - j; - dl[0].mid = j; - return MDB_SUCCESS; -} - -int -mdb_txn_commit(MDB_txn *txn) -{ - int rc; - unsigned int i, end_mode; - MDB_env *env; - - if (txn == NULL) - return EINVAL; - - /* mdb_txn_end() mode for a commit which writes nothing */ - end_mode = MDB_END_EMPTY_COMMIT|MDB_END_UPDATE|MDB_END_SLOT|MDB_END_FREE; - - if (txn->mt_child) { - rc = mdb_txn_commit(txn->mt_child); - if (rc) - goto fail; - } - - env = txn->mt_env; - - if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) { - goto done; - } - - if (txn->mt_flags & (MDB_TXN_FINISHED|MDB_TXN_ERROR)) { - DPUTS("txn has failed/finished, can't commit"); - if (txn->mt_parent) - txn->mt_parent->mt_flags |= MDB_TXN_ERROR; - rc = MDB_BAD_TXN; - goto fail; - } - - if (txn->mt_parent) { - MDB_txn *parent = txn->mt_parent; - MDB_page **lp; - MDB_ID2L dst, src; - MDB_IDL pspill; - unsigned x, y, len, ps_len; - - /* Append our free list to parent's */ - rc = mdb_midl_append_list(&parent->mt_free_pgs, txn->mt_free_pgs); - if (rc) - goto fail; - mdb_midl_free(txn->mt_free_pgs); - /* Failures after this must either undo the changes - * to the parent or set MDB_TXN_ERROR in the parent. - */ - - parent->mt_next_pgno = txn->mt_next_pgno; - parent->mt_flags = txn->mt_flags; - - /* Merge our cursors into parent's and close them */ - mdb_cursors_close(txn, 1); - - /* Update parent's DB table. */ - memcpy(parent->mt_dbs, txn->mt_dbs, txn->mt_numdbs * sizeof(MDB_db)); - parent->mt_numdbs = txn->mt_numdbs; - parent->mt_dbflags[FREE_DBI] = txn->mt_dbflags[FREE_DBI]; - parent->mt_dbflags[MAIN_DBI] = txn->mt_dbflags[MAIN_DBI]; - for (i=CORE_DBS; i<txn->mt_numdbs; i++) { - /* preserve parent's DB_NEW status */ - x = parent->mt_dbflags[i] & DB_NEW; - parent->mt_dbflags[i] = txn->mt_dbflags[i] | x; - } - - dst = parent->mt_u.dirty_list; - src = txn->mt_u.dirty_list; - /* Remove anything in our dirty list from parent's spill list */ - if ((pspill = parent->mt_spill_pgs) && (ps_len = pspill[0])) { - x = y = ps_len; - pspill[0] = (pgno_t)-1; - /* Mark our dirty pages as deleted in parent spill list */ - for (i=0, len=src[0].mid; ++i <= len; ) { - MDB_ID pn = src[i].mid << 1; - while (pn > pspill[x]) - x--; - if (pn == pspill[x]) { - pspill[x] = 1; - y = --x; - } - } - /* Squash deleted pagenums if we deleted any */ - for (x=y; ++x <= ps_len; ) - if (!(pspill[x] & 1)) - pspill[++y] = pspill[x]; - pspill[0] = y; - } - - /* Remove anything in our spill list from parent's dirty list */ - if (txn->mt_spill_pgs && txn->mt_spill_pgs[0]) { - for (i=1; i<=txn->mt_spill_pgs[0]; i++) { - MDB_ID pn = txn->mt_spill_pgs[i]; - if (pn & 1) - continue; /* deleted spillpg */ - pn >>= 1; - y = mdb_mid2l_search(dst, pn); - if (y <= dst[0].mid && dst[y].mid == pn) { - free(dst[y].mptr); - while (y < dst[0].mid) { - dst[y] = dst[y+1]; - y++; - } - dst[0].mid--; - } - } - } - - /* Find len = length of merging our dirty list with parent's */ - x = dst[0].mid; - dst[0].mid = 0; /* simplify loops */ - if (parent->mt_parent) { - len = x + src[0].mid; - y = mdb_mid2l_search(src, dst[x].mid + 1) - 1; - for (i = x; y && i; y--) { - pgno_t yp = src[y].mid; - while (yp < dst[i].mid) - i--; - if (yp == dst[i].mid) { - i--; - len--; - } - } - } else { /* Simplify the above for 
single-ancestor case */ - len = MDB_IDL_UM_MAX - txn->mt_dirty_room; - } - /* Merge our dirty list with parent's */ - y = src[0].mid; - for (i = len; y; dst[i--] = src[y--]) { - pgno_t yp = src[y].mid; - while (yp < dst[x].mid) - dst[i--] = dst[x--]; - if (yp == dst[x].mid) - free(dst[x--].mptr); - } - mdb_tassert(txn, i == x); - dst[0].mid = len; - free(txn->mt_u.dirty_list); - parent->mt_dirty_room = txn->mt_dirty_room; - if (txn->mt_spill_pgs) { - if (parent->mt_spill_pgs) { - /* TODO: Prevent failure here, so parent does not fail */ - rc = mdb_midl_append_list(&parent->mt_spill_pgs, txn->mt_spill_pgs); - if (rc) - parent->mt_flags |= MDB_TXN_ERROR; - mdb_midl_free(txn->mt_spill_pgs); - mdb_midl_sort(parent->mt_spill_pgs); - } else { - parent->mt_spill_pgs = txn->mt_spill_pgs; - } - } - - /* Append our loose page list to parent's */ - for (lp = &parent->mt_loose_pgs; *lp; lp = &NEXT_LOOSE_PAGE(*lp)) - ; - *lp = txn->mt_loose_pgs; - parent->mt_loose_count += txn->mt_loose_count; - - parent->mt_child = NULL; - mdb_midl_free(((MDB_ntxn *)txn)->mnt_pgstate.mf_pghead); - free(txn); - return rc; - } - - if (txn != env->me_txn) { - DPUTS("attempt to commit unknown transaction"); - rc = EINVAL; - goto fail; - } - - mdb_cursors_close(txn, 0); - - if (!txn->mt_u.dirty_list[0].mid && - !(txn->mt_flags & (MDB_TXN_DIRTY|MDB_TXN_SPILLS))) - goto done; - - DPRINTF(("committing txn %"Y"u %p on mdbenv %p, root page %"Y"u", - txn->mt_txnid, (void*)txn, (void*)env, txn->mt_dbs[MAIN_DBI].md_root)); - - /* Update DB root pointers */ - if (txn->mt_numdbs > CORE_DBS) { - MDB_cursor mc; - MDB_dbi i; - MDB_val data; - data.mv_size = sizeof(MDB_db); - - mdb_cursor_init(&mc, txn, MAIN_DBI, NULL); - for (i = CORE_DBS; i < txn->mt_numdbs; i++) { - if (txn->mt_dbflags[i] & DB_DIRTY) { - if (TXN_DBI_CHANGED(txn, i)) { - rc = MDB_BAD_DBI; - goto fail; - } - data.mv_data = &txn->mt_dbs[i]; - rc = mdb_cursor_put(&mc, &txn->mt_dbxs[i].md_name, &data, - F_SUBDATA); - if (rc) - goto fail; - } - } - } - - rc = mdb_freelist_save(txn); - if (rc) - goto fail; - - mdb_midl_free(env->me_pghead); - env->me_pghead = NULL; - mdb_midl_shrink(&txn->mt_free_pgs); - -#if (MDB_DEBUG) > 2 - mdb_audit(txn); -#endif - - if ((rc = mdb_page_flush(txn, 0))) - goto fail; - if (!F_ISSET(txn->mt_flags, MDB_TXN_NOSYNC) && - (rc = mdb_env_sync0(env, 0, txn->mt_next_pgno))) - goto fail; - if ((rc = mdb_env_write_meta(txn))) - goto fail; - end_mode = MDB_END_COMMITTED|MDB_END_UPDATE; - -done: - mdb_txn_end(txn, end_mode); - return MDB_SUCCESS; - -fail: - mdb_txn_abort(txn); - return rc; -} - -/** Read the environment parameters of a DB environment before - * mapping it into memory. - * @param[in] env the environment handle - * @param[out] meta address of where to store the meta information - * @return 0 on success, non-zero on failure. - */ -static int ESECT -mdb_env_read_header(MDB_env *env, MDB_meta *meta) -{ - MDB_metabuf pbuf; - MDB_page *p; - MDB_meta *m; - int i, rc, off; - enum { Size = sizeof(pbuf) }; - - /* We don't know the page size yet, so use a minimum value. - * Read both meta pages so we can use the latest one. - */ - - for (i=off=0; i<NUM_METAS; i++, off += meta->mm_psize) { -#ifdef _WIN32 - DWORD len; - OVERLAPPED ov; - memset(&ov, 0, sizeof(ov)); - ov.Offset = off; - rc = ReadFile(env->me_fd, &pbuf, Size, &len, &ov) ? (int)len : -1; - if (rc == -1 && ErrCode() == ERROR_HANDLE_EOF) - rc = 0; -#else - rc = pread(env->me_fd, &pbuf, Size, off); -#endif - if (rc != Size) { - if (rc == 0 && off == 0) - return ENOENT; - rc = rc < 0 ? 
(int) ErrCode() : MDB_INVALID; - DPRINTF(("read: %s", mdb_strerror(rc))); - return rc; - } - - p = (MDB_page *)&pbuf; - - if (!F_ISSET(p->mp_flags, P_META)) { - DPRINTF(("page %"Y"u not a meta page", p->mp_pgno)); - return MDB_INVALID; - } - - m = METADATA(p); - if (m->mm_magic != MDB_MAGIC) { - DPUTS("meta has invalid magic"); - return MDB_INVALID; - } - - if (m->mm_version != MDB_DATA_VERSION) { - DPRINTF(("database is version %u, expected version %u", - m->mm_version, MDB_DATA_VERSION)); - return MDB_VERSION_MISMATCH; - } - - if (off == 0 || m->mm_txnid > meta->mm_txnid) - *meta = *m; - } - return 0; -} - -/** Fill in most of the zeroed #MDB_meta for an empty database environment */ -static void ESECT -mdb_env_init_meta0(MDB_env *env, MDB_meta *meta) -{ - meta->mm_magic = MDB_MAGIC; - meta->mm_version = MDB_DATA_VERSION; - meta->mm_mapsize = env->me_mapsize; - meta->mm_psize = env->me_psize; - meta->mm_last_pg = NUM_METAS-1; - meta->mm_flags = env->me_flags & 0xffff; - meta->mm_flags |= MDB_INTEGERKEY; /* this is mm_dbs[FREE_DBI].md_flags */ - meta->mm_dbs[FREE_DBI].md_root = P_INVALID; - meta->mm_dbs[MAIN_DBI].md_root = P_INVALID; -} - -/** Write the environment parameters of a freshly created DB environment. - * @param[in] env the environment handle - * @param[in] meta the #MDB_meta to write - * @return 0 on success, non-zero on failure. - */ -static int ESECT -mdb_env_init_meta(MDB_env *env, MDB_meta *meta) -{ - MDB_page *p, *q; - int rc; - unsigned int psize; -#ifdef _WIN32 - DWORD len; - OVERLAPPED ov; - memset(&ov, 0, sizeof(ov)); -#define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \ - ov.Offset = pos; \ - rc = WriteFile(fd, ptr, size, &len, &ov); } while(0) -#else - int len; -#define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \ - len = pwrite(fd, ptr, size, pos); \ - if (len == -1 && ErrCode() == EINTR) continue; \ - rc = (len >= 0); break; } while(1) -#endif - - DPUTS("writing new meta page"); - - psize = env->me_psize; - - p = calloc(NUM_METAS, psize); - if (!p) - return ENOMEM; - p->mp_pgno = 0; - p->mp_flags = P_META; - *(MDB_meta *)METADATA(p) = *meta; - - q = (MDB_page *)((char *)p + psize); - q->mp_pgno = 1; - q->mp_flags = P_META; - *(MDB_meta *)METADATA(q) = *meta; - - DO_PWRITE(rc, env->me_fd, p, psize * NUM_METAS, len, 0); - if (!rc) - rc = ErrCode(); - else if ((unsigned) len == psize * NUM_METAS) - rc = MDB_SUCCESS; - else - rc = ENOSPC; - free(p); - return rc; -} - -/** Update the environment info to commit a transaction. - * @param[in] txn the transaction that's being committed - * @return 0 on success, non-zero on failure. 
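The header-reading code above keeps whichever of the two meta pages carries the larger transaction id, and mdb_env_pick_meta later selects the live meta the same way. A minimal standalone sketch of that double-meta selection, assuming a simplified struct and illustrative names (not the real MDB_meta layout):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the on-disk meta page: only the txnid matters here. */
struct meta { uint64_t txnid; };

/* Return the newer of the two meta slots; the boolean comparison is used
 * directly as the array index, as mdb_env_pick_meta does. */
static const struct meta *pick_meta(const struct meta m[2])
{
    return &m[m[0].txnid < m[1].txnid];
}

int main(void)
{
    struct meta metas[2] = { { 41 }, { 42 } };
    printf("newest txnid: %llu\n", (unsigned long long)pick_meta(metas)->txnid);
    return 0;
}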
- */ -static int -mdb_env_write_meta(MDB_txn *txn) -{ - MDB_env *env; - MDB_meta meta, metab, *mp; - unsigned flags; - mdb_size_t mapsize; - off_t off; - int rc, len, toggle; - char *ptr; - HANDLE mfd; -#ifdef _WIN32 - OVERLAPPED ov; -#else - int r2; -#endif - - toggle = txn->mt_txnid & 1; - DPRINTF(("writing meta page %d for root page %"Y"u", - toggle, txn->mt_dbs[MAIN_DBI].md_root)); - - env = txn->mt_env; - flags = txn->mt_flags | env->me_flags; - mp = env->me_metas[toggle]; - mapsize = env->me_metas[toggle ^ 1]->mm_mapsize; - /* Persist any increases of mapsize config */ - if (mapsize < env->me_mapsize) - mapsize = env->me_mapsize; - - if (flags & MDB_WRITEMAP) { - mp->mm_mapsize = mapsize; - mp->mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI]; - mp->mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; - mp->mm_last_pg = txn->mt_next_pgno - 1; -#if (__GNUC__ * 100 + __GNUC_MINOR__ >= 404) && /* TODO: portability */ \ - !(defined(__i386__) || defined(__x86_64__)) - /* LY: issue a memory barrier, if not x86. ITS#7969 */ - __sync_synchronize(); -#endif - mp->mm_txnid = txn->mt_txnid; - if (!(flags & (MDB_NOMETASYNC|MDB_NOSYNC))) { - unsigned meta_size = env->me_psize; - rc = (env->me_flags & MDB_MAPASYNC) ? MS_ASYNC : MS_SYNC; - ptr = (char *)mp - PAGEHDRSZ; -#ifndef _WIN32 /* POSIX msync() requires ptr = start of OS page */ - r2 = (ptr - env->me_map) & (env->me_os_psize - 1); - ptr -= r2; - meta_size += r2; -#endif - if (MDB_MSYNC(ptr, meta_size, rc)) { - rc = ErrCode(); - goto fail; - } - } - goto done; - } - metab.mm_txnid = mp->mm_txnid; - metab.mm_last_pg = mp->mm_last_pg; - - meta.mm_mapsize = mapsize; - meta.mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI]; - meta.mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; - meta.mm_last_pg = txn->mt_next_pgno - 1; - meta.mm_txnid = txn->mt_txnid; - - off = offsetof(MDB_meta, mm_mapsize); - ptr = (char *)&meta + off; - len = sizeof(MDB_meta) - off; - off += (char *)mp - env->me_map; - - /* Write to the SYNC fd */ - mfd = (flags & (MDB_NOSYNC|MDB_NOMETASYNC)) ? env->me_fd : env->me_mfd; -#ifdef _WIN32 - { - memset(&ov, 0, sizeof(ov)); - ov.Offset = off; - if (!WriteFile(mfd, ptr, len, (DWORD *)&rc, &ov)) - rc = -1; - } -#else -retry_write: - rc = pwrite(mfd, ptr, len, off); -#endif - if (rc != len) { - rc = rc < 0 ? ErrCode() : EIO; -#ifndef _WIN32 - if (rc == EINTR) - goto retry_write; -#endif - DPUTS("write failed, disk error?"); - /* On a failure, the pagecache still contains the new data. - * Write some old data back, to prevent it from being used. - * Use the non-SYNC fd; we know it will fail anyway. - */ - meta.mm_last_pg = metab.mm_last_pg; - meta.mm_txnid = metab.mm_txnid; -#ifdef _WIN32 - memset(&ov, 0, sizeof(ov)); - ov.Offset = off; - WriteFile(env->me_fd, ptr, len, NULL, &ov); -#else - r2 = pwrite(env->me_fd, ptr, len, off); - (void)r2; /* Silence warnings. We don't care about pwrite's return value */ -#endif -fail: - env->me_flags |= MDB_FATAL_ERROR; - return rc; - } - /* MIPS has cache coherency issues, this is a no-op everywhere else */ - CACHEFLUSH(env->me_map + off, len, DCACHE); -done: - /* Memory ordering issues are irrelevant; since the entire writer - * is wrapped by wmutex, all of these changes will become visible - * after the wmutex is unlocked. Since the DB is multi-version, - * readers will get consistent data regardless of how fresh or - * how stale their view of these values is. - */ - if (env->me_txns) - env->me_txns->mti_txnid = txn->mt_txnid; - - return MDB_SUCCESS; -} - -/** Check both meta pages to see which one is newer. 
- * @param[in] env the environment handle - * @return newest #MDB_meta. - */ -static MDB_meta * -mdb_env_pick_meta(const MDB_env *env) -{ - MDB_meta *const *metas = env->me_metas; - return metas[ metas[0]->mm_txnid < metas[1]->mm_txnid ]; -} - -int ESECT -mdb_env_create(MDB_env **env) -{ - MDB_env *e; - - e = calloc(1, sizeof(MDB_env)); - if (!e) - return ENOMEM; - - e->me_maxreaders = DEFAULT_READERS; - e->me_maxdbs = e->me_numdbs = CORE_DBS; - e->me_fd = INVALID_HANDLE_VALUE; - e->me_lfd = INVALID_HANDLE_VALUE; - e->me_mfd = INVALID_HANDLE_VALUE; -#ifdef MDB_USE_POSIX_SEM - e->me_rmutex = SEM_FAILED; - e->me_wmutex = SEM_FAILED; -#elif defined MDB_USE_SYSV_SEM - e->me_rmutex->semid = -1; - e->me_wmutex->semid = -1; -#endif - e->me_pid = getpid(); - GET_PAGESIZE(e->me_os_psize); - VGMEMP_CREATE(e,0,0); - *env = e; - return MDB_SUCCESS; -} - -#ifdef _WIN32 -/** @brief Map a result from an NTAPI call to WIN32. */ -static DWORD -mdb_nt2win32(NTSTATUS st) -{ - OVERLAPPED o = {0}; - DWORD br; - o.Internal = st; - GetOverlappedResult(NULL, &o, &br, FALSE); - return GetLastError(); -} -#endif - -static int ESECT -mdb_env_map(MDB_env *env, void *addr) -{ - MDB_page *p; - unsigned int flags = env->me_flags; -#ifdef _WIN32 - int rc; - int access = SECTION_MAP_READ; - HANDLE mh; - void *map; - SIZE_T msize; - ULONG pageprot = PAGE_READONLY, secprot, alloctype; - - if (flags & MDB_WRITEMAP) { - access |= SECTION_MAP_WRITE; - pageprot = PAGE_READWRITE; - } - if (flags & MDB_RDONLY) { - secprot = PAGE_READONLY; - msize = 0; - alloctype = 0; - } else { - secprot = PAGE_READWRITE; - msize = env->me_mapsize; - alloctype = MEM_RESERVE; - } - - rc = NtCreateSection(&mh, access, NULL, NULL, secprot, SEC_RESERVE, env->me_fd); - if (rc) - return mdb_nt2win32(rc); - map = addr; -#ifdef MDB_VL32 - msize = NUM_METAS * env->me_psize; -#endif - rc = NtMapViewOfSection(mh, GetCurrentProcess(), &map, 0, 0, NULL, &msize, ViewUnmap, alloctype, pageprot); -#ifdef MDB_VL32 - env->me_fmh = mh; -#else - NtClose(mh); -#endif - if (rc) - return mdb_nt2win32(rc); - env->me_map = map; -#else -#ifdef MDB_VL32 - (void) flags; - env->me_map = mmap(addr, NUM_METAS * env->me_psize, PROT_READ, MAP_SHARED, - env->me_fd, 0); - if (env->me_map == MAP_FAILED) { - env->me_map = NULL; - return ErrCode(); - } -#else - int prot = PROT_READ; - if (flags & MDB_WRITEMAP) { - prot |= PROT_WRITE; - if (ftruncate(env->me_fd, env->me_mapsize) < 0) - return ErrCode(); - } - env->me_map = mmap(addr, env->me_mapsize, prot, MAP_SHARED, - env->me_fd, 0); - if (env->me_map == MAP_FAILED) { - env->me_map = NULL; - return ErrCode(); - } - - if (flags & MDB_NORDAHEAD) { - /* Turn off readahead. It's harmful when the DB is larger than RAM. */ -#ifdef MADV_RANDOM - madvise(env->me_map, env->me_mapsize, MADV_RANDOM); -#else -#ifdef POSIX_MADV_RANDOM - posix_madvise(env->me_map, env->me_mapsize, POSIX_MADV_RANDOM); -#endif /* POSIX_MADV_RANDOM */ -#endif /* MADV_RANDOM */ - } -#endif /* _WIN32 */ - - /* Can happen because the address argument to mmap() is just a - * hint. mmap() can pick another, e.g. if the range is in use. - * The MAP_FIXED flag would prevent that, but then mmap could - * instead unmap existing pages to make room for the new map. - */ - if (addr && env->me_map != addr) - return EBUSY; /* TODO: Make a new MDB_* error code? 
*/ -#endif - - p = (MDB_page *)env->me_map; - env->me_metas[0] = METADATA(p); - env->me_metas[1] = (MDB_meta *)((char *)env->me_metas[0] + env->me_psize); - - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_mapsize(MDB_env *env, mdb_size_t size) -{ - /* If env is already open, caller is responsible for making - * sure there are no active txns. - */ - if (env->me_map) { - MDB_meta *meta; -#ifndef MDB_VL32 - void *old; - int rc; -#endif - if (env->me_txn) - return EINVAL; - meta = mdb_env_pick_meta(env); - if (!size) - size = meta->mm_mapsize; - { - /* Silently round up to minimum if the size is too small */ - mdb_size_t minsize = (meta->mm_last_pg + 1) * env->me_psize; - if (size < minsize) - size = minsize; - } -#ifndef MDB_VL32 - /* For MDB_VL32 this bit is a noop since we dynamically remap - * chunks of the DB anyway. - */ - munmap(env->me_map, env->me_mapsize); - env->me_mapsize = size; - old = (env->me_flags & MDB_FIXEDMAP) ? env->me_map : NULL; - rc = mdb_env_map(env, old); - if (rc) - return rc; -#endif /* !MDB_VL32 */ - } - env->me_mapsize = size; - if (env->me_psize) - env->me_maxpg = env->me_mapsize / env->me_psize; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs) -{ - if (env->me_map) - return EINVAL; - env->me_maxdbs = dbs + CORE_DBS; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_maxreaders(MDB_env *env, unsigned int readers) -{ - if (env->me_map || readers < 1) - return EINVAL; - env->me_maxreaders = readers; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers) -{ - if (!env || !readers) - return EINVAL; - *readers = env->me_maxreaders; - return MDB_SUCCESS; -} - -static int ESECT -mdb_fsize(HANDLE fd, mdb_size_t *size) -{ -#ifdef _WIN32 - LARGE_INTEGER fsize; - - if (!GetFileSizeEx(fd, &fsize)) - return ErrCode(); - - *size = fsize.QuadPart; -#else - struct stat st; - - if (fstat(fd, &st)) - return ErrCode(); - - *size = st.st_size; -#endif - return MDB_SUCCESS; -} - -#ifdef BROKEN_FDATASYNC -#include <sys/utsname.h> -#include <sys/vfs.h> -#endif - -/** Further setup required for opening an LMDB environment - */ -static int ESECT -mdb_env_open2(MDB_env *env) -{ - unsigned int flags = env->me_flags; - int i, newenv = 0, rc; - MDB_meta meta; - -#ifdef _WIN32 - /* See if we should use QueryLimited */ - rc = GetVersion(); - if ((rc & 0xff) > 5) - env->me_pidquery = MDB_PROCESS_QUERY_LIMITED_INFORMATION; - else - env->me_pidquery = PROCESS_QUERY_INFORMATION; -#endif /* _WIN32 */ - -#ifdef BROKEN_FDATASYNC - /* ext3/ext4 fdatasync is broken on some older Linux kernels. - * https://lkml.org/lkml/2012/9/3/83 - * Kernels after 3.6-rc6 are known good. - * https://lkml.org/lkml/2012/9/10/556 - * See if the DB is on ext3/ext4, then check for new enough kernel - * Kernels 2.6.32.60, 2.6.34.15, 3.2.30, and 3.5.4 are also known - * to be patched. 
- */ - { - struct statfs st; - fstatfs(env->me_fd, &st); - while (st.f_type == 0xEF53) { - struct utsname uts; - int i; - uname(&uts); - if (uts.release[0] < '3') { - if (!strncmp(uts.release, "2.6.32.", 7)) { - i = atoi(uts.release+7); - if (i >= 60) - break; /* 2.6.32.60 and newer is OK */ - } else if (!strncmp(uts.release, "2.6.34.", 7)) { - i = atoi(uts.release+7); - if (i >= 15) - break; /* 2.6.34.15 and newer is OK */ - } - } else if (uts.release[0] == '3') { - i = atoi(uts.release+2); - if (i > 5) - break; /* 3.6 and newer is OK */ - if (i == 5) { - i = atoi(uts.release+4); - if (i >= 4) - break; /* 3.5.4 and newer is OK */ - } else if (i == 2) { - i = atoi(uts.release+4); - if (i >= 30) - break; /* 3.2.30 and newer is OK */ - } - } else { /* 4.x and newer is OK */ - break; - } - env->me_flags |= MDB_FSYNCONLY; - break; - } - } -#endif - - if ((i = mdb_env_read_header(env, &meta)) != 0) { - if (i != ENOENT) - return i; - DPUTS("new mdbenv"); - newenv = 1; - env->me_psize = env->me_os_psize; - if (env->me_psize > MAX_PAGESIZE) - env->me_psize = MAX_PAGESIZE; - memset(&meta, 0, sizeof(meta)); - mdb_env_init_meta0(env, &meta); - meta.mm_mapsize = DEFAULT_MAPSIZE; - } else { - env->me_psize = meta.mm_psize; - } - - /* Was a mapsize configured? */ - if (!env->me_mapsize) { - env->me_mapsize = meta.mm_mapsize; - } - { - /* Make sure mapsize >= committed data size. Even when using - * mm_mapsize, which could be broken in old files (ITS#7789). - */ - mdb_size_t minsize = (meta.mm_last_pg + 1) * meta.mm_psize; - if (env->me_mapsize < minsize) - env->me_mapsize = minsize; - } - meta.mm_mapsize = env->me_mapsize; - - if (newenv && !(flags & MDB_FIXEDMAP)) { - /* mdb_env_map() may grow the datafile. Write the metapages - * first, so the file will be valid if initialization fails. - * Except with FIXEDMAP, since we do not yet know mm_address. - * We could fill in mm_address later, but then a different - * program might end up doing that - one with a memory layout - * and map address which does not suit the main program. - */ - rc = mdb_env_init_meta(env, &meta); - if (rc) - return rc; - newenv = 0; - } -#ifdef _WIN32 - /* For FIXEDMAP, make sure the file is non-empty before we attempt to map it */ - if (newenv) { - char dummy = 0; - DWORD len; - rc = WriteFile(env->me_fd, &dummy, 1, &len, NULL); - if (!rc) { - rc = ErrCode(); - return rc; - } - } -#endif - - rc = mdb_env_map(env, (flags & MDB_FIXEDMAP) ? 
meta.mm_address : NULL); - if (rc) - return rc; - - if (newenv) { - if (flags & MDB_FIXEDMAP) - meta.mm_address = env->me_map; - i = mdb_env_init_meta(env, &meta); - if (i != MDB_SUCCESS) { - return i; - } - } - - env->me_maxfree_1pg = (env->me_psize - PAGEHDRSZ) / sizeof(pgno_t) - 1; - env->me_nodemax = (((env->me_psize - PAGEHDRSZ) / MDB_MINKEYS) & -2) - - sizeof(indx_t); -#if !(MDB_MAXKEYSIZE) - env->me_maxkey = env->me_nodemax - (NODESIZE + sizeof(MDB_db)); -#endif - env->me_maxpg = env->me_mapsize / env->me_psize; - -#if MDB_DEBUG - { - MDB_meta *meta = mdb_env_pick_meta(env); - MDB_db *db = &meta->mm_dbs[MAIN_DBI]; - - DPRINTF(("opened database version %u, pagesize %u", - meta->mm_version, env->me_psize)); - DPRINTF(("using meta page %d", (int) (meta->mm_txnid & 1))); - DPRINTF(("depth: %u", db->md_depth)); - DPRINTF(("entries: %"Y"u", db->md_entries)); - DPRINTF(("branch pages: %"Y"u", db->md_branch_pages)); - DPRINTF(("leaf pages: %"Y"u", db->md_leaf_pages)); - DPRINTF(("overflow pages: %"Y"u", db->md_overflow_pages)); - DPRINTF(("root: %"Y"u", db->md_root)); - } -#endif - - return MDB_SUCCESS; -} - - -/** Release a reader thread's slot in the reader lock table. - * This function is called automatically when a thread exits. - * @param[in] ptr This points to the slot in the reader lock table. - */ -static void -mdb_env_reader_dest(void *ptr) -{ - MDB_reader *reader = ptr; - - reader->mr_pid = 0; -} - -#ifdef _WIN32 -/** Junk for arranging thread-specific callbacks on Windows. This is - * necessarily platform and compiler-specific. Windows supports up - * to 1088 keys. Let's assume nobody opens more than 64 environments - * in a single process, for now. They can override this if needed. - */ -#ifndef MAX_TLS_KEYS -#define MAX_TLS_KEYS 64 -#endif -static pthread_key_t mdb_tls_keys[MAX_TLS_KEYS]; -static int mdb_tls_nkeys; - -static void NTAPI mdb_tls_callback(PVOID module, DWORD reason, PVOID ptr) -{ - int i; - switch(reason) { - case DLL_PROCESS_ATTACH: break; - case DLL_THREAD_ATTACH: break; - case DLL_THREAD_DETACH: - for (i=0; i<mdb_tls_nkeys; i++) { - MDB_reader *r = pthread_getspecific(mdb_tls_keys[i]); - if (r) { - mdb_env_reader_dest(r); - } - } - break; - case DLL_PROCESS_DETACH: break; - } -} -#ifdef __GNUC__ -#ifdef _WIN64 -const PIMAGE_TLS_CALLBACK mdb_tls_cbp __attribute__((section (".CRT$XLB"))) = mdb_tls_callback; -#else -PIMAGE_TLS_CALLBACK mdb_tls_cbp __attribute__((section (".CRT$XLB"))) = mdb_tls_callback; -#endif -#else -#ifdef _WIN64 -/* Force some symbol references. - * _tls_used forces the linker to create the TLS directory if not already done - * mdb_tls_cbp prevents whole-program-optimizer from dropping the symbol. - */ -#pragma comment(linker, "/INCLUDE:_tls_used") -#pragma comment(linker, "/INCLUDE:mdb_tls_cbp") -#pragma const_seg(".CRT$XLB") -extern const PIMAGE_TLS_CALLBACK mdb_tls_cbp; -const PIMAGE_TLS_CALLBACK mdb_tls_cbp = mdb_tls_callback; -#pragma const_seg() -#else /* _WIN32 */ -#pragma comment(linker, "/INCLUDE:__tls_used") -#pragma comment(linker, "/INCLUDE:_mdb_tls_cbp") -#pragma data_seg(".CRT$XLB") -PIMAGE_TLS_CALLBACK mdb_tls_cbp = mdb_tls_callback; -#pragma data_seg() -#endif /* WIN 32/64 */ -#endif /* !__GNUC__ */ -#endif - -/** Downgrade the exclusive lock on the region back to shared */ -static int ESECT -mdb_env_share_locks(MDB_env *env, int *excl) -{ - int rc = 0; - MDB_meta *meta = mdb_env_pick_meta(env); - - env->me_txns->mti_txnid = meta->mm_txnid; - -#ifdef _WIN32 - { - OVERLAPPED ov; - /* First acquire a shared lock. 
The Unlock will - * then release the existing exclusive lock. - */ - memset(&ov, 0, sizeof(ov)); - if (!LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) { - rc = ErrCode(); - } else { - UnlockFile(env->me_lfd, 0, 0, 1, 0); - *excl = 0; - } - } -#else - { - struct flock lock_info; - /* The shared lock replaces the existing lock */ - memset((void *)&lock_info, 0, sizeof(lock_info)); - lock_info.l_type = F_RDLCK; - lock_info.l_whence = SEEK_SET; - lock_info.l_start = 0; - lock_info.l_len = 1; - while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) && - (rc = ErrCode()) == EINTR) ; - *excl = rc ? -1 : 0; /* error may mean we lost the lock */ - } -#endif - - return rc; -} - -/** Try to get exclusive lock, otherwise shared. - * Maintain *excl = -1: no/unknown lock, 0: shared, 1: exclusive. - */ -static int ESECT -mdb_env_excl_lock(MDB_env *env, int *excl) -{ - int rc = 0; -#ifdef _WIN32 - if (LockFile(env->me_lfd, 0, 0, 1, 0)) { - *excl = 1; - } else { - OVERLAPPED ov; - memset(&ov, 0, sizeof(ov)); - if (LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) { - *excl = 0; - } else { - rc = ErrCode(); - } - } -#else - struct flock lock_info; - memset((void *)&lock_info, 0, sizeof(lock_info)); - lock_info.l_type = F_WRLCK; - lock_info.l_whence = SEEK_SET; - lock_info.l_start = 0; - lock_info.l_len = 1; - while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) && - (rc = ErrCode()) == EINTR) ; - if (!rc) { - *excl = 1; - } else -# ifndef MDB_USE_POSIX_MUTEX - if (*excl < 0) /* always true when MDB_USE_POSIX_MUTEX */ -# endif - { - lock_info.l_type = F_RDLCK; - while ((rc = fcntl(env->me_lfd, F_SETLKW, &lock_info)) && - (rc = ErrCode()) == EINTR) ; - if (rc == 0) - *excl = 0; - } -#endif - return rc; -} - -#ifdef MDB_USE_HASH -/* - * hash_64 - 64 bit Fowler/Noll/Vo-0 FNV-1a hash code - * - * @(#) $Revision: 5.1 $ - * @(#) $Id: hash_64a.c,v 5.1 2009/06/30 09:01:38 chongo Exp $ - * @(#) $Source: /usr/local/src/cmd/fnv/RCS/hash_64a.c,v $ - * - * http://www.isthe.com/chongo/tech/comp/fnv/index.html - * - *** - * - * Please do not copyright this code. This code is in the public domain. - * - * LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO - * EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF - * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR - * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR - * PERFORMANCE OF THIS SOFTWARE. - * - * By: - * chongo <Landon Curt Noll> /\oo/\ - * http://www.isthe.com/chongo/ - * - * Share and Enjoy! :-) - */ - -typedef unsigned long long mdb_hash_t; -#define MDB_HASH_INIT ((mdb_hash_t)0xcbf29ce484222325ULL) - -/** perform a 64 bit Fowler/Noll/Vo FNV-1a hash on a buffer - * @param[in] val value to hash - * @param[in] hval initial value for hash - * @return 64 bit hash - * - * NOTE: To use the recommended 64 bit FNV-1a hash, use MDB_HASH_INIT as the - * hval arg on the first call. 
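The mdb_hash_val routine just below implements this 64-bit FNV-1a hash; its shift-and-add sequence (hval shifted by 1, 4, 5, 7, 8 and 40, plus hval itself) is an expanded multiply by the FNV prime 0x100000001b3. A minimal standalone sketch of the same hash over an arbitrary buffer, written with an explicit multiply and illustrative names:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define FNV1A_64_INIT  0xcbf29ce484222325ULL   /* same value as MDB_HASH_INIT */
#define FNV1A_64_PRIME 0x100000001b3ULL

/* FNV-1a: xor in each octet, then multiply by the 64-bit FNV prime. */
static uint64_t fnv1a_64(const void *buf, size_t len, uint64_t hval)
{
    const unsigned char *s = buf, *end = s + len;
    while (s < end) {
        hval ^= *s++;
        hval *= FNV1A_64_PRIME;
    }
    return hval;
}

int main(void)
{
    const char id[] = "lockfile-identity";      /* illustrative input */
    printf("%016llx\n",
           (unsigned long long)fnv1a_64(id, sizeof(id) - 1, FNV1A_64_INIT));
    return 0;
}

As the surrounding comments note, the hash and its encoding only have to turn the lock file's identity into a short, stable name for the mutexes or semaphores, so reversibility does not matter.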
- */ -static mdb_hash_t -mdb_hash_val(MDB_val *val, mdb_hash_t hval) -{ - unsigned char *s = (unsigned char *)val->mv_data; /* unsigned string */ - unsigned char *end = s + val->mv_size; - /* - * FNV-1a hash each octet of the string - */ - while (s < end) { - /* xor the bottom with the current octet */ - hval ^= (mdb_hash_t)*s++; - - /* multiply by the 64 bit FNV magic prime mod 2^64 */ - hval += (hval << 1) + (hval << 4) + (hval << 5) + - (hval << 7) + (hval << 8) + (hval << 40); - } - /* return our new hash value */ - return hval; -} - -/** Hash the string and output the encoded hash. - * This uses modified RFC1924 Ascii85 encoding to accommodate systems with - * very short name limits. We don't care about the encoding being reversible, - * we just want to preserve as many bits of the input as possible in a - * small printable string. - * @param[in] str string to hash - * @param[out] encbuf an array of 11 chars to hold the hash - */ -static const char mdb_a85[]= "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"; - -static void ESECT -mdb_pack85(unsigned long l, char *out) -{ - int i; - - for (i=0; i<5; i++) { - *out++ = mdb_a85[l % 85]; - l /= 85; - } -} - -static void ESECT -mdb_hash_enc(MDB_val *val, char *encbuf) -{ - mdb_hash_t h = mdb_hash_val(val, MDB_HASH_INIT); - - mdb_pack85(h, encbuf); - mdb_pack85(h>>32, encbuf+5); - encbuf[10] = '\0'; -} -#endif - -/** Open and/or initialize the lock region for the environment. - * @param[in] env The LMDB environment. - * @param[in] lpath The pathname of the file used for the lock region. - * @param[in] mode The Unix permissions for the file, if we create it. - * @param[in,out] excl In -1, out lock type: -1 none, 0 shared, 1 exclusive - * @return 0 on success, non-zero on failure. - */ -static int ESECT -mdb_env_setup_locks(MDB_env *env, char *lpath, int mode, int *excl) -{ -#ifdef _WIN32 -# define MDB_ERRCODE_ROFS ERROR_WRITE_PROTECT -#else -# define MDB_ERRCODE_ROFS EROFS -#ifdef O_CLOEXEC /* Linux: Open file and set FD_CLOEXEC atomically */ -# define MDB_CLOEXEC O_CLOEXEC -#else - int fdflags; -# define MDB_CLOEXEC 0 -#endif -#endif -#ifdef MDB_USE_SYSV_SEM - int semid; - union semun semu; -#endif - int rc; - off_t size, rsize; - -#ifdef _WIN32 - wchar_t *wlpath; - rc = utf8_to_utf16(lpath, -1, &wlpath, NULL); - if (rc) - return rc; - env->me_lfd = CreateFileW(wlpath, GENERIC_READ|GENERIC_WRITE, - FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, - FILE_ATTRIBUTE_NORMAL, NULL); - free(wlpath); -#else - env->me_lfd = open(lpath, O_RDWR|O_CREAT|MDB_CLOEXEC, mode); -#endif - if (env->me_lfd == INVALID_HANDLE_VALUE) { - rc = ErrCode(); - if (rc == MDB_ERRCODE_ROFS && (env->me_flags & MDB_RDONLY)) { - return MDB_SUCCESS; - } - goto fail_errno; - } -#if ! ((MDB_CLOEXEC) || defined(_WIN32)) - /* Lose record locks when exec*() */ - if ((fdflags = fcntl(env->me_lfd, F_GETFD) | FD_CLOEXEC) >= 0) - fcntl(env->me_lfd, F_SETFD, fdflags); -#endif - - if (!(env->me_flags & MDB_NOTLS)) { - rc = pthread_key_create(&env->me_txkey, mdb_env_reader_dest); - if (rc) - goto fail; - env->me_flags |= MDB_ENV_TXKEY; -#ifdef _WIN32 - /* Windows TLS callbacks need help finding their TLS info. */ - if (mdb_tls_nkeys >= MAX_TLS_KEYS) { - rc = MDB_TLS_FULL; - goto fail; - } - mdb_tls_keys[mdb_tls_nkeys++] = env->me_txkey; -#endif - } - - /* Try to get exclusive lock. If we succeed, then - * nobody is using the lock region and we should initialize it. 
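mdb_env_excl_lock and mdb_env_share_locks above build this exclusive-then-shared scheme out of fcntl byte-range locks on the first byte of the lock file (LockFile/LockFileEx on Windows). A minimal POSIX-only sketch of taking the write lock and then downgrading it; the file name is illustrative:

#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

/* Lock byte 0 of the file: F_WRLCK for exclusive, F_RDLCK for shared.
 * With F_SETLK the call fails instead of blocking when another process
 * already holds a conflicting lock. */
static int lock_byte0(int fd, short type)
{
    struct flock fl = { 0 };
    fl.l_type = type;
    fl.l_whence = SEEK_SET;
    fl.l_start = 0;
    fl.l_len = 1;
    return fcntl(fd, F_SETLK, &fl);
}

int main(void)
{
    int fd = open("demo.lock", O_RDWR | O_CREAT, 0644);   /* illustrative path */
    if (fd < 0)
        return 1;
    if (lock_byte0(fd, F_WRLCK) == 0)
        puts("got the exclusive lock, safe to initialize the region");
    /* A later read lock on the same byte replaces the write lock in a single
     * fcntl call, which is how mdb_env_share_locks downgrades. */
    lock_byte0(fd, F_RDLCK);
    close(fd);
    return 0;
}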
- */ - if ((rc = mdb_env_excl_lock(env, excl))) goto fail; - -#ifdef _WIN32 - size = GetFileSize(env->me_lfd, NULL); -#else - size = lseek(env->me_lfd, 0, SEEK_END); - if (size == -1) goto fail_errno; -#endif - rsize = (env->me_maxreaders-1) * sizeof(MDB_reader) + sizeof(MDB_txninfo); - if (size < rsize && *excl > 0) { -#ifdef _WIN32 - if (SetFilePointer(env->me_lfd, rsize, NULL, FILE_BEGIN) != (DWORD)rsize - || !SetEndOfFile(env->me_lfd)) - goto fail_errno; -#else - if (ftruncate(env->me_lfd, rsize) != 0) goto fail_errno; -#endif - } else { - rsize = size; - size = rsize - sizeof(MDB_txninfo); - env->me_maxreaders = size/sizeof(MDB_reader) + 1; - } - { -#ifdef _WIN32 - HANDLE mh; - mh = CreateFileMapping(env->me_lfd, NULL, PAGE_READWRITE, - 0, 0, NULL); - if (!mh) goto fail_errno; - env->me_txns = MapViewOfFileEx(mh, FILE_MAP_WRITE, 0, 0, rsize, NULL); - CloseHandle(mh); - if (!env->me_txns) goto fail_errno; -#else - void *m = mmap(NULL, rsize, PROT_READ|PROT_WRITE, MAP_SHARED, - env->me_lfd, 0); - if (m == MAP_FAILED) goto fail_errno; - env->me_txns = m; -#endif - } - if (*excl > 0) { -#ifdef _WIN32 - BY_HANDLE_FILE_INFORMATION stbuf; - struct { - DWORD volume; - DWORD nhigh; - DWORD nlow; - } idbuf; - MDB_val val; - char encbuf[11]; - - if (!mdb_sec_inited) { - InitializeSecurityDescriptor(&mdb_null_sd, - SECURITY_DESCRIPTOR_REVISION); - SetSecurityDescriptorDacl(&mdb_null_sd, TRUE, 0, FALSE); - mdb_all_sa.nLength = sizeof(SECURITY_ATTRIBUTES); - mdb_all_sa.bInheritHandle = FALSE; - mdb_all_sa.lpSecurityDescriptor = &mdb_null_sd; - mdb_sec_inited = 1; - } - if (!GetFileInformationByHandle(env->me_lfd, &stbuf)) goto fail_errno; - idbuf.volume = stbuf.dwVolumeSerialNumber; - idbuf.nhigh = stbuf.nFileIndexHigh; - idbuf.nlow = stbuf.nFileIndexLow; - val.mv_data = &idbuf; - val.mv_size = sizeof(idbuf); - mdb_hash_enc(&val, encbuf); - sprintf(env->me_txns->mti_rmname, "Global\\MDBr%s", encbuf); - sprintf(env->me_txns->mti_wmname, "Global\\MDBw%s", encbuf); - env->me_rmutex = CreateMutexA(&mdb_all_sa, FALSE, env->me_txns->mti_rmname); - if (!env->me_rmutex) goto fail_errno; - env->me_wmutex = CreateMutexA(&mdb_all_sa, FALSE, env->me_txns->mti_wmname); - if (!env->me_wmutex) goto fail_errno; -#elif defined(MDB_USE_POSIX_SEM) - struct stat stbuf; - struct { - dev_t dev; - ino_t ino; - } idbuf; - MDB_val val; - char encbuf[11]; - -#if defined(__NetBSD__) -#define MDB_SHORT_SEMNAMES 1 /* limited to 14 chars */ -#endif - if (fstat(env->me_lfd, &stbuf)) goto fail_errno; - idbuf.dev = stbuf.st_dev; - idbuf.ino = stbuf.st_ino; - val.mv_data = &idbuf; - val.mv_size = sizeof(idbuf); - mdb_hash_enc(&val, encbuf); -#ifdef MDB_SHORT_SEMNAMES - encbuf[9] = '\0'; /* drop name from 15 chars to 14 chars */ -#endif - sprintf(env->me_txns->mti_rmname, "/MDBr%s", encbuf); - sprintf(env->me_txns->mti_wmname, "/MDBw%s", encbuf); - /* Clean up after a previous run, if needed: Try to - * remove both semaphores before doing anything else. 
- */ - sem_unlink(env->me_txns->mti_rmname); - sem_unlink(env->me_txns->mti_wmname); - env->me_rmutex = sem_open(env->me_txns->mti_rmname, - O_CREAT|O_EXCL, mode, 1); - if (env->me_rmutex == SEM_FAILED) goto fail_errno; - env->me_wmutex = sem_open(env->me_txns->mti_wmname, - O_CREAT|O_EXCL, mode, 1); - if (env->me_wmutex == SEM_FAILED) goto fail_errno; -#elif defined(MDB_USE_SYSV_SEM) - unsigned short vals[2] = {1, 1}; - key_t key = ftok(lpath, 'M'); - if (key == -1) - goto fail_errno; - semid = semget(key, 2, (mode & 0777) | IPC_CREAT); - if (semid < 0) - goto fail_errno; - semu.array = vals; - if (semctl(semid, 0, SETALL, semu) < 0) - goto fail_errno; - env->me_txns->mti_semid = semid; -#else /* MDB_USE_POSIX_MUTEX: */ - pthread_mutexattr_t mattr; - - /* Solaris needs this before initing a robust mutex. Otherwise - * it may skip the init and return EBUSY "seems someone already - * inited" or EINVAL "it was inited differently". - */ - memset(env->me_txns->mti_rmutex, 0, sizeof(*env->me_txns->mti_rmutex)); - memset(env->me_txns->mti_wmutex, 0, sizeof(*env->me_txns->mti_wmutex)); - - if ((rc = pthread_mutexattr_init(&mattr)) - || (rc = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED)) -#ifdef MDB_ROBUST_SUPPORTED - || (rc = pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST)) -#endif - || (rc = pthread_mutex_init(env->me_txns->mti_rmutex, &mattr)) - || (rc = pthread_mutex_init(env->me_txns->mti_wmutex, &mattr))) - goto fail; - pthread_mutexattr_destroy(&mattr); -#endif /* _WIN32 || ... */ - - env->me_txns->mti_magic = MDB_MAGIC; - env->me_txns->mti_format = MDB_LOCK_FORMAT; - env->me_txns->mti_txnid = 0; - env->me_txns->mti_numreaders = 0; - - } else { -#ifdef MDB_USE_SYSV_SEM - struct semid_ds buf; -#endif - if (env->me_txns->mti_magic != MDB_MAGIC) { - DPUTS("lock region has invalid magic"); - rc = MDB_INVALID; - goto fail; - } - if (env->me_txns->mti_format != MDB_LOCK_FORMAT) { - DPRINTF(("lock region has format+version 0x%x, expected 0x%x", - env->me_txns->mti_format, MDB_LOCK_FORMAT)); - rc = MDB_VERSION_MISMATCH; - goto fail; - } - rc = ErrCode(); - if (rc && rc != EACCES && rc != EAGAIN) { - goto fail; - } -#ifdef _WIN32 - env->me_rmutex = OpenMutexA(SYNCHRONIZE, FALSE, env->me_txns->mti_rmname); - if (!env->me_rmutex) goto fail_errno; - env->me_wmutex = OpenMutexA(SYNCHRONIZE, FALSE, env->me_txns->mti_wmname); - if (!env->me_wmutex) goto fail_errno; -#elif defined(MDB_USE_POSIX_SEM) - env->me_rmutex = sem_open(env->me_txns->mti_rmname, 0); - if (env->me_rmutex == SEM_FAILED) goto fail_errno; - env->me_wmutex = sem_open(env->me_txns->mti_wmname, 0); - if (env->me_wmutex == SEM_FAILED) goto fail_errno; -#elif defined(MDB_USE_SYSV_SEM) - semid = env->me_txns->mti_semid; - semu.buf = &buf; - /* check for read access */ - if (semctl(semid, 0, IPC_STAT, semu) < 0) - goto fail_errno; - /* check for write access */ - if (semctl(semid, 0, IPC_SET, semu) < 0) - goto fail_errno; -#endif - } -#ifdef MDB_USE_SYSV_SEM - env->me_rmutex->semid = semid; - env->me_wmutex->semid = semid; - env->me_rmutex->semnum = 0; - env->me_wmutex->semnum = 1; - env->me_rmutex->locked = &env->me_txns->mti_rlocked; - env->me_wmutex->locked = &env->me_txns->mti_wlocked; -#endif -#ifdef MDB_VL32 -#ifdef _WIN32 - env->me_rpmutex = CreateMutex(NULL, FALSE, NULL); -#else - pthread_mutex_init(&env->me_rpmutex, NULL); -#endif -#endif - - return MDB_SUCCESS; - -fail_errno: - rc = ErrCode(); -fail: - return rc; -} - - /** The name of the lock file in the DB environment */ -#define LOCKNAME "/lock.mdb" - 
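On platforms that fall through to the MDB_USE_POSIX_MUTEX branch above, the reader and writer mutexes live inside the shared lock region and are initialized process-shared (and robust where available). A minimal sketch of that initialization pattern in isolation; it assumes a platform providing pthread_mutexattr_setrobust (the sources guard this with MDB_ROBUST_SUPPORTED), and the anonymous mapping here is only inherited across fork, whereas LMDB maps the lock file so unrelated processes share the same region:

#define _DEFAULT_SOURCE
#include <pthread.h>
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
    /* Place the mutex in shared, writable memory. */
    pthread_mutex_t *m = mmap(NULL, sizeof(*m), PROT_READ | PROT_WRITE,
                              MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (m == MAP_FAILED)
        return 1;

    pthread_mutexattr_t mattr;
    pthread_mutexattr_init(&mattr);
    pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
    pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST);
    pthread_mutex_init(m, &mattr);
    pthread_mutexattr_destroy(&mattr);

    pthread_mutex_lock(m);
    puts("holding the process-shared mutex");
    pthread_mutex_unlock(m);

    munmap(m, sizeof(*m));
    return 0;
}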
/** The name of the data file in the DB environment */ -#define DATANAME "/data.mdb" - /** The suffix of the lock file when no subdir is used */ -#define LOCKSUFF "-lock" - /** Only a subset of the @ref mdb_env flags can be changed - * at runtime. Changing other flags requires closing the - * environment and re-opening it with the new flags. - */ -#define CHANGEABLE (MDB_NOSYNC|MDB_NOMETASYNC|MDB_MAPASYNC|MDB_NOMEMINIT) -#define CHANGELESS (MDB_FIXEDMAP|MDB_NOSUBDIR|MDB_RDONLY| \ - MDB_WRITEMAP|MDB_NOTLS|MDB_NOLOCK|MDB_NORDAHEAD) - -#if VALID_FLAGS & PERSISTENT_FLAGS & (CHANGEABLE|CHANGELESS) -# error "Persistent DB flags & env flags overlap, but both go in mm_flags" -#endif - -int ESECT -mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mdb_mode_t mode) -{ - int oflags, rc, len, excl = -1; - char *lpath, *dpath; -#ifdef _WIN32 - wchar_t *wpath; -#endif - - if (env->me_fd!=INVALID_HANDLE_VALUE || (flags & ~(CHANGEABLE|CHANGELESS))) - return EINVAL; - -#ifdef MDB_VL32 - if (flags & MDB_WRITEMAP) { - /* silently ignore WRITEMAP in 32 bit mode */ - flags ^= MDB_WRITEMAP; - } - if (flags & MDB_FIXEDMAP) { - /* cannot support FIXEDMAP */ - return EINVAL; - } -#endif - - len = strlen(path); - if (flags & MDB_NOSUBDIR) { - rc = len + sizeof(LOCKSUFF) + len + 1; - } else { - rc = len + sizeof(LOCKNAME) + len + sizeof(DATANAME); - } - lpath = malloc(rc); - if (!lpath) - return ENOMEM; - if (flags & MDB_NOSUBDIR) { - dpath = lpath + len + sizeof(LOCKSUFF); - sprintf(lpath, "%s" LOCKSUFF, path); - strcpy(dpath, path); - } else { - dpath = lpath + len + sizeof(LOCKNAME); - sprintf(lpath, "%s" LOCKNAME, path); - sprintf(dpath, "%s" DATANAME, path); - } - - rc = MDB_SUCCESS; - flags |= env->me_flags; - if (flags & MDB_RDONLY) { - /* silently ignore WRITEMAP when we're only getting read access */ - flags &= ~MDB_WRITEMAP; - } else { - if (!((env->me_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX)) && - (env->me_dirty_list = calloc(MDB_IDL_UM_SIZE, sizeof(MDB_ID2))))) - rc = ENOMEM; - } -#ifdef MDB_VL32 - if (!rc) { - env->me_rpages = malloc(MDB_ERPAGE_SIZE * sizeof(MDB_ID3)); - if (!env->me_rpages) { - rc = ENOMEM; - goto leave; - } - env->me_rpages[0].mid = 0; - env->me_rpcheck = MDB_ERPAGE_SIZE/2; - } -#endif - env->me_flags = flags |= MDB_ENV_ACTIVE; - if (rc) - goto leave; - - env->me_path = strdup(path); - env->me_dbxs = calloc(env->me_maxdbs, sizeof(MDB_dbx)); - env->me_dbflags = calloc(env->me_maxdbs, sizeof(uint16_t)); - env->me_dbiseqs = calloc(env->me_maxdbs, sizeof(unsigned int)); - if (!(env->me_dbxs && env->me_path && env->me_dbflags && env->me_dbiseqs)) { - rc = ENOMEM; - goto leave; - } - env->me_dbxs[FREE_DBI].md_cmp = mdb_cmp_long; /* aligned MDB_INTEGERKEY */ - - /* For RDONLY, get lockfile after we know datafile exists */ - if (!(flags & (MDB_RDONLY|MDB_NOLOCK))) { - rc = mdb_env_setup_locks(env, lpath, mode, &excl); - if (rc) - goto leave; - } - -#ifdef _WIN32 - if (F_ISSET(flags, MDB_RDONLY)) { - oflags = GENERIC_READ; - len = OPEN_EXISTING; - } else { - oflags = GENERIC_READ|GENERIC_WRITE; - len = OPEN_ALWAYS; - } - mode = FILE_ATTRIBUTE_NORMAL; - rc = utf8_to_utf16(dpath, -1, &wpath, NULL); - if (rc) - goto leave; - env->me_fd = CreateFileW(wpath, oflags, FILE_SHARE_READ|FILE_SHARE_WRITE, - NULL, len, mode, NULL); - free(wpath); -#else - if (F_ISSET(flags, MDB_RDONLY)) - oflags = O_RDONLY; - else - oflags = O_RDWR | O_CREAT; - - env->me_fd = open(dpath, oflags, mode); -#endif - if (env->me_fd == INVALID_HANDLE_VALUE) { - rc = ErrCode(); - goto leave; - } - - if ((flags & 
(MDB_RDONLY|MDB_NOLOCK)) == MDB_RDONLY) { - rc = mdb_env_setup_locks(env, lpath, mode, &excl); - if (rc) - goto leave; - } - - if ((rc = mdb_env_open2(env)) == MDB_SUCCESS) { - if (flags & (MDB_RDONLY|MDB_WRITEMAP)) { - env->me_mfd = env->me_fd; - } else { - /* Synchronous fd for meta writes. Needed even with - * MDB_NOSYNC/MDB_NOMETASYNC, in case these get reset. - */ -#ifdef _WIN32 - len = OPEN_EXISTING; - rc = utf8_to_utf16(dpath, -1, &wpath, NULL); - if (rc) - goto leave; - env->me_mfd = CreateFileW(wpath, oflags, - FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, len, - mode | FILE_FLAG_WRITE_THROUGH, NULL); - free(wpath); -#else - oflags &= ~O_CREAT; - env->me_mfd = open(dpath, oflags | MDB_DSYNC, mode); -#endif - if (env->me_mfd == INVALID_HANDLE_VALUE) { - rc = ErrCode(); - goto leave; - } - } - DPRINTF(("opened dbenv %p", (void *) env)); - if (excl > 0) { - rc = mdb_env_share_locks(env, &excl); - if (rc) - goto leave; - } - if (!(flags & MDB_RDONLY)) { - MDB_txn *txn; - int tsize = sizeof(MDB_txn), size = tsize + env->me_maxdbs * - (sizeof(MDB_db)+sizeof(MDB_cursor *)+sizeof(unsigned int)+1); - if ((env->me_pbuf = calloc(1, env->me_psize)) && - (txn = calloc(1, size))) - { - txn->mt_dbs = (MDB_db *)((char *)txn + tsize); - txn->mt_cursors = (MDB_cursor **)(txn->mt_dbs + env->me_maxdbs); - txn->mt_dbiseqs = (unsigned int *)(txn->mt_cursors + env->me_maxdbs); - txn->mt_dbflags = (unsigned char *)(txn->mt_dbiseqs + env->me_maxdbs); - txn->mt_env = env; -#ifdef MDB_VL32 - txn->mt_rpages = malloc(MDB_TRPAGE_SIZE * sizeof(MDB_ID3)); - if (!txn->mt_rpages) { - free(txn); - rc = ENOMEM; - goto leave; - } - txn->mt_rpages[0].mid = 0; - txn->mt_rpcheck = MDB_TRPAGE_SIZE/2; -#endif - txn->mt_dbxs = env->me_dbxs; - txn->mt_flags = MDB_TXN_FINISHED; - env->me_txn0 = txn; - } else { - rc = ENOMEM; - } - } - } - -leave: - if (rc) { - mdb_env_close0(env, excl); - } - free(lpath); - return rc; -} - -/** Destroy resources from mdb_env_open(), clear our readers & DBIs */ -static void ESECT -mdb_env_close0(MDB_env *env, int excl) -{ - int i; - - if (!(env->me_flags & MDB_ENV_ACTIVE)) - return; - - /* Doing this here since me_dbxs may not exist during mdb_env_close */ - if (env->me_dbxs) { - for (i = env->me_maxdbs; --i >= CORE_DBS; ) - free(env->me_dbxs[i].md_name.mv_data); - free(env->me_dbxs); - } - - free(env->me_pbuf); - free(env->me_dbiseqs); - free(env->me_dbflags); - free(env->me_path); - free(env->me_dirty_list); -#ifdef MDB_VL32 - if (env->me_txn0 && env->me_txn0->mt_rpages) - free(env->me_txn0->mt_rpages); - { unsigned int x; - for (x=1; x<=env->me_rpages[0].mid; x++) - munmap(env->me_rpages[x].mptr, env->me_rpages[x].mcnt * env->me_psize); - } - free(env->me_rpages); -#endif - free(env->me_txn0); - mdb_midl_free(env->me_free_pgs); - - if (env->me_flags & MDB_ENV_TXKEY) { - pthread_key_delete(env->me_txkey); -#ifdef _WIN32 - /* Delete our key from the global list */ - for (i=0; i<mdb_tls_nkeys; i++) - if (mdb_tls_keys[i] == env->me_txkey) { - mdb_tls_keys[i] = mdb_tls_keys[mdb_tls_nkeys-1]; - mdb_tls_nkeys--; - break; - } -#endif - } - - if (env->me_map) { -#ifdef MDB_VL32 - munmap(env->me_map, NUM_METAS*env->me_psize); -#else - munmap(env->me_map, env->me_mapsize); -#endif - } - if (env->me_mfd != env->me_fd && env->me_mfd != INVALID_HANDLE_VALUE) - (void) close(env->me_mfd); - if (env->me_fd != INVALID_HANDLE_VALUE) - (void) close(env->me_fd); - if (env->me_txns) { - MDB_PID_T pid = env->me_pid; - /* Clearing readers is done in this function because - * me_txkey with its destructor must be 
disabled first. - * - * We skip the the reader mutex, so we touch only - * data owned by this process (me_close_readers and - * our readers), and clear each reader atomically. - */ - for (i = env->me_close_readers; --i >= 0; ) - if (env->me_txns->mti_readers[i].mr_pid == pid) - env->me_txns->mti_readers[i].mr_pid = 0; -#ifdef _WIN32 - if (env->me_rmutex) { - CloseHandle(env->me_rmutex); - if (env->me_wmutex) CloseHandle(env->me_wmutex); - } - /* Windows automatically destroys the mutexes when - * the last handle closes. - */ -#elif defined(MDB_USE_POSIX_SEM) - if (env->me_rmutex != SEM_FAILED) { - sem_close(env->me_rmutex); - if (env->me_wmutex != SEM_FAILED) - sem_close(env->me_wmutex); - /* If we have the filelock: If we are the - * only remaining user, clean up semaphores. - */ - if (excl == 0) - mdb_env_excl_lock(env, &excl); - if (excl > 0) { - sem_unlink(env->me_txns->mti_rmname); - sem_unlink(env->me_txns->mti_wmname); - } - } -#elif defined(MDB_USE_SYSV_SEM) - if (env->me_rmutex->semid != -1) { - /* If we have the filelock: If we are the - * only remaining user, clean up semaphores. - */ - if (excl == 0) - mdb_env_excl_lock(env, &excl); - if (excl > 0) - semctl(env->me_rmutex->semid, 0, IPC_RMID); - } -#endif - munmap((void *)env->me_txns, (env->me_maxreaders-1)*sizeof(MDB_reader)+sizeof(MDB_txninfo)); - } - if (env->me_lfd != INVALID_HANDLE_VALUE) { -#ifdef _WIN32 - if (excl >= 0) { - /* Unlock the lockfile. Windows would have unlocked it - * after closing anyway, but not necessarily at once. - */ - UnlockFile(env->me_lfd, 0, 0, 1, 0); - } -#endif - (void) close(env->me_lfd); - } -#ifdef MDB_VL32 -#ifdef _WIN32 - if (env->me_fmh) CloseHandle(env->me_fmh); - if (env->me_rpmutex) CloseHandle(env->me_rpmutex); -#else - pthread_mutex_destroy(&env->me_rpmutex); -#endif -#endif - - env->me_flags &= ~(MDB_ENV_ACTIVE|MDB_ENV_TXKEY); -} - -void ESECT -mdb_env_close(MDB_env *env) -{ - MDB_page *dp; - - if (env == NULL) - return; - - VGMEMP_DESTROY(env); - while ((dp = env->me_dpages) != NULL) { - VGMEMP_DEFINED(&dp->mp_next, sizeof(dp->mp_next)); - env->me_dpages = dp->mp_next; - free(dp); - } - - mdb_env_close0(env, 0); - free(env); -} - -/** Compare two items pointing at aligned mdb_size_t's */ -static int -mdb_cmp_long(const MDB_val *a, const MDB_val *b) -{ - return (*(mdb_size_t *)a->mv_data < *(mdb_size_t *)b->mv_data) ? -1 : - *(mdb_size_t *)a->mv_data > *(mdb_size_t *)b->mv_data; -} - -/** Compare two items pointing at aligned unsigned int's. - * - * This is also set as #MDB_INTEGERDUP|#MDB_DUPFIXED's #MDB_dbx.%md_dcmp, - * but #mdb_cmp_clong() is called instead if the data type is mdb_size_t. - */ -static int -mdb_cmp_int(const MDB_val *a, const MDB_val *b) -{ - return (*(unsigned int *)a->mv_data < *(unsigned int *)b->mv_data) ? -1 : - *(unsigned int *)a->mv_data > *(unsigned int *)b->mv_data; -} - -/** Compare two items pointing at unsigned ints of unknown alignment. - * Nodes and keys are guaranteed to be 2-byte aligned. 
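mdb_cmp_long and mdb_cmp_int above use the (a < b) ? -1 : a > b idiom rather than returning a subtraction, because for unsigned (or full-range) values the difference wraps and the sign is lost. A tiny standalone illustration with hypothetical values:

#include <stdio.h>
#include <stddef.h>

/* Three-way compare of two size_t values without overflow: returns -1, 0 or 1. */
static int cmp_size(size_t a, size_t b)
{
    return (a < b) ? -1 : a > b;
}

int main(void)
{
    size_t big = (size_t)-1, small = 1;   /* small - big wraps to a huge value
                                             instead of going negative */
    printf("%d %d %d\n", cmp_size(small, big), cmp_size(big, big), cmp_size(big, small));
    return 0;
}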
- */ -static int -mdb_cmp_cint(const MDB_val *a, const MDB_val *b) -{ -#if BYTE_ORDER == LITTLE_ENDIAN - unsigned short *u, *c; - int x; - - u = (unsigned short *) ((char *) a->mv_data + a->mv_size); - c = (unsigned short *) ((char *) b->mv_data + a->mv_size); - do { - x = *--u - *--c; - } while(!x && u > (unsigned short *)a->mv_data); - return x; -#else - unsigned short *u, *c, *end; - int x; - - end = (unsigned short *) ((char *) a->mv_data + a->mv_size); - u = (unsigned short *)a->mv_data; - c = (unsigned short *)b->mv_data; - do { - x = *u++ - *c++; - } while(!x && u < end); - return x; -#endif -} - -/** Compare two items lexically */ -static int -mdb_cmp_memn(const MDB_val *a, const MDB_val *b) -{ - int diff; - ssize_t len_diff; - unsigned int len; - - len = a->mv_size; - len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size; - if (len_diff > 0) { - len = b->mv_size; - len_diff = 1; - } - - diff = memcmp(a->mv_data, b->mv_data, len); - return diff ? diff : len_diff<0 ? -1 : len_diff; -} - -/** Compare two items in reverse byte order */ -static int -mdb_cmp_memnr(const MDB_val *a, const MDB_val *b) -{ - const unsigned char *p1, *p2, *p1_lim; - ssize_t len_diff; - int diff; - - p1_lim = (const unsigned char *)a->mv_data; - p1 = (const unsigned char *)a->mv_data + a->mv_size; - p2 = (const unsigned char *)b->mv_data + b->mv_size; - - len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size; - if (len_diff > 0) { - p1_lim += len_diff; - len_diff = 1; - } - - while (p1 > p1_lim) { - diff = *--p1 - *--p2; - if (diff) - return diff; - } - return len_diff<0 ? -1 : len_diff; -} - -/** Search for key within a page, using binary search. - * Returns the smallest entry larger or equal to the key. - * If exactp is non-null, stores whether the found entry was an exact match - * in *exactp (1 or 0). - * Updates the cursor index with the index of the found entry. - * If no entry larger or equal to the key is found, returns NULL. - */ -static MDB_node * -mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp) -{ - unsigned int i = 0, nkeys; - int low, high; - int rc = 0; - MDB_page *mp = mc->mc_pg[mc->mc_top]; - MDB_node *node = NULL; - MDB_val nodekey; - MDB_cmp_func *cmp; - DKBUF; - - nkeys = NUMKEYS(mp); - - DPRINTF(("searching %u keys in %s %spage %"Y"u", - nkeys, IS_LEAF(mp) ? "leaf" : "branch", IS_SUBP(mp) ? "sub-" : "", - mdb_dbg_pgno(mp))); - - low = IS_LEAF(mp) ? 0 : 1; - high = nkeys - 1; - cmp = mc->mc_dbx->md_cmp; - - /* Branch pages have no data, so if using integer keys, - * alignment is guaranteed. Use faster mdb_cmp_int. 
- */ - if (cmp == mdb_cmp_cint && IS_BRANCH(mp)) { - if (NODEPTR(mp, 1)->mn_ksize == sizeof(mdb_size_t)) - cmp = mdb_cmp_long; - else if(NODEPTR(mp, 1)->mn_ksize == sizeof(int)) - cmp = mdb_cmp_int; - } - - if (IS_LEAF2(mp)) { - nodekey.mv_size = mc->mc_db->md_pad; - node = NODEPTR(mp, 0); /* fake */ - while (low <= high) { - i = (low + high) >> 1; - nodekey.mv_data = LEAF2KEY(mp, i, nodekey.mv_size); - rc = cmp(key, &nodekey); - DPRINTF(("found leaf index %u [%s], rc = %i", - i, DKEY(&nodekey), rc)); - if (rc == 0) - break; - if (rc > 0) - low = i + 1; - else - high = i - 1; - } - } else { - while (low <= high) { - i = (low + high) >> 1; - - node = NODEPTR(mp, i); - nodekey.mv_size = NODEKSZ(node); - nodekey.mv_data = NODEKEY(node); - - rc = cmp(key, &nodekey); -#if MDB_DEBUG - if (IS_LEAF(mp)) - DPRINTF(("found leaf index %u [%s], rc = %i", - i, DKEY(&nodekey), rc)); - else - DPRINTF(("found branch index %u [%s -> %"Y"u], rc = %i", - i, DKEY(&nodekey), NODEPGNO(node), rc)); -#endif - if (rc == 0) - break; - if (rc > 0) - low = i + 1; - else - high = i - 1; - } - } - - if (rc > 0) { /* Found entry is less than the key. */ - i++; /* Skip to get the smallest entry larger than key. */ - if (!IS_LEAF2(mp)) - node = NODEPTR(mp, i); - } - if (exactp) - *exactp = (rc == 0 && nkeys > 0); - /* store the key index */ - mc->mc_ki[mc->mc_top] = i; - if (i >= nkeys) - /* There is no entry larger or equal to the key. */ - return NULL; - - /* nodeptr is fake for LEAF2 */ - return node; -} - -#if 0 -static void -mdb_cursor_adjust(MDB_cursor *mc, func) -{ - MDB_cursor *m2; - - for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { - if (m2->mc_pg[m2->mc_top] == mc->mc_pg[mc->mc_top]) { - func(mc, m2); - } - } -} -#endif - -/** Pop a page off the top of the cursor's stack. */ -static void -mdb_cursor_pop(MDB_cursor *mc) -{ - if (mc->mc_snum) { - DPRINTF(("popping page %"Y"u off db %d cursor %p", - mc->mc_pg[mc->mc_top]->mp_pgno, DDBI(mc), (void *) mc)); - - mc->mc_snum--; - if (mc->mc_snum) { - mc->mc_top--; - } else { - mc->mc_flags &= ~C_INITIALIZED; - } - } -} - -/** Push a page onto the top of the cursor's stack. */ -static int -mdb_cursor_push(MDB_cursor *mc, MDB_page *mp) -{ - DPRINTF(("pushing page %"Y"u on db %d cursor %p", mp->mp_pgno, - DDBI(mc), (void *) mc)); - - if (mc->mc_snum >= CURSOR_STACK) { - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return MDB_CURSOR_FULL; - } - - mc->mc_top = mc->mc_snum++; - mc->mc_pg[mc->mc_top] = mp; - mc->mc_ki[mc->mc_top] = 0; - - return MDB_SUCCESS; -} - -#ifdef MDB_VL32 -/** Map a read-only page. - * There are two levels of tracking in use, a per-txn list and a per-env list. - * ref'ing and unref'ing the per-txn list is faster since it requires no - * locking. Pages are cached in the per-env list for global reuse, and a lock - * is required. Pages are not immediately unmapped when their refcnt goes to - * zero; they hang around in case they will be reused again soon. - * - * When the per-txn list gets full, all pages with refcnt=0 are purged from the - * list and their refcnts in the per-env list are decremented. - * - * When the per-env list gets full, all pages with refcnt=0 are purged from the - * list and their pages are unmapped. - * - * @note "full" means the list has reached its respective rpcheck threshold. - * This threshold slowly raises if no pages could be purged on a given check, - * and returns to its original value when enough pages were purged. 
- * - * If purging doesn't free any slots, filling the per-txn list will return - * MDB_TXN_FULL, and filling the per-env list returns MDB_MAP_FULL. - * - * Reference tracking in a txn is imperfect, pages can linger with non-zero - * refcnt even without active references. It was deemed to be too invasive - * to add unrefs in every required location. However, all pages are unref'd - * at the end of the transaction. This guarantees that no stale references - * linger in the per-env list. - * - * Usually we map chunks of 16 pages at a time, but if an overflow page begins - * at the tail of the chunk we extend the chunk to include the entire overflow - * page. Unfortunately, pages can be turned into overflow pages after their - * chunk was already mapped. In that case we must remap the chunk if the - * overflow page is referenced. If the chunk's refcnt is 0 we can just remap - * it, otherwise we temporarily map a new chunk just for the overflow page. - * - * @note this chunk handling means we cannot guarantee that a data item - * returned from the DB will stay alive for the duration of the transaction: - * We unref pages as soon as a cursor moves away from the page - * A subsequent op may cause a purge, which may unmap any unref'd chunks - * The caller must copy the data if it must be used later in the same txn. - * - * Also - our reference counting revolves around cursors, but overflow pages - * aren't pointed to by a cursor's page stack. We have to remember them - * explicitly, in the added mc_ovpg field. A single cursor can only hold a - * reference to one overflow page at a time. - * - * @param[in] txn the transaction for this access. - * @param[in] pgno the page number for the page to retrieve. - * @param[out] ret address of a pointer where the page's address will be stored. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_rpage_get(MDB_txn *txn, pgno_t pg0, MDB_page **ret) -{ - MDB_env *env = txn->mt_env; - MDB_page *p; - MDB_ID3L tl = txn->mt_rpages; - MDB_ID3L el = env->me_rpages; - MDB_ID3 id3; - unsigned x, rem; - pgno_t pgno; - int rc, retries = 1; -#ifdef _WIN32 - LARGE_INTEGER off; - SIZE_T len; -#define SET_OFF(off,val) off.QuadPart = val -#define MAP(rc,env,addr,len,off) \ - addr = NULL; \ - rc = NtMapViewOfSection(env->me_fmh, GetCurrentProcess(), &addr, 0, \ - len, &off, &len, ViewUnmap, (env->me_flags & MDB_RDONLY) ? 0 : MEM_RESERVE, PAGE_READONLY); \ - if (rc) rc = mdb_nt2win32(rc) -#else - off_t off; - size_t len; -#define SET_OFF(off,val) off = val -#define MAP(rc,env,addr,len,off) \ - addr = mmap(NULL, len, PROT_READ, MAP_SHARED, env->me_fd, off); \ - rc = (addr == MAP_FAILED) ? errno : 0 -#endif - - /* remember the offset of the actual page number, so we can - * return the correct pointer at the end. - */ - rem = pg0 & (MDB_RPAGE_CHUNK-1); - pgno = pg0 ^ rem; - - id3.mid = 0; - x = mdb_mid3l_search(tl, pgno); - if (x <= tl[0].mid && tl[x].mid == pgno) { - if (x != tl[0].mid && tl[x+1].mid == pg0) - x++; - /* check for overflow size */ - p = (MDB_page *)((char *)tl[x].mptr + rem * env->me_psize); - if (IS_OVERFLOW(p) && p->mp_pages + rem > tl[x].mcnt) { - id3.mcnt = p->mp_pages + rem; - len = id3.mcnt * env->me_psize; - SET_OFF(off, pgno * env->me_psize); - MAP(rc, env, id3.mptr, len, off); - if (rc) - return rc; - /* check for local-only page */ - if (rem) { - mdb_tassert(txn, tl[x].mid != pg0); - /* hope there's room to insert this locally. - * setting mid here tells later code to just insert - * this id3 instead of searching for a match. 
- */ - id3.mid = pg0; - goto notlocal; - } else { - /* ignore the mapping we got from env, use new one */ - tl[x].mptr = id3.mptr; - tl[x].mcnt = id3.mcnt; - /* if no active ref, see if we can replace in env */ - if (!tl[x].mref) { - unsigned i; - pthread_mutex_lock(&env->me_rpmutex); - i = mdb_mid3l_search(el, tl[x].mid); - if (el[i].mref == 1) { - /* just us, replace it */ - munmap(el[i].mptr, el[i].mcnt * env->me_psize); - el[i].mptr = tl[x].mptr; - el[i].mcnt = tl[x].mcnt; - } else { - /* there are others, remove ourself */ - el[i].mref--; - } - pthread_mutex_unlock(&env->me_rpmutex); - } - } - } - id3.mptr = tl[x].mptr; - id3.mcnt = tl[x].mcnt; - tl[x].mref++; - goto ok; - } - -notlocal: - if (tl[0].mid >= MDB_TRPAGE_MAX - txn->mt_rpcheck) { - unsigned i, y; - /* purge unref'd pages from our list and unref in env */ - pthread_mutex_lock(&env->me_rpmutex); -retry: - y = 0; - for (i=1; i<=tl[0].mid; i++) { - if (!tl[i].mref) { - if (!y) y = i; - /* tmp overflow pages don't go to env */ - if (tl[i].mid & (MDB_RPAGE_CHUNK-1)) { - munmap(tl[i].mptr, tl[i].mcnt * env->me_psize); - continue; - } - x = mdb_mid3l_search(el, tl[i].mid); - el[x].mref--; - } - } - pthread_mutex_unlock(&env->me_rpmutex); - if (!y) { - /* we didn't find any unref'd chunks. - * if we're out of room, fail. - */ - if (tl[0].mid >= MDB_TRPAGE_MAX) - return MDB_TXN_FULL; - /* otherwise, raise threshold for next time around - * and let this go. - */ - txn->mt_rpcheck /= 2; - } else { - /* we found some unused; consolidate the list */ - for (i=y+1; i<= tl[0].mid; i++) - if (tl[i].mref) - tl[y++] = tl[i]; - tl[0].mid = y-1; - /* decrease the check threshold toward its original value */ - if (!txn->mt_rpcheck) - txn->mt_rpcheck = 1; - while (txn->mt_rpcheck < tl[0].mid && txn->mt_rpcheck < MDB_TRPAGE_SIZE/2) - txn->mt_rpcheck *= 2; - } - } - if (tl[0].mid < MDB_TRPAGE_SIZE) { - id3.mref = 1; - if (id3.mid) - goto found; - /* don't map past last written page in read-only envs */ - if ((env->me_flags & MDB_RDONLY) && pgno + MDB_RPAGE_CHUNK-1 > txn->mt_last_pgno) - id3.mcnt = txn->mt_last_pgno + 1 - pgno; - else - id3.mcnt = MDB_RPAGE_CHUNK; - len = id3.mcnt * env->me_psize; - id3.mid = pgno; - - /* search for page in env */ - pthread_mutex_lock(&env->me_rpmutex); - x = mdb_mid3l_search(el, pgno); - if (x <= el[0].mid && el[x].mid == pgno) { - id3.mptr = el[x].mptr; - id3.mcnt = el[x].mcnt; - /* check for overflow size */ - p = (MDB_page *)((char *)id3.mptr + rem * env->me_psize); - if (IS_OVERFLOW(p) && p->mp_pages + rem > id3.mcnt) { - id3.mcnt = p->mp_pages + rem; - len = id3.mcnt * env->me_psize; - SET_OFF(off, pgno * env->me_psize); - MAP(rc, env, id3.mptr, len, off); - if (rc) - goto fail; - if (!el[x].mref) { - munmap(el[x].mptr, env->me_psize * el[x].mcnt); - el[x].mptr = id3.mptr; - el[x].mcnt = id3.mcnt; - } else { - id3.mid = pg0; - pthread_mutex_unlock(&env->me_rpmutex); - goto found; - } - } - el[x].mref++; - pthread_mutex_unlock(&env->me_rpmutex); - goto found; - } - if (el[0].mid >= MDB_ERPAGE_MAX - env->me_rpcheck) { - /* purge unref'd pages */ - unsigned i, y = 0; - for (i=1; i<=el[0].mid; i++) { - if (!el[i].mref) { - if (!y) y = i; - munmap(el[i].mptr, env->me_psize * el[i].mcnt); - } - } - if (!y) { - if (retries) { - /* see if we can unref some local pages */ - retries--; - id3.mid = 0; - goto retry; - } - if (el[0].mid >= MDB_ERPAGE_MAX) { - pthread_mutex_unlock(&env->me_rpmutex); - return MDB_MAP_FULL; - } - env->me_rpcheck /= 2; - } else { - for (i=y+1; i<= el[0].mid; i++) - if (el[i].mref) - el[y++] = 
el[i]; - el[0].mid = y-1; - if (!env->me_rpcheck) - env->me_rpcheck = 1; - while (env->me_rpcheck < el[0].mid && env->me_rpcheck < MDB_ERPAGE_SIZE/2) - env->me_rpcheck *= 2; - } - } - SET_OFF(off, pgno * env->me_psize); - MAP(rc, env, id3.mptr, len, off); - if (rc) { -fail: - pthread_mutex_unlock(&env->me_rpmutex); - return rc; - } - /* check for overflow size */ - p = (MDB_page *)((char *)id3.mptr + rem * env->me_psize); - if (IS_OVERFLOW(p) && p->mp_pages + rem > id3.mcnt) { - id3.mcnt = p->mp_pages + rem; - munmap(id3.mptr, len); - len = id3.mcnt * env->me_psize; - MAP(rc, env, id3.mptr, len, off); - if (rc) - goto fail; - } - mdb_mid3l_insert(el, &id3); - pthread_mutex_unlock(&env->me_rpmutex); -found: - mdb_mid3l_insert(tl, &id3); - } else { - return MDB_TXN_FULL; - } -ok: - p = (MDB_page *)((char *)id3.mptr + rem * env->me_psize); -#if MDB_DEBUG /* we don't need this check any more */ - if (IS_OVERFLOW(p)) { - mdb_tassert(txn, p->mp_pages + rem <= id3.mcnt); - } -#endif - *ret = p; - return MDB_SUCCESS; -} -#endif - -/** Find the address of the page corresponding to a given page number. - * @param[in] mc the cursor accessing the page. - * @param[in] pgno the page number for the page to retrieve. - * @param[out] ret address of a pointer where the page's address will be stored. - * @param[out] lvl dirty_list inheritance level of found page. 1=current txn, 0=mapped page. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_get(MDB_cursor *mc, pgno_t pgno, MDB_page **ret, int *lvl) -{ - MDB_txn *txn = mc->mc_txn; -#ifndef MDB_VL32 - MDB_env *env = txn->mt_env; -#endif - MDB_page *p = NULL; - int level; - - if (! (mc->mc_flags & (C_ORIG_RDONLY|C_WRITEMAP))) { - MDB_txn *tx2 = txn; - level = 1; - do { - MDB_ID2L dl = tx2->mt_u.dirty_list; - unsigned x; - /* Spilled pages were dirtied in this txn and flushed - * because the dirty list got full. Bring this page - * back in from the map (but don't unspill it here, - * leave that unless page_touch happens again). - */ - if (tx2->mt_spill_pgs) { - MDB_ID pn = pgno << 1; - x = mdb_midl_search(tx2->mt_spill_pgs, pn); - if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) { -#ifdef MDB_VL32 - int rc = mdb_rpage_get(txn, pgno, &p); - if (rc) - return rc; -#else - p = (MDB_page *)(env->me_map + env->me_psize * pgno); -#endif - goto done; - } - } - if (dl[0].mid) { - unsigned x = mdb_mid2l_search(dl, pgno); - if (x <= dl[0].mid && dl[x].mid == pgno) { - p = dl[x].mptr; - goto done; - } - } - level++; - } while ((tx2 = tx2->mt_parent) != NULL); - } - - if (pgno < txn->mt_next_pgno) { - level = 0; -#ifdef MDB_VL32 - { - int rc = mdb_rpage_get(txn, pgno, &p); - if (rc) - return rc; - } -#else - p = (MDB_page *)(env->me_map + env->me_psize * pgno); -#endif - } else { - DPRINTF(("page %"Y"u not found", pgno)); - txn->mt_flags |= MDB_TXN_ERROR; - return MDB_PAGE_NOTFOUND; - } - -done: - *ret = p; - if (lvl) - *lvl = level; - return MDB_SUCCESS; -} - -/** Finish #mdb_page_search() / #mdb_page_search_lowest(). - * The cursor is at the root page, set up the rest of it. - */ -static int -mdb_page_search_root(MDB_cursor *mc, MDB_val *key, int flags) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top]; - int rc; - DKBUF; - - while (IS_BRANCH(mp)) { - MDB_node *node; - indx_t i; - - DPRINTF(("branch page %"Y"u has %u keys", mp->mp_pgno, NUMKEYS(mp))); - /* Don't assert on branch pages in the FreeDB. We can get here - * while in the process of rebalancing a FreeDB branch page; we must - * let that proceed. 
ITS#8336 - */ - mdb_cassert(mc, !mc->mc_dbi || NUMKEYS(mp) > 1); - DPRINTF(("found index 0 to page %"Y"u", NODEPGNO(NODEPTR(mp, 0)))); - - if (flags & (MDB_PS_FIRST|MDB_PS_LAST)) { - i = 0; - if (flags & MDB_PS_LAST) - i = NUMKEYS(mp) - 1; - } else { - int exact; - node = mdb_node_search(mc, key, &exact); - if (node == NULL) - i = NUMKEYS(mp) - 1; - else { - i = mc->mc_ki[mc->mc_top]; - if (!exact) { - mdb_cassert(mc, i > 0); - i--; - } - } - DPRINTF(("following index %u for key [%s]", i, DKEY(key))); - } - - mdb_cassert(mc, i < NUMKEYS(mp)); - node = NODEPTR(mp, i); - - if ((rc = mdb_page_get(mc, NODEPGNO(node), &mp, NULL)) != 0) - return rc; - - mc->mc_ki[mc->mc_top] = i; - if ((rc = mdb_cursor_push(mc, mp))) - return rc; - - if (flags & MDB_PS_MODIFY) { - if ((rc = mdb_page_touch(mc)) != 0) - return rc; - mp = mc->mc_pg[mc->mc_top]; - } - } - - if (!IS_LEAF(mp)) { - DPRINTF(("internal error, index points to a %02X page!?", - mp->mp_flags)); - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return MDB_CORRUPTED; - } - - DPRINTF(("found leaf page %"Y"u for key [%s]", mp->mp_pgno, - key ? DKEY(key) : "null")); - mc->mc_flags |= C_INITIALIZED; - mc->mc_flags &= ~C_EOF; - - return MDB_SUCCESS; -} - -/** Search for the lowest key under the current branch page. - * This just bypasses a NUMKEYS check in the current page - * before calling mdb_page_search_root(), because the callers - * are all in situations where the current page is known to - * be underfilled. - */ -static int -mdb_page_search_lowest(MDB_cursor *mc) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top]; - MDB_node *node = NODEPTR(mp, 0); - int rc; - - if ((rc = mdb_page_get(mc, NODEPGNO(node), &mp, NULL)) != 0) - return rc; - - mc->mc_ki[mc->mc_top] = 0; - if ((rc = mdb_cursor_push(mc, mp))) - return rc; - return mdb_page_search_root(mc, NULL, MDB_PS_FIRST); -} - -/** Search for the page a given key should be in. - * Push it and its parent pages on the cursor stack. - * @param[in,out] mc the cursor for this operation. - * @param[in] key the key to search for, or NULL for first/last page. - * @param[in] flags If MDB_PS_MODIFY is set, visited pages in the DB - * are touched (updated with new page numbers). - * If MDB_PS_FIRST or MDB_PS_LAST is set, find first or last leaf. - * This is used by #mdb_cursor_first() and #mdb_cursor_last(). - * If MDB_PS_ROOTONLY set, just fetch root node, no further lookups. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_search(MDB_cursor *mc, MDB_val *key, int flags) -{ - int rc; - pgno_t root; - - /* Make sure the txn is still viable, then find the root from - * the txn's db table and set it as the root of the cursor's stack. 
- */ - if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED) { - DPUTS("transaction may not be used now"); - return MDB_BAD_TXN; - } else { - /* Make sure we're using an up-to-date root */ - if (*mc->mc_dbflag & DB_STALE) { - MDB_cursor mc2; - if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi)) - return MDB_BAD_DBI; - mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, NULL); - rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, 0); - if (rc) - return rc; - { - MDB_val data; - int exact = 0; - uint16_t flags; - MDB_node *leaf = mdb_node_search(&mc2, - &mc->mc_dbx->md_name, &exact); - if (!exact) - return MDB_NOTFOUND; - if ((leaf->mn_flags & (F_DUPDATA|F_SUBDATA)) != F_SUBDATA) - return MDB_INCOMPATIBLE; /* not a named DB */ - rc = mdb_node_read(&mc2, leaf, &data); - if (rc) - return rc; - memcpy(&flags, ((char *) data.mv_data + offsetof(MDB_db, md_flags)), - sizeof(uint16_t)); - /* The txn may not know this DBI, or another process may - * have dropped and recreated the DB with other flags. - */ - if ((mc->mc_db->md_flags & PERSISTENT_FLAGS) != flags) - return MDB_INCOMPATIBLE; - memcpy(mc->mc_db, data.mv_data, sizeof(MDB_db)); - } - *mc->mc_dbflag &= ~DB_STALE; - } - root = mc->mc_db->md_root; - - if (root == P_INVALID) { /* Tree is empty. */ - DPUTS("tree is empty"); - return MDB_NOTFOUND; - } - } - - mdb_cassert(mc, root > 1); - if (!mc->mc_pg[0] || mc->mc_pg[0]->mp_pgno != root) { -#ifdef MDB_VL32 - if (mc->mc_pg[0]) - MDB_PAGE_UNREF(mc->mc_txn, mc->mc_pg[0]); -#endif - if ((rc = mdb_page_get(mc, root, &mc->mc_pg[0], NULL)) != 0) - return rc; - } - -#ifdef MDB_VL32 - { - int i; - for (i=1; i<mc->mc_snum; i++) - MDB_PAGE_UNREF(mc->mc_txn, mc->mc_pg[i]); - } -#endif - mc->mc_snum = 1; - mc->mc_top = 0; - - DPRINTF(("db %d root page %"Y"u has flags 0x%X", - DDBI(mc), root, mc->mc_pg[0]->mp_flags)); - - if (flags & MDB_PS_MODIFY) { - if ((rc = mdb_page_touch(mc))) - return rc; - } - - if (flags & MDB_PS_ROOTONLY) - return MDB_SUCCESS; - - return mdb_page_search_root(mc, key, flags); -} - -static int -mdb_ovpage_free(MDB_cursor *mc, MDB_page *mp) -{ - MDB_txn *txn = mc->mc_txn; - pgno_t pg = mp->mp_pgno; - unsigned x = 0, ovpages = mp->mp_pages; - MDB_env *env = txn->mt_env; - MDB_IDL sl = txn->mt_spill_pgs; - MDB_ID pn = pg << 1; - int rc; - - DPRINTF(("free ov page %"Y"u (%d)", pg, ovpages)); - /* If the page is dirty or on the spill list we just acquired it, - * so we should give it back to our current free list, if any. - * Otherwise put it onto the list of pages we freed in this txn. - * - * Won't create me_pghead: me_pglast must be inited along with it. - * Unsupported in nested txns: They would need to hide the page - * range in ancestor txns' dirty and spilled lists. - */ - if (env->me_pghead && - !txn->mt_parent && - ((mp->mp_flags & P_DIRTY) || - (sl && (x = mdb_midl_search(sl, pn)) <= sl[0] && sl[x] == pn))) - { - unsigned i, j; - pgno_t *mop; - MDB_ID2 *dl, ix, iy; - rc = mdb_midl_need(&env->me_pghead, ovpages); - if (rc) - return rc; - if (!(mp->mp_flags & P_DIRTY)) { - /* This page is no longer spilled */ - if (x == sl[0]) - sl[0]--; - else - sl[x] |= 1; - goto release; - } - /* Remove from dirty list */ - dl = txn->mt_u.dirty_list; - x = dl[0].mid--; - for (ix = dl[x]; ix.mptr != mp; ix = iy) { - if (x > 1) { - x--; - iy = dl[x]; - dl[x] = ix; - } else { - mdb_cassert(mc, x > 1); - j = ++(dl[0].mid); - dl[j] = ix; /* Unsorted. OK when MDB_TXN_ERROR. 
*/ - txn->mt_flags |= MDB_TXN_ERROR; - return MDB_CORRUPTED; - } - } - txn->mt_dirty_room++; - if (!(env->me_flags & MDB_WRITEMAP)) - mdb_dpage_free(env, mp); -release: - /* Insert in me_pghead */ - mop = env->me_pghead; - j = mop[0] + ovpages; - for (i = mop[0]; i && mop[i] < pg; i--) - mop[j--] = mop[i]; - while (j>i) - mop[j--] = pg++; - mop[0] += ovpages; - } else { - rc = mdb_midl_append_range(&txn->mt_free_pgs, pg, ovpages); - if (rc) - return rc; - } - mc->mc_db->md_overflow_pages -= ovpages; - return 0; -} - -/** Return the data associated with a given node. - * @param[in] mc The cursor for this operation. - * @param[in] leaf The node being read. - * @param[out] data Updated to point to the node's data. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_node_read(MDB_cursor *mc, MDB_node *leaf, MDB_val *data) -{ - MDB_page *omp; /* overflow page */ - pgno_t pgno; - int rc; - -#ifdef MDB_VL32 - if (mc->mc_ovpg) { - MDB_PAGE_UNREF(mc->mc_txn, mc->mc_ovpg); - mc->mc_ovpg = 0; - } -#endif - if (!F_ISSET(leaf->mn_flags, F_BIGDATA)) { - data->mv_size = NODEDSZ(leaf); - data->mv_data = NODEDATA(leaf); - return MDB_SUCCESS; - } - - /* Read overflow data. - */ - data->mv_size = NODEDSZ(leaf); - memcpy(&pgno, NODEDATA(leaf), sizeof(pgno)); - if ((rc = mdb_page_get(mc, pgno, &omp, NULL)) != 0) { - DPRINTF(("read overflow page %"Y"u failed", pgno)); - return rc; - } - data->mv_data = METADATA(omp); -#ifdef MDB_VL32 - mc->mc_ovpg = omp; -#endif - - return MDB_SUCCESS; -} - -int -mdb_get(MDB_txn *txn, MDB_dbi dbi, - MDB_val *key, MDB_val *data) -{ - MDB_cursor mc; - MDB_xcursor mx; - int exact = 0, rc; - DKBUF; - - DPRINTF(("===> get db %u key [%s]", dbi, DKEY(key))); - - if (!key || !data || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - mdb_cursor_init(&mc, txn, dbi, &mx); - rc = mdb_cursor_set(&mc, key, data, MDB_SET, &exact); -#ifdef MDB_VL32 - { - /* unref all the pages - caller must copy the data - * before doing anything else - */ - mdb_cursor_unref(&mc); - } -#endif - return rc; -} - -/** Find a sibling for a page. - * Replaces the page at the top of the cursor's stack with the - * specified sibling, if one exists. - * @param[in] mc The cursor for this operation. - * @param[in] move_right Non-zero if the right sibling is requested, - * otherwise the left sibling. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_cursor_sibling(MDB_cursor *mc, int move_right) -{ - int rc; - MDB_node *indx; - MDB_page *mp; -#ifdef MDB_VL32 - MDB_page *op; -#endif - - if (mc->mc_snum < 2) { - return MDB_NOTFOUND; /* root has no siblings */ - } - -#ifdef MDB_VL32 - op = mc->mc_pg[mc->mc_top]; -#endif - mdb_cursor_pop(mc); - DPRINTF(("parent page is page %"Y"u, index %u", - mc->mc_pg[mc->mc_top]->mp_pgno, mc->mc_ki[mc->mc_top])); - - if (move_right ? (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mc->mc_pg[mc->mc_top])) - : (mc->mc_ki[mc->mc_top] == 0)) { - DPRINTF(("no more keys left, moving to %s sibling", - move_right ? "right" : "left")); - if ((rc = mdb_cursor_sibling(mc, move_right)) != MDB_SUCCESS) { - /* undo cursor_pop before returning */ - mc->mc_top++; - mc->mc_snum++; - return rc; - } - } else { - if (move_right) - mc->mc_ki[mc->mc_top]++; - else - mc->mc_ki[mc->mc_top]--; - DPRINTF(("just moving to %s index key %u", - move_right ? 
"right" : "left", mc->mc_ki[mc->mc_top])); - } - mdb_cassert(mc, IS_BRANCH(mc->mc_pg[mc->mc_top])); - - MDB_PAGE_UNREF(mc->mc_txn, op); - - indx = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if ((rc = mdb_page_get(mc, NODEPGNO(indx), &mp, NULL)) != 0) { - /* mc will be inconsistent if caller does mc_snum++ as above */ - mc->mc_flags &= ~(C_INITIALIZED|C_EOF); - return rc; - } - - mdb_cursor_push(mc, mp); - if (!move_right) - mc->mc_ki[mc->mc_top] = NUMKEYS(mp)-1; - - return MDB_SUCCESS; -} - -/** Move the cursor to the next data item. */ -static int -mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op) -{ - MDB_page *mp; - MDB_node *leaf; - int rc; - - if ((mc->mc_flags & C_EOF) || - ((mc->mc_flags & C_DEL) && op == MDB_NEXT_DUP)) { - return MDB_NOTFOUND; - } - if (!(mc->mc_flags & C_INITIALIZED)) - return mdb_cursor_first(mc, key, data); - - mp = mc->mc_pg[mc->mc_top]; - - if (mc->mc_db->md_flags & MDB_DUPSORT) { - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (op == MDB_NEXT || op == MDB_NEXT_DUP) { - rc = mdb_cursor_next(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_NEXT); - if (op != MDB_NEXT || rc != MDB_NOTFOUND) { - if (rc == MDB_SUCCESS) - MDB_GET_KEY(leaf, key); - return rc; - } - } -#ifdef MDB_VL32 - else { - if (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { - mdb_cursor_unref(&mc->mc_xcursor->mx_cursor); - } - } -#endif - } else { - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - if (op == MDB_NEXT_DUP) - return MDB_NOTFOUND; - } - } - - DPRINTF(("cursor_next: top page is %"Y"u in cursor %p", - mdb_dbg_pgno(mp), (void *) mc)); - if (mc->mc_flags & C_DEL) { - mc->mc_flags ^= C_DEL; - goto skip; - } - - if (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mp)) { - DPUTS("=====> move to next sibling page"); - if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) { - mc->mc_flags |= C_EOF; - return rc; - } - mp = mc->mc_pg[mc->mc_top]; - DPRINTF(("next page is %"Y"u, key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top])); - } else - mc->mc_ki[mc->mc_top]++; - -skip: - DPRINTF(("==> cursor points to page %"Y"u with %u keys, key index %u", - mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top])); - - if (IS_LEAF2(mp)) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - return MDB_SUCCESS; - } - - mdb_cassert(mc, IS_LEAF(mp)); - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - } - if (data) { - if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS) - return rc; - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc != MDB_SUCCESS) - return rc; - } - } - - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -/** Move the cursor to the previous data item. 
*/ -static int -mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op) -{ - MDB_page *mp; - MDB_node *leaf; - int rc; - - if (!(mc->mc_flags & C_INITIALIZED)) { - rc = mdb_cursor_last(mc, key, data); - if (rc) - return rc; - mc->mc_ki[mc->mc_top]++; - } - - mp = mc->mc_pg[mc->mc_top]; - - if (mc->mc_db->md_flags & MDB_DUPSORT) { - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (op == MDB_PREV || op == MDB_PREV_DUP) { - rc = mdb_cursor_prev(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_PREV); - if (op != MDB_PREV || rc != MDB_NOTFOUND) { - if (rc == MDB_SUCCESS) { - MDB_GET_KEY(leaf, key); - mc->mc_flags &= ~C_EOF; - } - return rc; - } - } -#ifdef MDB_VL32 - else { - if (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { - mdb_cursor_unref(&mc->mc_xcursor->mx_cursor); - } - } -#endif - } else { - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - if (op == MDB_PREV_DUP) - return MDB_NOTFOUND; - } - } - - DPRINTF(("cursor_prev: top page is %"Y"u in cursor %p", - mdb_dbg_pgno(mp), (void *) mc)); - - mc->mc_flags &= ~(C_EOF|C_DEL); - - if (mc->mc_ki[mc->mc_top] == 0) { - DPUTS("=====> move to prev sibling page"); - if ((rc = mdb_cursor_sibling(mc, 0)) != MDB_SUCCESS) { - return rc; - } - mp = mc->mc_pg[mc->mc_top]; - mc->mc_ki[mc->mc_top] = NUMKEYS(mp) - 1; - DPRINTF(("prev page is %"Y"u, key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top])); - } else - mc->mc_ki[mc->mc_top]--; - - mc->mc_flags &= ~C_EOF; - - DPRINTF(("==> cursor points to page %"Y"u with %u keys, key index %u", - mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top])); - - if (IS_LEAF2(mp)) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - return MDB_SUCCESS; - } - - mdb_cassert(mc, IS_LEAF(mp)); - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - } - if (data) { - if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS) - return rc; - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc != MDB_SUCCESS) - return rc; - } - } - - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -/** Set the cursor on a specific data item. */ -static int -mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data, - MDB_cursor_op op, int *exactp) -{ - int rc; - MDB_page *mp; - MDB_node *leaf = NULL; - DKBUF; - - if (key->mv_size == 0) - return MDB_BAD_VALSIZE; - - if (mc->mc_xcursor) - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - - /* See if we're already on the right page */ - if (mc->mc_flags & C_INITIALIZED) { - MDB_val nodekey; - - mp = mc->mc_pg[mc->mc_top]; - if (!NUMKEYS(mp)) { - mc->mc_ki[mc->mc_top] = 0; - return MDB_NOTFOUND; - } - if (mp->mp_flags & P_LEAF2) { - nodekey.mv_size = mc->mc_db->md_pad; - nodekey.mv_data = LEAF2KEY(mp, 0, nodekey.mv_size); - } else { - leaf = NODEPTR(mp, 0); - MDB_GET_KEY2(leaf, nodekey); - } - rc = mc->mc_dbx->md_cmp(key, &nodekey); - if (rc == 0) { - /* Probably happens rarely, but first node on the page - * was the one we wanted. 
- */ - mc->mc_ki[mc->mc_top] = 0; - if (exactp) - *exactp = 1; - goto set1; - } - if (rc > 0) { - unsigned int i; - unsigned int nkeys = NUMKEYS(mp); - if (nkeys > 1) { - if (mp->mp_flags & P_LEAF2) { - nodekey.mv_data = LEAF2KEY(mp, - nkeys-1, nodekey.mv_size); - } else { - leaf = NODEPTR(mp, nkeys-1); - MDB_GET_KEY2(leaf, nodekey); - } - rc = mc->mc_dbx->md_cmp(key, &nodekey); - if (rc == 0) { - /* last node was the one we wanted */ - mc->mc_ki[mc->mc_top] = nkeys-1; - if (exactp) - *exactp = 1; - goto set1; - } - if (rc < 0) { - if (mc->mc_ki[mc->mc_top] < NUMKEYS(mp)) { - /* This is definitely the right page, skip search_page */ - if (mp->mp_flags & P_LEAF2) { - nodekey.mv_data = LEAF2KEY(mp, - mc->mc_ki[mc->mc_top], nodekey.mv_size); - } else { - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - MDB_GET_KEY2(leaf, nodekey); - } - rc = mc->mc_dbx->md_cmp(key, &nodekey); - if (rc == 0) { - /* current node was the one we wanted */ - if (exactp) - *exactp = 1; - goto set1; - } - } - rc = 0; - goto set2; - } - } - /* If any parents have right-sibs, search. - * Otherwise, there's nothing further. - */ - for (i=0; i<mc->mc_top; i++) - if (mc->mc_ki[i] < - NUMKEYS(mc->mc_pg[i])-1) - break; - if (i == mc->mc_top) { - /* There are no other pages */ - mc->mc_ki[mc->mc_top] = nkeys; - return MDB_NOTFOUND; - } - } - if (!mc->mc_top) { - /* There are no other pages */ - mc->mc_ki[mc->mc_top] = 0; - if (op == MDB_SET_RANGE && !exactp) { - rc = 0; - goto set1; - } else - return MDB_NOTFOUND; - } - } else { - mc->mc_pg[0] = 0; - } - - rc = mdb_page_search(mc, key, 0); - if (rc != MDB_SUCCESS) - return rc; - - mp = mc->mc_pg[mc->mc_top]; - mdb_cassert(mc, IS_LEAF(mp)); - -set2: - leaf = mdb_node_search(mc, key, exactp); - if (exactp != NULL && !*exactp) { - /* MDB_SET specified and not an exact match. 
*/ - return MDB_NOTFOUND; - } - - if (leaf == NULL) { - DPUTS("===> inexact leaf not found, goto sibling"); - if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) { - mc->mc_flags |= C_EOF; - return rc; /* no entries matched */ - } - mp = mc->mc_pg[mc->mc_top]; - mdb_cassert(mc, IS_LEAF(mp)); - leaf = NODEPTR(mp, 0); - } - -set1: - mc->mc_flags |= C_INITIALIZED; - mc->mc_flags &= ~C_EOF; - - if (IS_LEAF2(mp)) { - if (op == MDB_SET_RANGE || op == MDB_SET_KEY) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - } - return MDB_SUCCESS; - } - -#ifdef MDB_VL32 - if (mc->mc_xcursor && mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { - mdb_cursor_unref(&mc->mc_xcursor->mx_cursor); - } -#endif - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - } - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (op == MDB_SET || op == MDB_SET_KEY || op == MDB_SET_RANGE) { - rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); - } else { - int ex2, *ex2p; - if (op == MDB_GET_BOTH) { - ex2p = &ex2; - ex2 = 0; - } else { - ex2p = NULL; - } - rc = mdb_cursor_set(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_SET_RANGE, ex2p); - if (rc != MDB_SUCCESS) - return rc; - } - } else if (op == MDB_GET_BOTH || op == MDB_GET_BOTH_RANGE) { - MDB_val olddata; - MDB_cmp_func *dcmp; - if ((rc = mdb_node_read(mc, leaf, &olddata)) != MDB_SUCCESS) - return rc; - dcmp = mc->mc_dbx->md_dcmp; -#if UINT_MAX < SIZE_MAX || defined(MDB_VL32) - if (dcmp == mdb_cmp_int && olddata.mv_size == sizeof(mdb_size_t)) - dcmp = mdb_cmp_clong; -#endif - rc = dcmp(data, &olddata); - if (rc) { - if (op == MDB_GET_BOTH || rc > 0) - return MDB_NOTFOUND; - rc = 0; - } - *data = olddata; - - } else { - if (mc->mc_xcursor) - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS) - return rc; - } - } - - /* The key already matches in all other cases */ - if (op == MDB_SET_RANGE || op == MDB_SET_KEY) - MDB_GET_KEY(leaf, key); - DPRINTF(("==> cursor placed on key [%s]", DKEY(key))); - - return rc; -} - -/** Move the cursor to the first item in the database. */ -static int -mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data) -{ - int rc; - MDB_node *leaf; - - if (mc->mc_xcursor) { -#ifdef MDB_VL32 - if (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { - mdb_cursor_unref(&mc->mc_xcursor->mx_cursor); - } -#endif - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - } - - if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { - rc = mdb_page_search(mc, NULL, MDB_PS_FIRST); - if (rc != MDB_SUCCESS) - return rc; - } - mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top])); - - leaf = NODEPTR(mc->mc_pg[mc->mc_top], 0); - mc->mc_flags |= C_INITIALIZED; - mc->mc_flags &= ~C_EOF; - - mc->mc_ki[mc->mc_top] = 0; - - if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], 0, key->mv_size); - return MDB_SUCCESS; - } - - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc) - return rc; - } else { - if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS) - return rc; - } - } - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -/** Move the cursor to the last item in the database. 
*/ -static int -mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data) -{ - int rc; - MDB_node *leaf; - - if (mc->mc_xcursor) { -#ifdef MDB_VL32 - if (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { - mdb_cursor_unref(&mc->mc_xcursor->mx_cursor); - } -#endif - mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); - } - - if (!(mc->mc_flags & C_EOF)) { - - if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { - rc = mdb_page_search(mc, NULL, MDB_PS_LAST); - if (rc != MDB_SUCCESS) - return rc; - } - mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top])); - - } - mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]) - 1; - mc->mc_flags |= C_INITIALIZED|C_EOF; - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - - if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], key->mv_size); - return MDB_SUCCESS; - } - - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - mdb_xcursor_init1(mc, leaf); - rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL); - if (rc) - return rc; - } else { - if ((rc = mdb_node_read(mc, leaf, data)) != MDB_SUCCESS) - return rc; - } - } - - MDB_GET_KEY(leaf, key); - return MDB_SUCCESS; -} - -int -mdb_cursor_get(MDB_cursor *mc, MDB_val *key, MDB_val *data, - MDB_cursor_op op) -{ - int rc; - int exact = 0; - int (*mfunc)(MDB_cursor *mc, MDB_val *key, MDB_val *data); - - if (mc == NULL) - return EINVAL; - - if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - switch (op) { - case MDB_GET_CURRENT: - if (!(mc->mc_flags & C_INITIALIZED)) { - rc = EINVAL; - } else { - MDB_page *mp = mc->mc_pg[mc->mc_top]; - int nkeys = NUMKEYS(mp); - if (!nkeys || mc->mc_ki[mc->mc_top] >= nkeys) { - mc->mc_ki[mc->mc_top] = nkeys; - rc = MDB_NOTFOUND; - break; - } - rc = MDB_SUCCESS; - if (IS_LEAF2(mp)) { - key->mv_size = mc->mc_db->md_pad; - key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); - } else { - MDB_node *leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - MDB_GET_KEY(leaf, key); - if (data) { - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - rc = mdb_cursor_get(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_GET_CURRENT); - } else { - rc = mdb_node_read(mc, leaf, data); - } - } - } - } - break; - case MDB_GET_BOTH: - case MDB_GET_BOTH_RANGE: - if (data == NULL) { - rc = EINVAL; - break; - } - if (mc->mc_xcursor == NULL) { - rc = MDB_INCOMPATIBLE; - break; - } - /* FALLTHRU */ - case MDB_SET: - case MDB_SET_KEY: - case MDB_SET_RANGE: - if (key == NULL) { - rc = EINVAL; - } else { - rc = mdb_cursor_set(mc, key, data, op, - op == MDB_SET_RANGE ? 
NULL : &exact); - } - break; - case MDB_GET_MULTIPLE: - if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) { - rc = EINVAL; - break; - } - if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { - rc = MDB_INCOMPATIBLE; - break; - } - rc = MDB_SUCCESS; - if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) || - (mc->mc_xcursor->mx_cursor.mc_flags & C_EOF)) - break; - goto fetchm; - case MDB_NEXT_MULTIPLE: - if (data == NULL) { - rc = EINVAL; - break; - } - if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { - rc = MDB_INCOMPATIBLE; - break; - } - rc = mdb_cursor_next(mc, key, data, MDB_NEXT_DUP); - if (rc == MDB_SUCCESS) { - if (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { - MDB_cursor *mx; -fetchm: - mx = &mc->mc_xcursor->mx_cursor; - data->mv_size = NUMKEYS(mx->mc_pg[mx->mc_top]) * - mx->mc_db->md_pad; - data->mv_data = METADATA(mx->mc_pg[mx->mc_top]); - mx->mc_ki[mx->mc_top] = NUMKEYS(mx->mc_pg[mx->mc_top])-1; - } else { - rc = MDB_NOTFOUND; - } - } - break; - case MDB_PREV_MULTIPLE: - if (data == NULL) { - rc = EINVAL; - break; - } - if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { - rc = MDB_INCOMPATIBLE; - break; - } - if (!(mc->mc_flags & C_INITIALIZED)) - rc = mdb_cursor_last(mc, key, data); - else - rc = MDB_SUCCESS; - if (rc == MDB_SUCCESS) { - MDB_cursor *mx = &mc->mc_xcursor->mx_cursor; - if (mx->mc_flags & C_INITIALIZED) { - rc = mdb_cursor_sibling(mx, 0); - if (rc == MDB_SUCCESS) - goto fetchm; - } else { - rc = MDB_NOTFOUND; - } - } - break; - case MDB_NEXT: - case MDB_NEXT_DUP: - case MDB_NEXT_NODUP: - rc = mdb_cursor_next(mc, key, data, op); - break; - case MDB_PREV: - case MDB_PREV_DUP: - case MDB_PREV_NODUP: - rc = mdb_cursor_prev(mc, key, data, op); - break; - case MDB_FIRST: - rc = mdb_cursor_first(mc, key, data); - break; - case MDB_FIRST_DUP: - mfunc = mdb_cursor_first; - mmove: - if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) { - rc = EINVAL; - break; - } - if (mc->mc_xcursor == NULL) { - rc = MDB_INCOMPATIBLE; - break; - } - { - MDB_node *leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { - MDB_GET_KEY(leaf, key); - rc = mdb_node_read(mc, leaf, data); - break; - } - } - if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) { - rc = EINVAL; - break; - } - rc = mfunc(&mc->mc_xcursor->mx_cursor, data, NULL); - break; - case MDB_LAST: - rc = mdb_cursor_last(mc, key, data); - break; - case MDB_LAST_DUP: - mfunc = mdb_cursor_last; - goto mmove; - default: - DPRINTF(("unhandled/unimplemented cursor operation %u", op)); - rc = EINVAL; - break; - } - - if (mc->mc_flags & C_DEL) - mc->mc_flags ^= C_DEL; - - return rc; -} - -/** Touch all the pages in the cursor stack. Set mc_top. - * Makes sure all the pages are writable, before attempting a write operation. - * @param[in] mc The cursor to operate on. 
- */ -static int -mdb_cursor_touch(MDB_cursor *mc) -{ - int rc = MDB_SUCCESS; - - if (mc->mc_dbi >= CORE_DBS && !(*mc->mc_dbflag & DB_DIRTY)) { - MDB_cursor mc2; - MDB_xcursor mcx; - if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi)) - return MDB_BAD_DBI; - mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, &mcx); - rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, MDB_PS_MODIFY); - if (rc) - return rc; - *mc->mc_dbflag |= DB_DIRTY; - } - mc->mc_top = 0; - if (mc->mc_snum) { - do { - rc = mdb_page_touch(mc); - } while (!rc && ++(mc->mc_top) < mc->mc_snum); - mc->mc_top = mc->mc_snum-1; - } - return rc; -} - -/** Do not spill pages to disk if txn is getting full, may fail instead */ -#define MDB_NOSPILL 0x8000 - -int -mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data, - unsigned int flags) -{ - MDB_env *env; - MDB_node *leaf = NULL; - MDB_page *fp, *mp, *sub_root = NULL; - uint16_t fp_flags; - MDB_val xdata, *rdata, dkey, olddata; - MDB_db dummy; - int do_sub = 0, insert_key, insert_data; - unsigned int mcount = 0, dcount = 0, nospill; - size_t nsize; - int rc, rc2; - unsigned int nflags; - DKBUF; - - if (mc == NULL || key == NULL) - return EINVAL; - - env = mc->mc_txn->mt_env; - - /* Check this first so counter will always be zero on any - * early failures. - */ - if (flags & MDB_MULTIPLE) { - dcount = data[1].mv_size; - data[1].mv_size = 0; - if (!F_ISSET(mc->mc_db->md_flags, MDB_DUPFIXED)) - return MDB_INCOMPATIBLE; - } - - nospill = flags & MDB_NOSPILL; - flags &= ~MDB_NOSPILL; - - if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) - return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; - - if (key->mv_size-1 >= ENV_MAXKEY(env)) - return MDB_BAD_VALSIZE; - -#if SIZE_MAX > MAXDATASIZE - if (data->mv_size > ((mc->mc_db->md_flags & MDB_DUPSORT) ? ENV_MAXKEY(env) : MAXDATASIZE)) - return MDB_BAD_VALSIZE; -#else - if ((mc->mc_db->md_flags & MDB_DUPSORT) && data->mv_size > ENV_MAXKEY(env)) - return MDB_BAD_VALSIZE; -#endif - - DPRINTF(("==> put db %d key [%s], size %"Z"u, data size %"Z"u", - DDBI(mc), DKEY(key), key ? 
key->mv_size : 0, data->mv_size)); - - dkey.mv_size = 0; - - if (flags == MDB_CURRENT) { - if (!(mc->mc_flags & C_INITIALIZED)) - return EINVAL; - rc = MDB_SUCCESS; - } else if (mc->mc_db->md_root == P_INVALID) { - /* new database, cursor has nothing to point to */ - mc->mc_snum = 0; - mc->mc_top = 0; - mc->mc_flags &= ~C_INITIALIZED; - rc = MDB_NO_ROOT; - } else { - int exact = 0; - MDB_val d2; - if (flags & MDB_APPEND) { - MDB_val k2; - rc = mdb_cursor_last(mc, &k2, &d2); - if (rc == 0) { - rc = mc->mc_dbx->md_cmp(key, &k2); - if (rc > 0) { - rc = MDB_NOTFOUND; - mc->mc_ki[mc->mc_top]++; - } else { - /* new key is <= last key */ - rc = MDB_KEYEXIST; - } - } - } else { - rc = mdb_cursor_set(mc, key, &d2, MDB_SET, &exact); - } - if ((flags & MDB_NOOVERWRITE) && rc == 0) { - DPRINTF(("duplicate key [%s]", DKEY(key))); - *data = d2; - return MDB_KEYEXIST; - } - if (rc && rc != MDB_NOTFOUND) - return rc; - } - - if (mc->mc_flags & C_DEL) - mc->mc_flags ^= C_DEL; - - /* Cursor is positioned, check for room in the dirty list */ - if (!nospill) { - if (flags & MDB_MULTIPLE) { - rdata = &xdata; - xdata.mv_size = data->mv_size * dcount; - } else { - rdata = data; - } - if ((rc2 = mdb_page_spill(mc, key, rdata))) - return rc2; - } - - if (rc == MDB_NO_ROOT) { - MDB_page *np; - /* new database, write a root leaf page */ - DPUTS("allocating new root leaf page"); - if ((rc2 = mdb_page_new(mc, P_LEAF, 1, &np))) { - return rc2; - } - mdb_cursor_push(mc, np); - mc->mc_db->md_root = np->mp_pgno; - mc->mc_db->md_depth++; - *mc->mc_dbflag |= DB_DIRTY; - if ((mc->mc_db->md_flags & (MDB_DUPSORT|MDB_DUPFIXED)) - == MDB_DUPFIXED) - np->mp_flags |= P_LEAF2; - mc->mc_flags |= C_INITIALIZED; - } else { - /* make sure all cursor pages are writable */ - rc2 = mdb_cursor_touch(mc); - if (rc2) - return rc2; - } - - insert_key = insert_data = rc; - if (insert_key) { - /* The key does not exist */ - DPRINTF(("inserting key at index %i", mc->mc_ki[mc->mc_top])); - if ((mc->mc_db->md_flags & MDB_DUPSORT) && - LEAFSIZE(key, data) > env->me_nodemax) - { - /* Too big for a node, insert in sub-DB. Set up an empty - * "old sub-page" for prep_subDB to expand to a full page. - */ - fp_flags = P_LEAF|P_DIRTY; - fp = env->me_pbuf; - fp->mp_pad = data->mv_size; /* used if MDB_DUPFIXED */ - fp->mp_lower = fp->mp_upper = (PAGEHDRSZ-PAGEBASE); - olddata.mv_size = PAGEHDRSZ; - goto prep_subDB; - } - } else { - /* there's only a key anyway, so this is a no-op */ - if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { - char *ptr; - unsigned int ksize = mc->mc_db->md_pad; - if (key->mv_size != ksize) - return MDB_BAD_VALSIZE; - ptr = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], ksize); - memcpy(ptr, key->mv_data, ksize); -fix_parent: - /* if overwriting slot 0 of leaf, need to - * update branch key if there is a parent page - */ - if (mc->mc_top && !mc->mc_ki[mc->mc_top]) { - unsigned short dtop = 1; - mc->mc_top--; - /* slot 0 is always an empty key, find real slot */ - while (mc->mc_top && !mc->mc_ki[mc->mc_top]) { - mc->mc_top--; - dtop++; - } - if (mc->mc_ki[mc->mc_top]) - rc2 = mdb_update_key(mc, key); - else - rc2 = MDB_SUCCESS; - mc->mc_top += dtop; - if (rc2) - return rc2; - } - return MDB_SUCCESS; - } - -more: - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - olddata.mv_size = NODEDSZ(leaf); - olddata.mv_data = NODEDATA(leaf); - - /* DB has dups? */ - if (F_ISSET(mc->mc_db->md_flags, MDB_DUPSORT)) { - /* Prepare (sub-)page/sub-DB to accept the new item, - * if needed. fp: old sub-page or a header faking - * it. 
mp: new (sub-)page. offset: growth in page - * size. xdata: node data with new page or DB. - */ - unsigned i, offset = 0; - mp = fp = xdata.mv_data = env->me_pbuf; - mp->mp_pgno = mc->mc_pg[mc->mc_top]->mp_pgno; - - /* Was a single item before, must convert now */ - if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { - MDB_cmp_func *dcmp; - /* Just overwrite the current item */ - if (flags == MDB_CURRENT) - goto current; - dcmp = mc->mc_dbx->md_dcmp; -#if UINT_MAX < SIZE_MAX || defined(MDB_VL32) - if (dcmp == mdb_cmp_int && olddata.mv_size == sizeof(mdb_size_t)) - dcmp = mdb_cmp_clong; -#endif - /* does data match? */ - if (!dcmp(data, &olddata)) { - if (flags & (MDB_NODUPDATA|MDB_APPENDDUP)) - return MDB_KEYEXIST; - /* overwrite it */ - goto current; - } - - /* Back up original data item */ - dkey.mv_size = olddata.mv_size; - dkey.mv_data = memcpy(fp+1, olddata.mv_data, olddata.mv_size); - - /* Make sub-page header for the dup items, with dummy body */ - fp->mp_flags = P_LEAF|P_DIRTY|P_SUBP; - fp->mp_lower = (PAGEHDRSZ-PAGEBASE); - xdata.mv_size = PAGEHDRSZ + dkey.mv_size + data->mv_size; - if (mc->mc_db->md_flags & MDB_DUPFIXED) { - fp->mp_flags |= P_LEAF2; - fp->mp_pad = data->mv_size; - xdata.mv_size += 2 * data->mv_size; /* leave space for 2 more */ - } else { - xdata.mv_size += 2 * (sizeof(indx_t) + NODESIZE) + - (dkey.mv_size & 1) + (data->mv_size & 1); - } - fp->mp_upper = xdata.mv_size - PAGEBASE; - olddata.mv_size = xdata.mv_size; /* pretend olddata is fp */ - } else if (leaf->mn_flags & F_SUBDATA) { - /* Data is on sub-DB, just store it */ - flags |= F_DUPDATA|F_SUBDATA; - goto put_sub; - } else { - /* Data is on sub-page */ - fp = olddata.mv_data; - switch (flags) { - default: - if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { - offset = EVEN(NODESIZE + sizeof(indx_t) + - data->mv_size); - break; - } - offset = fp->mp_pad; - if (SIZELEFT(fp) < offset) { - offset *= 4; /* space for 4 more */ - break; - } - /* FALLTHRU: Big enough MDB_DUPFIXED sub-page */ - case MDB_CURRENT: - fp->mp_flags |= P_DIRTY; - COPY_PGNO(fp->mp_pgno, mp->mp_pgno); - mc->mc_xcursor->mx_cursor.mc_pg[0] = fp; - flags |= F_DUPDATA; - goto put_sub; - } - xdata.mv_size = olddata.mv_size + offset; - } - - fp_flags = fp->mp_flags; - if (NODESIZE + NODEKSZ(leaf) + xdata.mv_size > env->me_nodemax) { - /* Too big for a sub-page, convert to sub-DB */ - fp_flags &= ~P_SUBP; -prep_subDB: - if (mc->mc_db->md_flags & MDB_DUPFIXED) { - fp_flags |= P_LEAF2; - dummy.md_pad = fp->mp_pad; - dummy.md_flags = MDB_DUPFIXED; - if (mc->mc_db->md_flags & MDB_INTEGERDUP) - dummy.md_flags |= MDB_INTEGERKEY; - } else { - dummy.md_pad = 0; - dummy.md_flags = 0; - } - dummy.md_depth = 1; - dummy.md_branch_pages = 0; - dummy.md_leaf_pages = 1; - dummy.md_overflow_pages = 0; - dummy.md_entries = NUMKEYS(fp); - xdata.mv_size = sizeof(MDB_db); - xdata.mv_data = &dummy; - if ((rc = mdb_page_alloc(mc, 1, &mp))) - return rc; - offset = env->me_psize - olddata.mv_size; - flags |= F_DUPDATA|F_SUBDATA; - dummy.md_root = mp->mp_pgno; - sub_root = mp; - } - if (mp != fp) { - mp->mp_flags = fp_flags | P_DIRTY; - mp->mp_pad = fp->mp_pad; - mp->mp_lower = fp->mp_lower; - mp->mp_upper = fp->mp_upper + offset; - if (fp_flags & P_LEAF2) { - memcpy(METADATA(mp), METADATA(fp), NUMKEYS(fp) * fp->mp_pad); - } else { - memcpy((char *)mp + mp->mp_upper + PAGEBASE, (char *)fp + fp->mp_upper + PAGEBASE, - olddata.mv_size - fp->mp_upper - PAGEBASE); - for (i=0; i<NUMKEYS(fp); i++) - mp->mp_ptrs[i] = fp->mp_ptrs[i] + offset; - } - } - - rdata = &xdata; - flags |= F_DUPDATA; - 
do_sub = 1; - if (!insert_key) - mdb_node_del(mc, 0); - goto new_sub; - } -current: - /* LMDB passes F_SUBDATA in 'flags' to write a DB record */ - if ((leaf->mn_flags ^ flags) & F_SUBDATA) - return MDB_INCOMPATIBLE; - /* overflow page overwrites need special handling */ - if (F_ISSET(leaf->mn_flags, F_BIGDATA)) { - MDB_page *omp; - pgno_t pg; - int level, ovpages, dpages = OVPAGES(data->mv_size, env->me_psize); - - memcpy(&pg, olddata.mv_data, sizeof(pg)); - if ((rc2 = mdb_page_get(mc, pg, &omp, &level)) != 0) - return rc2; - ovpages = omp->mp_pages; - - /* Is the ov page large enough? */ - if (ovpages >= dpages) { - if (!(omp->mp_flags & P_DIRTY) && - (level || (env->me_flags & MDB_WRITEMAP))) - { - rc = mdb_page_unspill(mc->mc_txn, omp, &omp); - if (rc) - return rc; - level = 0; /* dirty in this txn or clean */ - } - /* Is it dirty? */ - if (omp->mp_flags & P_DIRTY) { - /* yes, overwrite it. Note in this case we don't - * bother to try shrinking the page if the new data - * is smaller than the overflow threshold. - */ - if (level > 1) { - /* It is writable only in a parent txn */ - size_t sz = (size_t) env->me_psize * ovpages, off; - MDB_page *np = mdb_page_malloc(mc->mc_txn, ovpages); - MDB_ID2 id2; - if (!np) - return ENOMEM; - id2.mid = pg; - id2.mptr = np; - /* Note - this page is already counted in parent's dirty_room */ - rc2 = mdb_mid2l_insert(mc->mc_txn->mt_u.dirty_list, &id2); - mdb_cassert(mc, rc2 == 0); - /* Currently we make the page look as with put() in the - * parent txn, in case the user peeks at MDB_RESERVEd - * or unused parts. Some users treat ovpages specially. - */ - if (!(flags & MDB_RESERVE)) { - /* Skip the part where LMDB will put *data. - * Copy end of page, adjusting alignment so - * compiler may copy words instead of bytes. - */ - off = (PAGEHDRSZ + data->mv_size) & -sizeof(size_t); - memcpy((size_t *)((char *)np + off), - (size_t *)((char *)omp + off), sz - off); - sz = PAGEHDRSZ; - } - memcpy(np, omp, sz); /* Copy beginning of page */ - omp = np; - } - SETDSZ(leaf, data->mv_size); - if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = METADATA(omp); - else - memcpy(METADATA(omp), data->mv_data, data->mv_size); - return MDB_SUCCESS; - } - } - if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS) - return rc2; - } else if (data->mv_size == olddata.mv_size) { - /* same size, just replace it. Note that we could - * also reuse this node if the new data is smaller, - * but instead we opt to shrink the node in that case. - */ - if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = olddata.mv_data; - else if (!(mc->mc_flags & C_SUB)) - memcpy(olddata.mv_data, data->mv_data, data->mv_size); - else { - memcpy(NODEKEY(leaf), key->mv_data, key->mv_size); - goto fix_parent; - } - return MDB_SUCCESS; - } - mdb_node_del(mc, 0); - } - - rdata = data; - -new_sub: - nflags = flags & NODE_ADD_FLAGS; - nsize = IS_LEAF2(mc->mc_pg[mc->mc_top]) ? key->mv_size : mdb_leaf_size(env, key, rdata); - if (SIZELEFT(mc->mc_pg[mc->mc_top]) < nsize) { - if (( flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA ) - nflags &= ~MDB_APPEND; /* sub-page may need room to grow */ - if (!insert_key) - nflags |= MDB_SPLIT_REPLACE; - rc = mdb_page_split(mc, key, rdata, P_INVALID, nflags); - } else { - /* There is room already in this leaf page. 
*/ - rc = mdb_node_add(mc, mc->mc_ki[mc->mc_top], key, rdata, 0, nflags); - if (rc == 0) { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - unsigned i = mc->mc_top; - MDB_page *mp = mc->mc_pg[i]; - - for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (mc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == mc || m3->mc_snum < mc->mc_snum || m3->mc_pg[i] != mp) continue; - if (m3->mc_ki[i] >= mc->mc_ki[i] && insert_key) { - m3->mc_ki[i]++; - } - if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) { - MDB_node *n2 = NODEPTR(mp, m3->mc_ki[i]); - if ((n2->mn_flags & (F_SUBDATA|F_DUPDATA)) == F_DUPDATA) - m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(n2); - } - } - } - } - - if (rc == MDB_SUCCESS) { - /* Now store the actual data in the child DB. Note that we're - * storing the user data in the keys field, so there are strict - * size limits on dupdata. The actual data fields of the child - * DB are all zero size. - */ - if (do_sub) { - int xflags, new_dupdata; - mdb_size_t ecount; -put_sub: - xdata.mv_size = 0; - xdata.mv_data = ""; - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (flags & MDB_CURRENT) { - xflags = MDB_CURRENT|MDB_NOSPILL; - } else { - mdb_xcursor_init1(mc, leaf); - xflags = (flags & MDB_NODUPDATA) ? - MDB_NOOVERWRITE|MDB_NOSPILL : MDB_NOSPILL; - } - if (sub_root) - mc->mc_xcursor->mx_cursor.mc_pg[0] = sub_root; - new_dupdata = (int)dkey.mv_size; - /* converted, write the original data first */ - if (dkey.mv_size) { - rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, &dkey, &xdata, xflags); - if (rc) - goto bad_sub; - /* we've done our job */ - dkey.mv_size = 0; - } - if (!(leaf->mn_flags & F_SUBDATA) || sub_root) { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2; - MDB_xcursor *mx = mc->mc_xcursor; - unsigned i = mc->mc_top; - MDB_page *mp = mc->mc_pg[i]; - int nkeys = NUMKEYS(mp); - - for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { - if (m2 == mc || m2->mc_snum < mc->mc_snum) continue; - if (!(m2->mc_flags & C_INITIALIZED)) continue; - if (m2->mc_pg[i] == mp) { - if (m2->mc_ki[i] == mc->mc_ki[i]) { - mdb_xcursor_init2(m2, mx, new_dupdata); - } else if (!insert_key && m2->mc_ki[i] < nkeys) { - MDB_node *n2 = NODEPTR(mp, m2->mc_ki[i]); - if ((n2->mn_flags & (F_SUBDATA|F_DUPDATA)) == F_DUPDATA) - m2->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(n2); - } - } - } - } - ecount = mc->mc_xcursor->mx_db.md_entries; - if (flags & MDB_APPENDDUP) - xflags |= MDB_APPEND; - rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, data, &xdata, xflags); - if (flags & F_SUBDATA) { - void *db = NODEDATA(leaf); - memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db)); - } - insert_data = mc->mc_xcursor->mx_db.md_entries - ecount; - } - /* Increment count unless we just replaced an existing item. */ - if (insert_data) - mc->mc_db->md_entries++; - if (insert_key) { - /* Invalidate txn if we created an empty sub-DB */ - if (rc) - goto bad_sub; - /* If we succeeded and the key didn't exist before, - * make sure the cursor is marked valid. 
- */ - mc->mc_flags |= C_INITIALIZED; - } - if (flags & MDB_MULTIPLE) { - if (!rc) { - mcount++; - /* let caller know how many succeeded, if any */ - data[1].mv_size = mcount; - if (mcount < dcount) { - data[0].mv_data = (char *)data[0].mv_data + data[0].mv_size; - insert_key = insert_data = 0; - goto more; - } - } - } - return rc; -bad_sub: - if (rc == MDB_KEYEXIST) /* should not happen, we deleted that item */ - rc = MDB_CORRUPTED; - } - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -int -mdb_cursor_del(MDB_cursor *mc, unsigned int flags) -{ - MDB_node *leaf; - MDB_page *mp; - int rc; - - if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) - return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; - - if (!(mc->mc_flags & C_INITIALIZED)) - return EINVAL; - - if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top])) - return MDB_NOTFOUND; - - if (!(flags & MDB_NOSPILL) && (rc = mdb_page_spill(mc, NULL, NULL))) - return rc; - - rc = mdb_cursor_touch(mc); - if (rc) - return rc; - - mp = mc->mc_pg[mc->mc_top]; - if (IS_LEAF2(mp)) - goto del_key; - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - - if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { - if (flags & MDB_NODUPDATA) { - /* mdb_cursor_del0() will subtract the final entry */ - mc->mc_db->md_entries -= mc->mc_xcursor->mx_db.md_entries - 1; - mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED; - } else { - if (!F_ISSET(leaf->mn_flags, F_SUBDATA)) { - mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); - } - rc = mdb_cursor_del(&mc->mc_xcursor->mx_cursor, MDB_NOSPILL); - if (rc) - return rc; - /* If sub-DB still has entries, we're done */ - if (mc->mc_xcursor->mx_db.md_entries) { - if (leaf->mn_flags & F_SUBDATA) { - /* update subDB info */ - void *db = NODEDATA(leaf); - memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db)); - } else { - MDB_cursor *m2; - /* shrink fake page */ - mdb_node_shrink(mp, mc->mc_ki[mc->mc_top]); - leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); - mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); - /* fix other sub-DB cursors pointed at fake pages on this page */ - for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { - if (m2 == mc || m2->mc_snum < mc->mc_snum) continue; - if (!(m2->mc_flags & C_INITIALIZED)) continue; - if (m2->mc_pg[mc->mc_top] == mp) { - if (m2->mc_ki[mc->mc_top] == mc->mc_ki[mc->mc_top]) { - m2->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); - } else { - MDB_node *n2 = NODEPTR(mp, m2->mc_ki[mc->mc_top]); - if (!(n2->mn_flags & F_SUBDATA)) - m2->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(n2); - } - } - } - } - mc->mc_db->md_entries--; - return rc; - } else { - mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED; - } - /* otherwise fall thru and delete the sub-DB */ - } - - if (leaf->mn_flags & F_SUBDATA) { - /* add all the child DB's pages to the free list */ - rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0); - if (rc) - goto fail; - } - } - /* LMDB passes F_SUBDATA in 'flags' to delete a DB record */ - else if ((leaf->mn_flags ^ flags) & F_SUBDATA) { - rc = MDB_INCOMPATIBLE; - goto fail; - } - - /* add overflow pages to free list */ - if (F_ISSET(leaf->mn_flags, F_BIGDATA)) { - MDB_page *omp; - pgno_t pg; - - memcpy(&pg, NODEDATA(leaf), sizeof(pg)); - if ((rc = mdb_page_get(mc, pg, &omp, NULL)) || - (rc = mdb_ovpage_free(mc, omp))) - goto fail; - } - -del_key: - return mdb_cursor_del0(mc); - -fail: - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -/** Allocate and initialize new pages for a database. 
- * @param[in] mc a cursor on the database being added to. - * @param[in] flags flags defining what type of page is being allocated. - * @param[in] num the number of pages to allocate. This is usually 1, - * unless allocating overflow pages for a large record. - * @param[out] mp Address of a page, or NULL on failure. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp) -{ - MDB_page *np; - int rc; - - if ((rc = mdb_page_alloc(mc, num, &np))) - return rc; - DPRINTF(("allocated new mpage %"Y"u, page size %u", - np->mp_pgno, mc->mc_txn->mt_env->me_psize)); - np->mp_flags = flags | P_DIRTY; - np->mp_lower = (PAGEHDRSZ-PAGEBASE); - np->mp_upper = mc->mc_txn->mt_env->me_psize - PAGEBASE; - - if (IS_BRANCH(np)) - mc->mc_db->md_branch_pages++; - else if (IS_LEAF(np)) - mc->mc_db->md_leaf_pages++; - else if (IS_OVERFLOW(np)) { - mc->mc_db->md_overflow_pages += num; - np->mp_pages = num; - } - *mp = np; - - return 0; -} - -/** Calculate the size of a leaf node. - * The size depends on the environment's page size; if a data item - * is too large it will be put onto an overflow page and the node - * size will only include the key and not the data. Sizes are always - * rounded up to an even number of bytes, to guarantee 2-byte alignment - * of the #MDB_node headers. - * @param[in] env The environment handle. - * @param[in] key The key for the node. - * @param[in] data The data for the node. - * @return The number of bytes needed to store the node. - */ -static size_t -mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data) -{ - size_t sz; - - sz = LEAFSIZE(key, data); - if (sz > env->me_nodemax) { - /* put on overflow page */ - sz -= data->mv_size - sizeof(pgno_t); - } - - return EVEN(sz + sizeof(indx_t)); -} - -/** Calculate the size of a branch node. - * The size should depend on the environment's page size but since - * we currently don't support spilling large keys onto overflow - * pages, it's simply the size of the #MDB_node header plus the - * size of the key. Sizes are always rounded up to an even number - * of bytes, to guarantee 2-byte alignment of the #MDB_node headers. - * @param[in] env The environment handle. - * @param[in] key The key for the node. - * @return The number of bytes needed to store the node. - */ -static size_t -mdb_branch_size(MDB_env *env, MDB_val *key) -{ - size_t sz; - - sz = INDXSIZE(key); - if (sz > env->me_nodemax) { - /* put on overflow page */ - /* not implemented */ - /* sz -= key->size - sizeof(pgno_t); */ - } - - return sz + sizeof(indx_t); -} - -/** Add a node to the page pointed to by the cursor. - * @param[in] mc The cursor for this operation. - * @param[in] indx The index on the page where the new node should be added. - * @param[in] key The key for the new node. - * @param[in] data The data for the new node, if any. - * @param[in] pgno The page number, if adding a branch node. - * @param[in] flags Flags for the node. - * @return 0 on success, non-zero on failure. Possible errors are: - * <ul> - * <li>ENOMEM - failed to allocate overflow pages for the node. - * <li>MDB_PAGE_FULL - there is insufficient room in the page. This error - * should never happen since all callers already calculate the - * page's free space before calling this function. 
- * </ul> - */ -static int -mdb_node_add(MDB_cursor *mc, indx_t indx, - MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags) -{ - unsigned int i; - size_t node_size = NODESIZE; - ssize_t room; - indx_t ofs; - MDB_node *node; - MDB_page *mp = mc->mc_pg[mc->mc_top]; - MDB_page *ofp = NULL; /* overflow page */ - void *ndata; - DKBUF; - - mdb_cassert(mc, mp->mp_upper >= mp->mp_lower); - - DPRINTF(("add to %s %spage %"Y"u index %i, data size %"Z"u key size %"Z"u [%s]", - IS_LEAF(mp) ? "leaf" : "branch", - IS_SUBP(mp) ? "sub-" : "", - mdb_dbg_pgno(mp), indx, data ? data->mv_size : 0, - key ? key->mv_size : 0, key ? DKEY(key) : "null")); - - if (IS_LEAF2(mp)) { - /* Move higher keys up one slot. */ - int ksize = mc->mc_db->md_pad, dif; - char *ptr = LEAF2KEY(mp, indx, ksize); - dif = NUMKEYS(mp) - indx; - if (dif > 0) - memmove(ptr+ksize, ptr, dif*ksize); - /* insert new key */ - memcpy(ptr, key->mv_data, ksize); - - /* Just using these for counting */ - mp->mp_lower += sizeof(indx_t); - mp->mp_upper -= ksize - sizeof(indx_t); - return MDB_SUCCESS; - } - - room = (ssize_t)SIZELEFT(mp) - (ssize_t)sizeof(indx_t); - if (key != NULL) - node_size += key->mv_size; - if (IS_LEAF(mp)) { - mdb_cassert(mc, key && data); - if (F_ISSET(flags, F_BIGDATA)) { - /* Data already on overflow page. */ - node_size += sizeof(pgno_t); - } else if (node_size + data->mv_size > mc->mc_txn->mt_env->me_nodemax) { - int ovpages = OVPAGES(data->mv_size, mc->mc_txn->mt_env->me_psize); - int rc; - /* Put data on overflow page. */ - DPRINTF(("data size is %"Z"u, node would be %"Z"u, put data on overflow page", - data->mv_size, node_size+data->mv_size)); - node_size = EVEN(node_size + sizeof(pgno_t)); - if ((ssize_t)node_size > room) - goto full; - if ((rc = mdb_page_new(mc, P_OVERFLOW, ovpages, &ofp))) - return rc; - DPRINTF(("allocated overflow page %"Y"u", ofp->mp_pgno)); - flags |= F_BIGDATA; - goto update; - } else { - node_size += data->mv_size; - } - } - node_size = EVEN(node_size); - if ((ssize_t)node_size > room) - goto full; - -update: - /* Move higher pointers up one slot. */ - for (i = NUMKEYS(mp); i > indx; i--) - mp->mp_ptrs[i] = mp->mp_ptrs[i - 1]; - - /* Adjust free space offsets. */ - ofs = mp->mp_upper - node_size; - mdb_cassert(mc, ofs >= mp->mp_lower + sizeof(indx_t)); - mp->mp_ptrs[indx] = ofs; - mp->mp_upper = ofs; - mp->mp_lower += sizeof(indx_t); - - /* Write the node data. */ - node = NODEPTR(mp, indx); - node->mn_ksize = (key == NULL) ? 0 : key->mv_size; - node->mn_flags = flags; - if (IS_LEAF(mp)) - SETDSZ(node,data->mv_size); - else - SETPGNO(node,pgno); - - if (key) - memcpy(NODEKEY(node), key->mv_data, key->mv_size); - - if (IS_LEAF(mp)) { - ndata = NODEDATA(node); - if (ofp == NULL) { - if (F_ISSET(flags, F_BIGDATA)) - memcpy(ndata, data->mv_data, sizeof(pgno_t)); - else if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = ndata; - else - memcpy(ndata, data->mv_data, data->mv_size); - } else { - memcpy(ndata, &ofp->mp_pgno, sizeof(pgno_t)); - ndata = METADATA(ofp); - if (F_ISSET(flags, MDB_RESERVE)) - data->mv_data = ndata; - else - memcpy(ndata, data->mv_data, data->mv_size); - } - } - - return MDB_SUCCESS; - -full: - DPRINTF(("not enough room in page %"Y"u, got %u ptrs", - mdb_dbg_pgno(mp), NUMKEYS(mp))); - DPRINTF(("upper-lower = %u - %u = %"Z"d", mp->mp_upper,mp->mp_lower,room)); - DPRINTF(("node size = %"Z"u", node_size)); - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return MDB_PAGE_FULL; -} - -/** Delete the specified node from a page. 
- * @param[in] mc Cursor pointing to the node to delete. - * @param[in] ksize The size of a node. Only used if the page is - * part of a #MDB_DUPFIXED database. - */ -static void -mdb_node_del(MDB_cursor *mc, int ksize) -{ - MDB_page *mp = mc->mc_pg[mc->mc_top]; - indx_t indx = mc->mc_ki[mc->mc_top]; - unsigned int sz; - indx_t i, j, numkeys, ptr; - MDB_node *node; - char *base; - - DPRINTF(("delete node %u on %s page %"Y"u", indx, - IS_LEAF(mp) ? "leaf" : "branch", mdb_dbg_pgno(mp))); - numkeys = NUMKEYS(mp); - mdb_cassert(mc, indx < numkeys); - - if (IS_LEAF2(mp)) { - int x = numkeys - 1 - indx; - base = LEAF2KEY(mp, indx, ksize); - if (x) - memmove(base, base + ksize, x * ksize); - mp->mp_lower -= sizeof(indx_t); - mp->mp_upper += ksize - sizeof(indx_t); - return; - } - - node = NODEPTR(mp, indx); - sz = NODESIZE + node->mn_ksize; - if (IS_LEAF(mp)) { - if (F_ISSET(node->mn_flags, F_BIGDATA)) - sz += sizeof(pgno_t); - else - sz += NODEDSZ(node); - } - sz = EVEN(sz); - - ptr = mp->mp_ptrs[indx]; - for (i = j = 0; i < numkeys; i++) { - if (i != indx) { - mp->mp_ptrs[j] = mp->mp_ptrs[i]; - if (mp->mp_ptrs[i] < ptr) - mp->mp_ptrs[j] += sz; - j++; - } - } - - base = (char *)mp + mp->mp_upper + PAGEBASE; - memmove(base + sz, base, ptr - mp->mp_upper); - - mp->mp_lower -= sizeof(indx_t); - mp->mp_upper += sz; -} - -/** Compact the main page after deleting a node on a subpage. - * @param[in] mp The main page to operate on. - * @param[in] indx The index of the subpage on the main page. - */ -static void -mdb_node_shrink(MDB_page *mp, indx_t indx) -{ - MDB_node *node; - MDB_page *sp, *xp; - char *base; - indx_t delta, nsize, len, ptr; - int i; - - node = NODEPTR(mp, indx); - sp = (MDB_page *)NODEDATA(node); - delta = SIZELEFT(sp); - nsize = NODEDSZ(node) - delta; - - /* Prepare to shift upward, set len = length(subpage part to shift) */ - if (IS_LEAF2(sp)) { - len = nsize; - if (nsize & 1) - return; /* do not make the node uneven-sized */ - } else { - xp = (MDB_page *)((char *)sp + delta); /* destination subpage */ - for (i = NUMKEYS(sp); --i >= 0; ) - xp->mp_ptrs[i] = sp->mp_ptrs[i] - delta; - len = PAGEHDRSZ; - } - sp->mp_upper = sp->mp_lower; - COPY_PGNO(sp->mp_pgno, mp->mp_pgno); - SETDSZ(node, nsize); - - /* Shift <lower nodes...initial part of subpage> upward */ - base = (char *)mp + mp->mp_upper + PAGEBASE; - memmove(base + delta, base, (char *)sp + len - base); - - ptr = mp->mp_ptrs[indx]; - for (i = NUMKEYS(mp); --i >= 0; ) { - if (mp->mp_ptrs[i] <= ptr) - mp->mp_ptrs[i] += delta; - } - mp->mp_upper += delta; -} - -/** Initial setup of a sorted-dups cursor. - * Sorted duplicates are implemented as a sub-database for the given key. - * The duplicate data items are actually keys of the sub-database. - * Operations on the duplicate data items are performed using a sub-cursor - * initialized when the sub-database is first accessed. This function does - * the preliminary setup of the sub-cursor, filling in the fields that - * depend only on the parent DB. - * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized. 
- */ -static void -mdb_xcursor_init0(MDB_cursor *mc) -{ - MDB_xcursor *mx = mc->mc_xcursor; - - mx->mx_cursor.mc_xcursor = NULL; - mx->mx_cursor.mc_txn = mc->mc_txn; - mx->mx_cursor.mc_db = &mx->mx_db; - mx->mx_cursor.mc_dbx = &mx->mx_dbx; - mx->mx_cursor.mc_dbi = mc->mc_dbi; - mx->mx_cursor.mc_dbflag = &mx->mx_dbflag; - mx->mx_cursor.mc_snum = 0; - mx->mx_cursor.mc_top = 0; -#ifdef MDB_VL32 - mx->mx_cursor.mc_ovpg = 0; -#endif - mx->mx_cursor.mc_flags = C_SUB | (mc->mc_flags & (C_ORIG_RDONLY|C_WRITEMAP)); - mx->mx_dbx.md_name.mv_size = 0; - mx->mx_dbx.md_name.mv_data = NULL; - mx->mx_dbx.md_cmp = mc->mc_dbx->md_dcmp; - mx->mx_dbx.md_dcmp = NULL; - mx->mx_dbx.md_rel = mc->mc_dbx->md_rel; -} - -/** Final setup of a sorted-dups cursor. - * Sets up the fields that depend on the data from the main cursor. - * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized. - * @param[in] node The data containing the #MDB_db record for the - * sorted-dup database. - */ -static void -mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node) -{ - MDB_xcursor *mx = mc->mc_xcursor; - - mx->mx_cursor.mc_flags &= C_SUB|C_ORIG_RDONLY|C_WRITEMAP; - if (node->mn_flags & F_SUBDATA) { - memcpy(&mx->mx_db, NODEDATA(node), sizeof(MDB_db)); - mx->mx_cursor.mc_pg[0] = 0; - mx->mx_cursor.mc_snum = 0; - mx->mx_cursor.mc_top = 0; - } else { - MDB_page *fp = NODEDATA(node); - mx->mx_db.md_pad = 0; - mx->mx_db.md_flags = 0; - mx->mx_db.md_depth = 1; - mx->mx_db.md_branch_pages = 0; - mx->mx_db.md_leaf_pages = 1; - mx->mx_db.md_overflow_pages = 0; - mx->mx_db.md_entries = NUMKEYS(fp); - COPY_PGNO(mx->mx_db.md_root, fp->mp_pgno); - mx->mx_cursor.mc_snum = 1; - mx->mx_cursor.mc_top = 0; - mx->mx_cursor.mc_flags |= C_INITIALIZED; - mx->mx_cursor.mc_pg[0] = fp; - mx->mx_cursor.mc_ki[0] = 0; - if (mc->mc_db->md_flags & MDB_DUPFIXED) { - mx->mx_db.md_flags = MDB_DUPFIXED; - mx->mx_db.md_pad = fp->mp_pad; - if (mc->mc_db->md_flags & MDB_INTEGERDUP) - mx->mx_db.md_flags |= MDB_INTEGERKEY; - } - } - DPRINTF(("Sub-db -%u root page %"Y"u", mx->mx_cursor.mc_dbi, - mx->mx_db.md_root)); - mx->mx_dbflag = DB_VALID|DB_USRVALID|DB_DIRTY; /* DB_DIRTY guides mdb_cursor_touch */ -#if UINT_MAX < SIZE_MAX || defined(MDB_VL32) - if (mx->mx_dbx.md_cmp == mdb_cmp_int && mx->mx_db.md_pad == sizeof(mdb_size_t)) - mx->mx_dbx.md_cmp = mdb_cmp_clong; -#endif -} - - -/** Fixup a sorted-dups cursor due to underlying update. - * Sets up some fields that depend on the data from the main cursor. - * Almost the same as init1, but skips initialization steps if the - * xcursor had already been used. - * @param[in] mc The main cursor whose sorted-dups cursor is to be fixed up. - * @param[in] src_mx The xcursor of an up-to-date cursor. - * @param[in] new_dupdata True if converting from a non-#F_DUPDATA item. 
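The xcursor setup above is the machinery behind MDB_DUPSORT: all duplicates of one key live in a nested sub-database whose keys are the duplicate values, and the sub-cursor is (re)initialised whenever that key is touched. From the API the nesting is invisible; a short sketch, assuming an open write transaction txn (the database name and values are illustrative, error handling trimmed):

    MDB_dbi dbi;
    MDB_cursor *cur;
    MDB_val key = { 4, "tags" }, v1 = { 3, "red" }, v2 = { 4, "blue" };
    mdb_size_t ndups;

    mdb_dbi_open(txn, "dup-example", MDB_DUPSORT | MDB_CREATE, &dbi);
    mdb_put(txn, dbi, &key, &v1, 0);
    mdb_put(txn, dbi, &key, &v2, 0);            /* second duplicate, same key */

    mdb_cursor_open(txn, dbi, &cur);
    mdb_cursor_get(cur, &key, &v1, MDB_SET);    /* position on "tags" */
    mdb_cursor_count(cur, &ndups);              /* 2: entries of the sub-database */
    mdb_cursor_close(cur);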
- */ -static void -mdb_xcursor_init2(MDB_cursor *mc, MDB_xcursor *src_mx, int new_dupdata) -{ - MDB_xcursor *mx = mc->mc_xcursor; - - if (new_dupdata) { - mx->mx_cursor.mc_snum = 1; - mx->mx_cursor.mc_top = 0; - mx->mx_cursor.mc_flags |= C_INITIALIZED; - mx->mx_cursor.mc_ki[0] = 0; - mx->mx_dbflag = DB_VALID|DB_USRVALID|DB_DIRTY; /* DB_DIRTY guides mdb_cursor_touch */ -#if UINT_MAX < SIZE_MAX - mx->mx_dbx.md_cmp = src_mx->mx_dbx.md_cmp; -#endif - } else if (!(mx->mx_cursor.mc_flags & C_INITIALIZED)) { - return; - } - mx->mx_db = src_mx->mx_db; - mx->mx_cursor.mc_pg[0] = src_mx->mx_cursor.mc_pg[0]; - DPRINTF(("Sub-db -%u root page %"Y"u", mx->mx_cursor.mc_dbi, - mx->mx_db.md_root)); -} - -/** Initialize a cursor for a given transaction and database. */ -static void -mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx) -{ - mc->mc_next = NULL; - mc->mc_backup = NULL; - mc->mc_dbi = dbi; - mc->mc_txn = txn; - mc->mc_db = &txn->mt_dbs[dbi]; - mc->mc_dbx = &txn->mt_dbxs[dbi]; - mc->mc_dbflag = &txn->mt_dbflags[dbi]; - mc->mc_snum = 0; - mc->mc_top = 0; - mc->mc_pg[0] = 0; - mc->mc_ki[0] = 0; -#ifdef MDB_VL32 - mc->mc_ovpg = 0; -#endif - mc->mc_flags = txn->mt_flags & (C_ORIG_RDONLY|C_WRITEMAP); - if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT) { - mdb_tassert(txn, mx != NULL); - mc->mc_xcursor = mx; - mdb_xcursor_init0(mc); - } else { - mc->mc_xcursor = NULL; - } - if (*mc->mc_dbflag & DB_STALE) { - mdb_page_search(mc, NULL, MDB_PS_ROOTONLY); - } -} - -int -mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **ret) -{ - MDB_cursor *mc; - size_t size = sizeof(MDB_cursor); - - if (!ret || !TXN_DBI_EXIST(txn, dbi, DB_VALID)) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - if (dbi == FREE_DBI && !F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) - return EINVAL; - - if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT) - size += sizeof(MDB_xcursor); - - if ((mc = malloc(size)) != NULL) { - mdb_cursor_init(mc, txn, dbi, (MDB_xcursor *)(mc + 1)); - if (txn->mt_cursors) { - mc->mc_next = txn->mt_cursors[dbi]; - txn->mt_cursors[dbi] = mc; - mc->mc_flags |= C_UNTRACK; - } - } else { - return ENOMEM; - } - - *ret = mc; - - return MDB_SUCCESS; -} - -int -mdb_cursor_renew(MDB_txn *txn, MDB_cursor *mc) -{ - if (!mc || !TXN_DBI_EXIST(txn, mc->mc_dbi, DB_VALID)) - return EINVAL; - - if ((mc->mc_flags & C_UNTRACK) || txn->mt_cursors) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - mdb_cursor_init(mc, txn, mc->mc_dbi, mc->mc_xcursor); - return MDB_SUCCESS; -} - -/* Return the count of duplicate data items for the current key */ -int -mdb_cursor_count(MDB_cursor *mc, mdb_size_t *countp) -{ - MDB_node *leaf; - - if (mc == NULL || countp == NULL) - return EINVAL; - - if (mc->mc_xcursor == NULL) - return MDB_INCOMPATIBLE; - - if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - if (!(mc->mc_flags & C_INITIALIZED)) - return EINVAL; - - if (!mc->mc_snum || (mc->mc_flags & C_EOF)) - return MDB_NOTFOUND; - - leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { - *countp = 1; - } else { - if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) - return EINVAL; - - *countp = mc->mc_xcursor->mx_db.md_entries; - } - return MDB_SUCCESS; -} - -void -mdb_cursor_close(MDB_cursor *mc) -{ - if (mc && !mc->mc_backup) { - /* remove from txn, if tracked */ - if ((mc->mc_flags & C_UNTRACK) && mc->mc_txn->mt_cursors) { - MDB_cursor **prev = &mc->mc_txn->mt_cursors[mc->mc_dbi]; - while 
(*prev && *prev != mc) prev = &(*prev)->mc_next; - if (*prev == mc) - *prev = mc->mc_next; - } - free(mc); - } -} - -MDB_txn * -mdb_cursor_txn(MDB_cursor *mc) -{ - if (!mc) return NULL; - return mc->mc_txn; -} - -MDB_dbi -mdb_cursor_dbi(MDB_cursor *mc) -{ - return mc->mc_dbi; -} - -/** Replace the key for a branch node with a new key. - * @param[in] mc Cursor pointing to the node to operate on. - * @param[in] key The new key to use. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_update_key(MDB_cursor *mc, MDB_val *key) -{ - MDB_page *mp; - MDB_node *node; - char *base; - size_t len; - int delta, ksize, oksize; - indx_t ptr, i, numkeys, indx; - DKBUF; - - indx = mc->mc_ki[mc->mc_top]; - mp = mc->mc_pg[mc->mc_top]; - node = NODEPTR(mp, indx); - ptr = mp->mp_ptrs[indx]; -#if MDB_DEBUG - { - MDB_val k2; - char kbuf2[DKBUF_MAXKEYSIZE*2+1]; - k2.mv_data = NODEKEY(node); - k2.mv_size = node->mn_ksize; - DPRINTF(("update key %u (ofs %u) [%s] to [%s] on page %"Y"u", - indx, ptr, - mdb_dkey(&k2, kbuf2), - DKEY(key), - mp->mp_pgno)); - } -#endif - - /* Sizes must be 2-byte aligned. */ - ksize = EVEN(key->mv_size); - oksize = EVEN(node->mn_ksize); - delta = ksize - oksize; - - /* Shift node contents if EVEN(key length) changed. */ - if (delta) { - if (delta > 0 && SIZELEFT(mp) < delta) { - pgno_t pgno; - /* not enough space left, do a delete and split */ - DPRINTF(("Not enough room, delta = %d, splitting...", delta)); - pgno = NODEPGNO(node); - mdb_node_del(mc, 0); - return mdb_page_split(mc, key, NULL, pgno, MDB_SPLIT_REPLACE); - } - - numkeys = NUMKEYS(mp); - for (i = 0; i < numkeys; i++) { - if (mp->mp_ptrs[i] <= ptr) - mp->mp_ptrs[i] -= delta; - } - - base = (char *)mp + mp->mp_upper + PAGEBASE; - len = ptr - mp->mp_upper + NODESIZE; - memmove(base - delta, base, len); - mp->mp_upper -= delta; - - node = NODEPTR(mp, indx); - } - - /* But even if no shift was needed, update ksize */ - if (node->mn_ksize != key->mv_size) - node->mn_ksize = key->mv_size; - - if (key->mv_size) - memcpy(NODEKEY(node), key->mv_data, key->mv_size); - - return MDB_SUCCESS; -} - -static void -mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst); - -/** Perform \b act while tracking temporary cursor \b mn */ -#define WITH_CURSOR_TRACKING(mn, act) do { \ - MDB_cursor dummy, *tracked, **tp = &(mn).mc_txn->mt_cursors[mn.mc_dbi]; \ - if ((mn).mc_flags & C_SUB) { \ - dummy.mc_flags = C_INITIALIZED; \ - dummy.mc_xcursor = (MDB_xcursor *)&(mn); \ - tracked = &dummy; \ - } else { \ - tracked = &(mn); \ - } \ - tracked->mc_next = *tp; \ - *tp = tracked; \ - { act; } \ - *tp = tracked->mc_next; \ -} while (0) - -/** Move a node from csrc to cdst. - */ -static int -mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst, int fromleft) -{ - MDB_node *srcnode; - MDB_val key, data; - pgno_t srcpg; - MDB_cursor mn; - int rc; - unsigned short flags; - - DKBUF; - - /* Mark src and dst as dirty. 
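mdb_cursor_open, mdb_cursor_renew and mdb_cursor_close above are the whole public cursor lifecycle: cursors created inside a write transaction are linked into mt_cursors so the tree code can adjust them, while cursors from read-only transactions may outlive mdb_txn_reset and be rebound with mdb_cursor_renew. A usage sketch, assuming an open environment env and database handle dbi:

    MDB_txn *rtxn;
    MDB_cursor *cur;
    MDB_val k, v;

    mdb_txn_begin(env, NULL, MDB_RDONLY, &rtxn);
    mdb_cursor_open(rtxn, dbi, &cur);
    for (int rc = mdb_cursor_get(cur, &k, &v, MDB_FIRST); rc == MDB_SUCCESS;
         rc = mdb_cursor_get(cur, &k, &v, MDB_NEXT))
        ;                                   /* walk every key/value pair */

    mdb_txn_reset(rtxn);                    /* release the snapshot, keep handles */
    /* ... later ... */
    mdb_txn_renew(rtxn);
    mdb_cursor_renew(rtxn, cur);            /* rebind cursor to the renewed txn */

    mdb_cursor_close(cur);
    mdb_txn_abort(rtxn);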
*/ - if ((rc = mdb_page_touch(csrc)) || - (rc = mdb_page_touch(cdst))) - return rc; - - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_size = csrc->mc_db->md_pad; - key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top], key.mv_size); - data.mv_size = 0; - data.mv_data = NULL; - srcpg = 0; - flags = 0; - } else { - srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top]); - mdb_cassert(csrc, !((size_t)srcnode & 1)); - srcpg = NODEPGNO(srcnode); - flags = srcnode->mn_flags; - if (csrc->mc_ki[csrc->mc_top] == 0 && IS_BRANCH(csrc->mc_pg[csrc->mc_top])) { - unsigned int snum = csrc->mc_snum; - MDB_node *s2; - /* must find the lowest key below src */ - rc = mdb_page_search_lowest(csrc); - if (rc) - return rc; - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_size = csrc->mc_db->md_pad; - key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size); - } else { - s2 = NODEPTR(csrc->mc_pg[csrc->mc_top], 0); - key.mv_size = NODEKSZ(s2); - key.mv_data = NODEKEY(s2); - } - csrc->mc_snum = snum--; - csrc->mc_top = snum; - } else { - key.mv_size = NODEKSZ(srcnode); - key.mv_data = NODEKEY(srcnode); - } - data.mv_size = NODEDSZ(srcnode); - data.mv_data = NODEDATA(srcnode); - } - mn.mc_xcursor = NULL; - if (IS_BRANCH(cdst->mc_pg[cdst->mc_top]) && cdst->mc_ki[cdst->mc_top] == 0) { - unsigned int snum = cdst->mc_snum; - MDB_node *s2; - MDB_val bkey; - /* must find the lowest key below dst */ - mdb_cursor_copy(cdst, &mn); - rc = mdb_page_search_lowest(&mn); - if (rc) - return rc; - if (IS_LEAF2(mn.mc_pg[mn.mc_top])) { - bkey.mv_size = mn.mc_db->md_pad; - bkey.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, bkey.mv_size); - } else { - s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0); - bkey.mv_size = NODEKSZ(s2); - bkey.mv_data = NODEKEY(s2); - } - mn.mc_snum = snum--; - mn.mc_top = snum; - mn.mc_ki[snum] = 0; - rc = mdb_update_key(&mn, &bkey); - if (rc) - return rc; - } - - DPRINTF(("moving %s node %u [%s] on page %"Y"u to node %u on page %"Y"u", - IS_LEAF(csrc->mc_pg[csrc->mc_top]) ? "leaf" : "branch", - csrc->mc_ki[csrc->mc_top], - DKEY(&key), - csrc->mc_pg[csrc->mc_top]->mp_pgno, - cdst->mc_ki[cdst->mc_top], cdst->mc_pg[cdst->mc_top]->mp_pgno)); - - /* Add the node to the destination page. - */ - rc = mdb_node_add(cdst, cdst->mc_ki[cdst->mc_top], &key, &data, srcpg, flags); - if (rc != MDB_SUCCESS) - return rc; - - /* Delete the node from the source page. 
- */ - mdb_node_del(csrc, key.mv_size); - - { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = csrc->mc_dbi; - MDB_page *mpd, *mps; - - mps = csrc->mc_pg[csrc->mc_top]; - /* If we're adding on the left, bump others up */ - if (fromleft) { - mpd = cdst->mc_pg[csrc->mc_top]; - for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (csrc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top) - continue; - if (m3 != cdst && - m3->mc_pg[csrc->mc_top] == mpd && - m3->mc_ki[csrc->mc_top] >= cdst->mc_ki[csrc->mc_top]) { - m3->mc_ki[csrc->mc_top]++; - } - if (m3 !=csrc && - m3->mc_pg[csrc->mc_top] == mps && - m3->mc_ki[csrc->mc_top] == csrc->mc_ki[csrc->mc_top]) { - m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top]; - m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top]; - m3->mc_ki[csrc->mc_top-1]++; - } - if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) && - IS_LEAF(mps)) { - MDB_node *node = NODEPTR(m3->mc_pg[csrc->mc_top], m3->mc_ki[csrc->mc_top]); - if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) - m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node); - } - } - } else - /* Adding on the right, bump others down */ - { - for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (csrc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == csrc) continue; - if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top) - continue; - if (m3->mc_pg[csrc->mc_top] == mps) { - if (!m3->mc_ki[csrc->mc_top]) { - m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top]; - m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top]; - m3->mc_ki[csrc->mc_top-1]--; - } else { - m3->mc_ki[csrc->mc_top]--; - } - if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) && - IS_LEAF(mps)) { - MDB_node *node = NODEPTR(m3->mc_pg[csrc->mc_top], m3->mc_ki[csrc->mc_top]); - if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) - m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node); - } - } - } - } - } - - /* Update the parent separators. 
- */ - if (csrc->mc_ki[csrc->mc_top] == 0) { - if (csrc->mc_ki[csrc->mc_top-1] != 0) { - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size); - } else { - srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], 0); - key.mv_size = NODEKSZ(srcnode); - key.mv_data = NODEKEY(srcnode); - } - DPRINTF(("update separator for source page %"Y"u to [%s]", - csrc->mc_pg[csrc->mc_top]->mp_pgno, DKEY(&key))); - mdb_cursor_copy(csrc, &mn); - mn.mc_snum--; - mn.mc_top--; - /* We want mdb_rebalance to find mn when doing fixups */ - WITH_CURSOR_TRACKING(mn, - rc = mdb_update_key(&mn, &key)); - if (rc) - return rc; - } - if (IS_BRANCH(csrc->mc_pg[csrc->mc_top])) { - MDB_val nullkey; - indx_t ix = csrc->mc_ki[csrc->mc_top]; - nullkey.mv_size = 0; - csrc->mc_ki[csrc->mc_top] = 0; - rc = mdb_update_key(csrc, &nullkey); - csrc->mc_ki[csrc->mc_top] = ix; - mdb_cassert(csrc, rc == MDB_SUCCESS); - } - } - - if (cdst->mc_ki[cdst->mc_top] == 0) { - if (cdst->mc_ki[cdst->mc_top-1] != 0) { - if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { - key.mv_data = LEAF2KEY(cdst->mc_pg[cdst->mc_top], 0, key.mv_size); - } else { - srcnode = NODEPTR(cdst->mc_pg[cdst->mc_top], 0); - key.mv_size = NODEKSZ(srcnode); - key.mv_data = NODEKEY(srcnode); - } - DPRINTF(("update separator for destination page %"Y"u to [%s]", - cdst->mc_pg[cdst->mc_top]->mp_pgno, DKEY(&key))); - mdb_cursor_copy(cdst, &mn); - mn.mc_snum--; - mn.mc_top--; - /* We want mdb_rebalance to find mn when doing fixups */ - WITH_CURSOR_TRACKING(mn, - rc = mdb_update_key(&mn, &key)); - if (rc) - return rc; - } - if (IS_BRANCH(cdst->mc_pg[cdst->mc_top])) { - MDB_val nullkey; - indx_t ix = cdst->mc_ki[cdst->mc_top]; - nullkey.mv_size = 0; - cdst->mc_ki[cdst->mc_top] = 0; - rc = mdb_update_key(cdst, &nullkey); - cdst->mc_ki[cdst->mc_top] = ix; - mdb_cassert(cdst, rc == MDB_SUCCESS); - } - } - - return MDB_SUCCESS; -} - -/** Merge one page into another. - * The nodes from the page pointed to by \b csrc will - * be copied to the page pointed to by \b cdst and then - * the \b csrc page will be freed. - * @param[in] csrc Cursor pointing to the source page. - * @param[in] cdst Cursor pointing to the destination page. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst) -{ - MDB_page *psrc, *pdst; - MDB_node *srcnode; - MDB_val key, data; - unsigned nkeys; - int rc; - indx_t i, j; - - psrc = csrc->mc_pg[csrc->mc_top]; - pdst = cdst->mc_pg[cdst->mc_top]; - - DPRINTF(("merging page %"Y"u into %"Y"u", psrc->mp_pgno, pdst->mp_pgno)); - - mdb_cassert(csrc, csrc->mc_snum > 1); /* can't merge root page */ - mdb_cassert(csrc, cdst->mc_snum > 1); - - /* Mark dst as dirty. */ - if ((rc = mdb_page_touch(cdst))) - return rc; - - /* get dst page again now that we've touched it. */ - pdst = cdst->mc_pg[cdst->mc_top]; - - /* Move all nodes from src to dst. 
- */ - j = nkeys = NUMKEYS(pdst); - if (IS_LEAF2(psrc)) { - key.mv_size = csrc->mc_db->md_pad; - key.mv_data = METADATA(psrc); - for (i = 0; i < NUMKEYS(psrc); i++, j++) { - rc = mdb_node_add(cdst, j, &key, NULL, 0, 0); - if (rc != MDB_SUCCESS) - return rc; - key.mv_data = (char *)key.mv_data + key.mv_size; - } - } else { - for (i = 0; i < NUMKEYS(psrc); i++, j++) { - srcnode = NODEPTR(psrc, i); - if (i == 0 && IS_BRANCH(psrc)) { - MDB_cursor mn; - MDB_node *s2; - mdb_cursor_copy(csrc, &mn); - mn.mc_xcursor = NULL; - /* must find the lowest key below src */ - rc = mdb_page_search_lowest(&mn); - if (rc) - return rc; - if (IS_LEAF2(mn.mc_pg[mn.mc_top])) { - key.mv_size = mn.mc_db->md_pad; - key.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, key.mv_size); - } else { - s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0); - key.mv_size = NODEKSZ(s2); - key.mv_data = NODEKEY(s2); - } - } else { - key.mv_size = srcnode->mn_ksize; - key.mv_data = NODEKEY(srcnode); - } - - data.mv_size = NODEDSZ(srcnode); - data.mv_data = NODEDATA(srcnode); - rc = mdb_node_add(cdst, j, &key, &data, NODEPGNO(srcnode), srcnode->mn_flags); - if (rc != MDB_SUCCESS) - return rc; - } - } - - DPRINTF(("dst page %"Y"u now has %u keys (%.1f%% filled)", - pdst->mp_pgno, NUMKEYS(pdst), - (float)PAGEFILL(cdst->mc_txn->mt_env, pdst) / 10)); - - /* Unlink the src page from parent and add to free list. - */ - csrc->mc_top--; - mdb_node_del(csrc, 0); - if (csrc->mc_ki[csrc->mc_top] == 0) { - key.mv_size = 0; - rc = mdb_update_key(csrc, &key); - if (rc) { - csrc->mc_top++; - return rc; - } - } - csrc->mc_top++; - - psrc = csrc->mc_pg[csrc->mc_top]; - /* If not operating on FreeDB, allow this page to be reused - * in this txn. Otherwise just add to free list. - */ - rc = mdb_page_loose(csrc, psrc); - if (rc) - return rc; - if (IS_LEAF(psrc)) - csrc->mc_db->md_leaf_pages--; - else - csrc->mc_db->md_branch_pages--; - { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = csrc->mc_dbi; - unsigned int top = csrc->mc_top; - - for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (csrc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == csrc) continue; - if (m3->mc_snum < csrc->mc_snum) continue; - if (m3->mc_pg[top] == psrc) { - m3->mc_pg[top] = pdst; - m3->mc_ki[top] += nkeys; - m3->mc_ki[top-1] = cdst->mc_ki[top-1]; - } else if (m3->mc_pg[top-1] == csrc->mc_pg[top-1] && - m3->mc_ki[top-1] > csrc->mc_ki[top-1]) { - m3->mc_ki[top-1]--; - } - if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) && - IS_LEAF(psrc)) { - MDB_node *node = NODEPTR(m3->mc_pg[top], m3->mc_ki[top]); - if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) - m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node); - } - } - } - { - unsigned int snum = cdst->mc_snum; - uint16_t depth = cdst->mc_db->md_depth; - mdb_cursor_pop(cdst); - rc = mdb_rebalance(cdst); - /* Did the tree height change? */ - if (depth != cdst->mc_db->md_depth) - snum += cdst->mc_db->md_depth - depth; - cdst->mc_snum = snum; - cdst->mc_top = snum-1; - } - return rc; -} - -/** Copy the contents of a cursor. - * @param[in] csrc The cursor to copy from. - * @param[out] cdst The cursor to copy to. 
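mdb_page_merge above is what actually shrinks the tree: once a page drains below the fill threshold its nodes are folded into a sibling, the emptied page is released, and md_leaf_pages/md_branch_pages are decremented. That effect is visible to callers through mdb_stat; a rough sketch, assuming a write transaction on a database keyed by unsigned int (counts and handle names are illustrative):

    MDB_stat before, after;
    MDB_val key;
    unsigned int id;

    mdb_stat(txn, dbi, &before);
    for (id = 0; id < 100000; id++) {       /* bulk delete */
        key.mv_size = sizeof(id);
        key.mv_data = &id;
        mdb_del(txn, dbi, &key, NULL);
    }
    mdb_stat(txn, dbi, &after);
    /* after.ms_leaf_pages should end up well below before.ms_leaf_pages,
     * because pages that fell under FILL_THRESHOLD were merged by the code above. */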
- */ -static void -mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst) -{ - unsigned int i; - - cdst->mc_txn = csrc->mc_txn; - cdst->mc_dbi = csrc->mc_dbi; - cdst->mc_db = csrc->mc_db; - cdst->mc_dbx = csrc->mc_dbx; - cdst->mc_snum = csrc->mc_snum; - cdst->mc_top = csrc->mc_top; - cdst->mc_flags = csrc->mc_flags; -#ifdef MDB_VL32 - cdst->mc_ovpg = csrc->mc_ovpg; -#endif - - for (i=0; i<csrc->mc_snum; i++) { - cdst->mc_pg[i] = csrc->mc_pg[i]; - cdst->mc_ki[i] = csrc->mc_ki[i]; - } -} - -/** Rebalance the tree after a delete operation. - * @param[in] mc Cursor pointing to the page where rebalancing - * should begin. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_rebalance(MDB_cursor *mc) -{ - MDB_node *node; - int rc, fromleft; - unsigned int ptop, minkeys, thresh; - MDB_cursor mn; - indx_t oldki; - - if (IS_BRANCH(mc->mc_pg[mc->mc_top])) { - minkeys = 2; - thresh = 1; - } else { - minkeys = 1; - thresh = FILL_THRESHOLD; - } - DPRINTF(("rebalancing %s page %"Y"u (has %u keys, %.1f%% full)", - IS_LEAF(mc->mc_pg[mc->mc_top]) ? "leaf" : "branch", - mdb_dbg_pgno(mc->mc_pg[mc->mc_top]), NUMKEYS(mc->mc_pg[mc->mc_top]), - (float)PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) / 10)); - - if (PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) >= thresh && - NUMKEYS(mc->mc_pg[mc->mc_top]) >= minkeys) { - DPRINTF(("no need to rebalance page %"Y"u, above fill threshold", - mdb_dbg_pgno(mc->mc_pg[mc->mc_top]))); - return MDB_SUCCESS; - } - - if (mc->mc_snum < 2) { - MDB_page *mp = mc->mc_pg[0]; - if (IS_SUBP(mp)) { - DPUTS("Can't rebalance a subpage, ignoring"); - return MDB_SUCCESS; - } - if (NUMKEYS(mp) == 0) { - DPUTS("tree is completely empty"); - mc->mc_db->md_root = P_INVALID; - mc->mc_db->md_depth = 0; - mc->mc_db->md_leaf_pages = 0; - rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno); - if (rc) - return rc; - /* Adjust cursors pointing to mp */ - mc->mc_snum = 0; - mc->mc_top = 0; - mc->mc_flags &= ~C_INITIALIZED; - { - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - - for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (mc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (!(m3->mc_flags & C_INITIALIZED) || (m3->mc_snum < mc->mc_snum)) - continue; - if (m3->mc_pg[0] == mp) { - m3->mc_snum = 0; - m3->mc_top = 0; - m3->mc_flags &= ~C_INITIALIZED; - } - } - } - } else if (IS_BRANCH(mp) && NUMKEYS(mp) == 1) { - int i; - DPUTS("collapsing root page!"); - rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno); - if (rc) - return rc; - mc->mc_db->md_root = NODEPGNO(NODEPTR(mp, 0)); - rc = mdb_page_get(mc, mc->mc_db->md_root, &mc->mc_pg[0], NULL); - if (rc) - return rc; - mc->mc_db->md_depth--; - mc->mc_db->md_branch_pages--; - mc->mc_ki[0] = mc->mc_ki[1]; - for (i = 1; i<mc->mc_db->md_depth; i++) { - mc->mc_pg[i] = mc->mc_pg[i+1]; - mc->mc_ki[i] = mc->mc_ki[i+1]; - } - { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - - for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (mc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == mc) continue; - if (!(m3->mc_flags & C_INITIALIZED)) - continue; - if (m3->mc_pg[0] == mp) { - for (i=0; i<mc->mc_db->md_depth; i++) { - m3->mc_pg[i] = m3->mc_pg[i+1]; - m3->mc_ki[i] = m3->mc_ki[i+1]; - } - m3->mc_snum--; - m3->mc_top--; - } - } - } - } else - DPUTS("root page doesn't need rebalancing"); - return MDB_SUCCESS; - } - - /* The parent (branch page) must have at least 2 pointers, - * 
otherwise the tree is invalid. - */ - ptop = mc->mc_top-1; - mdb_cassert(mc, NUMKEYS(mc->mc_pg[ptop]) > 1); - - /* Leaf page fill factor is below the threshold. - * Try to move keys from left or right neighbor, or - * merge with a neighbor page. - */ - - /* Find neighbors. - */ - mdb_cursor_copy(mc, &mn); - mn.mc_xcursor = NULL; - - oldki = mc->mc_ki[mc->mc_top]; - if (mc->mc_ki[ptop] == 0) { - /* We're the leftmost leaf in our parent. - */ - DPUTS("reading right neighbor"); - mn.mc_ki[ptop]++; - node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]); - rc = mdb_page_get(mc, NODEPGNO(node), &mn.mc_pg[mn.mc_top], NULL); - if (rc) - return rc; - mn.mc_ki[mn.mc_top] = 0; - mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]); - fromleft = 0; - } else { - /* There is at least one neighbor to the left. - */ - DPUTS("reading left neighbor"); - mn.mc_ki[ptop]--; - node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]); - rc = mdb_page_get(mc, NODEPGNO(node), &mn.mc_pg[mn.mc_top], NULL); - if (rc) - return rc; - mn.mc_ki[mn.mc_top] = NUMKEYS(mn.mc_pg[mn.mc_top]) - 1; - mc->mc_ki[mc->mc_top] = 0; - fromleft = 1; - } - - DPRINTF(("found neighbor page %"Y"u (%u keys, %.1f%% full)", - mn.mc_pg[mn.mc_top]->mp_pgno, NUMKEYS(mn.mc_pg[mn.mc_top]), - (float)PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) / 10)); - - /* If the neighbor page is above threshold and has enough keys, - * move one key from it. Otherwise we should try to merge them. - * (A branch page must never have less than 2 keys.) - */ - if (PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) >= thresh && NUMKEYS(mn.mc_pg[mn.mc_top]) > minkeys) { - rc = mdb_node_move(&mn, mc, fromleft); - if (fromleft) { - /* if we inserted on left, bump position up */ - oldki++; - } - } else { - if (!fromleft) { - rc = mdb_page_merge(&mn, mc); - } else { - oldki += NUMKEYS(mn.mc_pg[mn.mc_top]); - mn.mc_ki[mn.mc_top] += mc->mc_ki[mn.mc_top] + 1; - /* We want mdb_rebalance to find mn when doing fixups */ - WITH_CURSOR_TRACKING(mn, - rc = mdb_page_merge(mc, &mn)); - mdb_cursor_copy(&mn, mc); - } - mc->mc_flags &= ~C_EOF; - } - mc->mc_ki[mc->mc_top] = oldki; - return rc; -} - -/** Complete a delete operation started by #mdb_cursor_del(). */ -static int -mdb_cursor_del0(MDB_cursor *mc) -{ - int rc; - MDB_page *mp; - indx_t ki; - unsigned int nkeys; - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - - ki = mc->mc_ki[mc->mc_top]; - mp = mc->mc_pg[mc->mc_top]; - mdb_node_del(mc, mc->mc_db->md_pad); - mc->mc_db->md_entries--; - { - /* Adjust other cursors pointing to mp */ - for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2; - if (! (m2->mc_flags & m3->mc_flags & C_INITIALIZED)) - continue; - if (m3 == mc || m3->mc_snum < mc->mc_snum) - continue; - if (m3->mc_pg[mc->mc_top] == mp) { - if (m3->mc_ki[mc->mc_top] == ki) { - m3->mc_flags |= C_DEL; - } else if (m3->mc_ki[mc->mc_top] > ki) { - m3->mc_ki[mc->mc_top]--; - } - if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) { - MDB_node *node = NODEPTR(m3->mc_pg[mc->mc_top], m3->mc_ki[mc->mc_top]); - if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) - m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node); - } - } - } - } - rc = mdb_rebalance(mc); - - if (rc == MDB_SUCCESS) { - /* DB is totally empty now, just bail out. - * Other cursors adjustments were already done - * by mdb_rebalance and aren't needed here. 
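Much of mdb_cursor_del0 and mdb_rebalance above exists only to keep other tracked cursors coherent (the m2/m3 loops): positions are shifted, C_DEL is set on cursors that sat on the deleted slot, and page pointers are swapped when nodes migrate. Roughly, this is what that buys a caller holding several cursors in one write transaction; a hedged sketch, not taken from the source:

    MDB_cursor *a, *b;
    MDB_val k, v;

    mdb_cursor_open(txn, dbi, &a);
    mdb_cursor_open(txn, dbi, &b);
    mdb_cursor_get(a, &k, &v, MDB_FIRST);
    mdb_cursor_get(b, &k, &v, MDB_FIRST);

    mdb_cursor_del(a, 0);                   /* b is fixed up by the loops above */
    mdb_cursor_get(b, &k, &v, MDB_NEXT);    /* continues from a sane position */

    mdb_cursor_close(a);
    mdb_cursor_close(b);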
- */ - if (!mc->mc_snum) - return rc; - - mp = mc->mc_pg[mc->mc_top]; - nkeys = NUMKEYS(mp); - - /* Adjust other cursors pointing to mp */ - for (m2 = mc->mc_txn->mt_cursors[dbi]; !rc && m2; m2=m2->mc_next) { - m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2; - if (! (m2->mc_flags & m3->mc_flags & C_INITIALIZED)) - continue; - if (m3->mc_snum < mc->mc_snum) - continue; - if (m3->mc_pg[mc->mc_top] == mp) { - /* if m3 points past last node in page, find next sibling */ - if (m3->mc_ki[mc->mc_top] >= mc->mc_ki[mc->mc_top]) { - if (m3->mc_ki[mc->mc_top] >= nkeys) { - rc = mdb_cursor_sibling(m3, 1); - if (rc == MDB_NOTFOUND) { - m3->mc_flags |= C_EOF; - rc = MDB_SUCCESS; - continue; - } - } - if (mc->mc_db->md_flags & MDB_DUPSORT) { - MDB_node *node = NODEPTR(m3->mc_pg[m3->mc_top], m3->mc_ki[m3->mc_top]); - if (node->mn_flags & F_DUPDATA) { - mdb_xcursor_init1(m3, node); - m3->mc_xcursor->mx_cursor.mc_flags |= C_DEL; - } - } - } - } - } - mc->mc_flags |= C_DEL; - } - - if (rc) - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -int -mdb_del(MDB_txn *txn, MDB_dbi dbi, - MDB_val *key, MDB_val *data) -{ - if (!key || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - if (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) - return (txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; - - if (!F_ISSET(txn->mt_dbs[dbi].md_flags, MDB_DUPSORT)) { - /* must ignore any data */ - data = NULL; - } - - return mdb_del0(txn, dbi, key, data, 0); -} - -static int -mdb_del0(MDB_txn *txn, MDB_dbi dbi, - MDB_val *key, MDB_val *data, unsigned flags) -{ - MDB_cursor mc; - MDB_xcursor mx; - MDB_cursor_op op; - MDB_val rdata, *xdata; - int rc, exact = 0; - DKBUF; - - DPRINTF(("====> delete db %u key [%s]", dbi, DKEY(key))); - - mdb_cursor_init(&mc, txn, dbi, &mx); - - if (data) { - op = MDB_GET_BOTH; - rdata = *data; - xdata = &rdata; - } else { - op = MDB_SET; - xdata = NULL; - flags |= MDB_NODUPDATA; - } - rc = mdb_cursor_set(&mc, key, xdata, op, &exact); - if (rc == 0) { - /* let mdb_page_split know about this cursor if needed: - * delete will trigger a rebalance; if it needs to move - * a node from one page to another, it will have to - * update the parent's separator key(s). If the new sepkey - * is larger than the current one, the parent page may - * run out of space, triggering a split. We need this - * cursor to be consistent until the end of the rebalance. - */ - mc.mc_flags |= C_UNTRACK; - mc.mc_next = txn->mt_cursors[dbi]; - txn->mt_cursors[dbi] = &mc; - rc = mdb_cursor_del(&mc, flags); - txn->mt_cursors[dbi] = mc.mc_next; - } - return rc; -} - -/** Split a page and insert a new node. - * @param[in,out] mc Cursor pointing to the page and desired insertion index. - * The cursor will be updated to point to the actual page and index where - * the node got inserted after the split. - * @param[in] newkey The key for the newly inserted node. - * @param[in] newdata The data for the newly inserted node. - * @param[in] newpgno The page number, if the new node is a branch node. - * @param[in] nflags The #NODE_ADD_FLAGS for the new node. - * @return 0 on success, non-zero on failure. 
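mdb_del above is deliberately thin: the data argument is ignored unless the database is MDB_DUPSORT, and the real work happens through a stack cursor in mdb_del0 and mdb_cursor_del. Typical calls, assuming a write transaction (handles and key contents are illustrative):

    int rc;
    MDB_val key = { 2, "id" };
    MDB_val one = { 3, "red" };

    /* Plain database: drop the key and its single value. */
    rc = mdb_del(txn, dbi, &key, NULL);

    /* MDB_DUPSORT database: drop only the ("id", "red") pair; any other
     * duplicates stored under "id" are left untouched. */
    rc = mdb_del(txn, dupdbi, &key, &one);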
- */ -static int -mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata, pgno_t newpgno, - unsigned int nflags) -{ - unsigned int flags; - int rc = MDB_SUCCESS, new_root = 0, did_split = 0; - indx_t newindx; - pgno_t pgno = 0; - int i, j, split_indx, nkeys, pmax; - MDB_env *env = mc->mc_txn->mt_env; - MDB_node *node; - MDB_val sepkey, rkey, xdata, *rdata = &xdata; - MDB_page *copy = NULL; - MDB_page *mp, *rp, *pp; - int ptop; - MDB_cursor mn; - DKBUF; - - mp = mc->mc_pg[mc->mc_top]; - newindx = mc->mc_ki[mc->mc_top]; - nkeys = NUMKEYS(mp); - - DPRINTF(("-----> splitting %s page %"Y"u and adding [%s] at index %i/%i", - IS_LEAF(mp) ? "leaf" : "branch", mp->mp_pgno, - DKEY(newkey), mc->mc_ki[mc->mc_top], nkeys)); - - /* Create a right sibling. */ - if ((rc = mdb_page_new(mc, mp->mp_flags, 1, &rp))) - return rc; - rp->mp_pad = mp->mp_pad; - DPRINTF(("new right sibling: page %"Y"u", rp->mp_pgno)); - - /* Usually when splitting the root page, the cursor - * height is 1. But when called from mdb_update_key, - * the cursor height may be greater because it walks - * up the stack while finding the branch slot to update. - */ - if (mc->mc_top < 1) { - if ((rc = mdb_page_new(mc, P_BRANCH, 1, &pp))) - goto done; - /* shift current top to make room for new parent */ - for (i=mc->mc_snum; i>0; i--) { - mc->mc_pg[i] = mc->mc_pg[i-1]; - mc->mc_ki[i] = mc->mc_ki[i-1]; - } - mc->mc_pg[0] = pp; - mc->mc_ki[0] = 0; - mc->mc_db->md_root = pp->mp_pgno; - DPRINTF(("root split! new root = %"Y"u", pp->mp_pgno)); - new_root = mc->mc_db->md_depth++; - - /* Add left (implicit) pointer. */ - if ((rc = mdb_node_add(mc, 0, NULL, NULL, mp->mp_pgno, 0)) != MDB_SUCCESS) { - /* undo the pre-push */ - mc->mc_pg[0] = mc->mc_pg[1]; - mc->mc_ki[0] = mc->mc_ki[1]; - mc->mc_db->md_root = mp->mp_pgno; - mc->mc_db->md_depth--; - goto done; - } - mc->mc_snum++; - mc->mc_top++; - ptop = 0; - } else { - ptop = mc->mc_top-1; - DPRINTF(("parent branch page is %"Y"u", mc->mc_pg[ptop]->mp_pgno)); - } - - mdb_cursor_copy(mc, &mn); - mn.mc_xcursor = NULL; - mn.mc_pg[mn.mc_top] = rp; - mn.mc_ki[ptop] = mc->mc_ki[ptop]+1; - - if (nflags & MDB_APPEND) { - mn.mc_ki[mn.mc_top] = 0; - sepkey = *newkey; - split_indx = newindx; - nkeys = 0; - } else { - - split_indx = (nkeys+1) / 2; - - if (IS_LEAF2(rp)) { - char *split, *ins; - int x; - unsigned int lsize, rsize, ksize; - /* Move half of the keys to the right sibling */ - x = mc->mc_ki[mc->mc_top] - split_indx; - ksize = mc->mc_db->md_pad; - split = LEAF2KEY(mp, split_indx, ksize); - rsize = (nkeys - split_indx) * ksize; - lsize = (nkeys - split_indx) * sizeof(indx_t); - mp->mp_lower -= lsize; - rp->mp_lower += lsize; - mp->mp_upper += rsize - lsize; - rp->mp_upper -= rsize - lsize; - sepkey.mv_size = ksize; - if (newindx == split_indx) { - sepkey.mv_data = newkey->mv_data; - } else { - sepkey.mv_data = split; - } - if (x<0) { - ins = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], ksize); - memcpy(rp->mp_ptrs, split, rsize); - sepkey.mv_data = rp->mp_ptrs; - memmove(ins+ksize, ins, (split_indx - mc->mc_ki[mc->mc_top]) * ksize); - memcpy(ins, newkey->mv_data, ksize); - mp->mp_lower += sizeof(indx_t); - mp->mp_upper -= ksize - sizeof(indx_t); - } else { - if (x) - memcpy(rp->mp_ptrs, split, x * ksize); - ins = LEAF2KEY(rp, x, ksize); - memcpy(ins, newkey->mv_data, ksize); - memcpy(ins+ksize, split + x * ksize, rsize - x * ksize); - rp->mp_lower += sizeof(indx_t); - rp->mp_upper -= ksize - sizeof(indx_t); - mc->mc_ki[mc->mc_top] = x; - } - } else { - int psize, nsize, k; - /* Maximum free space in an 
empty page */ - pmax = env->me_psize - PAGEHDRSZ; - if (IS_LEAF(mp)) - nsize = mdb_leaf_size(env, newkey, newdata); - else - nsize = mdb_branch_size(env, newkey); - nsize = EVEN(nsize); - - /* grab a page to hold a temporary copy */ - copy = mdb_page_malloc(mc->mc_txn, 1); - if (copy == NULL) { - rc = ENOMEM; - goto done; - } - copy->mp_pgno = mp->mp_pgno; - copy->mp_flags = mp->mp_flags; - copy->mp_lower = (PAGEHDRSZ-PAGEBASE); - copy->mp_upper = env->me_psize - PAGEBASE; - - /* prepare to insert */ - for (i=0, j=0; i<nkeys; i++) { - if (i == newindx) { - copy->mp_ptrs[j++] = 0; - } - copy->mp_ptrs[j++] = mp->mp_ptrs[i]; - } - - /* When items are relatively large the split point needs - * to be checked, because being off-by-one will make the - * difference between success or failure in mdb_node_add. - * - * It's also relevant if a page happens to be laid out - * such that one half of its nodes are all "small" and - * the other half of its nodes are "large." If the new - * item is also "large" and falls on the half with - * "large" nodes, it also may not fit. - * - * As a final tweak, if the new item goes on the last - * spot on the page (and thus, onto the new page), bias - * the split so the new page is emptier than the old page. - * This yields better packing during sequential inserts. - */ - if (nkeys < 20 || nsize > pmax/16 || newindx >= nkeys) { - /* Find split point */ - psize = 0; - if (newindx <= split_indx || newindx >= nkeys) { - i = 0; j = 1; - k = newindx >= nkeys ? nkeys : split_indx+1+IS_LEAF(mp); - } else { - i = nkeys; j = -1; - k = split_indx-1; - } - for (; i!=k; i+=j) { - if (i == newindx) { - psize += nsize; - node = NULL; - } else { - node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE); - psize += NODESIZE + NODEKSZ(node) + sizeof(indx_t); - if (IS_LEAF(mp)) { - if (F_ISSET(node->mn_flags, F_BIGDATA)) - psize += sizeof(pgno_t); - else - psize += NODEDSZ(node); - } - psize = EVEN(psize); - } - if (psize > pmax || i == k-j) { - split_indx = i + (j<0); - break; - } - } - } - if (split_indx == newindx) { - sepkey.mv_size = newkey->mv_size; - sepkey.mv_data = newkey->mv_data; - } else { - node = (MDB_node *)((char *)mp + copy->mp_ptrs[split_indx] + PAGEBASE); - sepkey.mv_size = node->mn_ksize; - sepkey.mv_data = NODEKEY(node); - } - } - } - - DPRINTF(("separator is %d [%s]", split_indx, DKEY(&sepkey))); - - /* Copy separator key to the parent. - */ - if (SIZELEFT(mn.mc_pg[ptop]) < mdb_branch_size(env, &sepkey)) { - int snum = mc->mc_snum; - mn.mc_snum--; - mn.mc_top--; - did_split = 1; - /* We want other splits to find mn when doing fixups */ - WITH_CURSOR_TRACKING(mn, - rc = mdb_page_split(&mn, &sepkey, NULL, rp->mp_pgno, 0)); - if (rc) - goto done; - - /* root split? */ - if (mc->mc_snum > snum) { - ptop++; - } - /* Right page might now have changed parent. - * Check if left page also changed parent. 
- */ - if (mn.mc_pg[ptop] != mc->mc_pg[ptop] && - mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) { - for (i=0; i<ptop; i++) { - mc->mc_pg[i] = mn.mc_pg[i]; - mc->mc_ki[i] = mn.mc_ki[i]; - } - mc->mc_pg[ptop] = mn.mc_pg[ptop]; - if (mn.mc_ki[ptop]) { - mc->mc_ki[ptop] = mn.mc_ki[ptop] - 1; - } else { - /* find right page's left sibling */ - mc->mc_ki[ptop] = mn.mc_ki[ptop]; - mdb_cursor_sibling(mc, 0); - } - } - } else { - mn.mc_top--; - rc = mdb_node_add(&mn, mn.mc_ki[ptop], &sepkey, NULL, rp->mp_pgno, 0); - mn.mc_top++; - } - if (rc != MDB_SUCCESS) { - goto done; - } - if (nflags & MDB_APPEND) { - mc->mc_pg[mc->mc_top] = rp; - mc->mc_ki[mc->mc_top] = 0; - rc = mdb_node_add(mc, 0, newkey, newdata, newpgno, nflags); - if (rc) - goto done; - for (i=0; i<mc->mc_top; i++) - mc->mc_ki[i] = mn.mc_ki[i]; - } else if (!IS_LEAF2(mp)) { - /* Move nodes */ - mc->mc_pg[mc->mc_top] = rp; - i = split_indx; - j = 0; - do { - if (i == newindx) { - rkey.mv_data = newkey->mv_data; - rkey.mv_size = newkey->mv_size; - if (IS_LEAF(mp)) { - rdata = newdata; - } else - pgno = newpgno; - flags = nflags; - /* Update index for the new key. */ - mc->mc_ki[mc->mc_top] = j; - } else { - node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE); - rkey.mv_data = NODEKEY(node); - rkey.mv_size = node->mn_ksize; - if (IS_LEAF(mp)) { - xdata.mv_data = NODEDATA(node); - xdata.mv_size = NODEDSZ(node); - rdata = &xdata; - } else - pgno = NODEPGNO(node); - flags = node->mn_flags; - } - - if (!IS_LEAF(mp) && j == 0) { - /* First branch index doesn't need key data. */ - rkey.mv_size = 0; - } - - rc = mdb_node_add(mc, j, &rkey, rdata, pgno, flags); - if (rc) - goto done; - if (i == nkeys) { - i = 0; - j = 0; - mc->mc_pg[mc->mc_top] = copy; - } else { - i++; - j++; - } - } while (i != split_indx); - - nkeys = NUMKEYS(copy); - for (i=0; i<nkeys; i++) - mp->mp_ptrs[i] = copy->mp_ptrs[i]; - mp->mp_lower = copy->mp_lower; - mp->mp_upper = copy->mp_upper; - memcpy(NODEPTR(mp, nkeys-1), NODEPTR(copy, nkeys-1), - env->me_psize - copy->mp_upper - PAGEBASE); - - /* reset back to original page */ - if (newindx < split_indx) { - mc->mc_pg[mc->mc_top] = mp; - } else { - mc->mc_pg[mc->mc_top] = rp; - mc->mc_ki[ptop]++; - /* Make sure mc_ki is still valid. - */ - if (mn.mc_pg[ptop] != mc->mc_pg[ptop] && - mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) { - for (i=0; i<=ptop; i++) { - mc->mc_pg[i] = mn.mc_pg[i]; - mc->mc_ki[i] = mn.mc_ki[i]; - } - } - } - if (nflags & MDB_RESERVE) { - node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); - if (!(node->mn_flags & F_BIGDATA)) - newdata->mv_data = NODEDATA(node); - } - } else { - if (newindx >= split_indx) { - mc->mc_pg[mc->mc_top] = rp; - mc->mc_ki[ptop]++; - /* Make sure mc_ki is still valid. 
- */ - if (mn.mc_pg[ptop] != mc->mc_pg[ptop] && - mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) { - for (i=0; i<=ptop; i++) { - mc->mc_pg[i] = mn.mc_pg[i]; - mc->mc_ki[i] = mn.mc_ki[i]; - } - } - } - } - - { - /* Adjust other cursors pointing to mp */ - MDB_cursor *m2, *m3; - MDB_dbi dbi = mc->mc_dbi; - nkeys = NUMKEYS(mp); - - for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { - if (mc->mc_flags & C_SUB) - m3 = &m2->mc_xcursor->mx_cursor; - else - m3 = m2; - if (m3 == mc) - continue; - if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED)) - continue; - if (new_root) { - int k; - /* sub cursors may be on different DB */ - if (m3->mc_pg[0] != mp) - continue; - /* root split */ - for (k=new_root; k>=0; k--) { - m3->mc_ki[k+1] = m3->mc_ki[k]; - m3->mc_pg[k+1] = m3->mc_pg[k]; - } - if (m3->mc_ki[0] >= nkeys) { - m3->mc_ki[0] = 1; - } else { - m3->mc_ki[0] = 0; - } - m3->mc_pg[0] = mc->mc_pg[0]; - m3->mc_snum++; - m3->mc_top++; - } - if (m3->mc_top >= mc->mc_top && m3->mc_pg[mc->mc_top] == mp) { - if (m3->mc_ki[mc->mc_top] >= newindx && !(nflags & MDB_SPLIT_REPLACE)) - m3->mc_ki[mc->mc_top]++; - if (m3->mc_ki[mc->mc_top] >= nkeys) { - m3->mc_pg[mc->mc_top] = rp; - m3->mc_ki[mc->mc_top] -= nkeys; - for (i=0; i<mc->mc_top; i++) { - m3->mc_ki[i] = mn.mc_ki[i]; - m3->mc_pg[i] = mn.mc_pg[i]; - } - } - } else if (!did_split && m3->mc_top >= ptop && m3->mc_pg[ptop] == mc->mc_pg[ptop] && - m3->mc_ki[ptop] >= mc->mc_ki[ptop]) { - m3->mc_ki[ptop]++; - } - if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) && - IS_LEAF(mp)) { - MDB_node *node = NODEPTR(m3->mc_pg[mc->mc_top], m3->mc_ki[mc->mc_top]); - if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) - m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node); - } - } - } - DPRINTF(("mp left: %d, rp left: %d", SIZELEFT(mp), SIZELEFT(rp))); - -done: - if (copy) /* tmp page */ - mdb_page_free(env, copy); - if (rc) - mc->mc_txn->mt_flags |= MDB_TXN_ERROR; - return rc; -} - -int -mdb_put(MDB_txn *txn, MDB_dbi dbi, - MDB_val *key, MDB_val *data, unsigned int flags) -{ - MDB_cursor mc; - MDB_xcursor mx; - int rc; - - if (!key || !data || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - if (flags & ~(MDB_NOOVERWRITE|MDB_NODUPDATA|MDB_RESERVE|MDB_APPEND|MDB_APPENDDUP)) - return EINVAL; - - if (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) - return (txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; - - mdb_cursor_init(&mc, txn, dbi, &mx); - mc.mc_next = txn->mt_cursors[dbi]; - txn->mt_cursors[dbi] = &mc; - rc = mdb_cursor_put(&mc, key, data, flags); - txn->mt_cursors[dbi] = mc.mc_next; - return rc; -} - -#ifndef MDB_WBUF -#define MDB_WBUF (1024*1024) -#endif - - /** State needed for a compacting copy. */ -typedef struct mdb_copy { - pthread_mutex_t mc_mutex; - pthread_cond_t mc_cond; - char *mc_wbuf[2]; - char *mc_over[2]; - MDB_env *mc_env; - MDB_txn *mc_txn; - int mc_wlen[2]; - int mc_olen[2]; - pgno_t mc_next_pgno; - HANDLE mc_fd; - int mc_status; - volatile int mc_new; - int mc_toggle; - -} mdb_copy; - - /** Dedicated writer thread for compacting copy. 
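mdb_put above is the transaction-level counterpart of mdb_cursor_put: it builds a cursor on the stack, tracks it in mt_cursors for the duration of the call, and accepts only MDB_NOOVERWRITE, MDB_NODUPDATA, MDB_RESERVE, MDB_APPEND and MDB_APPENDDUP. Two common patterns, assuming a write transaction (variables are illustrative, <string.h> assumed):

    unsigned int id = 42;
    const char *text = "hello";
    MDB_val key = { sizeof(id), &id };
    MDB_val val = { strlen(text), (void *)text };
    int rc;

    /* Refuse to overwrite: returns MDB_KEYEXIST if the key already exists. */
    rc = mdb_put(txn, dbi, &key, &val, MDB_NOOVERWRITE);

    /* Bulk-load keys that arrive in sorted order: with MDB_APPEND the split
     * code above leaves the current page full and opens a fresh right sibling,
     * which packs pages much more densely than random inserts. */
    rc = mdb_put(txn, dbi, &key, &val, MDB_APPEND);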
*/ -static THREAD_RET ESECT CALL_CONV -mdb_env_copythr(void *arg) -{ - mdb_copy *my = arg; - char *ptr; - int toggle = 0, wsize, rc; -#ifdef _WIN32 - DWORD len; -#define DO_WRITE(rc, fd, ptr, w2, len) rc = WriteFile(fd, ptr, w2, &len, NULL) -#else - int len; -#define DO_WRITE(rc, fd, ptr, w2, len) len = write(fd, ptr, w2); rc = (len >= 0) -#endif - - pthread_mutex_lock(&my->mc_mutex); - my->mc_new = 0; - pthread_cond_signal(&my->mc_cond); - for(;;) { - while (!my->mc_new) - pthread_cond_wait(&my->mc_cond, &my->mc_mutex); - if (my->mc_new < 0) { - my->mc_new = 0; - break; - } - my->mc_new = 0; - wsize = my->mc_wlen[toggle]; - ptr = my->mc_wbuf[toggle]; -again: - while (wsize > 0) { - DO_WRITE(rc, my->mc_fd, ptr, wsize, len); - if (!rc) { - rc = ErrCode(); - break; - } else if (len > 0) { - rc = MDB_SUCCESS; - ptr += len; - wsize -= len; - continue; - } else { - rc = EIO; - break; - } - } - if (rc) { - my->mc_status = rc; - break; - } - /* If there's an overflow page tail, write it too */ - if (my->mc_olen[toggle]) { - wsize = my->mc_olen[toggle]; - ptr = my->mc_over[toggle]; - my->mc_olen[toggle] = 0; - goto again; - } - my->mc_wlen[toggle] = 0; - toggle ^= 1; - pthread_cond_signal(&my->mc_cond); - } - pthread_cond_signal(&my->mc_cond); - pthread_mutex_unlock(&my->mc_mutex); - return (THREAD_RET)0; -#undef DO_WRITE -} - - /** Tell the writer thread there's a buffer ready to write */ -static int ESECT -mdb_env_cthr_toggle(mdb_copy *my, int st) -{ - int toggle = my->mc_toggle ^ 1; - pthread_mutex_lock(&my->mc_mutex); - if (my->mc_status) { - pthread_mutex_unlock(&my->mc_mutex); - return my->mc_status; - } - while (my->mc_new == 1) - pthread_cond_wait(&my->mc_cond, &my->mc_mutex); - my->mc_new = st; - my->mc_toggle = toggle; - pthread_cond_signal(&my->mc_cond); - pthread_mutex_unlock(&my->mc_mutex); - return 0; -} - - /** Depth-first tree traversal for compacting copy. */ -static int ESECT -mdb_env_cwalk(mdb_copy *my, pgno_t *pg, int flags) -{ - MDB_cursor mc = {0}; - MDB_node *ni; - MDB_page *mo, *mp, *leaf; - char *buf, *ptr; - int rc, toggle; - unsigned int i; - - /* Empty DB, nothing to do */ - if (*pg == P_INVALID) - return MDB_SUCCESS; - - mc.mc_snum = 1; - mc.mc_txn = my->mc_txn; - mc.mc_flags = my->mc_txn->mt_flags & (C_ORIG_RDONLY|C_WRITEMAP); - - rc = mdb_page_get(&mc, *pg, &mc.mc_pg[0], NULL); - if (rc) - return rc; - rc = mdb_page_search_root(&mc, NULL, MDB_PS_FIRST); - if (rc) - return rc; - - /* Make cursor pages writable */ - buf = ptr = malloc(my->mc_env->me_psize * mc.mc_snum); - if (buf == NULL) - return ENOMEM; - - for (i=0; i<mc.mc_top; i++) { - mdb_page_copy((MDB_page *)ptr, mc.mc_pg[i], my->mc_env->me_psize); - mc.mc_pg[i] = (MDB_page *)ptr; - ptr += my->mc_env->me_psize; - } - - /* This is writable space for a leaf page. Usually not needed. 
*/ - leaf = (MDB_page *)ptr; - - toggle = my->mc_toggle; - while (mc.mc_snum > 0) { - unsigned n; - mp = mc.mc_pg[mc.mc_top]; - n = NUMKEYS(mp); - - if (IS_LEAF(mp)) { - if (!IS_LEAF2(mp) && !(flags & F_DUPDATA)) { - for (i=0; i<n; i++) { - ni = NODEPTR(mp, i); - if (ni->mn_flags & F_BIGDATA) { - MDB_page *omp; - pgno_t pg; - - /* Need writable leaf */ - if (mp != leaf) { - mc.mc_pg[mc.mc_top] = leaf; - mdb_page_copy(leaf, mp, my->mc_env->me_psize); - mp = leaf; - ni = NODEPTR(mp, i); - } - - memcpy(&pg, NODEDATA(ni), sizeof(pg)); - rc = mdb_page_get(&mc, pg, &omp, NULL); - if (rc) - goto done; - if (my->mc_wlen[toggle] >= MDB_WBUF) { - rc = mdb_env_cthr_toggle(my, 1); - if (rc) - goto done; - toggle = my->mc_toggle; - } - mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]); - memcpy(mo, omp, my->mc_env->me_psize); - mo->mp_pgno = my->mc_next_pgno; - my->mc_next_pgno += omp->mp_pages; - my->mc_wlen[toggle] += my->mc_env->me_psize; - if (omp->mp_pages > 1) { - my->mc_olen[toggle] = my->mc_env->me_psize * (omp->mp_pages - 1); - my->mc_over[toggle] = (char *)omp + my->mc_env->me_psize; - rc = mdb_env_cthr_toggle(my, 1); - if (rc) - goto done; - toggle = my->mc_toggle; - } - memcpy(NODEDATA(ni), &mo->mp_pgno, sizeof(pgno_t)); - } else if (ni->mn_flags & F_SUBDATA) { - MDB_db db; - - /* Need writable leaf */ - if (mp != leaf) { - mc.mc_pg[mc.mc_top] = leaf; - mdb_page_copy(leaf, mp, my->mc_env->me_psize); - mp = leaf; - ni = NODEPTR(mp, i); - } - - memcpy(&db, NODEDATA(ni), sizeof(db)); - my->mc_toggle = toggle; - rc = mdb_env_cwalk(my, &db.md_root, ni->mn_flags & F_DUPDATA); - if (rc) - goto done; - toggle = my->mc_toggle; - memcpy(NODEDATA(ni), &db, sizeof(db)); - } - } - } - } else { - mc.mc_ki[mc.mc_top]++; - if (mc.mc_ki[mc.mc_top] < n) { - pgno_t pg; -again: - ni = NODEPTR(mp, mc.mc_ki[mc.mc_top]); - pg = NODEPGNO(ni); - rc = mdb_page_get(&mc, pg, &mp, NULL); - if (rc) - goto done; - mc.mc_top++; - mc.mc_snum++; - mc.mc_ki[mc.mc_top] = 0; - if (IS_BRANCH(mp)) { - /* Whenever we advance to a sibling branch page, - * we must proceed all the way down to its first leaf. - */ - mdb_page_copy(mc.mc_pg[mc.mc_top], mp, my->mc_env->me_psize); - goto again; - } else - mc.mc_pg[mc.mc_top] = mp; - continue; - } - } - if (my->mc_wlen[toggle] >= MDB_WBUF) { - rc = mdb_env_cthr_toggle(my, 1); - if (rc) - goto done; - toggle = my->mc_toggle; - } - mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]); - mdb_page_copy(mo, mp, my->mc_env->me_psize); - mo->mp_pgno = my->mc_next_pgno++; - my->mc_wlen[toggle] += my->mc_env->me_psize; - if (mc.mc_top) { - /* Update parent if there is one */ - ni = NODEPTR(mc.mc_pg[mc.mc_top-1], mc.mc_ki[mc.mc_top-1]); - SETPGNO(ni, mo->mp_pgno); - mdb_cursor_pop(&mc); - } else { - /* Otherwise we're done */ - *pg = mo->mp_pgno; - break; - } - } -done: - free(buf); - return rc; -} - - /** Copy environment with compaction. 
*/ -static int ESECT -mdb_env_copyfd1(MDB_env *env, HANDLE fd) -{ - MDB_meta *mm; - MDB_page *mp; - mdb_copy my; - MDB_txn *txn = NULL; - pthread_t thr; - int rc; - -#ifdef _WIN32 - my.mc_mutex = CreateMutex(NULL, FALSE, NULL); - my.mc_cond = CreateEvent(NULL, FALSE, FALSE, NULL); - my.mc_wbuf[0] = _aligned_malloc(MDB_WBUF*2, env->me_os_psize); - if (my.mc_wbuf[0] == NULL) - return errno; -#else - pthread_mutex_init(&my.mc_mutex, NULL); - pthread_cond_init(&my.mc_cond, NULL); -#ifdef HAVE_MEMALIGN - my.mc_wbuf[0] = memalign(env->me_os_psize, MDB_WBUF*2); - if (my.mc_wbuf[0] == NULL) - return errno; -#else - rc = posix_memalign((void **)&my.mc_wbuf[0], env->me_os_psize, MDB_WBUF*2); - if (rc) - return rc; -#endif -#endif - memset(my.mc_wbuf[0], 0, MDB_WBUF*2); - my.mc_wbuf[1] = my.mc_wbuf[0] + MDB_WBUF; - my.mc_wlen[0] = 0; - my.mc_wlen[1] = 0; - my.mc_olen[0] = 0; - my.mc_olen[1] = 0; - my.mc_next_pgno = NUM_METAS; - my.mc_status = 0; - my.mc_new = 1; - my.mc_toggle = 0; - my.mc_env = env; - my.mc_fd = fd; - THREAD_CREATE(thr, mdb_env_copythr, &my); - - rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); - if (rc) - return rc; - - mp = (MDB_page *)my.mc_wbuf[0]; - memset(mp, 0, NUM_METAS * env->me_psize); - mp->mp_pgno = 0; - mp->mp_flags = P_META; - mm = (MDB_meta *)METADATA(mp); - mdb_env_init_meta0(env, mm); - mm->mm_address = env->me_metas[0]->mm_address; - - mp = (MDB_page *)(my.mc_wbuf[0] + env->me_psize); - mp->mp_pgno = 1; - mp->mp_flags = P_META; - *(MDB_meta *)METADATA(mp) = *mm; - mm = (MDB_meta *)METADATA(mp); - - /* Count the number of free pages, subtract from lastpg to find - * number of active pages - */ - { - MDB_ID freecount = 0; - MDB_cursor mc; - MDB_val key, data; - mdb_cursor_init(&mc, txn, FREE_DBI, NULL); - while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0) - freecount += *(MDB_ID *)data.mv_data; - freecount += txn->mt_dbs[FREE_DBI].md_branch_pages + - txn->mt_dbs[FREE_DBI].md_leaf_pages + - txn->mt_dbs[FREE_DBI].md_overflow_pages; - - /* Set metapage 1 */ - mm->mm_last_pg = txn->mt_next_pgno - freecount - 1; - mm->mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; - if (mm->mm_last_pg > NUM_METAS-1) { - mm->mm_dbs[MAIN_DBI].md_root = mm->mm_last_pg; - mm->mm_txnid = 1; - } else { - mm->mm_dbs[MAIN_DBI].md_root = P_INVALID; - } - } - my.mc_wlen[0] = env->me_psize * NUM_METAS; - my.mc_txn = txn; - pthread_mutex_lock(&my.mc_mutex); - while(my.mc_new) - pthread_cond_wait(&my.mc_cond, &my.mc_mutex); - pthread_mutex_unlock(&my.mc_mutex); - rc = mdb_env_cwalk(&my, &txn->mt_dbs[MAIN_DBI].md_root, 0); - if (rc == MDB_SUCCESS && my.mc_wlen[my.mc_toggle]) - rc = mdb_env_cthr_toggle(&my, 1); - mdb_env_cthr_toggle(&my, -1); - pthread_mutex_lock(&my.mc_mutex); - while(my.mc_new) - pthread_cond_wait(&my.mc_cond, &my.mc_mutex); - pthread_mutex_unlock(&my.mc_mutex); - THREAD_FINISH(thr); - - mdb_txn_abort(txn); -#ifdef _WIN32 - CloseHandle(my.mc_cond); - CloseHandle(my.mc_mutex); - _aligned_free(my.mc_wbuf[0]); -#else - pthread_cond_destroy(&my.mc_cond); - pthread_mutex_destroy(&my.mc_mutex); - free(my.mc_wbuf[0]); -#endif - return rc; -} - - /** Copy environment as-is. 
*/ -static int ESECT -mdb_env_copyfd0(MDB_env *env, HANDLE fd) -{ - MDB_txn *txn = NULL; - mdb_mutexref_t wmutex = NULL; - int rc; - mdb_size_t wsize, w3; - char *ptr; -#ifdef _WIN32 - DWORD len, w2; -#define DO_WRITE(rc, fd, ptr, w2, len) rc = WriteFile(fd, ptr, w2, &len, NULL) -#else - ssize_t len; - size_t w2; -#define DO_WRITE(rc, fd, ptr, w2, len) len = write(fd, ptr, w2); rc = (len >= 0) -#endif - - /* Do the lock/unlock of the reader mutex before starting the - * write txn. Otherwise other read txns could block writers. - */ - rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); - if (rc) - return rc; - - if (env->me_txns) { - /* We must start the actual read txn after blocking writers */ - mdb_txn_end(txn, MDB_END_RESET_TMP); - - /* Temporarily block writers until we snapshot the meta pages */ - wmutex = env->me_wmutex; - if (LOCK_MUTEX(rc, env, wmutex)) - goto leave; - - rc = mdb_txn_renew0(txn); - if (rc) { - UNLOCK_MUTEX(wmutex); - goto leave; - } - } - - wsize = env->me_psize * NUM_METAS; - ptr = env->me_map; - w2 = wsize; - while (w2 > 0) { - DO_WRITE(rc, fd, ptr, w2, len); - if (!rc) { - rc = ErrCode(); - break; - } else if (len > 0) { - rc = MDB_SUCCESS; - ptr += len; - w2 -= len; - continue; - } else { - /* Non-blocking or async handles are not supported */ - rc = EIO; - break; - } - } - if (wmutex) - UNLOCK_MUTEX(wmutex); - - if (rc) - goto leave; - - w3 = txn->mt_next_pgno * env->me_psize; - { - mdb_size_t fsize = 0; - if ((rc = mdb_fsize(env->me_fd, &fsize))) - goto leave; - if (w3 > fsize) - w3 = fsize; - } - wsize = w3 - wsize; - while (wsize > 0) { - if (wsize > MAX_WRITE) - w2 = MAX_WRITE; - else - w2 = wsize; - DO_WRITE(rc, fd, ptr, w2, len); - if (!rc) { - rc = ErrCode(); - break; - } else if (len > 0) { - rc = MDB_SUCCESS; - ptr += len; - wsize -= len; - continue; - } else { - rc = EIO; - break; - } - } - -leave: - mdb_txn_abort(txn); - return rc; -} - -int ESECT -mdb_env_copyfd2(MDB_env *env, HANDLE fd, unsigned int flags) -{ - if (flags & MDB_CP_COMPACT) - return mdb_env_copyfd1(env, fd); - else - return mdb_env_copyfd0(env, fd); -} - -int ESECT -mdb_env_copyfd(MDB_env *env, HANDLE fd) -{ - return mdb_env_copyfd2(env, fd, 0); -} - -int ESECT -mdb_env_copy2(MDB_env *env, const char *path, unsigned int flags) -{ - int rc, len; - char *lpath; - HANDLE newfd = INVALID_HANDLE_VALUE; -#ifdef _WIN32 - wchar_t *wpath; -#endif - - if (env->me_flags & MDB_NOSUBDIR) { - lpath = (char *)path; - } else { - len = strlen(path); - len += sizeof(DATANAME); - lpath = malloc(len); - if (!lpath) - return ENOMEM; - sprintf(lpath, "%s" DATANAME, path); - } - - /* The destination path must exist, but the destination file must not. - * We don't want the OS to cache the writes, since the source data is - * already in the OS cache. 
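The two copy paths above are chosen in mdb_env_copyfd2: MDB_CP_COMPACT routes to the compacting walker (mdb_env_copyfd1), which renumbers pages and drops the freelist, while 0 gives the straight as-is copy (mdb_env_copyfd0). From the application side a hot backup is a single call; a sketch assuming an open env (paths are illustrative, and as the comment above notes, the target directory must exist while its data file must not):

    int rc;

    /* Plain snapshot: fastest, keeps free pages, same size as the source. */
    rc = mdb_env_copy2(env, "/backups/profile-plain", 0);

    /* Compacting snapshot: slower, but free pages are omitted and the page
     * numbering is rebuilt, so the copy is usually considerably smaller. */
    rc = mdb_env_copy2(env, "/backups/profile-compact", MDB_CP_COMPACT);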
- */ -#ifdef _WIN32 - rc = utf8_to_utf16(lpath, -1, &wpath, NULL); - if (rc) - goto leave; - newfd = CreateFileW(wpath, GENERIC_WRITE, 0, NULL, CREATE_NEW, - FILE_FLAG_NO_BUFFERING|FILE_FLAG_WRITE_THROUGH, NULL); - free(wpath); -#else - newfd = open(lpath, O_WRONLY|O_CREAT|O_EXCL, 0666); -#endif - if (newfd == INVALID_HANDLE_VALUE) { - rc = ErrCode(); - goto leave; - } - - if (env->me_psize >= env->me_os_psize) { -#ifdef O_DIRECT - /* Set O_DIRECT if the file system supports it */ - if ((rc = fcntl(newfd, F_GETFL)) != -1) - (void) fcntl(newfd, F_SETFL, rc | O_DIRECT); -#endif -#ifdef F_NOCACHE /* __APPLE__ */ - rc = fcntl(newfd, F_NOCACHE, 1); - if (rc) { - rc = ErrCode(); - goto leave; - } -#endif - } - - rc = mdb_env_copyfd2(env, newfd, flags); - -leave: - if (!(env->me_flags & MDB_NOSUBDIR)) - free(lpath); - if (newfd != INVALID_HANDLE_VALUE) - if (close(newfd) < 0 && rc == MDB_SUCCESS) - rc = ErrCode(); - - return rc; -} - -int ESECT -mdb_env_copy(MDB_env *env, const char *path) -{ - return mdb_env_copy2(env, path, 0); -} - -int ESECT -mdb_env_set_flags(MDB_env *env, unsigned int flag, int onoff) -{ - if (flag & ~CHANGEABLE) - return EINVAL; - if (onoff) - env->me_flags |= flag; - else - env->me_flags &= ~flag; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_flags(MDB_env *env, unsigned int *arg) -{ - if (!env || !arg) - return EINVAL; - - *arg = env->me_flags & (CHANGEABLE|CHANGELESS); - return MDB_SUCCESS; -} - -int ESECT -mdb_env_set_userctx(MDB_env *env, void *ctx) -{ - if (!env) - return EINVAL; - env->me_userctx = ctx; - return MDB_SUCCESS; -} - -void * ESECT -mdb_env_get_userctx(MDB_env *env) -{ - return env ? env->me_userctx : NULL; -} - -int ESECT -mdb_env_set_assert(MDB_env *env, MDB_assert_func *func) -{ - if (!env) - return EINVAL; -#ifndef NDEBUG - env->me_assert_func = func; -#endif - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_path(MDB_env *env, const char **arg) -{ - if (!env || !arg) - return EINVAL; - - *arg = env->me_path; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_fd(MDB_env *env, mdb_filehandle_t *arg) -{ - if (!env || !arg) - return EINVAL; - - *arg = env->me_fd; - return MDB_SUCCESS; -} - -/** Common code for #mdb_stat() and #mdb_env_stat(). - * @param[in] env the environment to operate in. - * @param[in] db the #MDB_db record containing the stats to return. - * @param[out] arg the address of an #MDB_stat structure to receive the stats. - * @return 0, this function always succeeds. - */ -static int ESECT -mdb_stat0(MDB_env *env, MDB_db *db, MDB_stat *arg) -{ - arg->ms_psize = env->me_psize; - arg->ms_depth = db->md_depth; - arg->ms_branch_pages = db->md_branch_pages; - arg->ms_leaf_pages = db->md_leaf_pages; - arg->ms_overflow_pages = db->md_overflow_pages; - arg->ms_entries = db->md_entries; - - return MDB_SUCCESS; -} - -int ESECT -mdb_env_stat(MDB_env *env, MDB_stat *arg) -{ - MDB_meta *meta; - - if (env == NULL || arg == NULL) - return EINVAL; - - meta = mdb_env_pick_meta(env); - - return mdb_stat0(env, &meta->mm_dbs[MAIN_DBI], arg); -} - -int ESECT -mdb_env_info(MDB_env *env, MDB_envinfo *arg) -{ - MDB_meta *meta; - - if (env == NULL || arg == NULL) - return EINVAL; - - meta = mdb_env_pick_meta(env); - arg->me_mapaddr = meta->mm_address; - arg->me_last_pgno = meta->mm_last_pg; - arg->me_last_txnid = meta->mm_txnid; - - arg->me_mapsize = env->me_mapsize; - arg->me_maxreaders = env->me_maxreaders; - arg->me_numreaders = env->me_txns ? 
env->me_txns->mti_numreaders : 0; - return MDB_SUCCESS; -} - -/** Set the default comparison functions for a database. - * Called immediately after a database is opened to set the defaults. - * The user can then override them with #mdb_set_compare() or - * #mdb_set_dupsort(). - * @param[in] txn A transaction handle returned by #mdb_txn_begin() - * @param[in] dbi A database handle returned by #mdb_dbi_open() - */ -static void -mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi) -{ - uint16_t f = txn->mt_dbs[dbi].md_flags; - - txn->mt_dbxs[dbi].md_cmp = - (f & MDB_REVERSEKEY) ? mdb_cmp_memnr : - (f & MDB_INTEGERKEY) ? mdb_cmp_cint : mdb_cmp_memn; - - txn->mt_dbxs[dbi].md_dcmp = - !(f & MDB_DUPSORT) ? 0 : - ((f & MDB_INTEGERDUP) - ? ((f & MDB_DUPFIXED) ? mdb_cmp_int : mdb_cmp_cint) - : ((f & MDB_REVERSEDUP) ? mdb_cmp_memnr : mdb_cmp_memn)); -} - -int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi) -{ - MDB_val key, data; - MDB_dbi i; - MDB_cursor mc; - MDB_db dummy; - int rc, dbflag, exact; - unsigned int unused = 0, seq; - char *namedup; - size_t len; - - if (flags & ~VALID_FLAGS) - return EINVAL; - if (txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - /* main DB? */ - if (!name) { - *dbi = MAIN_DBI; - if (flags & PERSISTENT_FLAGS) { - uint16_t f2 = flags & PERSISTENT_FLAGS; - /* make sure flag changes get committed */ - if ((txn->mt_dbs[MAIN_DBI].md_flags | f2) != txn->mt_dbs[MAIN_DBI].md_flags) { - txn->mt_dbs[MAIN_DBI].md_flags |= f2; - txn->mt_flags |= MDB_TXN_DIRTY; - } - } - mdb_default_cmp(txn, MAIN_DBI); - return MDB_SUCCESS; - } - - if (txn->mt_dbxs[MAIN_DBI].md_cmp == NULL) { - mdb_default_cmp(txn, MAIN_DBI); - } - - /* Is the DB already open? */ - len = strlen(name); - for (i=CORE_DBS; i<txn->mt_numdbs; i++) { - if (!txn->mt_dbxs[i].md_name.mv_size) { - /* Remember this free slot */ - if (!unused) unused = i; - continue; - } - if (len == txn->mt_dbxs[i].md_name.mv_size && - !strncmp(name, txn->mt_dbxs[i].md_name.mv_data, len)) { - *dbi = i; - return MDB_SUCCESS; - } - } - - /* If no free slot and max hit, fail */ - if (!unused && txn->mt_numdbs >= txn->mt_env->me_maxdbs) - return MDB_DBS_FULL; - - /* Cannot mix named databases with some mainDB flags */ - if (txn->mt_dbs[MAIN_DBI].md_flags & (MDB_DUPSORT|MDB_INTEGERKEY)) - return (flags & MDB_CREATE) ? MDB_INCOMPATIBLE : MDB_NOTFOUND; - - /* Find the DB info */ - dbflag = DB_NEW|DB_VALID|DB_USRVALID; - exact = 0; - key.mv_size = len; - key.mv_data = (void *)name; - mdb_cursor_init(&mc, txn, MAIN_DBI, NULL); - rc = mdb_cursor_set(&mc, &key, &data, MDB_SET, &exact); - if (rc == MDB_SUCCESS) { - /* make sure this is actually a DB */ - MDB_node *node = NODEPTR(mc.mc_pg[mc.mc_top], mc.mc_ki[mc.mc_top]); - if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) != F_SUBDATA) - return MDB_INCOMPATIBLE; - } else if (! (rc == MDB_NOTFOUND && (flags & MDB_CREATE))) { - return rc; - } - - /* Done here so we cannot fail after creating a new DB */ - if ((namedup = strdup(name)) == NULL) - return ENOMEM; - - if (rc) { - /* MDB_NOTFOUND and MDB_CREATE: Create new DB */ - data.mv_size = sizeof(MDB_db); - data.mv_data = &dummy; - memset(&dummy, 0, sizeof(dummy)); - dummy.md_root = P_INVALID; - dummy.md_flags = flags & PERSISTENT_FLAGS; - rc = mdb_cursor_put(&mc, &key, &data, F_SUBDATA); - dbflag |= DB_DIRTY; - } - - if (rc) { - free(namedup); - } else { - /* Got info, register DBI in this txn */ - unsigned int slot = unused ? 
unused : txn->mt_numdbs; - txn->mt_dbxs[slot].md_name.mv_data = namedup; - txn->mt_dbxs[slot].md_name.mv_size = len; - txn->mt_dbxs[slot].md_rel = NULL; - txn->mt_dbflags[slot] = dbflag; - /* txn-> and env-> are the same in read txns, use - * tmp variable to avoid undefined assignment - */ - seq = ++txn->mt_env->me_dbiseqs[slot]; - txn->mt_dbiseqs[slot] = seq; - - memcpy(&txn->mt_dbs[slot], data.mv_data, sizeof(MDB_db)); - *dbi = slot; - mdb_default_cmp(txn, slot); - if (!unused) { - txn->mt_numdbs++; - } - } - - return rc; -} - -int ESECT -mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *arg) -{ - if (!arg || !TXN_DBI_EXIST(txn, dbi, DB_VALID)) - return EINVAL; - - if (txn->mt_flags & MDB_TXN_BLOCKED) - return MDB_BAD_TXN; - - if (txn->mt_dbflags[dbi] & DB_STALE) { - MDB_cursor mc; - MDB_xcursor mx; - /* Stale, must read the DB's root. cursor_init does it for us. */ - mdb_cursor_init(&mc, txn, dbi, &mx); - } - return mdb_stat0(txn->mt_env, &txn->mt_dbs[dbi], arg); -} - -void mdb_dbi_close(MDB_env *env, MDB_dbi dbi) -{ - char *ptr; - if (dbi < CORE_DBS || dbi >= env->me_maxdbs) - return; - ptr = env->me_dbxs[dbi].md_name.mv_data; - /* If there was no name, this was already closed */ - if (ptr) { - env->me_dbxs[dbi].md_name.mv_data = NULL; - env->me_dbxs[dbi].md_name.mv_size = 0; - env->me_dbflags[dbi] = 0; - env->me_dbiseqs[dbi]++; - free(ptr); - } -} - -int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags) -{ - /* We could return the flags for the FREE_DBI too but what's the point? */ - if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - *flags = txn->mt_dbs[dbi].md_flags & PERSISTENT_FLAGS; - return MDB_SUCCESS; -} - -/** Add all the DB's pages to the free list. - * @param[in] mc Cursor on the DB to free. - * @param[in] subs non-Zero to check for sub-DBs in this DB. - * @return 0 on success, non-zero on failure. - */ -static int -mdb_drop0(MDB_cursor *mc, int subs) -{ - int rc; - - rc = mdb_page_search(mc, NULL, MDB_PS_FIRST); - if (rc == MDB_SUCCESS) { - MDB_txn *txn = mc->mc_txn; - MDB_node *ni; - MDB_cursor mx; - unsigned int i; - - /* DUPSORT sub-DBs have no ovpages/DBs. Omit scanning leaves. - * This also avoids any P_LEAF2 pages, which have no nodes. - * Also if the DB doesn't have sub-DBs and has no overflow - * pages, omit scanning leaves. 
- */ - if ((mc->mc_flags & C_SUB) || - (!subs && !mc->mc_db->md_overflow_pages)) - mdb_cursor_pop(mc); - - mdb_cursor_copy(mc, &mx); -#ifdef MDB_VL32 - /* bump refcount for mx's pages */ - for (i=0; i<mc->mc_snum; i++) - mdb_page_get(&mx, mc->mc_pg[i]->mp_pgno, &mx.mc_pg[i], NULL); -#endif - while (mc->mc_snum > 0) { - MDB_page *mp = mc->mc_pg[mc->mc_top]; - unsigned n = NUMKEYS(mp); - if (IS_LEAF(mp)) { - for (i=0; i<n; i++) { - ni = NODEPTR(mp, i); - if (ni->mn_flags & F_BIGDATA) { - MDB_page *omp; - pgno_t pg; - memcpy(&pg, NODEDATA(ni), sizeof(pg)); - rc = mdb_page_get(mc, pg, &omp, NULL); - if (rc != 0) - goto done; - mdb_cassert(mc, IS_OVERFLOW(omp)); - rc = mdb_midl_append_range(&txn->mt_free_pgs, - pg, omp->mp_pages); - if (rc) - goto done; - mc->mc_db->md_overflow_pages -= omp->mp_pages; - if (!mc->mc_db->md_overflow_pages && !subs) - break; - } else if (subs && (ni->mn_flags & F_SUBDATA)) { - mdb_xcursor_init1(mc, ni); - rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0); - if (rc) - goto done; - } - } - if (!subs && !mc->mc_db->md_overflow_pages) - goto pop; - } else { - if ((rc = mdb_midl_need(&txn->mt_free_pgs, n)) != 0) - goto done; - for (i=0; i<n; i++) { - pgno_t pg; - ni = NODEPTR(mp, i); - pg = NODEPGNO(ni); - /* free it */ - mdb_midl_xappend(txn->mt_free_pgs, pg); - } - } - if (!mc->mc_top) - break; - mc->mc_ki[mc->mc_top] = i; - rc = mdb_cursor_sibling(mc, 1); - if (rc) { - if (rc != MDB_NOTFOUND) - goto done; - /* no more siblings, go back to beginning - * of previous level. - */ -pop: - mdb_cursor_pop(mc); - mc->mc_ki[0] = 0; - for (i=1; i<mc->mc_snum; i++) { - mc->mc_ki[i] = 0; - mc->mc_pg[i] = mx.mc_pg[i]; - } - } - } - /* free it */ - rc = mdb_midl_append(&txn->mt_free_pgs, mc->mc_db->md_root); -done: - if (rc) - txn->mt_flags |= MDB_TXN_ERROR; -#ifdef MDB_VL32 - /* drop refcount for mx's pages */ - mdb_cursor_unref(&mx); -#endif - } else if (rc == MDB_NOTFOUND) { - rc = MDB_SUCCESS; - } - mc->mc_flags &= ~C_INITIALIZED; - return rc; -} - -int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del) -{ - MDB_cursor *mc, *m2; - int rc; - - if ((unsigned)del > 1 || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) - return EACCES; - - if (TXN_DBI_CHANGED(txn, dbi)) - return MDB_BAD_DBI; - - rc = mdb_cursor_open(txn, dbi, &mc); - if (rc) - return rc; - - rc = mdb_drop0(mc, mc->mc_db->md_flags & MDB_DUPSORT); - /* Invalidate the dropped DB's cursors */ - for (m2 = txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) - m2->mc_flags &= ~(C_INITIALIZED|C_EOF); - if (rc) - goto leave; - - /* Can't delete the main DB */ - if (del && dbi >= CORE_DBS) { - rc = mdb_del0(txn, MAIN_DBI, &mc->mc_dbx->md_name, NULL, F_SUBDATA); - if (!rc) { - txn->mt_dbflags[dbi] = DB_STALE; - mdb_dbi_close(txn->mt_env, dbi); - } else { - txn->mt_flags |= MDB_TXN_ERROR; - } - } else { - /* reset the DB record, mark it dirty */ - txn->mt_dbflags[dbi] |= DB_DIRTY; - txn->mt_dbs[dbi].md_depth = 0; - txn->mt_dbs[dbi].md_branch_pages = 0; - txn->mt_dbs[dbi].md_leaf_pages = 0; - txn->mt_dbs[dbi].md_overflow_pages = 0; - txn->mt_dbs[dbi].md_entries = 0; - txn->mt_dbs[dbi].md_root = P_INVALID; - - txn->mt_flags |= MDB_TXN_DIRTY; - } -leave: - mdb_cursor_close(mc); - return rc; -} - -int mdb_set_compare(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp) -{ - if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - txn->mt_dbxs[dbi].md_cmp = cmp; - return MDB_SUCCESS; -} - -int mdb_set_dupsort(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp) -{ - if (!TXN_DBI_EXIST(txn, dbi, 
DB_USRVALID)) - return EINVAL; - - txn->mt_dbxs[dbi].md_dcmp = cmp; - return MDB_SUCCESS; -} - -int mdb_set_relfunc(MDB_txn *txn, MDB_dbi dbi, MDB_rel_func *rel) -{ - if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - txn->mt_dbxs[dbi].md_rel = rel; - return MDB_SUCCESS; -} - -int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx) -{ - if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) - return EINVAL; - - txn->mt_dbxs[dbi].md_relctx = ctx; - return MDB_SUCCESS; -} - -int ESECT -mdb_env_get_maxkeysize(MDB_env *env) -{ - return ENV_MAXKEY(env); -} - -int ESECT -mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx) -{ - unsigned int i, rdrs; - MDB_reader *mr; - char buf[64]; - int rc = 0, first = 1; - - if (!env || !func) - return -1; - if (!env->me_txns) { - return func("(no reader locks)\n", ctx); - } - rdrs = env->me_txns->mti_numreaders; - mr = env->me_txns->mti_readers; - for (i=0; i<rdrs; i++) { - if (mr[i].mr_pid) { - txnid_t txnid = mr[i].mr_txnid; - sprintf(buf, txnid == (txnid_t)-1 ? - "%10d %"Z"x -\n" : "%10d %"Z"x %"Y"u\n", - (int)mr[i].mr_pid, (size_t)mr[i].mr_tid, txnid); - if (first) { - first = 0; - rc = func(" pid thread txnid\n", ctx); - if (rc < 0) - break; - } - rc = func(buf, ctx); - if (rc < 0) - break; - } - } - if (first) { - rc = func("(no active readers)\n", ctx); - } - return rc; -} - -/** Insert pid into list if not already present. - * return -1 if already present. - */ -static int ESECT -mdb_pid_insert(MDB_PID_T *ids, MDB_PID_T pid) -{ - /* binary search of pid in list */ - unsigned base = 0; - unsigned cursor = 1; - int val = 0; - unsigned n = ids[0]; - - while( 0 < n ) { - unsigned pivot = n >> 1; - cursor = base + pivot + 1; - val = pid - ids[cursor]; - - if( val < 0 ) { - n = pivot; - - } else if ( val > 0 ) { - base = cursor; - n -= pivot + 1; - - } else { - /* found, so it's a duplicate */ - return -1; - } - } - - if( val > 0 ) { - ++cursor; - } - ids[0]++; - for (n = ids[0]; n > cursor; n--) - ids[n] = ids[n-1]; - ids[n] = pid; - return 0; -} - -int ESECT -mdb_reader_check(MDB_env *env, int *dead) -{ - if (!env) - return EINVAL; - if (dead) - *dead = 0; - return env->me_txns ? mdb_reader_check0(env, 0, dead) : MDB_SUCCESS; -} - -/** As #mdb_reader_check(). rlocked = <caller locked the reader mutex>. */ -static int ESECT -mdb_reader_check0(MDB_env *env, int rlocked, int *dead) -{ - mdb_mutexref_t rmutex = rlocked ? NULL : env->me_rmutex; - unsigned int i, j, rdrs; - MDB_reader *mr; - MDB_PID_T *pids, pid; - int rc = MDB_SUCCESS, count = 0; - - rdrs = env->me_txns->mti_numreaders; - pids = malloc((rdrs+1) * sizeof(MDB_PID_T)); - if (!pids) - return ENOMEM; - pids[0] = 0; - mr = env->me_txns->mti_readers; - for (i=0; i<rdrs; i++) { - pid = mr[i].mr_pid; - if (pid && pid != env->me_pid) { - if (mdb_pid_insert(pids, pid) == 0) { - if (!mdb_reader_pid(env, Pidcheck, pid)) { - /* Stale reader found */ - j = i; - if (rmutex) { - if ((rc = LOCK_MUTEX0(rmutex)) != 0) { - if ((rc = mdb_mutex_failed(env, rmutex, rc))) - break; - rdrs = 0; /* the above checked all readers */ - } else { - /* Recheck, a new process may have reused pid */ - if (mdb_reader_pid(env, Pidcheck, pid)) - j = rdrs; - } - } - for (; j<rdrs; j++) - if (mr[j].mr_pid == pid) { - DPRINTF(("clear stale reader pid %u txn %"Y"d", - (unsigned) pid, mr[j].mr_txnid)); - mr[j].mr_pid = 0; - count++; - } - if (rmutex) - UNLOCK_MUTEX(rmutex); - } - } - } - } - free(pids); - if (dead) - *dead = count; - return rc; -} - -#ifdef MDB_ROBUST_SUPPORTED -/** Handle #LOCK_MUTEX0() failure. 
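[Editor's note, not part of the commit: mdb_reader_list() and mdb_reader_check() above are the reader-table maintenance entry points; a minimal illustrative caller follows. The names print_msg and report_readers are placeholders.]

    /* Illustrative use of the reader-table API deleted above: dump the
     * reader slots and clear any left behind by dead processes. */
    #include <stdio.h>
    #include "lmdb.h"

    static int print_msg(const char *msg, void *ctx)
    {
        (void)ctx;
        return fputs(msg, stdout) < 0 ? -1 : 0;   /* negative return stops the listing */
    }

    static void report_readers(MDB_env *env)
    {
        int dead = 0;
        mdb_reader_list(env, print_msg, NULL);
        if (mdb_reader_check(env, &dead) == MDB_SUCCESS && dead > 0)
            printf("cleared %d stale reader slot(s)\n", dead);
    }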
- * Try to repair the lock file if the mutex owner died. - * @param[in] env the environment handle - * @param[in] mutex LOCK_MUTEX0() mutex - * @param[in] rc LOCK_MUTEX0() error (nonzero) - * @return 0 on success with the mutex locked, or an error code on failure. - */ -static int ESECT -mdb_mutex_failed(MDB_env *env, mdb_mutexref_t mutex, int rc) -{ - int rlocked, rc2; - MDB_meta *meta; - - if (rc == MDB_OWNERDEAD) { - /* We own the mutex. Clean up after dead previous owner. */ - rc = MDB_SUCCESS; - rlocked = (mutex == env->me_rmutex); - if (!rlocked) { - /* Keep mti_txnid updated, otherwise next writer can - * overwrite data which latest meta page refers to. - */ - meta = mdb_env_pick_meta(env); - env->me_txns->mti_txnid = meta->mm_txnid; - /* env is hosed if the dead thread was ours */ - if (env->me_txn) { - env->me_flags |= MDB_FATAL_ERROR; - env->me_txn = NULL; - rc = MDB_PANIC; - } - } - DPRINTF(("%cmutex owner died, %s", (rlocked ? 'r' : 'w'), - (rc ? "this process' env is hosed" : "recovering"))); - rc2 = mdb_reader_check0(env, rlocked, NULL); - if (rc2 == 0) - rc2 = mdb_mutex_consistent(mutex); - if (rc || (rc = rc2)) { - DPRINTF(("LOCK_MUTEX recovery failed, %s", mdb_strerror(rc))); - UNLOCK_MUTEX(mutex); - } - } else { -#ifdef _WIN32 - rc = ErrCode(); -#endif - DPRINTF(("LOCK_MUTEX failed, %s", mdb_strerror(rc))); - } - - return rc; -} -#endif /* MDB_ROBUST_SUPPORTED */ -/** @} */ - -#if defined(_WIN32) -static int utf8_to_utf16(const char *src, int srcsize, wchar_t **dst, int *dstsize) -{ - int need; - wchar_t *result; - need = MultiByteToWideChar(CP_UTF8, 0, src, srcsize, NULL, 0); - if (need == 0xFFFD) - return EILSEQ; - if (need == 0) - return EINVAL; - result = malloc(sizeof(wchar_t) * need); - if (!result) - return ENOMEM; - MultiByteToWideChar(CP_UTF8, 0, src, srcsize, result, need); - if (dstsize) - *dstsize = need; - *dst = result; - return 0; -} -#endif /* defined(_WIN32) */ diff --git a/plugins/Dbx_mdb/src/lmdb/midl.c b/plugins/Dbx_mdb/src/lmdb/midl.c deleted file mode 100644 index 9748d8dba8..0000000000 --- a/plugins/Dbx_mdb/src/lmdb/midl.c +++ /dev/null @@ -1,420 +0,0 @@ -/** @file midl.c - * @brief ldap bdb back-end ID List functions */ -/* $OpenLDAP$ */ -/* This work is part of OpenLDAP Software <http://www.openldap.org/>. - * - * Copyright 2000-2016 The OpenLDAP Foundation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted only as authorized by the OpenLDAP - * Public License. - * - * A copy of this license is available in the file LICENSE in the - * top-level directory of the distribution or, alternatively, at - * <http://www.OpenLDAP.org/license.html>. - */ - -#include <limits.h> -#include <string.h> -#include <stdlib.h> -#include <errno.h> -#include <sys/types.h> -#include "midl.h" - -/** @defgroup internal LMDB Internals - * @{ - */ -/** @defgroup idls ID List Management - * @{ - */ -#define CMP(x,y) ( (x) < (y) ? 
-1 : (x) > (y) ) - -unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id ) -{ - /* - * binary search of id in ids - * if found, returns position of id - * if not found, returns first position greater than id - */ - unsigned base = 0; - unsigned cursor = 1; - int val = 0; - unsigned n = ids[0]; - - while( 0 < n ) { - unsigned pivot = n >> 1; - cursor = base + pivot + 1; - val = CMP( ids[cursor], id ); - - if( val < 0 ) { - n = pivot; - - } else if ( val > 0 ) { - base = cursor; - n -= pivot + 1; - - } else { - return cursor; - } - } - - if( val > 0 ) { - ++cursor; - } - return cursor; -} - -#if 0 /* superseded by append/sort */ -int mdb_midl_insert( MDB_IDL ids, MDB_ID id ) -{ - unsigned x, i; - - x = mdb_midl_search( ids, id ); - assert( x > 0 ); - - if( x < 1 ) { - /* internal error */ - return -2; - } - - if ( x <= ids[0] && ids[x] == id ) { - /* duplicate */ - assert(0); - return -1; - } - - if ( ++ids[0] >= MDB_IDL_DB_MAX ) { - /* no room */ - --ids[0]; - return -2; - - } else { - /* insert id */ - for (i=ids[0]; i>x; i--) - ids[i] = ids[i-1]; - ids[x] = id; - } - - return 0; -} -#endif - -MDB_IDL mdb_midl_alloc(int num) -{ - MDB_IDL ids = malloc((num+2) * sizeof(MDB_ID)); - if (ids) { - *ids++ = num; - *ids = 0; - } - return ids; -} - -void mdb_midl_free(MDB_IDL ids) -{ - if (ids) - free(ids-1); -} - -void mdb_midl_shrink( MDB_IDL *idp ) -{ - MDB_IDL ids = *idp; - if (*(--ids) > MDB_IDL_UM_MAX && - (ids = realloc(ids, (MDB_IDL_UM_MAX+2) * sizeof(MDB_ID)))) - { - *ids++ = MDB_IDL_UM_MAX; - *idp = ids; - } -} - -static int mdb_midl_grow( MDB_IDL *idp, int num ) -{ - MDB_IDL idn = *idp-1; - /* grow it */ - idn = realloc(idn, (*idn + num + 2) * sizeof(MDB_ID)); - if (!idn) - return ENOMEM; - *idn++ += num; - *idp = idn; - return 0; -} - -int mdb_midl_need( MDB_IDL *idp, unsigned num ) -{ - MDB_IDL ids = *idp; - num += ids[0]; - if (num > ids[-1]) { - num = (num + num/4 + (256 + 2)) & -256; - if (!(ids = realloc(ids-1, num * sizeof(MDB_ID)))) - return ENOMEM; - *ids++ = num - 2; - *idp = ids; - } - return 0; -} - -int mdb_midl_append( MDB_IDL *idp, MDB_ID id ) -{ - MDB_IDL ids = *idp; - /* Too big? */ - if (ids[0] >= ids[-1]) { - if (mdb_midl_grow(idp, MDB_IDL_UM_MAX)) - return ENOMEM; - ids = *idp; - } - ids[0]++; - ids[ids[0]] = id; - return 0; -} - -int mdb_midl_append_list( MDB_IDL *idp, MDB_IDL app ) -{ - MDB_IDL ids = *idp; - /* Too big? */ - if (ids[0] + app[0] >= ids[-1]) { - if (mdb_midl_grow(idp, app[0])) - return ENOMEM; - ids = *idp; - } - memcpy(&ids[ids[0]+1], &app[1], app[0] * sizeof(MDB_ID)); - ids[0] += app[0]; - return 0; -} - -int mdb_midl_append_range( MDB_IDL *idp, MDB_ID id, unsigned n ) -{ - MDB_ID *ids = *idp, len = ids[0]; - /* Too big? 
*/ - if (len + n > ids[-1]) { - if (mdb_midl_grow(idp, n | MDB_IDL_UM_MAX)) - return ENOMEM; - ids = *idp; - } - ids[0] = len + n; - ids += len; - while (n) - ids[n--] = id++; - return 0; -} - -void mdb_midl_xmerge( MDB_IDL idl, MDB_IDL merge ) -{ - MDB_ID old_id, merge_id, i = merge[0], j = idl[0], k = i+j, total = k; - idl[0] = (MDB_ID)-1; /* delimiter for idl scan below */ - old_id = idl[j]; - while (i) { - merge_id = merge[i--]; - for (; old_id < merge_id; old_id = idl[--j]) - idl[k--] = old_id; - idl[k--] = merge_id; - } - idl[0] = total; -} - -/* Quicksort + Insertion sort for small arrays */ - -#define SMALL 8 -#define MIDL_SWAP(a,b) { itmp=(a); (a)=(b); (b)=itmp; } - -void -mdb_midl_sort( MDB_IDL ids ) -{ - /* Max possible depth of int-indexed tree * 2 items/level */ - int istack[sizeof(int)*CHAR_BIT * 2]; - int i,j,k,l,ir,jstack; - MDB_ID a, itmp; - - ir = (int)ids[0]; - l = 1; - jstack = 0; - for(;;) { - if (ir - l < SMALL) { /* Insertion sort */ - for (j=l+1;j<=ir;j++) { - a = ids[j]; - for (i=j-1;i>=1;i--) { - if (ids[i] >= a) break; - ids[i+1] = ids[i]; - } - ids[i+1] = a; - } - if (jstack == 0) break; - ir = istack[jstack--]; - l = istack[jstack--]; - } else { - k = (l + ir) >> 1; /* Choose median of left, center, right */ - MIDL_SWAP(ids[k], ids[l+1]); - if (ids[l] < ids[ir]) { - MIDL_SWAP(ids[l], ids[ir]); - } - if (ids[l+1] < ids[ir]) { - MIDL_SWAP(ids[l+1], ids[ir]); - } - if (ids[l] < ids[l+1]) { - MIDL_SWAP(ids[l], ids[l+1]); - } - i = l+1; - j = ir; - a = ids[l+1]; - for(;;) { - do i++; while(ids[i] > a); - do j--; while(ids[j] < a); - if (j < i) break; - MIDL_SWAP(ids[i],ids[j]); - } - ids[l+1] = ids[j]; - ids[j] = a; - jstack += 2; - if (ir-i+1 >= j-l) { - istack[jstack] = ir; - istack[jstack-1] = i; - ir = j-1; - } else { - istack[jstack] = j-1; - istack[jstack-1] = l; - l = i; - } - } - } -} - -unsigned mdb_mid2l_search( MDB_ID2L ids, MDB_ID id ) -{ - /* - * binary search of id in ids - * if found, returns position of id - * if not found, returns first position greater than id - */ - unsigned base = 0; - unsigned cursor = 1; - int val = 0; - unsigned n = (unsigned)ids[0].mid; - - while( 0 < n ) { - unsigned pivot = n >> 1; - cursor = base + pivot + 1; - val = CMP( id, ids[cursor].mid ); - - if( val < 0 ) { - n = pivot; - - } else if ( val > 0 ) { - base = cursor; - n -= pivot + 1; - - } else { - return cursor; - } - } - - if( val > 0 ) { - ++cursor; - } - return cursor; -} - -int mdb_mid2l_insert( MDB_ID2L ids, MDB_ID2 *id ) -{ - unsigned x, i; - - x = mdb_mid2l_search( ids, id->mid ); - - if( x < 1 ) { - /* internal error */ - return -2; - } - - if ( x <= ids[0].mid && ids[x].mid == id->mid ) { - /* duplicate */ - return -1; - } - - if ( ids[0].mid >= MDB_IDL_UM_MAX ) { - /* too big */ - return -2; - - } else { - /* insert id */ - ids[0].mid++; - for (i=(unsigned)ids[0].mid; i>x; i--) - ids[i] = ids[i-1]; - ids[x] = *id; - } - - return 0; -} - -int mdb_mid2l_append( MDB_ID2L ids, MDB_ID2 *id ) -{ - /* Too big? 
*/ - if (ids[0].mid >= MDB_IDL_UM_MAX) { - return -2; - } - ids[0].mid++; - ids[ids[0].mid] = *id; - return 0; -} - -#ifdef MDB_VL32 -unsigned mdb_mid3l_search( MDB_ID3L ids, MDB_ID id ) -{ - /* - * binary search of id in ids - * if found, returns position of id - * if not found, returns first position greater than id - */ - unsigned base = 0; - unsigned cursor = 1; - int val = 0; - unsigned n = (unsigned)ids[0].mid; - - while( 0 < n ) { - unsigned pivot = n >> 1; - cursor = base + pivot + 1; - val = CMP( id, ids[cursor].mid ); - - if( val < 0 ) { - n = pivot; - - } else if ( val > 0 ) { - base = cursor; - n -= pivot + 1; - - } else { - return cursor; - } - } - - if( val > 0 ) { - ++cursor; - } - return cursor; -} - -int mdb_mid3l_insert( MDB_ID3L ids, MDB_ID3 *id ) -{ - unsigned x, i; - - x = mdb_mid3l_search( ids, id->mid ); - - if( x < 1 ) { - /* internal error */ - return -2; - } - - if ( x <= ids[0].mid && ids[x].mid == id->mid ) { - /* duplicate */ - return -1; - } - - /* insert id */ - ids[0].mid++; - for (i=(unsigned)ids[0].mid; i>x; i--) - ids[i] = ids[i-1]; - ids[x] = *id; - - return 0; -} -#endif /* MDB_VL32 */ - -/** @} */ -/** @} */ diff --git a/plugins/Dbx_mdb/src/lmdb/midl.h b/plugins/Dbx_mdb/src/lmdb/midl.h deleted file mode 100644 index 91fa58f0fb..0000000000 --- a/plugins/Dbx_mdb/src/lmdb/midl.h +++ /dev/null @@ -1,212 +0,0 @@ -/** @file midl.h - * @brief LMDB ID List header file. - * - * This file was originally part of back-bdb but has been - * modified for use in libmdb. Most of the macros defined - * in this file are unused, just left over from the original. - * - * This file is only used internally in libmdb and its definitions - * are not exposed publicly. - */ -/* $OpenLDAP$ */ -/* This work is part of OpenLDAP Software <http://www.openldap.org/>. - * - * Copyright 2000-2016 The OpenLDAP Foundation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted only as authorized by the OpenLDAP - * Public License. - * - * A copy of this license is available in the file LICENSE in the - * top-level directory of the distribution or, alternatively, at - * <http://www.OpenLDAP.org/license.html>. - */ - -#ifndef _MDB_MIDL_H_ -#define _MDB_MIDL_H_ - -#include <stddef.h> -#if _MSC_VER < 1800 -# include <msapi/inttypes.h> -#else -# include <inttypes.h> -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -/** @defgroup internal LMDB Internals - * @{ - */ - -/** @defgroup idls ID List Management - * @{ - */ - /** A generic unsigned ID number. These were entryIDs in back-bdb. - * Preferably it should have the same size as a pointer. - */ -#ifdef MDB_VL32 -typedef uint64_t MDB_ID; -#else -typedef size_t MDB_ID; -#endif - - /** An IDL is an ID List, a sorted array of IDs. The first - * element of the array is a counter for how many actual - * IDs are in the list. In the original back-bdb code, IDLs are - * sorted in ascending order. For libmdb IDLs are sorted in - * descending order. 
- */ -typedef MDB_ID *MDB_IDL; - -/* IDL sizes - likely should be even bigger - * limiting factors: sizeof(ID), thread stack size - */ -#ifdef MDB_VL32 -#define MDB_IDL_LOGN 14 /* DB_SIZE is 2^14, UM_SIZE is 2^15 */ -#else -#define MDB_IDL_LOGN 16 /* DB_SIZE is 2^16, UM_SIZE is 2^17 */ -#endif -#define MDB_IDL_DB_SIZE (1<<MDB_IDL_LOGN) -#define MDB_IDL_UM_SIZE (1<<(MDB_IDL_LOGN+1)) - -#define MDB_IDL_DB_MAX (MDB_IDL_DB_SIZE-1) -#define MDB_IDL_UM_MAX (MDB_IDL_UM_SIZE-1) - -#define MDB_IDL_SIZEOF(ids) (((ids)[0]+1) * sizeof(MDB_ID)) -#define MDB_IDL_IS_ZERO(ids) ( (ids)[0] == 0 ) -#define MDB_IDL_CPY( dst, src ) (memcpy( dst, src, MDB_IDL_SIZEOF( src ) )) -#define MDB_IDL_FIRST( ids ) ( (ids)[1] ) -#define MDB_IDL_LAST( ids ) ( (ids)[(ids)[0]] ) - - /** Current max length of an #mdb_midl_alloc()ed IDL */ -#define MDB_IDL_ALLOCLEN( ids ) ( (ids)[-1] ) - - /** Append ID to IDL. The IDL must be big enough. */ -#define mdb_midl_xappend(idl, id) do { \ - MDB_ID *xidl = (idl), xlen = ++(xidl[0]); \ - xidl[xlen] = (id); \ - } while (0) - - /** Search for an ID in an IDL. - * @param[in] ids The IDL to search. - * @param[in] id The ID to search for. - * @return The index of the first ID greater than or equal to \b id. - */ -unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id ); - - /** Allocate an IDL. - * Allocates memory for an IDL of the given size. - * @return IDL on success, NULL on failure. - */ -MDB_IDL mdb_midl_alloc(int num); - - /** Free an IDL. - * @param[in] ids The IDL to free. - */ -void mdb_midl_free(MDB_IDL ids); - - /** Shrink an IDL. - * Return the IDL to the default size if it has grown larger. - * @param[in,out] idp Address of the IDL to shrink. - */ -void mdb_midl_shrink(MDB_IDL *idp); - - /** Make room for num additional elements in an IDL. - * @param[in,out] idp Address of the IDL. - * @param[in] num Number of elements to make room for. - * @return 0 on success, ENOMEM on failure. - */ -int mdb_midl_need(MDB_IDL *idp, unsigned num); - - /** Append an ID onto an IDL. - * @param[in,out] idp Address of the IDL to append to. - * @param[in] id The ID to append. - * @return 0 on success, ENOMEM if the IDL is too large. - */ -int mdb_midl_append( MDB_IDL *idp, MDB_ID id ); - - /** Append an IDL onto an IDL. - * @param[in,out] idp Address of the IDL to append to. - * @param[in] app The IDL to append. - * @return 0 on success, ENOMEM if the IDL is too large. - */ -int mdb_midl_append_list( MDB_IDL *idp, MDB_IDL app ); - - /** Append an ID range onto an IDL. - * @param[in,out] idp Address of the IDL to append to. - * @param[in] id The lowest ID to append. - * @param[in] n Number of IDs to append. - * @return 0 on success, ENOMEM if the IDL is too large. - */ -int mdb_midl_append_range( MDB_IDL *idp, MDB_ID id, unsigned n ); - - /** Merge an IDL onto an IDL. The destination IDL must be big enough. - * @param[in] idl The IDL to merge into. - * @param[in] merge The IDL to merge. - */ -void mdb_midl_xmerge( MDB_IDL idl, MDB_IDL merge ); - - /** Sort an IDL. - * @param[in,out] ids The IDL to sort. - */ -void mdb_midl_sort( MDB_IDL ids ); - - /** An ID2 is an ID/pointer pair. - */ -typedef struct MDB_ID2 { - MDB_ID mid; /**< The ID */ - void *mptr; /**< The pointer */ -} MDB_ID2; - - /** An ID2L is an ID2 List, a sorted array of ID2s. - * The first element's \b mid member is a count of how many actual - * elements are in the array. The \b mptr member of the first element is unused. - * The array is sorted in ascending order by \b mid. 
- */ -typedef MDB_ID2 *MDB_ID2L; - - /** Search for an ID in an ID2L. - * @param[in] ids The ID2L to search. - * @param[in] id The ID to search for. - * @return The index of the first ID2 whose \b mid member is greater than or equal to \b id. - */ -unsigned mdb_mid2l_search( MDB_ID2L ids, MDB_ID id ); - - - /** Insert an ID2 into a ID2L. - * @param[in,out] ids The ID2L to insert into. - * @param[in] id The ID2 to insert. - * @return 0 on success, -1 if the ID was already present in the ID2L. - */ -int mdb_mid2l_insert( MDB_ID2L ids, MDB_ID2 *id ); - - /** Append an ID2 into a ID2L. - * @param[in,out] ids The ID2L to append into. - * @param[in] id The ID2 to append. - * @return 0 on success, -2 if the ID2L is too big. - */ -int mdb_mid2l_append( MDB_ID2L ids, MDB_ID2 *id ); - -#ifdef MDB_VL32 -typedef struct MDB_ID3 { - MDB_ID mid; /**< The ID */ - void *mptr; /**< The pointer */ - unsigned int mcnt; /**< Number of pages */ - unsigned int mref; /**< Refcounter */ -} MDB_ID3; - -typedef MDB_ID3 *MDB_ID3L; - -unsigned mdb_mid3l_search( MDB_ID3L ids, MDB_ID id ); -int mdb_mid3l_insert( MDB_ID3L ids, MDB_ID3 *id ); - -#endif /* MDB_VL32 */ -/** @} */ -/** @} */ -#ifdef __cplusplus -} -#endif -#endif /* _MDB_MIDL_H_ */ diff --git a/plugins/Dbx_mdb/src/mdbx/bits.h b/plugins/Dbx_mdb/src/mdbx/bits.h new file mode 100644 index 0000000000..74b30c628a --- /dev/null +++ b/plugins/Dbx_mdb/src/mdbx/bits.h @@ -0,0 +1,1224 @@ +/* + * Copyright 2015-2017 Leonid Yuriev <leo@yuriev.ru> + * and other libmdbx authors: please see AUTHORS file. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * <http://www.OpenLDAP.org/license.html>. */ + +#pragma once +/* *INDENT-OFF* */ +/* clang-format off */ + +#ifndef MDBX_DEBUG +# define MDBX_DEBUG 0 +#endif + +#if MDBX_DEBUG +# undef NDEBUG +#endif + +/* Features under development */ +#ifndef MDBX_DEVEL +# define MDBX_DEVEL 1 +#endif + +/*----------------------------------------------------------------------------*/ + +/* Should be defined before any includes */ +#ifndef _GNU_SOURCE +# define _GNU_SOURCE 1 +#endif +#ifndef _FILE_OFFSET_BITS +# define _FILE_OFFSET_BITS 64 +#endif + +#ifdef _MSC_VER +# ifndef _CRT_SECURE_NO_WARNINGS +# define _CRT_SECURE_NO_WARNINGS +# endif +#if _MSC_VER > 1800 +# pragma warning(disable : 4464) /* relative include path contains '..' 
*/ +#endif +#pragma warning(disable : 4710) /* 'xyz': function not inlined */ +#pragma warning(disable : 4711) /* function 'xyz' selected for automatic inline expansion */ +#pragma warning(disable : 4201) /* nonstandard extension used : nameless struct / union */ +#pragma warning(disable : 4706) /* assignment within conditional expression */ +#pragma warning(disable : 4127) /* conditional expression is constant */ +#pragma warning(disable : 4324) /* 'xyz': structure was padded due to alignment specifier */ +#pragma warning(disable : 4310) /* cast truncates constant value */ +#pragma warning(disable : 4820) /* bytes padding added after data member for aligment */ +#pragma warning(disable : 4548) /* expression before comma has no effect; expected expression with side - effect */ +#endif /* _MSC_VER (warnings) */ + +#include "./mdbx.h" +#include "./defs.h" + +#if defined(__GNUC__) && !__GNUC_PREREQ(4,2) + /* Actualy libmdbx was not tested with compilers older than GCC from RHEL6. + * But you could remove this #error and try to continue at your own risk. + * In such case please don't rise up an issues related ONLY to old compilers. + */ +# warning "libmdbx required at least GCC 4.2 compatible C/C++ compiler." +#endif + +#if defined(__GLIBC__) && !__GLIBC_PREREQ(2,12) + /* Actualy libmdbx was not tested with something older than glibc 2.12 (from RHEL6). + * But you could remove this #error and try to continue at your own risk. + * In such case please don't rise up an issues related ONLY to old systems. + */ +# warning "libmdbx required at least GLIBC 2.12." +#endif + +#ifdef __SANITIZE_THREAD__ +# warning "libmdbx don't compatible with ThreadSanitizer, you will get a lot of false-positive issues." +#endif /* __SANITIZE_THREAD__ */ + +#include "./osal.h" + +/* *INDENT-ON* */ +/* clang-format on */ + +/*----------------------------------------------------------------------------*/ +/* Basic constants and types */ + +/* The minimum number of keys required in a database page. + * Setting this to a larger value will place a smaller bound on the + * maximum size of a data item. Data items larger than this size will + * be pushed into overflow pages instead of being stored directly in + * the B-tree node. This value used to default to 4. With a page size + * of 4096 bytes that meant that any item larger than 1024 bytes would + * go into an overflow page. That also meant that on average 2-3KB of + * each overflow page was wasted space. The value cannot be lower than + * 2 because then there would no longer be a tree structure. With this + * value, items larger than 2KB will go into overflow pages, and on + * average only 1KB will be wasted. */ +#define MDBX_MINKEYS 2 + +/* A stamp that identifies a file as an MDBX file. + * There's nothing special about this value other than that it is easily + * recognizable, and it will reflect any byte order mismatches. */ +#define MDBX_MAGIC UINT64_C(/* 56-bit prime */ 0x59659DBDEF4C11) + +/* The version number for a database's datafile format. */ +#define MDBX_DATA_VERSION ((MDBX_DEVEL) ? 255 : 2) +/* The version number for a database's lockfile format. */ +#define MDBX_LOCK_VERSION ((MDBX_DEVEL) ? 255 : 2) + +/* handle for the DB used to track free pages. */ +#define FREE_DBI 0 +/* handle for the default DB. 
*/ +#define MAIN_DBI 1 +/* Number of DBs in metapage (free and main) - also hardcoded elsewhere */ +#define CORE_DBS 2 +#define MAX_DBI (INT16_MAX - CORE_DBS) + +/* Number of meta pages - also hardcoded elsewhere */ +#define NUM_METAS 3 + +/* A page number in the database. + * + * MDBX uses 32 bit for page numbers. This limits database + * size up to 2^44 bytes, in case of 4K pages. */ +typedef uint32_t pgno_t; +#define PRIaPGNO PRIu32 +#define MAX_PAGENO ((pgno_t)UINT64_C(0xffffFFFFffff)) +#define MIN_PAGENO NUM_METAS + +/* A transaction ID. */ +typedef uint64_t txnid_t; +#define PRIaTXN PRIi64 +#if MDBX_DEVEL +#define MIN_TXNID (UINT64_MAX - UINT32_MAX) +#elif MDBX_DEBUG +#define MIN_TXNID UINT64_C(0x100000000) +#else +#define MIN_TXNID UINT64_C(1) +#endif /* MIN_TXNID */ + +/* Used for offsets within a single page. + * Since memory pages are typically 4 or 8KB in size, 12-13 bits, + * this is plenty. */ +typedef uint16_t indx_t; + +#define MEGABYTE ((size_t)1 << 20) + +/*----------------------------------------------------------------------------*/ +/* Core structures for database and shared memory (i.e. format definition) */ +#pragma pack(push, 1) + +/* Reader Lock Table + * + * Readers don't acquire any locks for their data access. Instead, they + * simply record their transaction ID in the reader table. The reader + * mutex is needed just to find an empty slot in the reader table. The + * slot's address is saved in thread-specific data so that subsequent + * read transactions started by the same thread need no further locking to + * proceed. + * + * If MDBX_NOTLS is set, the slot address is not saved in thread-specific data. + * No reader table is used if the database is on a read-only filesystem. + * + * Since the database uses multi-version concurrency control, readers don't + * actually need any locking. This table is used to keep track of which + * readers are using data from which old transactions, so that we'll know + * when a particular old transaction is no longer in use. Old transactions + * that have discarded any data pages can then have those pages reclaimed + * for use by a later write transaction. + * + * The lock table is constructed such that reader slots are aligned with the + * processor's cache line size. Any slot is only ever used by one thread. + * This alignment guarantees that there will be no contention or cache + * thrashing as threads update their own slot info, and also eliminates + * any need for locking when accessing a slot. + * + * A writer thread will scan every slot in the table to determine the oldest + * outstanding reader transaction. Any freed pages older than this will be + * reclaimed by the writer. The writer doesn't use any locks when scanning + * this table. This means that there's no guarantee that the writer will + * see the most up-to-date reader info, but that's not required for correct + * operation - all we need is to know the upper bound on the oldest reader, + * we don't care at all about the newest reader. So the only consequence of + * reading stale information here is that old pages might hang around a + * while longer before being reclaimed. That's actually good anyway, because + * the longer we delay reclaiming old pages, the more likely it is that a + * string of contiguous pages can be found after coalescing old pages from + * many old transactions together. */ + +/* The actual reader record, with cacheline padding. */ +typedef struct MDBX_reader { + /* Current Transaction ID when this transaction began, or (txnid_t)-1. 
+ * Multiple readers that start at the same time will probably have the + * same ID here. Again, it's not important to exclude them from + * anything; all we need to know is which version of the DB they + * started from so we can avoid overwriting any data used in that + * particular version. */ + volatile txnid_t mr_txnid; + + /* The information we store in a single slot of the reader table. + * In addition to a transaction ID, we also record the process and + * thread ID that owns a slot, so that we can detect stale information, + * e.g. threads or processes that went away without cleaning up. + * + * NOTE: We currently don't check for stale records. + * We simply re-init the table when we know that we're the only process + * opening the lock file. */ + + /* The process ID of the process owning this reader txn. */ + volatile mdbx_pid_t mr_pid; + /* The thread ID of the thread owning this txn. */ + volatile mdbx_tid_t mr_tid; + + /* cache line alignment */ + uint8_t pad[MDBX_CACHELINE_SIZE - + (sizeof(txnid_t) + sizeof(mdbx_pid_t) + sizeof(mdbx_tid_t)) % + MDBX_CACHELINE_SIZE]; +} __cache_aligned MDBX_reader; + +/* Information about a single database in the environment. */ +typedef struct MDBX_db { + uint16_t md_flags; /* see mdbx_dbi_open */ + uint16_t md_depth; /* depth of this tree */ + uint32_t md_xsize; /* also ksize for LEAF2 pages */ + pgno_t md_root; /* the root page of this tree */ + pgno_t md_branch_pages; /* number of internal pages */ + pgno_t md_leaf_pages; /* number of leaf pages */ + pgno_t md_overflow_pages; /* number of overflow pages */ + uint64_t md_seq; /* table sequence counter */ + uint64_t md_entries; /* number of data items */ + uint64_t md_merkle; /* Merkle tree checksum */ +} MDBX_db; + +/* Meta page content. + * A meta page is the start point for accessing a database snapshot. + * Pages 0-1 are meta pages. Transaction N writes meta page (N % 2). */ +typedef struct MDBX_meta { + /* Stamp identifying this as an MDBX file. + * It must be set to MDBX_MAGIC with MDBX_DATA_VERSION. */ + uint64_t mm_magic_and_version; + + /* txnid that committed this page, the first of a two-phase-update pair */ + volatile txnid_t mm_txnid_a; + + uint16_t mm_extra_flags; /* extra DB flags, zero (nothing) for now */ + uint8_t mm_validator_id; /* ID of checksum and page validation method, + * zero (nothing) for now */ + uint8_t mm_extra_pagehdr; /* extra bytes in the page header, + * zero (nothing) for now */ + + struct { + uint16_t grow; /* datafile growth step in pages */ + uint16_t shrink; /* datafile shrink threshold in pages */ + pgno_t lower; /* minimal size of datafile in pages */ + pgno_t upper; /* maximal size of datafile in pages */ + pgno_t now; /* current size of datafile in pages */ + pgno_t next; /* first unused page in the datafile, + * but actually the file may be shorter. 
*/ + } mm_geo; + + MDBX_db mm_dbs[CORE_DBS]; /* first is free space, 2nd is main db */ + /* The size of pages used in this DB */ +#define mm_psize mm_dbs[FREE_DBI].md_xsize +/* Any persistent environment flags, see mdbx_env */ +#define mm_flags mm_dbs[FREE_DBI].md_flags + mdbx_canary mm_canary; + +#define MDBX_DATASIGN_NONE 0u +#define MDBX_DATASIGN_WEAK 1u +#define SIGN_IS_WEAK(sign) ((sign) == MDBX_DATASIGN_WEAK) +#define SIGN_IS_STEADY(sign) ((sign) > MDBX_DATASIGN_WEAK) +#define META_IS_WEAK(meta) SIGN_IS_WEAK((meta)->mm_datasync_sign) +#define META_IS_STEADY(meta) SIGN_IS_STEADY((meta)->mm_datasync_sign) + volatile uint64_t mm_datasync_sign; + + /* txnid that committed this page, the second of a two-phase-update pair */ + volatile txnid_t mm_txnid_b; +} MDBX_meta; + +/* Common header for all page types. The page type depends on mp_flags. + * + * P_BRANCH and P_LEAF pages have unsorted 'MDBX_node's at the end, with + * sorted mp_ptrs[] entries referring to them. Exception: P_LEAF2 pages + * omit mp_ptrs and pack sorted MDBX_DUPFIXED values after the page header. + * + * P_OVERFLOW records occupy one or more contiguous pages where only the + * first has a page header. They hold the real data of F_BIGDATA nodes. + * + * P_SUBP sub-pages are small leaf "pages" with duplicate data. + * A node with flag F_DUPDATA but not F_SUBDATA contains a sub-page. + * (Duplicate data can also go in sub-databases, which use normal pages.) + * + * P_META pages contain MDBX_meta, the start point of an MDBX snapshot. + * + * Each non-metapage up to MDBX_meta.mm_last_pg is reachable exactly once + * in the snapshot: Either used by a database or listed in a freeDB record. */ +typedef struct MDBX_page { + union { + struct MDBX_page *mp_next; /* for in-memory list of freed pages, + * must be first field, see NEXT_LOOSE_PAGE */ + uint64_t mp_validator; /* checksum of page content or a txnid during + * which the page has been updated */ + }; + uint16_t mp_leaf2_ksize; /* key size if this is a LEAF2 page */ +#define P_BRANCH 0x01 /* branch page */ +#define P_LEAF 0x02 /* leaf page */ +#define P_OVERFLOW 0x04 /* overflow page */ +#define P_META 0x08 /* meta page */ +#define P_DIRTY 0x10 /* dirty page, also set for P_SUBP pages */ +#define P_LEAF2 0x20 /* for MDBX_DUPFIXED records */ +#define P_SUBP 0x40 /* for MDBX_DUPSORT sub-pages */ +#define P_LOOSE 0x4000 /* page was dirtied then freed, can be reused */ +#define P_KEEP 0x8000 /* leave this page alone during spill */ + uint16_t mp_flags; + union { + struct { + indx_t mp_lower; /* lower bound of free space */ + indx_t mp_upper; /* upper bound of free space */ + }; + uint32_t mp_pages; /* number of overflow pages */ + }; + pgno_t mp_pgno; /* page number */ + + /* dynamic size */ + union { + indx_t mp_ptrs[1]; + MDBX_meta mp_meta; + uint8_t mp_data[1]; + }; +} MDBX_page; + +/* Size of the page header, excluding dynamic data at the end */ +#define PAGEHDRSZ ((unsigned)offsetof(MDBX_page, mp_data)) + +/* The maximum size of a database page. +* +* It is 64K, but value-PAGEHDRSZ must fit in MDBX_page.mp_upper. +* +* MDBX will use database pages < OS pages if needed. +* That causes more I/O in write transactions: The OS must +* know (read) the whole page before writing a partial page. +* +* Note that we don't currently support Huge pages. On Linux, +* regular data files cannot use Huge pages, and in general +* Huge pages aren't actually pageable. We rely on the OS +* demand-pager to read our data and page it out when memory +* pressure from other processes is high. 
So until OSs have +* actual paging support for Huge pages, they're not viable. */ +#define MAX_PAGESIZE 0x10000u +#define MIN_PAGESIZE 512u + +#define MIN_MAPSIZE (MIN_PAGESIZE * MIN_PAGENO) +#if defined(_WIN32) || defined(_WIN64) +#define MAX_MAPSIZE32 UINT32_C(0x38000000) +#else +#define MAX_MAPSIZE32 UINT32_C(0x7ff80000) +#endif +#define MAX_MAPSIZE64 \ + ((sizeof(pgno_t) > 4) ? UINT64_C(0x7fffFFFFfff80000) \ + : MAX_PAGENO * (uint64_t)MAX_PAGESIZE) + +#define MAX_MAPSIZE ((sizeof(size_t) < 8) ? MAX_MAPSIZE32 : MAX_MAPSIZE64) + +/* The header for the reader table (a memory-mapped lock file). */ +typedef struct MDBX_lockinfo { + /* Stamp identifying this as an MDBX file. + * It must be set to MDBX_MAGIC with with MDBX_LOCK_VERSION. */ + uint64_t mti_magic_and_version; + + /* Format of this lock file. Must be set to MDBX_LOCK_FORMAT. */ + uint32_t mti_os_and_format; + + /* Flags which environment was opened. */ + volatile uint32_t mti_envmode; + + union { +#ifdef MDBX_OSAL_LOCK + MDBX_OSAL_LOCK mti_wmutex; +#endif + uint64_t align_wmutex; + }; + + union { + /* The number of slots that have been used in the reader table. + * This always records the maximum count, it is not decremented + * when readers release their slots. */ + volatile unsigned __cache_aligned mti_numreaders; + uint64_t align_numreaders; + }; + + union { +#ifdef MDBX_OSAL_LOCK + /* Mutex protecting access to this table. */ + MDBX_OSAL_LOCK mti_rmutex; +#endif + uint64_t align_rmutex; + }; + + union { + volatile txnid_t mti_oldest; + uint64_t align_oldest; + }; + + union { + volatile uint32_t mti_reader_finished_flag; + uint64_t align_reader_finished_flag; + }; + + uint8_t pad_align[MDBX_CACHELINE_SIZE - sizeof(uint64_t) * 7]; + + MDBX_reader __cache_aligned mti_readers[1]; +} MDBX_lockinfo; + +#ifdef _MSC_VER +#pragma pack(pop) +#endif /* MSVC: Enable aligment */ + +#define MDBX_LOCKINFO_WHOLE_SIZE \ + ((sizeof(MDBX_lockinfo) + MDBX_CACHELINE_SIZE - 1) & \ + ~((size_t)MDBX_CACHELINE_SIZE - 1)) + +/* Lockfile format signature: version, features and field layout */ +#define MDBX_LOCK_FORMAT \ + ((MDBX_OSAL_LOCK_SIGN << 16) + \ + (uint16_t)(MDBX_LOCKINFO_WHOLE_SIZE + MDBX_CACHELINE_SIZE - 1)) + +#define MDBX_DATA_MAGIC ((MDBX_MAGIC << 8) + MDBX_DATA_VERSION) + +#define MDBX_LOCK_MAGIC ((MDBX_MAGIC << 8) + MDBX_LOCK_VERSION) + +/*----------------------------------------------------------------------------*/ +/* Two kind lists of pages (aka PNL) */ + +/* An PNL is an Page Number List, a sorted array of IDs. The first element of + * the array is a counter for how many actual page-numbers are in the list. + * PNLs are sorted in descending order, this allow cut off a page with lowest + * pgno (at the tail) just truncating the list */ +#define MDBX_PNL_ASCENDING 0 +typedef pgno_t *MDBX_PNL; + +#if MDBX_PNL_ASCENDING +#define MDBX_PNL_ORDERED(first, last) ((first) < (last)) +#define MDBX_PNL_DISORDERED(first, last) ((first) >= (last)) +#else +#define MDBX_PNL_ORDERED(first, last) ((first) > (last)) +#define MDBX_PNL_DISORDERED(first, last) ((first) <= (last)) +#endif + +/* List of txnid, only for MDBX_env.mt_lifo_reclaimed */ +typedef txnid_t *MDBX_TXL; + +/* An ID2 is an ID/pointer pair. */ +typedef struct MDBX_ID2 { + pgno_t mid; /* The ID */ + void *mptr; /* The pointer */ +} MDBX_ID2; + +/* An ID2L is an ID2 List, a sorted array of ID2s. + * The first element's mid member is a count of how many actual + * elements are in the array. The mptr member of the first element is + * unused. The array is sorted in ascending order by mid. 
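[Editor's note, not part of the commit: the 32-bit MAX_MAPSIZE32 caps defined above work out to roughly 896 MiB on Windows and just under 2 GiB elsewhere; an illustrative check of that arithmetic:]

    #include <assert.h>
    #include <stdint.h>

    static void mapsize_demo(void)
    {
        assert(UINT32_C(0x38000000) == 896u * 1024 * 1024);   /* Windows 32-bit cap */
        assert(UINT32_C(0x7ff80000) ==                        /* other 32-bit cap   */
               2048u * 1024 * 1024 - 512u * 1024);            /* 2 GiB - 512 KiB    */
    }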
*/ +typedef MDBX_ID2 *MDBX_ID2L; + +/* PNL sizes - likely should be even bigger + * limiting factors: sizeof(pgno_t), thread stack size */ +#define MDBX_PNL_LOGN 16 /* DB_SIZE is 2^16, UM_SIZE is 2^17 */ +#define MDBX_PNL_DB_SIZE (1 << MDBX_PNL_LOGN) +#define MDBX_PNL_UM_SIZE (1 << (MDBX_PNL_LOGN + 1)) + +#define MDBX_PNL_DB_MAX (MDBX_PNL_DB_SIZE - 1) +#define MDBX_PNL_UM_MAX (MDBX_PNL_UM_SIZE - 1) + +#define MDBX_PNL_SIZEOF(pl) (((pl)[0] + 1) * sizeof(pgno_t)) +#define MDBX_PNL_IS_ZERO(pl) ((pl)[0] == 0) +#define MDBX_PNL_CPY(dst, src) (memcpy(dst, src, MDBX_PNL_SIZEOF(src))) +#define MDBX_PNL_FIRST(pl) ((pl)[1]) +#define MDBX_PNL_LAST(pl) ((pl)[(pl)[0]]) + +/* Current max length of an mdbx_pnl_alloc()ed PNL */ +#define MDBX_PNL_ALLOCLEN(pl) ((pl)[-1]) + +/*----------------------------------------------------------------------------*/ +/* Internal structures */ + +/* Auxiliary DB info. + * The information here is mostly static/read-only. There is + * only a single copy of this record in the environment. */ +typedef struct MDBX_dbx { + MDBX_val md_name; /* name of the database */ + MDBX_cmp_func *md_cmp; /* function for comparing keys */ + MDBX_cmp_func *md_dcmp; /* function for comparing data items */ +} MDBX_dbx; + +/* A database transaction. + * Every operation requires a transaction handle. */ +struct MDBX_txn { +#define MDBX_MT_SIGNATURE UINT32_C(0x93D53A31) + size_t mt_signature; + MDBX_txn *mt_parent; /* parent of a nested txn */ + /* Nested txn under this txn, set together with flag MDBX_TXN_HAS_CHILD */ + MDBX_txn *mt_child; + pgno_t mt_next_pgno; /* next unallocated page */ + pgno_t mt_end_pgno; /* corresponding to the current size of datafile */ + /* The ID of this transaction. IDs are integers incrementing from 1. + * Only committed write transactions increment the ID. If a transaction + * aborts, the ID may be re-used by the next writer. */ + txnid_t mt_txnid; + MDBX_env *mt_env; /* the DB environment */ + /* The list of reclaimed txns from freeDB */ + MDBX_TXL mt_lifo_reclaimed; + /* The list of pages that became unused during this transaction. */ + MDBX_PNL mt_befree_pages; + /* The list of loose pages that became unused and may be reused + * in this transaction, linked through NEXT_LOOSE_PAGE(page). */ + MDBX_page *mt_loose_pages; + /* Number of loose pages (mt_loose_pages) */ + unsigned mt_loose_count; + /* The sorted list of dirty pages we temporarily wrote to disk + * because the dirty list was full. page numbers in here are + * shifted left by 1, deleted slots have the LSB set. */ + MDBX_PNL mt_spill_pages; + union { + /* For write txns: Modified pages. Sorted when not MDBX_WRITEMAP. */ + MDBX_ID2L mt_rw_dirtylist; + /* For read txns: This thread/txn's reader table slot, or NULL. */ + MDBX_reader *mt_ro_reader; + }; + /* Array of records for each DB known in the environment. 
*/ + MDBX_dbx *mt_dbxs; + /* Array of MDBX_db records for each known DB */ + MDBX_db *mt_dbs; + /* Array of sequence numbers for each DB handle */ + unsigned *mt_dbiseqs; + +/* Transaction DB Flags */ +#define DB_DIRTY MDBX_TBL_DIRTY /* DB was written in this txn */ +#define DB_STALE MDBX_TBL_STALE /* Named-DB record is older than txnID */ +#define DB_NEW MDBX_TBL_NEW /* Named-DB handle opened in this txn */ +#define DB_VALID 0x08 /* DB handle is valid, see also MDBX_VALID */ +#define DB_USRVALID 0x10 /* As DB_VALID, but not set for FREE_DBI */ +#define DB_DUPDATA 0x20 /* DB is MDBX_DUPSORT data */ + /* In write txns, array of cursors for each DB */ + MDBX_cursor **mt_cursors; + /* Array of flags for each DB */ + uint8_t *mt_dbflags; + /* Number of DB records in use, or 0 when the txn is finished. + * This number only ever increments until the txn finishes; we + * don't decrement it when individual DB handles are closed. */ + MDBX_dbi mt_numdbs; + +/* Transaction Flags */ +/* mdbx_txn_begin() flags */ +#define MDBX_TXN_BEGIN_FLAGS (MDBX_NOMETASYNC | MDBX_NOSYNC | MDBX_RDONLY) +#define MDBX_TXN_NOMETASYNC \ + MDBX_NOMETASYNC /* don't sync meta for this txn on commit */ +#define MDBX_TXN_NOSYNC MDBX_NOSYNC /* don't sync this txn on commit */ +#define MDBX_TXN_RDONLY MDBX_RDONLY /* read-only transaction */ + /* internal txn flags */ +#define MDBX_TXN_WRITEMAP MDBX_WRITEMAP /* copy of MDBX_env flag in writers */ +#define MDBX_TXN_FINISHED 0x01 /* txn is finished or never began */ +#define MDBX_TXN_ERROR 0x02 /* txn is unusable after an error */ +#define MDBX_TXN_DIRTY 0x04 /* must write, even if dirty list is empty */ +#define MDBX_TXN_SPILLS 0x08 /* txn or a parent has spilled pages */ +#define MDBX_TXN_HAS_CHILD 0x10 /* txn has an MDBX_txn.mt_child */ +/* most operations on the txn are currently illegal */ +#define MDBX_TXN_BLOCKED \ + (MDBX_TXN_FINISHED | MDBX_TXN_ERROR | MDBX_TXN_HAS_CHILD) + unsigned mt_flags; + /* dirtylist room: Array size - dirty pages visible to this txn. + * Includes ancestor txns' dirty pages not hidden by other txns' + * dirty/spilled pages. Thus commit(nested txn) has room to merge + * dirtylist into mt_parent after freeing hidden mt_parent pages. */ + unsigned mt_dirtyroom; + mdbx_tid_t mt_owner; /* thread ID that owns this transaction */ + mdbx_canary mt_canary; +}; + +/* Enough space for 2^32 nodes with minimum of 2 keys per node. I.e., plenty. + * At 4 keys per node, enough for 2^64 nodes, so there's probably no need to + * raise this on a 64 bit machine. */ +#define CURSOR_STACK 32 + +struct MDBX_xcursor; + +/* Cursors are used for all DB operations. + * A cursor holds a path of (page pointer, key index) from the DB + * root to a position in the DB, plus other state. MDBX_DUPSORT + * cursors include an xcursor to the current data item. Write txns + * track their cursors and keep them up to date when data moves. + * Exception: An xcursor's pointer to a P_SUBP page can be stale. + * (A node with F_DUPDATA but no F_SUBDATA contains a subpage). 
*/ +struct MDBX_cursor { +#define MDBX_MC_SIGNATURE UINT32_C(0xFE05D5B1) +#define MDBX_MC_READY4CLOSE UINT32_C(0x2817A047) +#define MDBX_MC_WAIT4EOT UINT32_C(0x90E297A7) + uint32_t mc_signature; + /* The database handle this cursor operates on */ + MDBX_dbi mc_dbi; + /* Next cursor on this DB in this txn */ + MDBX_cursor *mc_next; + /* Backup of the original cursor if this cursor is a shadow */ + MDBX_cursor *mc_backup; + /* Context used for databases with MDBX_DUPSORT, otherwise NULL */ + struct MDBX_xcursor *mc_xcursor; + /* The transaction that owns this cursor */ + MDBX_txn *mc_txn; + /* The database record for this cursor */ + MDBX_db *mc_db; + /* The database auxiliary record for this cursor */ + MDBX_dbx *mc_dbx; + /* The mt_dbflag for this database */ + uint8_t *mc_dbflag; + uint16_t mc_snum; /* number of pushed pages */ + uint16_t mc_top; /* index of top page, normally mc_snum-1 */ + /* Cursor state flags. */ +#define C_INITIALIZED 0x01 /* cursor has been initialized and is valid */ +#define C_EOF 0x02 /* No more data */ +#define C_SUB 0x04 /* Cursor is a sub-cursor */ +#define C_DEL 0x08 /* last op was a cursor_del */ +#define C_UNTRACK 0x40 /* Un-track cursor when closing */ +#define C_RECLAIMING 0x80 /* FreeDB lookup is prohibited */ + unsigned mc_flags; /* see mdbx_cursor */ + MDBX_page *mc_pg[CURSOR_STACK]; /* stack of pushed pages */ + indx_t mc_ki[CURSOR_STACK]; /* stack of page indices */ +}; + +/* Context for sorted-dup records. + * We could have gone to a fully recursive design, with arbitrarily + * deep nesting of sub-databases. But for now we only handle these + * levels - main DB, optional sub-DB, sorted-duplicate DB. */ +typedef struct MDBX_xcursor { + /* A sub-cursor for traversing the Dup DB */ + MDBX_cursor mx_cursor; + /* The database record for this Dup DB */ + MDBX_db mx_db; + /* The auxiliary DB record for this Dup DB */ + MDBX_dbx mx_dbx; + /* The mt_dbflag for this Dup DB */ + uint8_t mx_dbflag; +} MDBX_xcursor; + +/* Check if there is an inited xcursor, so XCURSOR_REFRESH() is proper */ +#define XCURSOR_INITED(mc) \ + ((mc)->mc_xcursor && ((mc)->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) + +/* Update sub-page pointer, if any, in mc->mc_xcursor. + * Needed when the node which contains the sub-page may have moved. + * Called with mp = mc->mc_pg[mc->mc_top], ki = mc->mc_ki[mc->mc_top]. */ +#define XCURSOR_REFRESH(mc, mp, ki) \ + do { \ + MDBX_page *xr_pg = (mp); \ + MDBX_node *xr_node = NODEPTR(xr_pg, ki); \ + if ((xr_node->mn_flags & (F_DUPDATA | F_SUBDATA)) == F_DUPDATA) \ + (mc)->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(xr_node); \ + } while (0) + +/* State of FreeDB old pages, stored in the MDBX_env */ +typedef struct MDBX_pgstate { + pgno_t *mf_reclaimed_pglist; /* Reclaimed freeDB pages, or NULL before use */ + txnid_t mf_last_reclaimed; /* ID of last used record, or 0 if + !mf_reclaimed_pglist */ +} MDBX_pgstate; + +/* The database environment. */ +struct MDBX_env { +#define MDBX_ME_SIGNATURE UINT32_C(0x9A899641) + size_t me_signature; + mdbx_mmap_t me_dxb_mmap; /* The main data file */ +#define me_map me_dxb_mmap.dxb +#define me_fd me_dxb_mmap.fd +#define me_mapsize me_dxb_mmap.length + mdbx_mmap_t me_lck_mmap; /* The lock file */ +#define me_lfd me_lck_mmap.fd +#define me_lck me_lck_mmap.lck + +/* Failed to update the meta page. Probably an I/O error. */ +#define MDBX_FATAL_ERROR UINT32_C(0x80000000) +/* Additional flag for mdbx_sync_locked() */ +#define MDBX_SHRINK_ALLOWED UINT32_C(0x40000000) +/* Some fields are initialized. 
*/ +#define MDBX_ENV_ACTIVE UINT32_C(0x20000000) +/* me_txkey is set */ +#define MDBX_ENV_TXKEY UINT32_C(0x10000000) + uint32_t me_flags; /* see mdbx_env */ + unsigned me_psize; /* DB page size, inited from me_os_psize */ + unsigned me_psize2log; /* log2 of DB page size */ + unsigned me_os_psize; /* OS page size, from mdbx_syspagesize() */ + unsigned me_maxreaders; /* size of the reader table */ + /* Max MDBX_lockinfo.mti_numreaders of interest to mdbx_env_close() */ + unsigned me_close_readers; + mdbx_fastmutex_t me_dbi_lock; + MDBX_dbi me_numdbs; /* number of DBs opened */ + MDBX_dbi me_maxdbs; /* size of the DB table */ + mdbx_pid_t me_pid; /* process ID of this env */ + mdbx_thread_key_t me_txkey; /* thread-key for readers */ + char *me_path; /* path to the DB files */ + void *me_pbuf; /* scratch area for DUPSORT put() */ + MDBX_txn *me_txn; /* current write transaction */ + MDBX_txn *me_txn0; /* prealloc'd write transaction */ + MDBX_dbx *me_dbxs; /* array of static DB info */ + uint16_t *me_dbflags; /* array of flags from MDBX_db.md_flags */ + unsigned *me_dbiseqs; /* array of dbi sequence numbers */ + volatile txnid_t *me_oldest; /* ID of oldest reader last time we looked */ + MDBX_pgstate me_pgstate; /* state of old pages from freeDB */ +#define me_last_reclaimed me_pgstate.mf_last_reclaimed +#define me_reclaimed_pglist me_pgstate.mf_reclaimed_pglist + MDBX_page *me_dpages; /* list of malloc'd blocks for re-use */ + /* PNL of pages that became unused in a write txn */ + MDBX_PNL me_free_pgs; + /* ID2L of pages written during a write txn. Length MDBX_PNL_UM_SIZE. */ + MDBX_ID2L me_dirtylist; + /* Max number of freelist items that can fit in a single overflow page */ + unsigned me_maxfree_1pg; + /* Max size of a node on a page */ + unsigned me_nodemax; + unsigned me_maxkey_limit; /* max size of a key */ + mdbx_pid_t me_live_reader; /* have liveness lock in reader table */ + void *me_userctx; /* User-settable context */ + size_t me_sync_pending; /* Total dirty/non-sync'ed bytes + * since the last mdbx_env_sync() */ + size_t me_sync_threshold; /* Treshold of above to force synchronous flush */ + MDBX_oom_func *me_oom_func; /* Callback for kicking laggard readers */ + txnid_t me_oldest_stub; +#if MDBX_DEBUG + MDBX_assert_func *me_assert_func; /* Callback for assertion failures */ +#endif +#ifdef USE_VALGRIND + int me_valgrind_handle; +#endif + + struct { + size_t lower; /* minimal size of datafile */ + size_t upper; /* maximal size of datafile */ + size_t now; /* current size of datafile */ + size_t grow; /* step to grow datafile */ + size_t shrink; /* threshold to shrink datafile */ + } me_dbgeo; /* */ +}; + +/* Nested transaction */ +typedef struct MDBX_ntxn { + MDBX_txn mnt_txn; /* the transaction */ + MDBX_pgstate mnt_pgstate; /* parent transaction's saved freestate */ +} MDBX_ntxn; + +/*----------------------------------------------------------------------------*/ +/* Debug and Logging stuff */ + +extern int mdbx_runtime_flags; +extern MDBX_debug_func *mdbx_debug_logger; +extern txnid_t mdbx_debug_edge; + +void mdbx_debug_log(int type, const char *function, int line, const char *fmt, + ...) +#if defined(__GNUC__) || __has_attribute(format) + __attribute__((format(printf, 4, 5))) +#endif + ; + +void mdbx_panic(const char *fmt, ...) 
+#if defined(__GNUC__) || __has_attribute(format) + __attribute__((format(printf, 1, 2))) +#endif + ; + +#if MDBX_DEBUG + +#define mdbx_assert_enabled() unlikely(mdbx_runtime_flags &MDBX_DBG_ASSERT) + +#define mdbx_audit_enabled() unlikely(mdbx_runtime_flags &MDBX_DBG_AUDIT) + +#define mdbx_debug_enabled(type) \ + unlikely(mdbx_runtime_flags &(type & (MDBX_DBG_TRACE | MDBX_DBG_EXTRA))) + +#else +#define mdbx_debug_enabled(type) (0) +#define mdbx_audit_enabled() (0) +#ifndef NDEBUG +#define mdbx_assert_enabled() (1) +#else +#define mdbx_assert_enabled() (0) +#endif /* NDEBUG */ +#endif /* MDBX_DEBUG */ + +#define mdbx_print(fmt, ...) \ + mdbx_debug_log(MDBX_DBG_PRINT, NULL, 0, fmt, ##__VA_ARGS__) + +#define mdbx_trace(fmt, ...) \ + do { \ + if (mdbx_debug_enabled(MDBX_DBG_TRACE)) \ + mdbx_debug_log(MDBX_DBG_TRACE, __FUNCTION__, __LINE__, fmt "\n", \ + ##__VA_ARGS__); \ + } while (0) + +#define mdbx_verbose(fmt, ...) \ + do { \ + if (mdbx_debug_enabled(MDBX_DBG_TRACE /* FIXME */)) \ + mdbx_debug_log(MDBX_DBG_TRACE /* FIXME */, __FUNCTION__, __LINE__, \ + fmt "\n", ##__VA_ARGS__); \ + } while (0) + +#define mdbx_info(fmt, ...) \ + do { \ + if (mdbx_debug_enabled(MDBX_DBG_TRACE /* FIXME */)) \ + mdbx_debug_log(MDBX_DBG_TRACE /* FIXME */, __FUNCTION__, __LINE__, \ + fmt "\n", ##__VA_ARGS__); \ + } while (0) + +#define mdbx_notice(fmt, ...) \ + do { \ + if (mdbx_debug_enabled(MDBX_DBG_TRACE /* FIXME */)) \ + mdbx_debug_log(MDBX_DBG_TRACE /* FIXME */, __FUNCTION__, __LINE__, \ + fmt "\n", ##__VA_ARGS__); \ + } while (0) + +#define mdbx_warning(fmt, ...) \ + do { \ + if (mdbx_debug_enabled(MDBX_DBG_TRACE /* FIXME */)) \ + mdbx_debug_log(MDBX_DBG_TRACE /* FIXME */, __FUNCTION__, __LINE__, \ + fmt "\n", ##__VA_ARGS__); \ + } while (0) + +#define mdbx_error(fmt, ...) \ + do { \ + if (mdbx_debug_enabled(MDBX_DBG_TRACE /* FIXME */)) \ + mdbx_debug_log(MDBX_DBG_TRACE /* FIXME */, __FUNCTION__, __LINE__, \ + fmt "\n", ##__VA_ARGS__); \ + } while (0) + +#define mdbx_fatal(fmt, ...) \ + do { \ + if (mdbx_debug_enabled(MDBX_DBG_TRACE /* FIXME */)) \ + mdbx_debug_log(MDBX_DBG_TRACE /* FIXME */, __FUNCTION__, __LINE__, \ + fmt "\n", ##__VA_ARGS__); \ + } while (0) + +#define mdbx_debug(fmt, ...) \ + do { \ + if (mdbx_debug_enabled(MDBX_DBG_TRACE)) \ + mdbx_debug_log(MDBX_DBG_TRACE, __FUNCTION__, __LINE__, fmt "\n", \ + ##__VA_ARGS__); \ + } while (0) + +#define mdbx_debug_print(fmt, ...) \ + do { \ + if (mdbx_debug_enabled(MDBX_DBG_TRACE)) \ + mdbx_debug_log(MDBX_DBG_TRACE, NULL, 0, fmt, ##__VA_ARGS__); \ + } while (0) + +#define mdbx_debug_extra(fmt, ...) \ + do { \ + if (mdbx_debug_enabled(MDBX_DBG_EXTRA)) \ + mdbx_debug_log(MDBX_DBG_EXTRA, __FUNCTION__, __LINE__, fmt, \ + ##__VA_ARGS__); \ + } while (0) + +#define mdbx_debug_extra_print(fmt, ...) 
\ + do { \ + if (mdbx_debug_enabled(MDBX_DBG_EXTRA)) \ + mdbx_debug_log(MDBX_DBG_EXTRA, NULL, 0, fmt, ##__VA_ARGS__); \ + } while (0) + +#define mdbx_ensure_msg(env, expr, msg) \ + do { \ + if (unlikely(!(expr))) \ + mdbx_assert_fail(env, msg, __FUNCTION__, __LINE__); \ + } while (0) + +#define mdbx_ensure(env, expr) mdbx_ensure_msg(env, expr, #expr) + +/* assert(3) variant in environment context */ +#define mdbx_assert(env, expr) \ + do { \ + if (mdbx_assert_enabled()) \ + mdbx_ensure(env, expr); \ + } while (0) + +/* assert(3) variant in cursor context */ +#define mdbx_cassert(mc, expr) mdbx_assert((mc)->mc_txn->mt_env, expr) + +/* assert(3) variant in transaction context */ +#define mdbx_tassert(txn, expr) mdbx_assert((txn)->mt_env, expr) + +static __inline void mdbx_jitter4testing(bool tiny) { +#ifndef NDEBUG + if (MDBX_DBG_JITTER & mdbx_runtime_flags) + mdbx_osal_jitter(tiny); +#else + (void)tiny; +#endif +} + +/*----------------------------------------------------------------------------*/ +/* Internal prototypes and inlines */ + +int mdbx_reader_check0(MDBX_env *env, int rlocked, int *dead); +void mdbx_rthc_dtor(void *rthc); +void mdbx_rthc_lock(void); +void mdbx_rthc_unlock(void); +int mdbx_rthc_alloc(mdbx_thread_key_t *key, MDBX_reader *begin, + MDBX_reader *end); +void mdbx_rthc_remove(mdbx_thread_key_t key); +void mdbx_rthc_cleanup(void); + +static __inline bool mdbx_is_power2(size_t x) { return (x & (x - 1)) == 0; } + +static __inline size_t mdbx_roundup2(size_t value, size_t granularity) { + assert(mdbx_is_power2(granularity)); + return (value + granularity - 1) & ~(granularity - 1); +} + +static __inline unsigned mdbx_log2(size_t value) { + assert(mdbx_is_power2(value)); + + unsigned log = 0; + while (value > 1) { + log += 1; + value >>= 1; + } + return log; +} + +#define MDBX_IS_ERROR(rc) \ + ((rc) != MDBX_RESULT_TRUE && (rc) != MDBX_RESULT_FALSE) + +/* Internal error codes, not exposed outside libmdbx */ +#define MDBX_NO_ROOT (MDBX_LAST_ERRCODE + 10) + +/* Debuging output value of a cursor DBI: Negative in a sub-cursor. */ +#define DDBI(mc) \ + (((mc)->mc_flags & C_SUB) ? -(int)(mc)->mc_dbi : (int)(mc)->mc_dbi) + +/* Key size which fits in a DKBUF. */ +#define DKBUF_MAXKEYSIZE 511 /* FIXME */ + +#if MDBX_DEBUG +#define DKBUF char _kbuf[DKBUF_MAXKEYSIZE * 4 + 2] +#define DKEY(x) mdbx_dkey(x, _kbuf, DKBUF_MAXKEYSIZE * 2 + 1) +#define DVAL(x) \ + mdbx_dkey(x, _kbuf + DKBUF_MAXKEYSIZE * 2 + 1, DKBUF_MAXKEYSIZE * 2 + 1) +#else +#define DKBUF ((void)(0)) +#define DKEY(x) ("-") +#define DVAL(x) ("-") +#endif + +/* An invalid page number. + * Mainly used to denote an empty tree. */ +#define P_INVALID (~(pgno_t)0) + +/* Test if the flags f are set in a flag word w. */ +#define F_ISSET(w, f) (((w) & (f)) == (f)) + +/* Round n up to an even number. */ +#define EVEN(n) (((n) + 1U) & -2) /* sign-extending -2 to match n+1U */ + +/* Default size of memory map. + * This is certainly too small for any actual applications. Apps should + * always set the size explicitly using mdbx_env_set_mapsize(). */ +#define DEFAULT_MAPSIZE 1048576 + +/* Number of slots in the reader table. + * This value was chosen somewhat arbitrarily. The 61 is a prime number, + * and such readers plus a couple mutexes fit into single 4KB page. + * Applications should set the table size using mdbx_env_set_maxreaders(). 
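+ *
+ * A minimal usage sketch (assuming the public libmdbx API; the path and
+ * the value 126 below are purely illustrative). The limit must be raised
+ * before the environment is opened:
+ *
+ *   MDBX_env *env;
+ *   mdbx_env_create(&env);
+ *   mdbx_env_set_maxreaders(env, 126);
+ *   mdbx_env_open(env, "./some.db", 0, 0644);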
*/ +#define DEFAULT_READERS 61 + +/* Address of first usable data byte in a page, after the header */ +#define PAGEDATA(p) ((void *)((char *)(p) + PAGEHDRSZ)) + +/* Number of nodes on a page */ +#define NUMKEYS(p) ((unsigned)(p)->mp_lower >> 1) + +/* The amount of space remaining in the page */ +#define SIZELEFT(p) (indx_t)((p)->mp_upper - (p)->mp_lower) + +/* The percentage of space used in the page, in tenths of a percent. */ +#define PAGEFILL(env, p) \ + (1024UL * ((env)->me_psize - PAGEHDRSZ - SIZELEFT(p)) / \ + ((env)->me_psize - PAGEHDRSZ)) +/* The minimum page fill factor, in tenths of a percent. + * Pages emptier than this are candidates for merging. */ +#define FILL_THRESHOLD 256 + +/* Test if a page is a leaf page */ +#define IS_LEAF(p) F_ISSET((p)->mp_flags, P_LEAF) +/* Test if a page is a LEAF2 page */ +#define IS_LEAF2(p) F_ISSET((p)->mp_flags, P_LEAF2) +/* Test if a page is a branch page */ +#define IS_BRANCH(p) F_ISSET((p)->mp_flags, P_BRANCH) +/* Test if a page is an overflow page */ +#define IS_OVERFLOW(p) unlikely(F_ISSET((p)->mp_flags, P_OVERFLOW)) +/* Test if a page is a sub page */ +#define IS_SUBP(p) F_ISSET((p)->mp_flags, P_SUBP) + +/* The number of overflow pages needed to store the given size. */ +#define OVPAGES(env, size) (bytes2pgno(env, PAGEHDRSZ - 1 + (size)) + 1) + +/* Link in MDBX_txn.mt_loose_pages list. + * Kept outside the page header, which is needed when reusing the page. */ +#define NEXT_LOOSE_PAGE(p) (*(MDBX_page **)((p) + 2)) + +/* Header for a single key/data pair within a page. + * Used in pages of type P_BRANCH and P_LEAF without P_LEAF2. + * We guarantee 2-byte alignment for 'MDBX_node's. + * + * mn_lo and mn_hi are used for data size on leaf nodes, and for child + * pgno on branch nodes. On 64 bit platforms, mn_flags is also used + * for pgno. (Branch nodes have no flags). Lo and hi are in host byte + * order in case some accesses can be optimized to 32-bit word access. + * + * Leaf node flags describe node contents. F_BIGDATA says the node's + * data part is the page number of an overflow page with actual data. + * F_DUPDATA and F_SUBDATA can be combined giving duplicate data in + * a sub-page/sub-database, and named databases (just F_SUBDATA). */ +typedef struct MDBX_node { + union { + struct { +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + union { + struct { + uint16_t mn_lo, mn_hi; /* part of data size or pgno */ + }; + uint32_t mn_dsize; + }; + uint16_t mn_flags; /* see mdbx_node */ + uint16_t mn_ksize; /* key size */ +#else + uint16_t mn_ksize; /* key size */ + uint16_t mn_flags; /* see mdbx_node */ + union { + struct { + uint16_t mn_hi, mn_lo; /* part of data size or pgno */ + }; + uint32_t mn_dsize; + }; +#endif + }; + pgno_t mn_ksize_and_pgno; + }; + +/* mdbx_node Flags */ +#define F_BIGDATA 0x01 /* data put on overflow page */ +#define F_SUBDATA 0x02 /* data is a sub-database */ +#define F_DUPDATA 0x04 /* data has duplicates */ + +/* valid flags for mdbx_node_add() */ +#define NODE_ADD_FLAGS (F_DUPDATA | F_SUBDATA | MDBX_RESERVE | MDBX_APPEND) + uint8_t mn_data[1]; /* key and data are appended here */ +} MDBX_node; + +/* Size of the node header, excluding dynamic data at the end */ +#define NODESIZE offsetof(MDBX_node, mn_data) + +/* Bit position of top word in page number, for shifting mn_flags */ +#define PGNO_TOPWORD ((pgno_t)-1 > 0xffffffffu ? 32 : 0) + +/* Size of a node in a branch page with a given key. + * This is just the node header plus the key, there is no data. */ +#define INDXSIZE(k) (NODESIZE + ((k) == NULL ? 
0 : (k)->iov_len)) + +/* Size of a node in a leaf page with a given key and data. + * This is node header plus key plus data size. */ +#define LEAFSIZE(k, d) (NODESIZE + (k)->iov_len + (d)->iov_len) + +/* Address of node i in page p */ +static __inline MDBX_node *NODEPTR(MDBX_page *p, unsigned i) { + assert(NUMKEYS(p) > (unsigned)(i)); + return (MDBX_node *)((char *)(p) + (p)->mp_ptrs[i] + PAGEHDRSZ); +} + +/* Address of the key for the node */ +#define NODEKEY(node) (void *)((node)->mn_data) + +/* Address of the data for a node */ +#define NODEDATA(node) (void *)((char *)(node)->mn_data + (node)->mn_ksize) + +/* Get the page number pointed to by a branch node */ +static __inline pgno_t NODEPGNO(const MDBX_node *node) { + pgno_t pgno; + if (UNALIGNED_OK) { + pgno = node->mn_ksize_and_pgno; + if (sizeof(pgno_t) > 4) + pgno &= MAX_PAGENO; + } else { + pgno = node->mn_lo | ((pgno_t)node->mn_hi << 16); + if (sizeof(pgno_t) > 4) + pgno |= ((uint64_t)node->mn_flags) << 32; + } + return pgno; +} + +/* Set the page number in a branch node */ +static __inline void SETPGNO(MDBX_node *node, pgno_t pgno) { + assert(pgno <= MAX_PAGENO); + + if (UNALIGNED_OK) { + if (sizeof(pgno_t) > 4) + pgno |= ((uint64_t)node->mn_ksize) << 48; + node->mn_ksize_and_pgno = pgno; + } else { + node->mn_lo = (uint16_t)pgno; + node->mn_hi = (uint16_t)(pgno >> 16); + if (sizeof(pgno_t) > 4) + node->mn_flags = (uint16_t)((uint64_t)pgno >> 32); + } +} + +/* Get the size of the data in a leaf node */ +static __inline size_t NODEDSZ(const MDBX_node *node) { + size_t size; + if (UNALIGNED_OK) { + size = node->mn_dsize; + } else { + size = node->mn_lo | ((size_t)node->mn_hi << 16); + } + return size; +} + +/* Set the size of the data for a leaf node */ +static __inline void SETDSZ(MDBX_node *node, size_t size) { + assert(size < INT_MAX); + if (UNALIGNED_OK) { + node->mn_dsize = (uint32_t)size; + } else { + node->mn_lo = (uint16_t)size; + node->mn_hi = (uint16_t)(size >> 16); + } +} + +/* The size of a key in a node */ +#define NODEKSZ(node) ((node)->mn_ksize) + +/* The address of a key in a LEAF2 page. + * LEAF2 pages are used for MDBX_DUPFIXED sorted-duplicate sub-DBs. + * There are no node headers, keys are stored contiguously. */ +#define LEAF2KEY(p, i, ks) ((char *)(p) + PAGEHDRSZ + ((i) * (ks))) + +/* Set the node's key into keyptr, if requested. */ +#define MDBX_GET_KEY(node, keyptr) \ + do { \ + if ((keyptr) != NULL) { \ + (keyptr)->iov_len = NODEKSZ(node); \ + (keyptr)->iov_base = NODEKEY(node); \ + } \ + } while (0) + +/* Set the node's key into key. 
*/ +#define MDBX_GET_KEY2(node, key) \ + do { \ + key.iov_len = NODEKSZ(node); \ + key.iov_base = NODEKEY(node); \ + } while (0) + +#define MDBX_VALID 0x8000 /* DB handle is valid, for me_dbflags */ +#define PERSISTENT_FLAGS (0xffff & ~(MDBX_VALID)) +/* mdbx_dbi_open() flags */ +#define VALID_FLAGS \ + (MDBX_REVERSEKEY | MDBX_DUPSORT | MDBX_INTEGERKEY | MDBX_DUPFIXED | \ + MDBX_INTEGERDUP | MDBX_REVERSEDUP | MDBX_CREATE) + +/* max number of pages to commit in one writev() call */ +#define MDBX_COMMIT_PAGES 64 +#if defined(IOV_MAX) && IOV_MAX < MDBX_COMMIT_PAGES /* sysconf(_SC_IOV_MAX) */ +#undef MDBX_COMMIT_PAGES +#define MDBX_COMMIT_PAGES IOV_MAX +#endif + +/* Check txn and dbi arguments to a function */ +#define TXN_DBI_EXIST(txn, dbi, validity) \ + ((dbi) < (txn)->mt_numdbs && ((txn)->mt_dbflags[dbi] & (validity))) + +/* Check for misused dbi handles */ +#define TXN_DBI_CHANGED(txn, dbi) \ + ((txn)->mt_dbiseqs[dbi] != (txn)->mt_env->me_dbiseqs[dbi]) + +/* LY: fast enough on most systems + * + * / + * | -1, a < b + * cmp2int(a,b) = < 0, a == b + * | 1, a > b + * \ + */ +#if 1 +#define mdbx_cmp2int(a, b) (((b) > (a)) ? -1 : (a) > (b)) +#else +#define mdbx_cmp2int(a, b) (((a) > (b)) - ((b) > (a))) +#endif + +static __inline size_t pgno2bytes(const MDBX_env *env, pgno_t pgno) { + mdbx_assert(env, (1u << env->me_psize2log) == env->me_psize); + return ((size_t)pgno) << env->me_psize2log; +} + +static __inline MDBX_page *pgno2page(const MDBX_env *env, pgno_t pgno) { + return (MDBX_page *)(env->me_map + pgno2bytes(env, pgno)); +} + +static __inline pgno_t bytes2pgno(const MDBX_env *env, size_t bytes) { + mdbx_assert(env, (env->me_psize >> env->me_psize2log) == 1); + return (pgno_t)(bytes >> env->me_psize2log); +} + +static __inline pgno_t pgno_add(pgno_t base, pgno_t augend) { + assert(base <= MAX_PAGENO); + return (augend < MAX_PAGENO - base) ? base + augend : MAX_PAGENO; +} + +static __inline pgno_t pgno_sub(pgno_t base, pgno_t subtrahend) { + assert(base >= MIN_PAGENO); + return (subtrahend < base - MIN_PAGENO) ? base - subtrahend : MIN_PAGENO; +} + +static __inline size_t pgno_align2os_bytes(const MDBX_env *env, pgno_t pgno) { + return mdbx_roundup2(pgno2bytes(env, pgno), env->me_os_psize); +} + +static __inline pgno_t pgno_align2os_pgno(const MDBX_env *env, pgno_t pgno) { + return bytes2pgno(env, pgno_align2os_bytes(env, pgno)); +} diff --git a/plugins/Dbx_mdb/src/mdbx/defs.h b/plugins/Dbx_mdb/src/mdbx/defs.h new file mode 100644 index 0000000000..ed1a87e8a4 --- /dev/null +++ b/plugins/Dbx_mdb/src/mdbx/defs.h @@ -0,0 +1,393 @@ +/* + * Copyright 2015-2017 Leonid Yuriev <leo@yuriev.ru> + * and other libmdbx authors: please see AUTHORS file. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * <http://www.OpenLDAP.org/license.html>. 
+ */ + +#pragma once +/* *INDENT-OFF* */ +/* clang-format off */ + +#ifndef __GNUC_PREREQ +# if defined(__GNUC__) && defined(__GNUC_MINOR__) +# define __GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) +# else +# define __GNUC_PREREQ(maj, min) (0) +# endif +#endif /* __GNUC_PREREQ */ + +#ifndef __CLANG_PREREQ +# ifdef __clang__ +# define __CLANG_PREREQ(maj,min) \ + ((__clang_major__ << 16) + __clang_minor__ >= ((maj) << 16) + (min)) +# else +# define __CLANG_PREREQ(maj,min) (0) +# endif +#endif /* __CLANG_PREREQ */ + +#ifndef __GLIBC_PREREQ +# if defined(__GLIBC__) && defined(__GLIBC_MINOR__) +# define __GLIBC_PREREQ(maj, min) \ + ((__GLIBC__ << 16) + __GLIBC_MINOR__ >= ((maj) << 16) + (min)) +# else +# define __GLIBC_PREREQ(maj, min) (0) +# endif +#endif /* __GLIBC_PREREQ */ + +#ifndef __has_attribute +# define __has_attribute(x) (0) +#endif + +#ifndef __has_feature +# define __has_feature(x) (0) +#endif + +#ifndef __has_extension +# define __has_extension(x) (0) +#endif + +#ifndef __has_builtin +# define __has_builtin(x) (0) +#endif + +#if __has_feature(thread_sanitizer) +# define __SANITIZE_THREAD__ 1 +#endif + +#if __has_feature(address_sanitizer) +# define __SANITIZE_ADDRESS__ 1 +#endif + +/*----------------------------------------------------------------------------*/ + +#ifndef __extern_C +# ifdef __cplusplus +# define __extern_C extern "C" +# else +# define __extern_C +# endif +#endif /* __extern_C */ + +#ifndef __cplusplus +# ifndef bool +# define bool _Bool +# endif +# ifndef true +# define true (1) +# endif +# ifndef false +# define false (0) +# endif +#endif + +#if !defined(nullptr) && !defined(__cplusplus) || (__cplusplus < 201103L && !defined(_MSC_VER)) +# define nullptr NULL +#endif + +/*----------------------------------------------------------------------------*/ + +#if !defined(__thread) && (defined(_MSC_VER) || defined(__DMC__)) +# define __thread __declspec(thread) +#endif /* __thread */ + +#ifndef __alwaysinline +# if defined(__GNUC__) || __has_attribute(always_inline) +# define __alwaysinline __inline __attribute__((always_inline)) +# elif defined(_MSC_VER) +# define __alwaysinline __forceinline +# else +# define __alwaysinline +# endif +#endif /* __alwaysinline */ + +#ifndef __noinline +# if defined(__GNUC__) || __has_attribute(noinline) +# define __noinline __attribute__((noinline)) +# elif defined(_MSC_VER) +# define __noinline __declspec(noinline) +# elif defined(__SUNPRO_C) || defined(__sun) || defined(sun) +# define __noinline inline +# elif !defined(__INTEL_COMPILER) +# define __noinline /* FIXME ? 
*/ +# endif +#endif /* __noinline */ + +#ifndef __must_check_result +# if defined(__GNUC__) || __has_attribute(warn_unused_result) +# define __must_check_result __attribute__((warn_unused_result)) +# else +# define __must_check_result +# endif +#endif /* __must_check_result */ + +#ifndef __deprecated +# if defined(__GNUC__) || __has_attribute(deprecated) +# define __deprecated __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define __deprecated __declspec(deprecated) +# else +# define __deprecated +# endif +#endif /* __deprecated */ + +#ifndef __packed +# if defined(__GNUC__) || __has_attribute(packed) +# define __packed __attribute__((packed)) +# else +# define __packed +# endif +#endif /* __packed */ + +#ifndef __aligned +# if defined(__GNUC__) || __has_attribute(aligned) +# define __aligned(N) __attribute__((aligned(N))) +# elif defined(_MSC_VER) +# define __aligned(N) __declspec(align(N)) +# else +# define __aligned(N) +# endif +#endif /* __aligned */ + +#ifndef __noreturn +# if defined(__GNUC__) || __has_attribute(noreturn) +# define __noreturn __attribute__((noreturn)) +# elif defined(_MSC_VER) +# define __noreturn __declspec(noreturn) +# else +# define __noreturn +# endif +#endif /* __noreturn */ + +#ifndef __nothrow +# if defined(__GNUC__) || __has_attribute(nothrow) +# define __nothrow __attribute__((nothrow)) +# elif defined(_MSC_VER) && defined(__cplusplus) +# define __nothrow __declspec(nothrow) +# else +# define __nothrow +# endif +#endif /* __nothrow */ + +#ifndef __pure_function + /* Many functions have no effects except the return value and their + * return value depends only on the parameters and/or global variables. + * Such a function can be subject to common subexpression elimination + * and loop optimization just as an arithmetic operator would be. + * These functions should be declared with the attribute pure. */ +# if defined(__GNUC__) || __has_attribute(pure) +# define __pure_function __attribute__((pure)) +# else +# define __pure_function +# endif +#endif /* __pure_function */ + +#ifndef __const_function + /* Many functions do not examine any values except their arguments, + * and have no effects except the return value. Basically this is just + * slightly more strict class than the PURE attribute, since function + * is not allowed to read global memory. + * + * Note that a function that has pointer arguments and examines the + * data pointed to must not be declared const. Likewise, a function + * that calls a non-const function usually must not be const. + * It does not make sense for a const function to return void. 
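+ *
+ * For example (illustrative only, not part of this header): a trivial
+ * `min(int a, int b)` helper inspects nothing but its arguments and could
+ * be marked __const_function, whereas a strlen-like routine that reads
+ * memory through its pointer argument qualifies only for __pure_function.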
*/ +# if defined(__GNUC__) || __has_attribute(const) +# define __const_function __attribute__((const)) +# else +# define __const_function +# endif +#endif /* __const_function */ + +#ifndef __dll_hidden +# if defined(__GNUC__) || __has_attribute(visibility) +# define __hidden __attribute__((visibility("hidden"))) +# else +# define __hidden +# endif +#endif /* __dll_hidden */ + +#ifndef __optimize +# if defined(__OPTIMIZE__) +# if defined(__clang__) && !__has_attribute(optimize) +# define __optimize(ops) +# elif defined(__GNUC__) || __has_attribute(optimize) +# define __optimize(ops) __attribute__((optimize(ops))) +# else +# define __optimize(ops) +# endif +# else +# define __optimize(ops) +# endif +#endif /* __optimize */ + +#ifndef __hot +# if defined(__OPTIMIZE__) +# if defined(__clang__) && !__has_attribute(hot) + /* just put frequently used functions in separate section */ +# define __hot __attribute__((section("text.hot"))) __optimize("O3") +# elif defined(__GNUC__) || __has_attribute(hot) +# define __hot __attribute__((hot)) __optimize("O3") +# else +# define __hot __optimize("O3") +# endif +# else +# define __hot +# endif +#endif /* __hot */ + +#ifndef __cold +# if defined(__OPTIMIZE__) +# if defined(__clang__) && !__has_attribute(cold) + /* just put infrequently used functions in separate section */ +# define __cold __attribute__((section("text.unlikely"))) __optimize("Os") +# elif defined(__GNUC__) || __has_attribute(cold) +# define __cold __attribute__((cold)) __optimize("Os") +# else +# define __cold __optimize("Os") +# endif +# else +# define __cold +# endif +#endif /* __cold */ + +#ifndef __flatten +# if defined(__OPTIMIZE__) && (defined(__GNUC__) || __has_attribute(flatten)) +# define __flatten __attribute__((flatten)) +# else +# define __flatten +# endif +#endif /* __flatten */ + +#ifndef likely +# if defined(__GNUC__) || defined(__clang__) +# define likely(cond) __builtin_expect(!!(cond), 1) +# else +# define likely(x) (x) +# endif +#endif /* likely */ + +#ifndef unlikely +# if defined(__GNUC__) || defined(__clang__) +# define unlikely(cond) __builtin_expect(!!(cond), 0) +# else +# define unlikely(x) (x) +# endif +#endif /* unlikely */ + +#if !defined(__noop) && !defined(_MSC_VER) + static __inline int __do_noop(void* crutch, ...) { + (void) crutch; return 0; + } +# define __noop(...) 
__do_noop(0, __VA_ARGS__) +#endif /* __noop */ + +/* Wrapper around __func__, which is a C99 feature */ +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +# define mdbx_func_ __func__ +#elif (defined(__GNUC__) && __GNUC__ >= 2) || defined(__clang__) || defined(_MSC_VER) +# define mdbx_func_ __FUNCTION__ +#else +# define mdbx_func_ "<mdbx_unknown>" +#endif + +/*----------------------------------------------------------------------------*/ + +#if defined(USE_VALGRIND) +# include <valgrind/memcheck.h> +# ifndef VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE + /* LY: available since Valgrind 3.10 */ +# define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(a,s) +# define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(a,s) +# endif +#else +# define VALGRIND_CREATE_MEMPOOL(h,r,z) +# define VALGRIND_DESTROY_MEMPOOL(h) +# define VALGRIND_MEMPOOL_TRIM(h,a,s) +# define VALGRIND_MEMPOOL_ALLOC(h,a,s) +# define VALGRIND_MEMPOOL_FREE(h,a) +# define VALGRIND_MEMPOOL_CHANGE(h,a,b,s) +# define VALGRIND_MAKE_MEM_NOACCESS(a,s) +# define VALGRIND_MAKE_MEM_DEFINED(a,s) +# define VALGRIND_MAKE_MEM_UNDEFINED(a,s) +# define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(a,s) +# define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(a,s) +# define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(a,s) (0) +# define VALGRIND_CHECK_MEM_IS_DEFINED(a,s) (0) +# define RUNNING_ON_VALGRIND (0) +#endif /* USE_VALGRIND */ + +#ifdef __SANITIZE_ADDRESS__ +# include <sanitizer/asan_interface.h> +#else +# define ASAN_POISON_MEMORY_REGION(addr, size) \ + ((void)(addr), (void)(size)) +# define ASAN_UNPOISON_MEMORY_REGION(addr, size) \ + ((void)(addr), (void)(size)) +#endif /* __SANITIZE_ADDRESS__ */ + +/*----------------------------------------------------------------------------*/ + +#ifndef ARRAY_LENGTH +# ifdef __cplusplus + template <typename T, size_t N> + char (&__ArraySizeHelper(T (&array)[N]))[N]; +# define ARRAY_LENGTH(array) (sizeof(::__ArraySizeHelper(array))) +# else +# define ARRAY_LENGTH(array) (sizeof(array) / sizeof(array[0])) +# endif +#endif /* ARRAY_LENGTH */ + +#ifndef ARRAY_END +# define ARRAY_END(array) (&array[ARRAY_LENGTH(array)]) +#endif /* ARRAY_END */ + +#ifndef STRINGIFY +# define STRINGIFY_HELPER(x) #x +# define STRINGIFY(x) STRINGIFY_HELPER(x) +#endif /* STRINGIFY */ + +#ifndef offsetof +# define offsetof(type, member) __builtin_offsetof(type, member) +#endif /* offsetof */ + +#ifndef container_of +# define container_of(ptr, type, member) \ + ((type *)((char *)(ptr) - offsetof(type, member))) +#endif /* container_of */ + +#define MDBX_TETRAD(a, b, c, d) \ + ((uint32_t)(a) << 24 | (uint32_t)(b) << 16 | (uint32_t)(c) << 8 | (d)) + +#define MDBX_STRING_TETRAD(str) MDBX_TETRAD(str[0], str[1], str[2], str[3]) + +#define FIXME "FIXME: " __FILE__ ", " STRINGIFY(__LINE__) + +#ifndef STATIC_ASSERT_MSG +# if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) \ + || __has_feature(c_static_assert) +# define STATIC_ASSERT_MSG(expr, msg) _Static_assert(expr, msg) +# elif defined(static_assert) +# define STATIC_ASSERT_MSG(expr, msg) static_assert(expr, msg) +# elif defined(_MSC_VER) +# include <crtdbg.h> +# define STATIC_ASSERT_MSG(expr, msg) _STATIC_ASSERT(expr) +# else +# define STATIC_ASSERT_MSG(expr, msg) switch (0) {case 0:case (expr):;} +# endif +#endif /* STATIC_ASSERT */ + +#ifndef STATIC_ASSERT +# define STATIC_ASSERT(expr) STATIC_ASSERT_MSG(expr, #expr) +#endif + +/* *INDENT-ON* */ +/* clang-format on */ diff --git a/plugins/Dbx_mdb/src/mdbx/lck-windows.c b/plugins/Dbx_mdb/src/mdbx/lck-windows.c new file 
mode 100644
index 0000000000..66591db912
--- /dev/null
+++ b/plugins/Dbx_mdb/src/mdbx/lck-windows.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright 2015-2017 Leonid Yuriev <leo@yuriev.ru>
+ * and other libmdbx authors: please see AUTHORS file.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted only as authorized by the OpenLDAP
+ * Public License.
+ *
+ * A copy of this license is available in the file LICENSE in the
+ * top-level directory of the distribution or, alternatively, at
+ * <http://www.OpenLDAP.org/license.html>.
+ */
+
+#include "./bits.h"
+
+/* PREAMBLE FOR WINDOWS:
+ *
+ * We are not concerned with performance here.
+ * If you are running Windows, performance cannot be the goal.
+ * Otherwise please use Linux.
+ *
+ * Regards,
+ * LY
+ */
+
+/*----------------------------------------------------------------------------*/
+/* rthc */
+
+static CRITICAL_SECTION rthc_critical_section;
+
+static void NTAPI tls_callback(PVOID module, DWORD reason, PVOID reserved) {
+  (void)module;
+  (void)reserved;
+  switch (reason) {
+  case DLL_PROCESS_ATTACH:
+    InitializeCriticalSection(&rthc_critical_section);
+    break;
+  case DLL_PROCESS_DETACH:
+    DeleteCriticalSection(&rthc_critical_section);
+    break;
+
+  case DLL_THREAD_ATTACH:
+    break;
+  case DLL_THREAD_DETACH:
+    mdbx_rthc_cleanup();
+    break;
+  }
+}
+
+void mdbx_rthc_lock(void) { EnterCriticalSection(&rthc_critical_section); }
+
+void mdbx_rthc_unlock(void) { LeaveCriticalSection(&rthc_critical_section); }
+
+/* *INDENT-OFF* */
+/* clang-format off */
+#if defined(_MSC_VER)
+# pragma const_seg(push)
+# pragma data_seg(push)
+
+# ifdef _WIN64
+  /* kick the linker to create the TLS directory if not already done */
+# pragma comment(linker, "/INCLUDE:_tls_used")
+  /* Force some symbol references. */
+# pragma comment(linker, "/INCLUDE:mdbx_tls_callback")
+  /* specific const-segment for WIN64 */
+# pragma const_seg(".CRT$XLB")
+  const
+# else
+  /* kick the linker to create the TLS directory if not already done */
+# pragma comment(linker, "/INCLUDE:__tls_used")
+  /* Force some symbol references. 
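+   * (The /INCLUDE directives keep the TLS-directory symbol and the
+   *  mdbx_tls_callback pointer from being discarded as unreferenced, so the
+   *  callback placed into the .CRT$XLB section is retained and the loader can
+   *  deliver DLL_THREAD_DETACH to it, letting mdbx_rthc_cleanup() release the
+   *  reader slot of an exiting thread.)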
*/ +# pragma comment(linker, "/INCLUDE:_mdbx_tls_callback") + /* specific data-segment for WIN32 */ +# pragma data_seg(".CRT$XLB") +# endif + + PIMAGE_TLS_CALLBACK mdbx_tls_callback = tls_callback; +# pragma data_seg(pop) +# pragma const_seg(pop) + +#elif defined(__GNUC__) +# ifdef _WIN64 + const +# endif + PIMAGE_TLS_CALLBACK mdbx_tls_callback __attribute__((section(".CRT$XLB"), used)) + = tls_callback; +#else +# error FIXME +#endif +/* *INDENT-ON* */ +/* clang-format on */ + +/*----------------------------------------------------------------------------*/ + +#define LCK_SHARED 0 +#define LCK_EXCLUSIVE LOCKFILE_EXCLUSIVE_LOCK +#define LCK_WAITFOR 0 +#define LCK_DONTWAIT LOCKFILE_FAIL_IMMEDIATELY + +static __inline BOOL flock(mdbx_filehandle_t fd, DWORD flags, uint64_t offset, + size_t bytes) { + OVERLAPPED ov; + ov.hEvent = 0; + ov.Offset = (DWORD)offset; + ov.OffsetHigh = HIGH_DWORD(offset); + return LockFileEx(fd, flags, 0, (DWORD)bytes, HIGH_DWORD(bytes), &ov); +} + +static __inline BOOL funlock(mdbx_filehandle_t fd, uint64_t offset, + size_t bytes) { + return UnlockFile(fd, (DWORD)offset, HIGH_DWORD(offset), (DWORD)bytes, + HIGH_DWORD(bytes)); +} + +/*----------------------------------------------------------------------------*/ +/* global `write` lock for write-txt processing, + * exclusive locking both meta-pages) */ + +#define LCK_MAXLEN (1u + (size_t)(MAXSSIZE_T)) +#define LCK_META_OFFSET 0 +#define LCK_META_LEN 0x10000u +#define LCK_BODY_OFFSET LCK_META_LEN +#define LCK_BODY_LEN (LCK_MAXLEN - LCK_BODY_OFFSET + 1u) +#define LCK_META LCK_META_OFFSET, LCK_META_LEN +#define LCK_BODY LCK_BODY_OFFSET, LCK_BODY_LEN +#define LCK_WHOLE 0, LCK_MAXLEN + +int mdbx_txn_lock(MDBX_env *env) { + if (flock(env->me_fd, LCK_EXCLUSIVE | LCK_WAITFOR, LCK_BODY)) + return MDBX_SUCCESS; + return GetLastError(); +} + +void mdbx_txn_unlock(MDBX_env *env) { + if (!funlock(env->me_fd, LCK_BODY)) + mdbx_panic("%s failed: errcode %u", mdbx_func_, GetLastError()); +} + +/*----------------------------------------------------------------------------*/ +/* global `read` lock for readers registration, + * exclusive locking `mti_numreaders` (second) cacheline */ + +#define LCK_LO_OFFSET 0 +#define LCK_LO_LEN offsetof(MDBX_lockinfo, mti_numreaders) +#define LCK_UP_OFFSET LCK_LO_LEN +#define LCK_UP_LEN (MDBX_LOCKINFO_WHOLE_SIZE - LCK_UP_OFFSET) +#define LCK_LOWER LCK_LO_OFFSET, LCK_LO_LEN +#define LCK_UPPER LCK_UP_OFFSET, LCK_UP_LEN + +int mdbx_rdt_lock(MDBX_env *env) { + if (env->me_lfd == INVALID_HANDLE_VALUE) + return MDBX_SUCCESS; /* readonly database in readonly filesystem */ + + /* transite from S-? (used) to S-E (locked), e.g. exclusive lock upper-part */ + if (flock(env->me_lfd, LCK_EXCLUSIVE | LCK_WAITFOR, LCK_UPPER)) + return MDBX_SUCCESS; + return GetLastError(); +} + +void mdbx_rdt_unlock(MDBX_env *env) { + if (env->me_lfd != INVALID_HANDLE_VALUE) { + /* transite from S-E (locked) to S-? (used), e.g. unlock upper-part */ + if (!funlock(env->me_lfd, LCK_UPPER)) + mdbx_panic("%s failed: errcode %u", mdbx_func_, GetLastError()); + } +} + +/*----------------------------------------------------------------------------*/ +/* global `initial` lock for lockfile initialization, + * exclusive/shared locking first cacheline */ + +/* FIXME: locking schema/algo descritpion. + ?-? = free + S-? = used + E-? 
= exclusive-read + ?-S + ?-E = middle + S-S + S-E = locked + E-S + E-E = exclusive-write +*/ + +int mdbx_lck_init(MDBX_env *env) { + (void)env; + return MDBX_SUCCESS; +} + +/* Seize state as 'exclusive-write' (E-E and returns MDBX_RESULT_TRUE) + * or as 'used' (S-? and returns MDBX_RESULT_FALSE), otherwise returns an error + */ +static int internal_seize_lck(HANDLE lfd) { + int rc; + assert(lfd != INVALID_HANDLE_VALUE); + + /* 1) now on ?-? (free), get ?-E (middle) */ + mdbx_jitter4testing(false); + if (!flock(lfd, LCK_EXCLUSIVE | LCK_WAITFOR, LCK_UPPER)) { + rc = GetLastError() /* 2) something went wrong, give up */; + mdbx_error("%s(%s) failed: errcode %u", mdbx_func_, + "?-?(free) >> ?-E(middle)", rc); + return rc; + } + + /* 3) now on ?-E (middle), try E-E (exclusive-write) */ + mdbx_jitter4testing(false); + if (flock(lfd, LCK_EXCLUSIVE | LCK_DONTWAIT, LCK_LOWER)) + return MDBX_RESULT_TRUE /* 4) got E-E (exclusive-write), done */; + + /* 5) still on ?-E (middle) */ + rc = GetLastError(); + mdbx_jitter4testing(false); + if (rc != ERROR_SHARING_VIOLATION && rc != ERROR_LOCK_VIOLATION) { + /* 6) something went wrong, give up */ + if (!funlock(lfd, LCK_UPPER)) + mdbx_panic("%s(%s) failed: errcode %u", mdbx_func_, + "?-E(middle) >> ?-?(free)", GetLastError()); + return rc; + } + + /* 7) still on ?-E (middle), try S-E (locked) */ + mdbx_jitter4testing(false); + rc = flock(lfd, LCK_SHARED | LCK_DONTWAIT, LCK_LOWER) ? MDBX_RESULT_FALSE + : GetLastError(); + + mdbx_jitter4testing(false); + if (rc != MDBX_RESULT_FALSE) + mdbx_error("%s(%s) failed: errcode %u", mdbx_func_, + "?-E(middle) >> S-E(locked)", rc); + + /* 8) now on S-E (locked) or still on ?-E (middle), + * transite to S-? (used) or ?-? (free) */ + if (!funlock(lfd, LCK_UPPER)) + mdbx_panic("%s(%s) failed: errcode %u", mdbx_func_, + "X-E(locked/middle) >> X-?(used/free)", GetLastError()); + + /* 9) now on S-? (used, DONE) or ?-? (free, FAILURE) */ + return rc; +} + +int mdbx_lck_seize(MDBX_env *env) { + int rc; + + assert(env->me_fd != INVALID_HANDLE_VALUE); + if (env->me_lfd == INVALID_HANDLE_VALUE) { + /* LY: without-lck mode (e.g. on read-only filesystem) */ + mdbx_jitter4testing(false); + if (!flock(env->me_fd, LCK_SHARED | LCK_DONTWAIT, LCK_WHOLE)) { + rc = GetLastError(); + mdbx_error("%s(%s) failed: errcode %u", mdbx_func_, "without-lck", rc); + return rc; + } + return MDBX_RESULT_FALSE; + } + + rc = internal_seize_lck(env->me_lfd); + mdbx_jitter4testing(false); + if (rc == MDBX_RESULT_TRUE && (env->me_flags & MDBX_RDONLY) == 0) { + /* Check that another process don't operates in without-lck mode. + * Doing such check by exclusive locking the body-part of db. Should be + * noted: + * - we need an exclusive lock for do so; + * - we can't lock meta-pages, otherwise other process could get an error + * while opening db in valid (non-conflict) mode. */ + if (!flock(env->me_fd, LCK_EXCLUSIVE | LCK_DONTWAIT, LCK_BODY)) { + rc = GetLastError(); + mdbx_error("%s(%s) failed: errcode %u", mdbx_func_, + "lock-against-without-lck", rc); + mdbx_jitter4testing(false); + mdbx_lck_destroy(env); + } else { + mdbx_jitter4testing(false); + if (!funlock(env->me_fd, LCK_BODY)) + mdbx_panic("%s(%s) failed: errcode %u", mdbx_func_, + "unlock-against-without-lck", GetLastError()); + } + } + + return rc; +} + +int mdbx_lck_downgrade(MDBX_env *env, bool complete) { + /* Transite from exclusive state (E-?) to used (S-?) 
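+   * (When `complete` is false only the upper half of the lock is released,
+   *  leaving the environment at E-?, i.e. exclusive-read; when `complete` is
+   *  true the lock is stepped down through ?-E and S-E to the shared "used"
+   *  state S-?.)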
*/ + assert(env->me_fd != INVALID_HANDLE_VALUE); + assert(env->me_lfd != INVALID_HANDLE_VALUE); + + /* 1) must be at E-E (exclusive-write) */ + if (!complete) { + /* transite from E-E to E_? (exclusive-read) */ + if (!funlock(env->me_lfd, LCK_UPPER)) + mdbx_panic("%s(%s) failed: errcode %u", mdbx_func_, + "E-E(exclusive-write) >> E-?(exclusive-read)", GetLastError()); + return MDBX_SUCCESS /* 2) now at E-? (exclusive-read), done */; + } + + /* 3) now at E-E (exclusive-write), transite to ?_E (middle) */ + if (!funlock(env->me_lfd, LCK_LOWER)) + mdbx_panic("%s(%s) failed: errcode %u", mdbx_func_, + "E-E(exclusive-write) >> ?-E(middle)", GetLastError()); + + /* 4) now at ?-E (middle), transite to S-E (locked) */ + if (!flock(env->me_lfd, LCK_SHARED | LCK_DONTWAIT, LCK_LOWER)) { + int rc = GetLastError() /* 5) something went wrong, give up */; + mdbx_error("%s(%s) failed: errcode %u", mdbx_func_, + "?-E(middle) >> S-E(locked)", rc); + return rc; + } + + /* 6) got S-E (locked), continue transition to S-? (used) */ + if (!funlock(env->me_lfd, LCK_UPPER)) + mdbx_panic("%s(%s) failed: errcode %u", mdbx_func_, + "S-E(locked) >> S-?(used)", GetLastError()); + + return MDBX_SUCCESS /* 7) now at S-? (used), done */; +} + +int mdbx_lck_upgrade(MDBX_env *env) { + /* Transite from locked state (S-E) to exclusive-write (E-E) */ + assert(env->me_fd != INVALID_HANDLE_VALUE); + assert(env->me_lfd != INVALID_HANDLE_VALUE); + + /* 1) must be at S-E (locked), transite to ?_E (middle) */ + if (!funlock(env->me_lfd, LCK_LOWER)) + mdbx_panic("%s(%s) failed: errcode %u", mdbx_func_, + "S-E(locked) >> ?-E(middle)", GetLastError()); + + /* 3) now on ?-E (middle), try E-E (exclusive-write) */ + mdbx_jitter4testing(false); + if (flock(env->me_lfd, LCK_EXCLUSIVE | LCK_DONTWAIT, LCK_LOWER)) + return MDBX_RESULT_TRUE; /* 4) got E-E (exclusive-write), done */ + + /* 5) still on ?-E (middle) */ + int rc = GetLastError(); + mdbx_jitter4testing(false); + if (rc != ERROR_SHARING_VIOLATION && rc != ERROR_LOCK_VIOLATION) { + /* 6) something went wrong, report but continue */ + mdbx_error("%s(%s) failed: errcode %u", mdbx_func_, + "?-E(middle) >> E-E(exclusive-write)", rc); + } + + /* 7) still on ?-E (middle), try restore S-E (locked) */ + mdbx_jitter4testing(false); + rc = flock(env->me_lfd, LCK_SHARED | LCK_DONTWAIT, LCK_LOWER) + ? 
MDBX_RESULT_FALSE + : GetLastError(); + + mdbx_jitter4testing(false); + if (rc != MDBX_RESULT_FALSE) { + mdbx_fatal("%s(%s) failed: errcode %u", mdbx_func_, + "?-E(middle) >> S-E(locked)", rc); + return rc; + } + + /* 8) now on S-E (locked) */ + return MDBX_RESULT_FALSE; +} + +void mdbx_lck_destroy(MDBX_env *env) { + int rc; + + if (env->me_lfd != INVALID_HANDLE_VALUE) { + /* double `unlock` for robustly remove overlapped shared/exclusive locks */ + while (funlock(env->me_lfd, LCK_LOWER)) + ; + rc = GetLastError(); + assert(rc == ERROR_NOT_LOCKED); + (void)rc; + SetLastError(ERROR_SUCCESS); + + while (funlock(env->me_lfd, LCK_UPPER)) + ; + rc = GetLastError(); + assert(rc == ERROR_NOT_LOCKED); + (void)rc; + SetLastError(ERROR_SUCCESS); + } + + if (env->me_fd != INVALID_HANDLE_VALUE) { + /* explicitly unlock to avoid latency for other processes (windows kernel + * releases such locks via deferred queues) */ + while (funlock(env->me_fd, LCK_BODY)) + ; + rc = GetLastError(); + assert(rc == ERROR_NOT_LOCKED); + (void)rc; + SetLastError(ERROR_SUCCESS); + + while (funlock(env->me_fd, LCK_META)) + ; + rc = GetLastError(); + assert(rc == ERROR_NOT_LOCKED); + (void)rc; + SetLastError(ERROR_SUCCESS); + + while (funlock(env->me_fd, LCK_WHOLE)) + ; + rc = GetLastError(); + assert(rc == ERROR_NOT_LOCKED); + (void)rc; + SetLastError(ERROR_SUCCESS); + } +} + +/*----------------------------------------------------------------------------*/ +/* reader checking (by pid) */ + +int mdbx_rpid_set(MDBX_env *env) { + (void)env; + return MDBX_SUCCESS; +} + +int mdbx_rpid_clear(MDBX_env *env) { + (void)env; + return MDBX_SUCCESS; +} + +/* Checks reader by pid. + * + * Returns: + * MDBX_RESULT_TRUE, if pid is live (unable to acquire lock) + * MDBX_RESULT_FALSE, if pid is dead (lock acquired) + * or otherwise the errcode. */ +int mdbx_rpid_check(MDBX_env *env, mdbx_pid_t pid) { + (void)env; + HANDLE hProcess = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid); + int rc; + if (hProcess) { + rc = WaitForSingleObject(hProcess, 0); + CloseHandle(hProcess); + } else { + rc = GetLastError(); + } + + switch (rc) { + case ERROR_INVALID_PARAMETER: + /* pid seem invalid */ + return MDBX_RESULT_FALSE; + case WAIT_OBJECT_0: + /* process just exited */ + return MDBX_RESULT_FALSE; + case WAIT_TIMEOUT: + /* pid running */ + return MDBX_RESULT_TRUE; + default: + /* failure */ + return rc; + } +} diff --git a/plugins/Dbx_mdb/src/mdbx/mdbx.c b/plugins/Dbx_mdb/src/mdbx/mdbx.c new file mode 100644 index 0000000000..897051a12b --- /dev/null +++ b/plugins/Dbx_mdb/src/mdbx/mdbx.c @@ -0,0 +1,11544 @@ +/* + * Copyright 2015-2017 Leonid Yuriev <leo@yuriev.ru> + * and other libmdbx authors: please see AUTHORS file. + * All rights reserved. + * + * This code is derived from "LMDB engine" written by + * Howard Chu (Symas Corporation), which itself derived from btree.c + * written by Martin Hedenfalk. + * + * --- + * + * Portions Copyright 2011-2015 Howard Chu, Symas Corp. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * <http://www.OpenLDAP.org/license.html>. 
+ * + * --- + * + * Portions Copyright (c) 2009, 2010 Martin Hedenfalk <martin@bzero.se> + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include "./bits.h" + +/*----------------------------------------------------------------------------*/ +/* rthc (tls keys and destructors) */ + +typedef struct rthc_entry_t { + MDBX_reader *begin; + MDBX_reader *end; + mdbx_thread_key_t key; +} rthc_entry_t; + +#if MDBX_DEBUG +#define RTHC_INITIAL_LIMIT 1 +#else +#define RTHC_INITIAL_LIMIT 16 +#endif + +static unsigned rthc_count; +static unsigned rthc_limit = RTHC_INITIAL_LIMIT; +static rthc_entry_t rthc_table_static[RTHC_INITIAL_LIMIT]; +static rthc_entry_t *rthc_table = rthc_table_static; + +__cold void mdbx_rthc_dtor(void *ptr) { + MDBX_reader *rthc = (MDBX_reader *)ptr; + + mdbx_rthc_lock(); + const mdbx_pid_t self_pid = mdbx_getpid(); + for (unsigned i = 0; i < rthc_count; ++i) { + if (rthc >= rthc_table[i].begin && rthc < rthc_table[i].end) { + if (rthc->mr_pid == self_pid) { + rthc->mr_pid = 0; + mdbx_coherent_barrier(); + } + break; + } + } + mdbx_rthc_unlock(); +} + +__cold void mdbx_rthc_cleanup(void) { + mdbx_rthc_lock(); + const mdbx_pid_t self_pid = mdbx_getpid(); + for (unsigned i = 0; i < rthc_count; ++i) { + mdbx_thread_key_t key = rthc_table[i].key; + MDBX_reader *rthc = mdbx_thread_rthc_get(key); + if (rthc) { + mdbx_thread_rthc_set(key, NULL); + if (rthc->mr_pid == self_pid) { + rthc->mr_pid = 0; + mdbx_coherent_barrier(); + } + } + } + mdbx_rthc_unlock(); +} + +__cold int mdbx_rthc_alloc(mdbx_thread_key_t *key, MDBX_reader *begin, + MDBX_reader *end) { +#ifndef NDEBUG + *key = (mdbx_thread_key_t)0xBADBADBAD; +#endif /* NDEBUG */ + int rc = mdbx_thread_key_create(key); + if (rc != MDBX_SUCCESS) + return rc; + + mdbx_rthc_lock(); + if (rthc_count == rthc_limit) { + rthc_entry_t *new_table = + realloc((rthc_table == rthc_table_static) ? 
NULL : rthc_table, + sizeof(rthc_entry_t) * rthc_limit * 2); + if (new_table == NULL) { + rc = MDBX_ENOMEM; + goto bailout; + } + if (rthc_table == rthc_table_static) + memcpy(new_table, rthc_table_static, sizeof(rthc_table_static)); + rthc_table = new_table; + rthc_limit *= 2; + } + + rthc_table[rthc_count].key = *key; + rthc_table[rthc_count].begin = begin; + rthc_table[rthc_count].end = end; + ++rthc_count; + mdbx_rthc_unlock(); + return MDBX_SUCCESS; + +bailout: + mdbx_thread_key_delete(*key); + mdbx_rthc_unlock(); + return rc; +} + +__cold void mdbx_rthc_remove(mdbx_thread_key_t key) { + mdbx_rthc_lock(); + mdbx_thread_key_delete(key); + + for (unsigned i = 0; i < rthc_count; ++i) { + if (key == rthc_table[i].key) { + const mdbx_pid_t self_pid = mdbx_getpid(); + for (MDBX_reader *rthc = rthc_table[i].begin; rthc < rthc_table[i].end; + ++rthc) + if (rthc->mr_pid == self_pid) + rthc->mr_pid = 0; + mdbx_coherent_barrier(); + if (--rthc_count > 0) + rthc_table[i] = rthc_table[rthc_count]; + else if (rthc_table != rthc_table_static) { + free(rthc_table); + rthc_table = rthc_table_static; + rthc_limit = RTHC_INITIAL_LIMIT; + } + break; + } + } + + mdbx_rthc_unlock(); +} + +/*----------------------------------------------------------------------------*/ + +/* Allocate an PNL. + * Allocates memory for an PNL of the given size. + * Returns PNL on success, NULL on failure. */ +static MDBX_PNL mdbx_pnl_alloc(size_t size) { + MDBX_PNL pl = malloc((size + 2) * sizeof(pgno_t)); + if (likely(pl)) { + *pl++ = (pgno_t)size; + *pl = 0; + } + return pl; +} + +static MDBX_TXL mdbx_txl_alloc(void) { + const size_t malloc_overhead = sizeof(void *) * 2; + const size_t bytes = mdbx_roundup2(malloc_overhead + sizeof(txnid_t) * 61, + MDBX_CACHELINE_SIZE) - + malloc_overhead; + MDBX_TXL ptr = malloc(bytes); + if (likely(ptr)) { + *ptr++ = bytes / sizeof(txnid_t) - 2; + *ptr = 0; + } + return ptr; +} + +/* Free an PNL. + * [in] pl The PNL to free. */ +static void mdbx_pnl_free(MDBX_PNL pl) { + if (likely(pl)) + free(pl - 1); +} + +static void mdbx_txl_free(MDBX_TXL list) { + if (likely(list)) + free(list - 1); +} + +/* Append ID to PNL. The PNL must be big enough. */ +static __inline void mdbx_pnl_xappend(MDBX_PNL pl, pgno_t id) { + assert(pl[0] + (size_t)1 < MDBX_PNL_ALLOCLEN(pl)); + pl[pl[0] += 1] = id; +} + +static bool mdbx_pnl_check(MDBX_PNL pl) { + if (pl) { + for (const pgno_t *ptr = pl + pl[0]; --ptr > pl;) { + assert(MDBX_PNL_ORDERED(ptr[0], ptr[1])); + if (unlikely(MDBX_PNL_DISORDERED(ptr[0], ptr[1]))) + return false; + } + } + return true; +} + +/* Sort an PNL. + * [in,out] pnl The PNL to sort. 
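+ * Implemented below as a non-recursive quicksort with an explicit stack,
+ * falling back to insertion sort for runs shorter than PNL_SMALL; element
+ * order is decided by the MDBX_PNL_ORDERED/MDBX_PNL_DISORDERED comparisons.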
*/ +static void __hot mdbx_pnl_sort(MDBX_PNL pnl) { + /* Max possible depth of int-indexed tree * 2 items/level */ + int istack[sizeof(int) * CHAR_BIT * 2]; + int i, j, k, l, ir, jstack; + pgno_t a; + +/* Quicksort + Insertion sort for small arrays */ +#define PNL_SMALL 8 +#define PNL_SWAP(a, b) \ + do { \ + pgno_t tmp_pgno = (a); \ + (a) = (b); \ + (b) = tmp_pgno; \ + } while (0) + + ir = (int)pnl[0]; + l = 1; + jstack = 0; + while (1) { + if (ir - l < PNL_SMALL) { /* Insertion sort */ + for (j = l + 1; j <= ir; j++) { + a = pnl[j]; + for (i = j - 1; i >= 1; i--) { + if (MDBX_PNL_DISORDERED(a, pnl[i])) + break; + pnl[i + 1] = pnl[i]; + } + pnl[i + 1] = a; + } + if (jstack == 0) + break; + ir = istack[jstack--]; + l = istack[jstack--]; + } else { + k = (l + ir) >> 1; /* Choose median of left, center, right */ + PNL_SWAP(pnl[k], pnl[l + 1]); + if (MDBX_PNL_ORDERED(pnl[ir], pnl[l])) + PNL_SWAP(pnl[l], pnl[ir]); + + if (MDBX_PNL_ORDERED(pnl[ir], pnl[l + 1])) + PNL_SWAP(pnl[l + 1], pnl[ir]); + + if (MDBX_PNL_ORDERED(pnl[l + 1], pnl[l])) + PNL_SWAP(pnl[l], pnl[l + 1]); + + i = l + 1; + j = ir; + a = pnl[l + 1]; + while (1) { + do + i++; + while (MDBX_PNL_ORDERED(pnl[i], a)); + do + j--; + while (MDBX_PNL_ORDERED(a, pnl[j])); + if (j < i) + break; + PNL_SWAP(pnl[i], pnl[j]); + } + pnl[l + 1] = pnl[j]; + pnl[j] = a; + jstack += 2; + if (ir - i + 1 >= j - l) { + istack[jstack] = ir; + istack[jstack - 1] = i; + ir = j - 1; + } else { + istack[jstack] = j - 1; + istack[jstack - 1] = l; + l = i; + } + } + } +#undef PNL_SMALL +#undef PNL_SWAP + assert(mdbx_pnl_check(pnl)); +} + +/* Search for an ID in an PNL. + * [in] pl The PNL to search. + * [in] id The ID to search for. + * Returns The index of the first ID greater than or equal to id. */ +static unsigned __hot mdbx_pnl_search(MDBX_PNL pnl, pgno_t id) { + assert(mdbx_pnl_check(pnl)); + + /* binary search of id in pl + * if found, returns position of id + * if not found, returns first position greater than id */ + unsigned base = 0; + unsigned cursor = 1; + int val = 0; + unsigned n = pnl[0]; + + while (n > 0) { + unsigned pivot = n >> 1; + cursor = base + pivot + 1; + val = MDBX_PNL_ASCENDING ? mdbx_cmp2int(pnl[cursor], id) + : mdbx_cmp2int(id, pnl[cursor]); + + if (val < 0) { + n = pivot; + } else if (val > 0) { + base = cursor; + n -= pivot + 1; + } else { + return cursor; + } + } + + if (val > 0) + ++cursor; + + return cursor; +} + +/* Shrink an PNL. + * Return the PNL to the default size if it has grown larger. + * [in,out] ppl Address of the PNL to shrink. */ +static void mdbx_pnl_shrink(MDBX_PNL *ppl) { + MDBX_PNL pl = *ppl - 1; + if (unlikely(*pl > MDBX_PNL_UM_MAX)) { + /* shrink to MDBX_PNL_UM_MAX */ + pl = realloc(pl, (MDBX_PNL_UM_MAX + 2) * sizeof(pgno_t)); + if (likely(pl)) { + *pl++ = MDBX_PNL_UM_MAX; + *ppl = pl; + } + } +} + +/* Grow an PNL. + * Return the PNL to the size growed by given number. + * [in,out] ppl Address of the PNL to grow. */ +static int mdbx_pnl_grow(MDBX_PNL *ppl, size_t num) { + MDBX_PNL idn = *ppl - 1; + /* grow it */ + idn = realloc(idn, (*idn + num + 2) * sizeof(pgno_t)); + if (unlikely(!idn)) + return MDBX_ENOMEM; + *idn++ += (pgno_t)num; + *ppl = idn; + return 0; +} + +static int mdbx_txl_grow(MDBX_TXL *ptr, size_t num) { + MDBX_TXL list = *ptr - 1; + /* grow it */ + list = realloc(list, ((size_t)*list + num + 2) * sizeof(txnid_t)); + if (unlikely(!list)) + return MDBX_ENOMEM; + *list++ += num; + *ptr = list; + return 0; +} + +/* Make room for num additional elements in an PNL. 
+ * [in,out] ppl Address of the PNL. + * [in] num Number of elements to make room for. + * Returns 0 on success, MDBX_ENOMEM on failure. */ +static int mdbx_pnl_need(MDBX_PNL *ppl, size_t num) { + MDBX_PNL pl = *ppl; + num += pl[0]; + if (unlikely(num > pl[-1])) { + num = (num + num / 4 + (256 + 2)) & -256; + pl = realloc(pl - 1, num * sizeof(pgno_t)); + if (unlikely(!pl)) + return MDBX_ENOMEM; + *pl++ = (pgno_t)num - 2; + *ppl = pl; + } + return 0; +} + +/* Append an ID onto an PNL. + * [in,out] ppl Address of the PNL to append to. + * [in] id The ID to append. + * Returns 0 on success, MDBX_ENOMEM if the PNL is too large. */ +static int mdbx_pnl_append(MDBX_PNL *ppl, pgno_t id) { + MDBX_PNL pl = *ppl; + /* Too big? */ + if (unlikely(pl[0] >= pl[-1])) { + if (mdbx_pnl_grow(ppl, MDBX_PNL_UM_MAX)) + return MDBX_ENOMEM; + pl = *ppl; + } + pl[0]++; + pl[pl[0]] = id; + return 0; +} + +static int mdbx_txl_append(MDBX_TXL *ptr, txnid_t id) { + MDBX_TXL list = *ptr; + /* Too big? */ + if (unlikely(list[0] >= list[-1])) { + if (mdbx_txl_grow(ptr, (size_t)list[0])) + return MDBX_ENOMEM; + list = *ptr; + } + list[0]++; + list[list[0]] = id; + return 0; +} + +/* Append an PNL onto an PNL. + * [in,out] ppl Address of the PNL to append to. + * [in] app The PNL to append. + * Returns 0 on success, MDBX_ENOMEM if the PNL is too large. */ +static int mdbx_pnl_append_list(MDBX_PNL *ppl, MDBX_PNL app) { + MDBX_PNL pnl = *ppl; + /* Too big? */ + if (unlikely(pnl[0] + app[0] >= pnl[-1])) { + if (mdbx_pnl_grow(ppl, app[0])) + return MDBX_ENOMEM; + pnl = *ppl; + } + memcpy(&pnl[pnl[0] + 1], &app[1], app[0] * sizeof(pgno_t)); + pnl[0] += app[0]; + return 0; +} + +static int mdbx_txl_append_list(MDBX_TXL *ptr, MDBX_TXL append) { + MDBX_TXL list = *ptr; + /* Too big? */ + if (unlikely(list[0] + append[0] >= list[-1])) { + if (mdbx_txl_grow(ptr, (size_t)append[0])) + return MDBX_ENOMEM; + list = *ptr; + } + memcpy(&list[list[0] + 1], &append[1], (size_t)append[0] * sizeof(txnid_t)); + list[0] += append[0]; + return 0; +} + +/* Append an ID range onto an PNL. + * [in,out] ppl Address of the PNL to append to. + * [in] id The lowest ID to append. + * [in] n Number of IDs to append. + * Returns 0 on success, MDBX_ENOMEM if the PNL is too large. */ +static int mdbx_pnl_append_range(MDBX_PNL *ppl, pgno_t id, size_t n) { + pgno_t *pnl = *ppl, len = pnl[0]; + /* Too big? */ + if (unlikely(len + n > pnl[-1])) { + if (mdbx_pnl_grow(ppl, n | MDBX_PNL_UM_MAX)) + return MDBX_ENOMEM; + pnl = *ppl; + } + pnl[0] = len + (pgno_t)n; + pnl += len; + while (n) + pnl[n--] = id++; + return 0; +} + +/* Merge an PNL onto an PNL. The destination PNL must be big enough. + * [in] pl The PNL to merge into. + * [in] merge The PNL to merge. */ +static void __hot mdbx_pnl_xmerge(MDBX_PNL pnl, MDBX_PNL merge) { + assert(mdbx_pnl_check(pnl)); + assert(mdbx_pnl_check(merge)); + pgno_t old_id, merge_id, i = merge[0], j = pnl[0], k = i + j, total = k; + pnl[0] = + MDBX_PNL_ASCENDING ? 0 : ~(pgno_t)0; /* delimiter for pl scan below */ + old_id = pnl[j]; + while (i) { + merge_id = merge[i--]; + for (; MDBX_PNL_ORDERED(merge_id, old_id); old_id = pnl[--j]) + pnl[k--] = old_id; + pnl[k--] = merge_id; + } + pnl[0] = total; + assert(mdbx_pnl_check(pnl)); +} + +/* Search for an ID in an ID2L. + * [in] pnl The ID2L to search. + * [in] id The ID to search for. + * Returns The index of the first ID2 whose mid member is greater than + * or equal to id. 
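+ * This is the same binary search as mdbx_pnl_search() above, but over ID2
+ * entries compared by their .mid page number.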
*/ +static unsigned __hot mdbx_mid2l_search(MDBX_ID2L pnl, pgno_t id) { + /* binary search of id in pnl + * if found, returns position of id + * if not found, returns first position greater than id */ + unsigned base = 0; + unsigned cursor = 1; + int val = 0; + unsigned n = (unsigned)pnl[0].mid; + + while (n > 0) { + unsigned pivot = n >> 1; + cursor = base + pivot + 1; + val = mdbx_cmp2int(id, pnl[cursor].mid); + + if (val < 0) { + n = pivot; + } else if (val > 0) { + base = cursor; + n -= pivot + 1; + } else { + return cursor; + } + } + + if (val > 0) + ++cursor; + + return cursor; +} + +/* Insert an ID2 into a ID2L. + * [in,out] pnl The ID2L to insert into. + * [in] id The ID2 to insert. + * Returns 0 on success, -1 if the ID was already present in the ID2L. */ +static int mdbx_mid2l_insert(MDBX_ID2L pnl, MDBX_ID2 *id) { + unsigned x = mdbx_mid2l_search(pnl, id->mid); + if (unlikely(x < 1)) + return /* internal error */ -2; + + if (x <= pnl[0].mid && pnl[x].mid == id->mid) + return /* duplicate */ -1; + + if (unlikely(pnl[0].mid >= MDBX_PNL_UM_MAX)) + return /* too big */ -2; + + /* insert id */ + pnl[0].mid++; + for (unsigned i = (unsigned)pnl[0].mid; i > x; i--) + pnl[i] = pnl[i - 1]; + pnl[x] = *id; + return 0; +} + +/* Append an ID2 into a ID2L. + * [in,out] pnl The ID2L to append into. + * [in] id The ID2 to append. + * Returns 0 on success, -2 if the ID2L is too big. */ +static int mdbx_mid2l_append(MDBX_ID2L pnl, MDBX_ID2 *id) { + /* Too big? */ + if (unlikely(pnl[0].mid >= MDBX_PNL_UM_MAX)) + return -2; + + pnl[0].mid++; + pnl[pnl[0].mid] = *id; + return 0; +} + +/*----------------------------------------------------------------------------*/ + +int mdbx_runtime_flags = MDBX_DBG_PRINT +#if MDBX_DEBUG + | MDBX_DBG_ASSERT +#endif +#if MDBX_DEBUG > 1 + | MDBX_DBG_TRACE +#endif +#if MDBX_DEBUG > 2 + | MDBX_DBG_AUDIT +#endif +#if MDBX_DEBUG > 3 + | MDBX_DBG_EXTRA +#endif + ; + +MDBX_debug_func *mdbx_debug_logger; + +static int mdbx_page_alloc(MDBX_cursor *mc, unsigned num, MDBX_page **mp, + int flags); +static int mdbx_page_new(MDBX_cursor *mc, uint32_t flags, unsigned num, + MDBX_page **mp); +static int mdbx_page_touch(MDBX_cursor *mc); +static int mdbx_cursor_touch(MDBX_cursor *mc); + +#define MDBX_END_NAMES \ + { \ + "committed", "empty-commit", "abort", "reset", "reset-tmp", "fail-begin", \ + "fail-beginchild" \ + } +enum { + /* mdbx_txn_end operation number, for logging */ + MDBX_END_COMMITTED, + MDBX_END_EMPTY_COMMIT, + MDBX_END_ABORT, + MDBX_END_RESET, + MDBX_END_RESET_TMP, + MDBX_END_FAIL_BEGIN, + MDBX_END_FAIL_BEGINCHILD +}; +#define MDBX_END_OPMASK 0x0F /* mask for mdbx_txn_end() operation number */ +#define MDBX_END_UPDATE 0x10 /* update env state (DBIs) */ +#define MDBX_END_FREE 0x20 /* free txn unless it is MDBX_env.me_txn0 */ +#define MDBX_END_EOTDONE 0x40 /* txn's cursors already closed */ +#define MDBX_END_SLOT 0x80 /* release any reader slot if MDBX_NOTLS */ +static int mdbx_txn_end(MDBX_txn *txn, unsigned mode); + +static int mdbx_page_get(MDBX_cursor *mc, pgno_t pgno, MDBX_page **mp, + int *lvl); +static int mdbx_page_search_root(MDBX_cursor *mc, MDBX_val *key, int modify); + +#define MDBX_PS_MODIFY 1 +#define MDBX_PS_ROOTONLY 2 +#define MDBX_PS_FIRST 4 +#define MDBX_PS_LAST 8 +static int mdbx_page_search(MDBX_cursor *mc, MDBX_val *key, int flags); +static int mdbx_page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst); + +#define MDBX_SPLIT_REPLACE MDBX_APPENDDUP /* newkey is not new */ +static int mdbx_page_split(MDBX_cursor *mc, MDBX_val *newkey, MDBX_val *newdata, + 
pgno_t newpgno, unsigned nflags); + +static int mdbx_read_header(MDBX_env *env, MDBX_meta *meta); +static int mdbx_sync_locked(MDBX_env *env, unsigned flags, + MDBX_meta *const pending); +static void mdbx_env_close0(MDBX_env *env); + +static MDBX_node *mdbx_node_search(MDBX_cursor *mc, MDBX_val *key, int *exactp); +static int mdbx_node_add(MDBX_cursor *mc, unsigned indx, MDBX_val *key, + MDBX_val *data, pgno_t pgno, unsigned flags); +static void mdbx_node_del(MDBX_cursor *mc, size_t ksize); +static void mdbx_node_shrink(MDBX_page *mp, unsigned indx); +static int mdbx_node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, int fromleft); +static int mdbx_node_read(MDBX_cursor *mc, MDBX_node *leaf, MDBX_val *data); +static size_t mdbx_leaf_size(MDBX_env *env, MDBX_val *key, MDBX_val *data); +static size_t mdbx_branch_size(MDBX_env *env, MDBX_val *key); + +static int mdbx_rebalance(MDBX_cursor *mc); +static int mdbx_update_key(MDBX_cursor *mc, MDBX_val *key); + +static void mdbx_cursor_pop(MDBX_cursor *mc); +static int mdbx_cursor_push(MDBX_cursor *mc, MDBX_page *mp); + +static int mdbx_cursor_del0(MDBX_cursor *mc); +static int mdbx_del0(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, MDBX_val *data, + unsigned flags); +static int mdbx_cursor_sibling(MDBX_cursor *mc, int move_right); +static int mdbx_cursor_next(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, + MDBX_cursor_op op); +static int mdbx_cursor_prev(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, + MDBX_cursor_op op); +static int mdbx_cursor_set(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, + MDBX_cursor_op op, int *exactp); +static int mdbx_cursor_first(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data); +static int mdbx_cursor_last(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data); + +static void mdbx_cursor_init(MDBX_cursor *mc, MDBX_txn *txn, MDBX_dbi dbi, + MDBX_xcursor *mx); +static void mdbx_xcursor_init0(MDBX_cursor *mc); +static void mdbx_xcursor_init1(MDBX_cursor *mc, MDBX_node *node); +static void mdbx_xcursor_init2(MDBX_cursor *mc, MDBX_xcursor *src_mx, + int force); + +static int mdbx_drop0(MDBX_cursor *mc, int subs); + +static MDBX_cmp_func mdbx_cmp_memn, mdbx_cmp_memnr, mdbx_cmp_int_ai, + mdbx_cmp_int_a2, mdbx_cmp_int_ua; + +static const char *__mdbx_strerr(int errnum) { + /* Table of descriptions for MDBX errors */ + static const char *const tbl[] = { + "MDBX_KEYEXIST: Key/data pair already exists", + "MDBX_NOTFOUND: No matching key/data pair found", + "MDBX_PAGE_NOTFOUND: Requested page not found", + "MDBX_CORRUPTED: Database is corrupted", + "MDBX_PANIC: Update of meta page failed or environment had fatal error", + "MDBX_VERSION_MISMATCH: DB version mismatch libmdbx", + "MDBX_INVALID: File is not an MDBX file", + "MDBX_MAP_FULL: Environment mapsize limit reached", + "MDBX_DBS_FULL: Too may DBI (maxdbs reached)", + "MDBX_READERS_FULL: Too many readers (maxreaders reached)", + NULL /* MDBX_TLS_FULL (-30789): unused in MDBX */, + "MDBX_TXN_FULL: Transaction has too many dirty pages - transaction too " + "big", + "MDBX_CURSOR_FULL: Internal error - cursor stack limit reached", + "MDBX_PAGE_FULL: Internal error - page has no more space", + "MDBX_MAP_RESIZED: Database contents grew beyond environment mapsize", + "MDBX_INCOMPATIBLE: Operation and DB incompatible, or DB flags changed", + "MDBX_BAD_RSLOT: Invalid reuse of reader locktable slot", + "MDBX_BAD_TXN: Transaction must abort, has a child, or is invalid", + "MDBX_BAD_VALSIZE: Unsupported size of key/DB name/data, or wrong " + "DUPFIXED size", + "MDBX_BAD_DBI: The specified DBI 
handle was closed/changed unexpectedly", + "MDBX_PROBLEM: Unexpected problem - txn should abort", + }; + + if (errnum >= MDBX_KEYEXIST && errnum <= MDBX_LAST_ERRCODE) { + int i = errnum - MDBX_KEYEXIST; + return tbl[i]; + } + + switch (errnum) { + case MDBX_SUCCESS: + return "MDBX_SUCCESS: Successful"; + case MDBX_EMULTIVAL: + return "MDBX_EMULTIVAL: Unable to update multi-value for the given key"; + case MDBX_EBADSIGN: + return "MDBX_EBADSIGN: Wrong signature of a runtime object(s)"; + case MDBX_WANNA_RECOVERY: + return "MDBX_WANNA_RECOVERY: Database should be recovered, but this could " + "NOT be done in a read-only mode"; + case MDBX_EKEYMISMATCH: + return "MDBX_EKEYMISMATCH: The given key value is mismatched to the " + "current cursor position"; + case MDBX_TOO_LARGE: + return "MDBX_TOO_LARGE: Database is too large for current system, " + "e.g. could NOT be mapped into RAM"; + case MDBX_THREAD_MISMATCH: + return "MDBX_THREAD_MISMATCH: A thread has attempted to use a not " + "owned object, e.g. a transaction that started by another thread"; + default: + return NULL; + } +} + +const char *__cold mdbx_strerror_r(int errnum, char *buf, size_t buflen) { + const char *msg = __mdbx_strerr(errnum); + if (!msg) { + if (!buflen || buflen > INT_MAX) + return NULL; +#ifdef _MSC_VER + size_t size = FormatMessageA( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, + errnum, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buf, (DWORD)buflen, + NULL); + return size ? buf : NULL; +#elif defined(_GNU_SOURCE) + /* GNU-specific */ + msg = strerror_r(errnum, buf, buflen); +#elif (_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) + /* XSI-compliant */ + int rc = strerror_r(errnum, buf, buflen); + if (rc) { + rc = snprintf(buf, buflen, "error %d", errnum); + assert(rc > 0); + } + return buf; +#else + strncpy(buf, strerror(errnum), buflen); + buf[buflen - 1] = '\0'; + return buf; +#endif + } + return msg; +} + +const char *__cold mdbx_strerror(int errnum) { + const char *msg = __mdbx_strerr(errnum); + if (!msg) { +#ifdef _MSC_VER + static __thread char buffer[1024]; + size_t size = FormatMessageA( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, + errnum, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), buffer, + sizeof(buffer), NULL); + if (size) + msg = buffer; +#else + msg = strerror(errnum); +#endif + } + return msg; +} + +static txnid_t mdbx_oomkick(MDBX_env *env, const txnid_t laggard); + +void __cold mdbx_debug_log(int type, const char *function, int line, + const char *fmt, ...) { + va_list args; + + va_start(args, fmt); + if (mdbx_debug_logger) + mdbx_debug_logger(type, function, line, fmt, args); + else { + if (function && line > 0) + fprintf(stderr, "%s:%d ", function, line); + else if (function) + fprintf(stderr, "%s: ", function); + else if (line > 0) + fprintf(stderr, "%d: ", line); + vfprintf(stderr, fmt, args); + } + va_end(args); +} + +/* Dump a key in ascii or hexadecimal. */ +char *mdbx_dkey(const MDBX_val *key, char *const buf, const size_t bufsize) { + if (!key) + return "<null>"; + if (!buf || bufsize < 4) + return nullptr; + if (!key->iov_len) + return "<empty>"; + + const uint8_t *const data = key->iov_base; + bool is_ascii = true; + unsigned i; + for (i = 0; is_ascii && i < key->iov_len; i++) + if (data[i] < ' ' || data[i] > 127) + is_ascii = false; + + if (is_ascii) { + int len = + snprintf(buf, bufsize, "%.*s", + (key->iov_len > INT_MAX) ? 
INT_MAX : (int)key->iov_len, data); + assert(len > 0 && (unsigned)len < bufsize); + (void)len; + } else { + char *const detent = buf + bufsize - 2; + char *ptr = buf; + *ptr++ = '<'; + for (i = 0; i < key->iov_len; i++) { + const ptrdiff_t left = detent - ptr; + assert(left > 0); + int len = snprintf(ptr, left, "%02x", data[i]); + if (len < 0 || len >= left) + break; + ptr += len; + } + if (ptr < detent) { + ptr[0] = '>'; + ptr[1] = '\0'; + } + } + return buf; +} + +#if 0 /* LY: debug stuff */ +static const char *mdbx_leafnode_type(MDBX_node *n) { + static char *const tp[2][2] = {{"", ": DB"}, {": sub-page", ": sub-DB"}}; + return F_ISSET(n->mn_flags, F_BIGDATA) ? ": overflow page" + : tp[F_ISSET(n->mn_flags, F_DUPDATA)] + [F_ISSET(n->mn_flags, F_SUBDATA)]; +} + +/* Display all the keys in the page. */ +static void mdbx_page_list(MDBX_page *mp) { + pgno_t pgno = mp->mp_pgno; + const char *type, *state = (mp->mp_flags & P_DIRTY) ? ", dirty" : ""; + MDBX_node *node; + unsigned i, nkeys, nsize, total = 0; + MDBX_val key; + DKBUF; + + switch (mp->mp_flags & + (P_BRANCH | P_LEAF | P_LEAF2 | P_META | P_OVERFLOW | P_SUBP)) { + case P_BRANCH: + type = "Branch page"; + break; + case P_LEAF: + type = "Leaf page"; + break; + case P_LEAF | P_SUBP: + type = "Sub-page"; + break; + case P_LEAF | P_LEAF2: + type = "LEAF2 page"; + break; + case P_LEAF | P_LEAF2 | P_SUBP: + type = "LEAF2 sub-page"; + break; + case P_OVERFLOW: + mdbx_print("Overflow page %" PRIu64 " pages %u%s\n", pgno, mp->mp_pages, + state); + return; + case P_META: + mdbx_print("Meta-page %" PRIu64 " txnid %" PRIu64 "\n", pgno, + ((MDBX_meta *)PAGEDATA(mp))->mm_txnid); + return; + default: + mdbx_print("Bad page %" PRIu64 " flags 0x%X\n", pgno, mp->mp_flags); + return; + } + + nkeys = NUMKEYS(mp); + mdbx_print("%s %" PRIu64 " numkeys %u%s\n", type, pgno, nkeys, state); + + for (i = 0; i < nkeys; i++) { + if (IS_LEAF2(mp)) { /* LEAF2 pages have no mp_ptrs[] or node headers */ + key.iov_len = nsize = mp->mp_leaf2_ksize; + key.iov_base = LEAF2KEY(mp, i, nsize); + total += nsize; + mdbx_print("key %u: nsize %u, %s\n", i, nsize, DKEY(&key)); + continue; + } + node = NODEPTR(mp, i); + key.iov_len = node->mn_ksize; + key.iov_base = node->mn_data; + nsize = NODESIZE + key.iov_len; + if (IS_BRANCH(mp)) { + mdbx_print("key %u: page %" PRIu64 ", %s\n", i, NODEPGNO(node), + DKEY(&key)); + total += nsize; + } else { + if (F_ISSET(node->mn_flags, F_BIGDATA)) + nsize += sizeof(pgno_t); + else + nsize += NODEDSZ(node); + total += nsize; + nsize += sizeof(indx_t); + mdbx_print("key %u: nsize %u, %s%s\n", i, nsize, DKEY(&key), + mdbx_leafnode_type(node)); + } + total = EVEN(total); + } + mdbx_print("Total: header %u + contents %u + unused %u\n", + IS_LEAF2(mp) ? 
PAGEHDRSZ : PAGEHDRSZ + mp->mp_lower, total, + SIZELEFT(mp)); +} + +static void mdbx_cursor_chk(MDBX_cursor *mc) { + unsigned i; + MDBX_node *node; + MDBX_page *mp; + + if (!mc->mc_snum || !(mc->mc_flags & C_INITIALIZED)) + return; + for (i = 0; i < mc->mc_top; i++) { + mp = mc->mc_pg[i]; + node = NODEPTR(mp, mc->mc_ki[i]); + if (unlikely(NODEPGNO(node) != mc->mc_pg[i + 1]->mp_pgno)) + mdbx_print("oops!\n"); + } + if (unlikely(mc->mc_ki[i] >= NUMKEYS(mc->mc_pg[i]))) + mdbx_print("ack!\n"); + if (XCURSOR_INITED(mc)) { + node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + if (((node->mn_flags & (F_DUPDATA | F_SUBDATA)) == F_DUPDATA) && + mc->mc_xcursor->mx_cursor.mc_pg[0] != NODEDATA(node)) { + mdbx_print("blah!\n"); + } + } +} +#endif /* 0 */ + +/* Count all the pages in each DB and in the freelist and make sure + * it matches the actual number of pages being used. + * All named DBs must be open for a correct count. */ +static void mdbx_audit(MDBX_txn *txn) { + MDBX_cursor mc; + MDBX_val key, data; + int rc; + + pgno_t freecount = 0; + mdbx_cursor_init(&mc, txn, FREE_DBI, NULL); + while ((rc = mdbx_cursor_get(&mc, &key, &data, MDBX_NEXT)) == 0) + freecount += *(pgno_t *)data.iov_base; + mdbx_tassert(txn, rc == MDBX_NOTFOUND); + + pgno_t count = 0; + for (MDBX_dbi i = 0; i < txn->mt_numdbs; i++) { + MDBX_xcursor mx; + if (!(txn->mt_dbflags[i] & DB_VALID)) + continue; + mdbx_cursor_init(&mc, txn, i, &mx); + if (txn->mt_dbs[i].md_root == P_INVALID) + continue; + count += txn->mt_dbs[i].md_branch_pages + txn->mt_dbs[i].md_leaf_pages + + txn->mt_dbs[i].md_overflow_pages; + if (txn->mt_dbs[i].md_flags & MDBX_DUPSORT) { + rc = mdbx_page_search(&mc, NULL, MDBX_PS_FIRST); + for (; rc == MDBX_SUCCESS; rc = mdbx_cursor_sibling(&mc, 1)) { + MDBX_page *mp = mc.mc_pg[mc.mc_top]; + for (unsigned j = 0; j < NUMKEYS(mp); j++) { + MDBX_node *leaf = NODEPTR(mp, j); + if (leaf->mn_flags & F_SUBDATA) { + MDBX_db db; + memcpy(&db, NODEDATA(leaf), sizeof(db)); + count += + db.md_branch_pages + db.md_leaf_pages + db.md_overflow_pages; + } + } + } + mdbx_tassert(txn, rc == MDBX_NOTFOUND); + } + } + if (freecount + count + NUM_METAS != txn->mt_next_pgno) { + mdbx_print("audit: %" PRIaTXN " freecount: %" PRIaPGNO " count: %" PRIaPGNO + " total: %" PRIaPGNO " next_pgno: %" PRIaPGNO "\n", + txn->mt_txnid, freecount, count + NUM_METAS, + freecount + count + NUM_METAS, txn->mt_next_pgno); + } +} + +int mdbx_cmp(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *a, + const MDBX_val *b) { + mdbx_assert(NULL, txn->mt_signature == MDBX_MT_SIGNATURE); + return txn->mt_dbxs[dbi].md_cmp(a, b); +} + +int mdbx_dcmp(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *a, + const MDBX_val *b) { + mdbx_assert(NULL, txn->mt_signature == MDBX_MT_SIGNATURE); + return txn->mt_dbxs[dbi].md_dcmp(a, b); +} + +/* Allocate memory for a page. + * Re-use old malloc'd pages first for singletons, otherwise just malloc. + * Set MDBX_TXN_ERROR on failure. 
*/ +static MDBX_page *mdbx_page_malloc(MDBX_txn *txn, unsigned num) { + MDBX_env *env = txn->mt_env; + MDBX_page *np = env->me_dpages; + size_t size = env->me_psize; + if (likely(num == 1 && np)) { + ASAN_UNPOISON_MEMORY_REGION(np, size); + VALGRIND_MEMPOOL_ALLOC(env, np, size); + VALGRIND_MAKE_MEM_DEFINED(&np->mp_next, sizeof(np->mp_next)); + env->me_dpages = np->mp_next; + } else { + size = pgno2bytes(env, num); + np = malloc(size); + if (unlikely(!np)) { + txn->mt_flags |= MDBX_TXN_ERROR; + return np; + } + VALGRIND_MEMPOOL_ALLOC(env, np, size); + } + + if ((env->me_flags & MDBX_NOMEMINIT) == 0) { + /* For a single page alloc, we init everything after the page header. + * For multi-page, we init the final page; if the caller needed that + * many pages they will be filling in at least up to the last page. */ + size_t skip = PAGEHDRSZ; + if (num > 1) + skip += pgno2bytes(env, num - 1); + memset((char *)np + skip, 0, size - skip); + } + VALGRIND_MAKE_MEM_UNDEFINED(np, size); + np->mp_flags = 0; + np->mp_pages = num; + return np; +} + +/* Free a single page. + * Saves single pages to a list, for future reuse. + * (This is not used for multi-page overflow pages.) */ +static __inline void mdbx_page_free(MDBX_env *env, MDBX_page *mp) { + mp->mp_next = env->me_dpages; + VALGRIND_MEMPOOL_FREE(env, mp); + env->me_dpages = mp; +} + +/* Free a dirty page */ +static void mdbx_dpage_free(MDBX_env *env, MDBX_page *dp) { + if (!IS_OVERFLOW(dp) || dp->mp_pages == 1) { + mdbx_page_free(env, dp); + } else { + /* large pages just get freed directly */ + VALGRIND_MEMPOOL_FREE(env, dp); + free(dp); + } +} + +/* Return all dirty pages to dpage list */ +static void mdbx_dlist_free(MDBX_txn *txn) { + MDBX_env *env = txn->mt_env; + MDBX_ID2L dl = txn->mt_rw_dirtylist; + size_t i, n = dl[0].mid; + + for (i = 1; i <= n; i++) { + mdbx_dpage_free(env, dl[i].mptr); + } + dl[0].mid = 0; +} + +static size_t bytes_align2os_bytes(const MDBX_env *env, size_t bytes) { + return mdbx_roundup2(mdbx_roundup2(bytes, env->me_psize), env->me_os_psize); +} + +static void __cold mdbx_kill_page(MDBX_env *env, pgno_t pgno) { + const size_t offs = pgno2bytes(env, pgno); + const size_t shift = offsetof(MDBX_page, mp_pages); + + if (env->me_flags & MDBX_WRITEMAP) { + MDBX_page *mp = (MDBX_page *)(env->me_map + offs); + memset(&mp->mp_pages, 0x6F /* 'o', 111 */, env->me_psize - shift); + VALGRIND_MAKE_MEM_NOACCESS(&mp->mp_pages, env->me_psize - shift); + ASAN_POISON_MEMORY_REGION(&mp->mp_pages, env->me_psize - shift); + } else { + intptr_t len = env->me_psize - shift; + void *buf = alloca(len); + memset(buf, 0x6F /* 'o', 111 */, len); + (void)mdbx_pwrite(env->me_fd, buf, len, offs + shift); + } +} + +/* Loosen or free a single page. + * + * Saves single pages to a list for future reuse + * in this same txn. It has been pulled from the freeDB + * and already resides on the dirty list, but has been + * deleted. Use these pages first before pulling again + * from the freeDB. + * + * If the page wasn't dirtied in this txn, just add it + * to this txn's free list. */ +static int mdbx_page_loose(MDBX_cursor *mc, MDBX_page *mp) { + int loose = 0; + const pgno_t pgno = mp->mp_pgno; + MDBX_txn *txn = mc->mc_txn; + + if ((mp->mp_flags & P_DIRTY) && mc->mc_dbi != FREE_DBI) { + if (txn->mt_parent) { + MDBX_ID2 *dl = txn->mt_rw_dirtylist; + /* If txn has a parent, + * make sure the page is in our dirty list. 
*/ + if (dl[0].mid) { + unsigned x = mdbx_mid2l_search(dl, pgno); + if (x <= dl[0].mid && dl[x].mid == pgno) { + if (unlikely(mp != dl[x].mptr)) { /* bad cursor? */ + mdbx_error("wrong page 0x%p #%" PRIaPGNO + " in the dirtylist[%d], expecting %p", + dl[x].mptr, pgno, x, mp); + mc->mc_flags &= ~(C_INITIALIZED | C_EOF); + txn->mt_flags |= MDBX_TXN_ERROR; + return MDBX_PROBLEM; + } + /* ok, it's ours */ + loose = 1; + } + } + } else { + /* no parent txn, so it's just ours */ + loose = 1; + } + } + if (loose) { + mdbx_debug("loosen db %d page %" PRIaPGNO, DDBI(mc), mp->mp_pgno); + MDBX_page **link = &NEXT_LOOSE_PAGE(mp); + if (unlikely(txn->mt_env->me_flags & MDBX_PAGEPERTURB)) { + mdbx_kill_page(txn->mt_env, pgno); + VALGRIND_MAKE_MEM_UNDEFINED(link, sizeof(MDBX_page *)); + ASAN_UNPOISON_MEMORY_REGION(link, sizeof(MDBX_page *)); + } + *link = txn->mt_loose_pages; + txn->mt_loose_pages = mp; + txn->mt_loose_count++; + mp->mp_flags |= P_LOOSE; + } else { + int rc = mdbx_pnl_append(&txn->mt_befree_pages, pgno); + if (unlikely(rc)) + return rc; + } + + return MDBX_SUCCESS; +} + +/* Set or clear P_KEEP in dirty, non-overflow, non-sub pages watched by txn. + * + * [in] mc A cursor handle for the current operation. + * [in] pflags Flags of the pages to update: + * - P_DIRTY to set P_KEEP, + * - P_DIRTY|P_KEEP to clear it. + * [in] all No shortcuts. Needed except after a full mdbx_page_flush(). + * + * Returns 0 on success, non-zero on failure. */ +static int mdbx_pages_xkeep(MDBX_cursor *mc, unsigned pflags, bool all) { + const unsigned Mask = P_SUBP | P_DIRTY | P_LOOSE | P_KEEP; + MDBX_txn *txn = mc->mc_txn; + MDBX_cursor *m3, *m0 = mc; + MDBX_xcursor *mx; + MDBX_page *dp, *mp; + MDBX_node *leaf; + unsigned i, j; + int rc = MDBX_SUCCESS, level; + + /* Mark pages seen by cursors: First m0, then tracked cursors */ + for (i = txn->mt_numdbs;;) { + if (mc->mc_flags & C_INITIALIZED) { + for (m3 = mc;; m3 = &mx->mx_cursor) { + mp = NULL; + for (j = 0; j < m3->mc_snum; j++) { + mp = m3->mc_pg[j]; + if ((mp->mp_flags & Mask) == pflags) + mp->mp_flags ^= P_KEEP; + } + mx = m3->mc_xcursor; + /* Proceed to mx if it is at a sub-database */ + if (!(mx && (mx->mx_cursor.mc_flags & C_INITIALIZED))) + break; + if (!(mp && (mp->mp_flags & P_LEAF))) + break; + leaf = NODEPTR(mp, m3->mc_ki[j - 1]); + if (!(leaf->mn_flags & F_SUBDATA)) + break; + } + } + mc = mc->mc_next; + for (; !mc || mc == m0; mc = txn->mt_cursors[--i]) + if (i == 0) + goto mark_done; + } + +mark_done: + if (all) { + /* Mark dirty root pages */ + for (i = 0; i < txn->mt_numdbs; i++) { + if (txn->mt_dbflags[i] & DB_DIRTY) { + pgno_t pgno = txn->mt_dbs[i].md_root; + if (pgno == P_INVALID) + continue; + if (unlikely((rc = mdbx_page_get(m0, pgno, &dp, &level)) != + MDBX_SUCCESS)) + break; + if ((dp->mp_flags & Mask) == pflags && level <= 1) + dp->mp_flags ^= P_KEEP; + } + } + } + + return rc; +} + +static int mdbx_page_flush(MDBX_txn *txn, pgno_t keep); + +/* Spill pages from the dirty list back to disk. + * This is intended to prevent running into MDBX_TXN_FULL situations, + * but note that they may still occur in a few cases: + * + * 1) our estimate of the txn size could be too small. Currently this + * seems unlikely, except with a large number of MDBX_MULTIPLE items. + * + * 2) child txns may run out of space if their parents dirtied a + * lot of pages and never spilled them. TODO: we probably should do + * a preemptive spill during mdbx_txn_begin() of a child txn, if + * the parent's dirtyroom is below a given threshold. 
+ * + * Otherwise, if not using nested txns, it is expected that apps will + * not run into MDBX_TXN_FULL any more. The pages are flushed to disk + * the same way as for a txn commit, e.g. their P_DIRTY flag is cleared. + * If the txn never references them again, they can be left alone. + * If the txn only reads them, they can be used without any fuss. + * If the txn writes them again, they can be dirtied immediately without + * going thru all of the work of mdbx_page_touch(). Such references are + * handled by mdbx_page_unspill(). + * + * Also note, we never spill DB root pages, nor pages of active cursors, + * because we'll need these back again soon anyway. And in nested txns, + * we can't spill a page in a child txn if it was already spilled in a + * parent txn. That would alter the parent txns' data even though + * the child hasn't committed yet, and we'd have no way to undo it if + * the child aborted. + * + * [in] m0 cursor A cursor handle identifying the transaction and + * database for which we are checking space. + * [in] key For a put operation, the key being stored. + * [in] data For a put operation, the data being stored. + * + * Returns 0 on success, non-zero on failure. */ +static int mdbx_page_spill(MDBX_cursor *m0, MDBX_val *key, MDBX_val *data) { + MDBX_txn *txn = m0->mc_txn; + MDBX_ID2L dl = txn->mt_rw_dirtylist; + + if (m0->mc_flags & C_SUB) + return MDBX_SUCCESS; + + /* Estimate how much space this op will take */ + pgno_t i = m0->mc_db->md_depth; + /* Named DBs also dirty the main DB */ + if (m0->mc_dbi >= CORE_DBS) + i += txn->mt_dbs[MAIN_DBI].md_depth; + /* For puts, roughly factor in the key+data size */ + if (key) + i += bytes2pgno(txn->mt_env, LEAFSIZE(key, data) + txn->mt_env->me_psize); + i += i; /* double it for good measure */ + pgno_t need = i; + + if (txn->mt_dirtyroom > i) + return MDBX_SUCCESS; + + if (!txn->mt_spill_pages) { + txn->mt_spill_pages = mdbx_pnl_alloc(MDBX_PNL_UM_MAX); + if (unlikely(!txn->mt_spill_pages)) + return MDBX_ENOMEM; + } else { + /* purge deleted slots */ + MDBX_PNL sl = txn->mt_spill_pages; + pgno_t num = sl[0], j = 0; + for (i = 1; i <= num; i++) { + if (!(sl[i] & 1)) + sl[++j] = sl[i]; + } + sl[0] = j; + } + + /* Preserve pages which may soon be dirtied again */ + int rc = mdbx_pages_xkeep(m0, P_DIRTY, true); + if (unlikely(rc != MDBX_SUCCESS)) + goto bailout; + + /* Less aggressive spill - we originally spilled the entire dirty list, + * with a few exceptions for cursor pages and DB root pages. But this + * turns out to be a lot of wasted effort because in a large txn many + * of those pages will need to be used again. So now we spill only 1/8th + * of the dirty pages. Testing revealed this to be a good tradeoff, + * better than 1/2, 1/4, or 1/10. */ + if (need < MDBX_PNL_UM_MAX / 8) + need = MDBX_PNL_UM_MAX / 8; + + /* Save the page IDs of all the pages we're flushing */ + /* flush from the tail forward, this saves a lot of shifting later on. */ + for (i = dl[0].mid; i && need; i--) { + pgno_t pn = dl[i].mid << 1; + MDBX_page *dp = dl[i].mptr; + if (dp->mp_flags & (P_LOOSE | P_KEEP)) + continue; + /* Can't spill twice, + * make sure it's not already in a parent's spill list. 
*/ + if (txn->mt_parent) { + MDBX_txn *tx2; + for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) { + if (tx2->mt_spill_pages) { + unsigned j = mdbx_pnl_search(tx2->mt_spill_pages, pn); + if (j <= tx2->mt_spill_pages[0] && tx2->mt_spill_pages[j] == pn) { + dp->mp_flags |= P_KEEP; + break; + } + } + } + if (tx2) + continue; + } + rc = mdbx_pnl_append(&txn->mt_spill_pages, pn); + if (unlikely(rc != MDBX_SUCCESS)) + goto bailout; + need--; + } + mdbx_pnl_sort(txn->mt_spill_pages); + + /* Flush the spilled part of dirty list */ + rc = mdbx_page_flush(txn, i); + if (unlikely(rc != MDBX_SUCCESS)) + goto bailout; + + /* Reset any dirty pages we kept that page_flush didn't see */ + rc = mdbx_pages_xkeep(m0, P_DIRTY | P_KEEP, i != 0); + +bailout: + txn->mt_flags |= rc ? MDBX_TXN_ERROR : MDBX_TXN_SPILLS; + return rc; +} + +/*----------------------------------------------------------------------------*/ + +#define METAPAGE(env, n) (&pgno2page(env, n)->mp_meta) + +#define METAPAGE_END(env) METAPAGE(env, NUM_METAS) + +static __inline txnid_t meta_txnid(const MDBX_env *env, const MDBX_meta *meta, + bool allow_volatile) { + mdbx_assert(env, meta >= METAPAGE(env, 0) || meta < METAPAGE_END(env)); + txnid_t a = meta->mm_txnid_a; + txnid_t b = meta->mm_txnid_b; + if (allow_volatile) + return (a == b) ? a : 0; + mdbx_assert(env, a == b); + return a; +} + +static __inline txnid_t mdbx_meta_txnid_stable(const MDBX_env *env, + const MDBX_meta *meta) { + return meta_txnid(env, meta, false); +} + +static __inline txnid_t mdbx_meta_txnid_fluid(const MDBX_env *env, + const MDBX_meta *meta) { + return meta_txnid(env, meta, true); +} + +static __inline void mdbx_meta_update_begin(const MDBX_env *env, + MDBX_meta *meta, txnid_t txnid) { + mdbx_assert(env, meta >= METAPAGE(env, 0) || meta < METAPAGE_END(env)); + mdbx_assert(env, meta->mm_txnid_a < txnid && meta->mm_txnid_b < txnid); + meta->mm_txnid_a = txnid; + (void)env; + mdbx_coherent_barrier(); +} + +static __inline void mdbx_meta_update_end(const MDBX_env *env, MDBX_meta *meta, + txnid_t txnid) { + mdbx_assert(env, meta >= METAPAGE(env, 0) || meta < METAPAGE_END(env)); + mdbx_assert(env, meta->mm_txnid_a == txnid); + mdbx_assert(env, meta->mm_txnid_b < txnid); + + mdbx_jitter4testing(true); + meta->mm_txnid_b = txnid; + mdbx_coherent_barrier(); +} + +static __inline void mdbx_meta_set_txnid(const MDBX_env *env, MDBX_meta *meta, + txnid_t txnid) { + mdbx_assert(env, meta < METAPAGE(env, 0) || meta > METAPAGE_END(env)); + meta->mm_txnid_a = txnid; + meta->mm_txnid_b = txnid; +} + +static __inline uint64_t mdbx_meta_sign(const MDBX_meta *meta) { + uint64_t sign = MDBX_DATASIGN_NONE; +#if 0 /* TODO */ + sign = hippeus_hash64(...); +#else + (void)meta; +#endif + /* LY: newer returns MDBX_DATASIGN_NONE or MDBX_DATASIGN_WEAK */ + return (sign > MDBX_DATASIGN_WEAK) ? 
sign : ~sign; +} + +enum meta_choise_mode { prefer_last, prefer_noweak, prefer_steady }; + +static __inline bool mdbx_meta_ot(const enum meta_choise_mode mode, + const MDBX_env *env, const MDBX_meta *a, + const MDBX_meta *b) { + mdbx_jitter4testing(true); + txnid_t txnid_a = mdbx_meta_txnid_fluid(env, a); + txnid_t txnid_b = mdbx_meta_txnid_fluid(env, b); + if (txnid_a == txnid_b) + return META_IS_STEADY(b) || (META_IS_WEAK(a) && !META_IS_WEAK(a)); + + mdbx_jitter4testing(true); + switch (mode) { + default: + assert(false); + /* fall through */ + case prefer_steady: + if (META_IS_STEADY(a) != META_IS_STEADY(b)) + return META_IS_STEADY(b); + /* fall through */ + case prefer_noweak: + if (META_IS_WEAK(a) != META_IS_WEAK(b)) + return !META_IS_WEAK(b); + /* fall through */ + case prefer_last: + mdbx_jitter4testing(true); + return txnid_a < txnid_b; + } +} + +static __inline bool mdbx_meta_eq(const MDBX_env *env, const MDBX_meta *a, + const MDBX_meta *b) { + mdbx_jitter4testing(true); + const txnid_t txnid = mdbx_meta_txnid_fluid(env, a); + if (!txnid || txnid != mdbx_meta_txnid_fluid(env, b)) + return false; + + mdbx_jitter4testing(true); + if (META_IS_STEADY(a) != META_IS_STEADY(b)) + return false; + + mdbx_jitter4testing(true); + return true; +} + +static int mdbx_meta_eq_mask(const MDBX_env *env) { + MDBX_meta *m0 = METAPAGE(env, 0); + MDBX_meta *m1 = METAPAGE(env, 1); + MDBX_meta *m2 = METAPAGE(env, 2); + + int rc = mdbx_meta_eq(env, m0, m1) ? 1 : 0; + if (mdbx_meta_eq(env, m1, m2)) + rc += 2; + if (mdbx_meta_eq(env, m2, m0)) + rc += 4; + return rc; +} + +static __inline MDBX_meta *mdbx_meta_recent(const enum meta_choise_mode mode, + const MDBX_env *env, MDBX_meta *a, + MDBX_meta *b) { + const bool a_older_that_b = mdbx_meta_ot(mode, env, a, b); + mdbx_assert(env, !mdbx_meta_eq(env, a, b)); + return a_older_that_b ? b : a; +} + +static __inline MDBX_meta *mdbx_meta_ancient(const enum meta_choise_mode mode, + const MDBX_env *env, MDBX_meta *a, + MDBX_meta *b) { + const bool a_older_that_b = mdbx_meta_ot(mode, env, a, b); + mdbx_assert(env, !mdbx_meta_eq(env, a, b)); + return a_older_that_b ? a : b; +} + +static __inline MDBX_meta * +mdbx_meta_mostrecent(const enum meta_choise_mode mode, const MDBX_env *env) { + MDBX_meta *m0 = METAPAGE(env, 0); + MDBX_meta *m1 = METAPAGE(env, 1); + MDBX_meta *m2 = METAPAGE(env, 2); + + MDBX_meta *head = mdbx_meta_recent(mode, env, m0, m1); + head = mdbx_meta_recent(mode, env, head, m2); + return head; +} + +static __hot MDBX_meta *mdbx_meta_steady(const MDBX_env *env) { + return mdbx_meta_mostrecent(prefer_steady, env); +} + +static __hot MDBX_meta *mdbx_meta_head(const MDBX_env *env) { + return mdbx_meta_mostrecent(prefer_last, env); +} + +static __hot txnid_t mdbx_reclaiming_detent(const MDBX_env *env) { + if (F_ISSET(env->me_flags, MDBX_UTTERLY_NOSYNC)) + return env->me_txn->mt_txnid - 1; + + return mdbx_meta_txnid_stable(env, mdbx_meta_steady(env)); +} + +static const char *mdbx_durable_str(const MDBX_meta *const meta) { + if (META_IS_WEAK(meta)) + return "Weak"; + if (META_IS_STEADY(meta)) + return (meta->mm_datasync_sign == mdbx_meta_sign(meta)) ? "Steady" + : "Tainted"; + return "Legacy"; +} + +/*----------------------------------------------------------------------------*/ + +/* Find oldest txnid still referenced. 
*/ +static txnid_t mdbx_find_oldest(MDBX_txn *txn) { + mdbx_tassert(txn, (txn->mt_flags & MDBX_RDONLY) == 0); + const MDBX_env *env = txn->mt_env; + MDBX_lockinfo *const lck = env->me_lck; + + txnid_t oldest = mdbx_reclaiming_detent(env); + mdbx_tassert(txn, oldest <= txn->mt_txnid - 1); + const txnid_t last_oldest = lck->mti_oldest; + mdbx_tassert(txn, oldest >= last_oldest); + if (last_oldest == oldest || + lck->mti_reader_finished_flag == MDBX_STRING_TETRAD("None")) + return last_oldest; + + const unsigned snap_nreaders = lck->mti_numreaders; + lck->mti_reader_finished_flag = MDBX_STRING_TETRAD("None"); + for (unsigned i = 0; i < snap_nreaders; ++i) { + if (lck->mti_readers[i].mr_pid) { + /* mdbx_jitter4testing(true); */ + const txnid_t snap = lck->mti_readers[i].mr_txnid; + if (oldest > snap && last_oldest <= /* ignore pending updates */ snap) { + oldest = snap; + if (oldest == last_oldest) + break; + } + } + } + + if (oldest != last_oldest) { + mdbx_notice("update oldest %" PRIaTXN " -> %" PRIaTXN, last_oldest, oldest); + mdbx_tassert(txn, oldest >= lck->mti_oldest); + lck->mti_oldest = oldest; + } + return oldest; +} + +/* Add a page to the txn's dirty list */ +static void mdbx_page_dirty(MDBX_txn *txn, MDBX_page *mp) { + MDBX_ID2 mid; + int rc, (*insert)(MDBX_ID2L, MDBX_ID2 *); + + if (txn->mt_flags & MDBX_TXN_WRITEMAP) { + insert = mdbx_mid2l_append; + } else { + insert = mdbx_mid2l_insert; + } + mid.mid = mp->mp_pgno; + mid.mptr = mp; + rc = insert(txn->mt_rw_dirtylist, &mid); + mdbx_tassert(txn, rc == 0); + txn->mt_dirtyroom--; +} + +static int mdbx_mapresize(MDBX_env *env, const pgno_t size_pgno, + const pgno_t limit_pgno) { +#ifdef USE_VALGRIND + const size_t prev_mapsize = env->me_mapsize; + void *const prev_mapaddr = env->me_map; +#endif + + const size_t limit_bytes = pgno_align2os_bytes(env, limit_pgno); + const size_t size_bytes = pgno_align2os_bytes(env, size_pgno); + + mdbx_info("resize datafile/mapping: " + "present %" PRIuPTR " -> %" PRIuPTR ", " + "limit %" PRIuPTR " -> %" PRIuPTR, + env->me_dbgeo.now, size_bytes, env->me_dbgeo.upper, limit_bytes); + + mdbx_assert(env, limit_bytes >= size_bytes); + mdbx_assert(env, bytes2pgno(env, size_bytes) == size_pgno); + mdbx_assert(env, bytes2pgno(env, limit_bytes) == limit_pgno); + const int rc = + mdbx_mresize(env->me_flags, &env->me_dxb_mmap, size_bytes, limit_bytes); + + if (rc == MDBX_SUCCESS) { + env->me_dbgeo.now = size_bytes; + env->me_dbgeo.upper = limit_bytes; + } else if (rc != MDBX_RESULT_TRUE) { + mdbx_error("failed resize datafile/mapping: " + "present %" PRIuPTR " -> %" PRIuPTR ", " + "limit %" PRIuPTR " -> %" PRIuPTR ", errcode %d", + env->me_dbgeo.now, size_bytes, env->me_dbgeo.upper, limit_bytes, + rc); + return rc; + } else { + mdbx_notice("unable resize datafile/mapping: " + "present %" PRIuPTR " -> %" PRIuPTR ", " + "limit %" PRIuPTR " -> %" PRIuPTR ", errcode %d", + env->me_dbgeo.now, size_bytes, env->me_dbgeo.upper, limit_bytes, + rc); + } + +#ifdef USE_VALGRIND + if (prev_mapsize != env->me_mapsize || prev_mapaddr != env->me_map) { + VALGRIND_DISCARD(env->me_valgrind_handle); + env->me_valgrind_handle = 0; + if (env->me_mapsize) + env->me_valgrind_handle = + VALGRIND_CREATE_BLOCK(env->me_map, env->me_mapsize, "mdbx"); + } +#endif + + if (env->me_txn) { + mdbx_tassert(env->me_txn, size_pgno >= env->me_txn->mt_next_pgno); + env->me_txn->mt_end_pgno = size_pgno; + } + return MDBX_SUCCESS; +} + +/* Allocate page numbers and memory for writing. 
Maintain me_last_reclaimed, + * me_reclaimed_pglist and mt_next_pgno. Set MDBX_TXN_ERROR on failure. + * + * If there are free pages available from older transactions, they + * are re-used first. Otherwise allocate a new page at mt_next_pgno. + * Do not modify the freeDB, just merge freeDB records into me_reclaimed_pglist + * and move me_last_reclaimed to say which records were consumed. Only this + * function can create me_reclaimed_pglist and move + * me_last_reclaimed/mt_next_pgno. + * + * [in] mc cursor A cursor handle identifying the transaction and + * database for which we are allocating. + * [in] num the number of pages to allocate. + * [out] mp Address of the allocated page(s). Requests for multiple pages + * will always be satisfied by a single contiguous chunk of memory. + * + * Returns 0 on success, non-zero on failure. */ + +#define MDBX_ALLOC_CACHE 1 +#define MDBX_ALLOC_GC 2 +#define MDBX_ALLOC_NEW 4 +#define MDBX_ALLOC_KICK 8 +#define MDBX_ALLOC_ALL \ + (MDBX_ALLOC_CACHE | MDBX_ALLOC_GC | MDBX_ALLOC_NEW | MDBX_ALLOC_KICK) + +static int mdbx_page_alloc(MDBX_cursor *mc, unsigned num, MDBX_page **mp, + int flags) { + int rc; + MDBX_txn *txn = mc->mc_txn; + MDBX_env *env = txn->mt_env; + MDBX_page *np; + + if (likely(flags & MDBX_ALLOC_GC)) { + flags |= env->me_flags & (MDBX_COALESCE | MDBX_LIFORECLAIM); + if (unlikely(mc->mc_flags & C_RECLAIMING)) { + /* If mc is updating the freeDB, then the freelist cannot play + * catch-up with itself by growing while trying to save it. */ + flags &= + ~(MDBX_ALLOC_GC | MDBX_ALLOC_KICK | MDBX_COALESCE | MDBX_LIFORECLAIM); + } + } + + if (likely(flags & MDBX_ALLOC_CACHE)) { + /* If there are any loose pages, just use them */ + assert(mp && num); + if (likely(num == 1 && txn->mt_loose_pages)) { + np = txn->mt_loose_pages; + txn->mt_loose_pages = NEXT_LOOSE_PAGE(np); + txn->mt_loose_count--; + mdbx_debug("db %d use loose page %" PRIaPGNO, DDBI(mc), np->mp_pgno); + ASAN_UNPOISON_MEMORY_REGION(np, env->me_psize); + mdbx_tassert(txn, np->mp_pgno < txn->mt_next_pgno); + mdbx_ensure(env, np->mp_pgno >= NUM_METAS); + *mp = np; + return MDBX_SUCCESS; + } + } + + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + pgno_t pgno, *repg_list = env->me_reclaimed_pglist; + unsigned repg_pos = 0, repg_len = repg_list ? repg_list[0] : 0; + txnid_t oldest = 0, last = 0; + const unsigned wanna_range = num - 1; + + while (1) { /* oom-kick retry loop */ + /* If our dirty list is already full, we can't do anything */ + if (unlikely(txn->mt_dirtyroom == 0)) { + rc = MDBX_TXN_FULL; + goto fail; + } + + MDBX_cursor recur; + for (MDBX_cursor_op op = MDBX_FIRST;; + op = (flags & MDBX_LIFORECLAIM) ? MDBX_PREV : MDBX_NEXT) { + MDBX_val key, data; + + /* Seek a big enough contiguous page range. + * Prefer pages with lower pgno. 
*/ + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + if (likely(flags & MDBX_ALLOC_CACHE) && repg_len > wanna_range && + (!(flags & MDBX_COALESCE) || op == MDBX_FIRST)) { +#if MDBX_PNL_ASCENDING + for (repg_pos = 1; repg_pos <= repg_len - wanna_range; ++repg_pos) { + pgno = repg_list[repg_pos]; + if (likely(repg_list[repg_pos + wanna_range - 1] == + pgno + wanna_range - 1)) + goto done; + } +#else + repg_pos = repg_len; + do { + pgno = repg_list[repg_pos]; + if (likely(repg_list[repg_pos - wanna_range] == pgno + wanna_range)) + goto done; + } while (--repg_pos > wanna_range); +#endif /* MDBX_PNL sort-order */ + } + + if (op == MDBX_FIRST) { /* 1st iteration, setup cursor, etc */ + if (unlikely(!(flags & MDBX_ALLOC_GC))) + break /* reclaiming is prohibited for now */; + + /* Prepare to fetch more and coalesce */ + oldest = (flags & MDBX_LIFORECLAIM) ? mdbx_find_oldest(txn) + : env->me_oldest[0]; + mdbx_cursor_init(&recur, txn, FREE_DBI, NULL); + if (flags & MDBX_LIFORECLAIM) { + /* Begin from oldest reader if any */ + if (oldest > 2) { + last = oldest - 1; + op = MDBX_SET_RANGE; + } + } else if (env->me_last_reclaimed) { + /* Continue lookup from env->me_last_reclaimed to oldest reader */ + last = env->me_last_reclaimed; + op = MDBX_SET_RANGE; + } + + key.iov_base = &last; + key.iov_len = sizeof(last); + } + + if (!(flags & MDBX_LIFORECLAIM)) { + /* Do not try fetch more if the record will be too recent */ + if (op != MDBX_FIRST && ++last >= oldest) { + oldest = mdbx_find_oldest(txn); + if (oldest <= last) + break; + } + } + + rc = mdbx_cursor_get(&recur, &key, NULL, op); + if (rc == MDBX_NOTFOUND && (flags & MDBX_LIFORECLAIM)) { + if (op == MDBX_SET_RANGE) + continue; + if (oldest < mdbx_find_oldest(txn)) { + oldest = *env->me_oldest; + last = oldest - 1; + key.iov_base = &last; + key.iov_len = sizeof(last); + op = MDBX_SET_RANGE; + rc = mdbx_cursor_get(&recur, &key, NULL, op); + } + } + if (unlikely(rc)) { + if (rc == MDBX_NOTFOUND) + break; + goto fail; + } + + last = *(txnid_t *)key.iov_base; + if (oldest <= last) { + oldest = mdbx_find_oldest(txn); + if (oldest <= last) { + if (flags & MDBX_LIFORECLAIM) + continue; + break; + } + } + + if (flags & MDBX_LIFORECLAIM) { + /* skip IDs of records that already reclaimed */ + if (txn->mt_lifo_reclaimed) { + unsigned i; + for (i = (unsigned)txn->mt_lifo_reclaimed[0]; i > 0; --i) + if (txn->mt_lifo_reclaimed[i] == last) + break; + if (i) + continue; + } + } + + /* Reading next FreeDB record */ + np = recur.mc_pg[recur.mc_top]; + MDBX_node *leaf = NODEPTR(np, recur.mc_ki[recur.mc_top]); + if (unlikely((rc = mdbx_node_read(&recur, leaf, &data)) != MDBX_SUCCESS)) + goto fail; + + if ((flags & MDBX_LIFORECLAIM) && !txn->mt_lifo_reclaimed) { + txn->mt_lifo_reclaimed = mdbx_txl_alloc(); + if (unlikely(!txn->mt_lifo_reclaimed)) { + rc = MDBX_ENOMEM; + goto fail; + } + } + + /* Append PNL from FreeDB record to me_reclaimed_pglist */ + pgno_t *re_pnl = (pgno_t *)data.iov_base; + mdbx_tassert(txn, re_pnl[0] == 0 || + data.iov_len == (re_pnl[0] + 1) * sizeof(pgno_t)); + mdbx_tassert(txn, mdbx_pnl_check(re_pnl)); + repg_pos = re_pnl[0]; + if (!repg_list) { + if (unlikely(!(env->me_reclaimed_pglist = repg_list = + mdbx_pnl_alloc(repg_pos)))) { + rc = MDBX_ENOMEM; + goto fail; + } + } else { + if (unlikely( + (rc = mdbx_pnl_need(&env->me_reclaimed_pglist, repg_pos)) != 0)) + goto fail; + repg_list = env->me_reclaimed_pglist; + } + + /* Remember ID of FreeDB record */ + if (flags & MDBX_LIFORECLAIM) { + if ((rc = 
mdbx_txl_append(&txn->mt_lifo_reclaimed, last)) != 0) + goto fail; + } + env->me_last_reclaimed = last; + + if (mdbx_debug_enabled(MDBX_DBG_EXTRA)) { + mdbx_debug_extra("PNL read txn %" PRIaTXN " root %" PRIaPGNO + " num %u, PNL", + last, txn->mt_dbs[FREE_DBI].md_root, repg_pos); + unsigned i; + for (i = repg_pos; i; i--) + mdbx_debug_extra_print(" %" PRIaPGNO "", re_pnl[i]); + mdbx_debug_extra_print("\n"); + } + + /* Merge in descending sorted order */ + mdbx_pnl_xmerge(repg_list, re_pnl); + repg_len = repg_list[0]; + if (unlikely((flags & MDBX_ALLOC_CACHE) == 0)) { + /* Done for a kick-reclaim mode, actually no page needed */ + return MDBX_SUCCESS; + } + + mdbx_tassert(txn, + repg_len == 0 || repg_list[repg_len] < txn->mt_next_pgno); + if (repg_len) { + /* Refund suitable pages into "unallocated" space */ + pgno_t tail = txn->mt_next_pgno; + pgno_t *const begin = repg_list + 1; + pgno_t *const end = begin + repg_len; + pgno_t *higest; +#if MDBX_PNL_ASCENDING + for (higest = end; --higest >= begin;) { +#else + for (higest = begin; higest < end; ++higest) { +#endif /* MDBX_PNL sort-order */ + mdbx_tassert(txn, *higest >= NUM_METAS && *higest < tail); + if (*higest != tail - 1) + break; + tail -= 1; + } + if (tail != txn->mt_next_pgno) { +#if MDBX_PNL_ASCENDING + repg_len = (unsigned)(higest + 1 - begin); +#else + repg_len -= (unsigned)(higest - begin); + for (pgno_t *move = begin; higest < end; ++move, ++higest) + *move = *higest; +#endif /* MDBX_PNL sort-order */ + repg_list[0] = repg_len; + mdbx_info("refunded %" PRIaPGNO " pages: %" PRIaPGNO " -> %" PRIaPGNO, + tail - txn->mt_next_pgno, tail, txn->mt_next_pgno); + txn->mt_next_pgno = tail; + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + } + } + + /* Don't try to coalesce too much. */ + if (repg_len > MDBX_PNL_UM_SIZE / 2) + break; + if (flags & MDBX_COALESCE) { + if (repg_len /* current size */ >= env->me_maxfree_1pg / 2 || + repg_pos /* prev size */ >= env->me_maxfree_1pg / 4) + flags &= ~MDBX_COALESCE; + } + } + + if ((flags & (MDBX_COALESCE | MDBX_ALLOC_CACHE)) == + (MDBX_COALESCE | MDBX_ALLOC_CACHE) && + repg_len > wanna_range) { +#if MDBX_PNL_ASCENDING + for (repg_pos = 1; repg_pos <= repg_len - wanna_range; ++repg_pos) { + pgno = repg_list[repg_pos]; + if (likely(repg_list[repg_pos + wanna_range - 1] == + pgno + wanna_range - 1)) + goto done; + } +#else + repg_pos = repg_len; + do { + pgno = repg_list[repg_pos]; + if (likely(repg_list[repg_pos - wanna_range] == pgno + wanna_range)) + goto done; + } while (--repg_pos > wanna_range); +#endif /* MDBX_PNL sort-order */ + } + + /* Use new pages from the map when nothing suitable in the freeDB */ + repg_pos = 0; + pgno = txn->mt_next_pgno; + rc = MDBX_MAP_FULL; + const pgno_t next = pgno_add(pgno, num); + if (likely(next <= txn->mt_end_pgno)) { + rc = MDBX_NOTFOUND; + if (likely(flags & MDBX_ALLOC_NEW)) + goto done; + } + + const MDBX_meta *head = mdbx_meta_head(env); + if ((flags & MDBX_ALLOC_GC) && + ((flags & MDBX_ALLOC_KICK) || rc == MDBX_MAP_FULL)) { + MDBX_meta *steady = mdbx_meta_steady(env); + + if (oldest == mdbx_meta_txnid_stable(env, steady) && + !META_IS_STEADY(head) && META_IS_STEADY(steady)) { + /* LY: Here an oom was happened: + * - all pages had allocated; + * - reclaiming was stopped at the last steady-sync; + * - the head-sync is weak. + * Now we need make a sync to resume reclaiming. If both + * MDBX_NOSYNC and MDBX_MAPASYNC flags are set, then assume that + * utterly no-sync write mode was requested. 
In such case + * don't make a steady-sync, but only a legacy-mode checkpoint, + * just for resume reclaiming only, not for data consistency. */ + + mdbx_debug("kick-gc: head %" PRIaTXN "-%s, tail %" PRIaTXN + "-%s, oldest %" PRIaTXN "", + mdbx_meta_txnid_stable(env, head), mdbx_durable_str(head), + mdbx_meta_txnid_stable(env, steady), + mdbx_durable_str(steady), oldest); + + const unsigned syncflags = F_ISSET(env->me_flags, MDBX_UTTERLY_NOSYNC) + ? env->me_flags + : env->me_flags & MDBX_WRITEMAP; + MDBX_meta meta = *head; + if (mdbx_sync_locked(env, syncflags, &meta) == MDBX_SUCCESS) { + txnid_t snap = mdbx_find_oldest(txn); + if (snap > oldest) + continue; + } + } + + if (rc == MDBX_MAP_FULL && oldest < txn->mt_txnid - 1) { + if (mdbx_oomkick(env, oldest) > oldest) + continue; + } + } + + if (rc == MDBX_MAP_FULL && next < head->mm_geo.upper) { + mdbx_assert(env, next > txn->mt_end_pgno); + pgno_t aligned = pgno_align2os_pgno( + env, pgno_add(next, head->mm_geo.grow - next % head->mm_geo.grow)); + + if (aligned > head->mm_geo.upper) + aligned = head->mm_geo.upper; + mdbx_assert(env, aligned > txn->mt_end_pgno); + + mdbx_info("try growth datafile to %" PRIaPGNO " pages (+%" PRIaPGNO ")", + aligned, aligned - txn->mt_end_pgno); + rc = mdbx_mapresize(env, aligned, head->mm_geo.upper); + if (rc == MDBX_SUCCESS) { + mdbx_tassert(env->me_txn, txn->mt_end_pgno >= next); + if (!mp) + return rc; + goto done; + } + + mdbx_warning("unable growth datafile to %" PRIaPGNO "pages (+%" PRIaPGNO + "), errcode %d", + aligned, aligned - txn->mt_end_pgno, rc); + } + + fail: + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + if (mp) { + *mp = NULL; + txn->mt_flags |= MDBX_TXN_ERROR; + } + assert(rc); + return rc; + } + +done: + mdbx_tassert(txn, mp && num); + mdbx_ensure(env, pgno >= NUM_METAS); + if (env->me_flags & MDBX_WRITEMAP) { + np = pgno2page(env, pgno); + /* LY: reset no-access flag from mdbx_kill_page() */ + VALGRIND_MAKE_MEM_UNDEFINED(np, pgno2bytes(env, num)); + ASAN_UNPOISON_MEMORY_REGION(np, pgno2bytes(env, num)); + } else { + if (unlikely(!(np = mdbx_page_malloc(txn, num)))) { + rc = MDBX_ENOMEM; + goto fail; + } + } + + if (repg_pos) { + mdbx_tassert(txn, pgno < txn->mt_next_pgno); + mdbx_tassert(txn, pgno == repg_list[repg_pos]); + /* Cutoff allocated pages from me_reclaimed_pglist */ + repg_list[0] = repg_len -= num; + for (unsigned i = repg_pos - num; i < repg_len;) + repg_list[++i] = repg_list[++repg_pos]; + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + } else { + txn->mt_next_pgno = pgno + num; + mdbx_assert(env, txn->mt_next_pgno <= txn->mt_end_pgno); + } + + if (env->me_flags & MDBX_PAGEPERTURB) + memset(np, 0x71 /* 'q', 113 */, pgno2bytes(env, num)); + VALGRIND_MAKE_MEM_UNDEFINED(np, pgno2bytes(env, num)); + + np->mp_pgno = pgno; + np->mp_leaf2_ksize = 0; + np->mp_flags = 0; + np->mp_pages = num; + mdbx_page_dirty(txn, np); + *mp = np; + + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + return MDBX_SUCCESS; +} + +/* Copy the used portions of a non-overflow page. + * [in] dst page to copy into + * [in] src page to copy from + * [in] psize size of a page */ +static void mdbx_page_copy(MDBX_page *dst, MDBX_page *src, unsigned psize) { + STATIC_ASSERT(UINT16_MAX > MAX_PAGESIZE - PAGEHDRSZ); + STATIC_ASSERT(MIN_PAGESIZE > PAGEHDRSZ + NODESIZE * 42); + enum { Align = sizeof(pgno_t) }; + indx_t upper = src->mp_upper, lower = src->mp_lower, unused = upper - lower; + + /* If page isn't full, just copy the used portion. 
Adjust + * alignment so memcpy may copy words instead of bytes. */ + if ((unused &= -Align) && !IS_LEAF2(src)) { + upper = (upper + PAGEHDRSZ) & -Align; + memcpy(dst, src, (lower + PAGEHDRSZ + (Align - 1)) & -Align); + memcpy((pgno_t *)((char *)dst + upper), (pgno_t *)((char *)src + upper), + psize - upper); + } else { + memcpy(dst, src, psize - unused); + } +} + +/* Pull a page off the txn's spill list, if present. + * + * If a page being referenced was spilled to disk in this txn, bring + * it back and make it dirty/writable again. + * + * [in] txn the transaction handle. + * [in] mp the page being referenced. It must not be dirty. + * [out] ret the writable page, if any. + * ret is unchanged if mp wasn't spilled. */ +static int mdbx_page_unspill(MDBX_txn *txn, MDBX_page *mp, MDBX_page **ret) { + MDBX_env *env = txn->mt_env; + const MDBX_txn *tx2; + unsigned x; + pgno_t pgno = mp->mp_pgno, pn = pgno << 1; + + for (tx2 = txn; tx2; tx2 = tx2->mt_parent) { + if (!tx2->mt_spill_pages) + continue; + x = mdbx_pnl_search(tx2->mt_spill_pages, pn); + if (x <= tx2->mt_spill_pages[0] && tx2->mt_spill_pages[x] == pn) { + MDBX_page *np; + int num; + if (txn->mt_dirtyroom == 0) + return MDBX_TXN_FULL; + num = IS_OVERFLOW(mp) ? mp->mp_pages : 1; + if (env->me_flags & MDBX_WRITEMAP) { + np = mp; + } else { + np = mdbx_page_malloc(txn, num); + if (unlikely(!np)) + return MDBX_ENOMEM; + if (unlikely(num > 1)) + memcpy(np, mp, pgno2bytes(env, num)); + else + mdbx_page_copy(np, mp, env->me_psize); + } + mdbx_debug("unspill page %" PRIaPGNO, mp->mp_pgno); + if (tx2 == txn) { + /* If in current txn, this page is no longer spilled. + * If it happens to be the last page, truncate the spill list. + * Otherwise mark it as deleted by setting the LSB. */ + if (x == txn->mt_spill_pages[0]) + txn->mt_spill_pages[0]--; + else + txn->mt_spill_pages[x] |= 1; + } /* otherwise, if belonging to a parent txn, the + * page remains spilled until child commits */ + + mdbx_page_dirty(txn, np); + np->mp_flags |= P_DIRTY; + *ret = np; + break; + } + } + return MDBX_SUCCESS; +} + +/* Touch a page: make it dirty and re-insert into tree with updated pgno. + * Set MDBX_TXN_ERROR on failure. + * + * [in] mc cursor pointing to the page to be touched + * + * Returns 0 on success, non-zero on failure. */ +static int mdbx_page_touch(MDBX_cursor *mc) { + MDBX_page *mp = mc->mc_pg[mc->mc_top], *np; + MDBX_txn *txn = mc->mc_txn; + MDBX_cursor *m2, *m3; + pgno_t pgno; + int rc; + + mdbx_cassert(mc, !IS_OVERFLOW(mp)); + if (!F_ISSET(mp->mp_flags, P_DIRTY)) { + if (txn->mt_flags & MDBX_TXN_SPILLS) { + np = NULL; + rc = mdbx_page_unspill(txn, mp, &np); + if (unlikely(rc)) + goto fail; + if (likely(np)) + goto done; + } + + if (unlikely((rc = mdbx_pnl_need(&txn->mt_befree_pages, 1)) || + (rc = mdbx_page_alloc(mc, 1, &np, MDBX_ALLOC_ALL)))) + goto fail; + pgno = np->mp_pgno; + mdbx_debug("touched db %d page %" PRIaPGNO " -> %" PRIaPGNO, DDBI(mc), + mp->mp_pgno, pgno); + mdbx_cassert(mc, mp->mp_pgno != pgno); + mdbx_pnl_xappend(txn->mt_befree_pages, mp->mp_pgno); + /* Update the parent page, if any, to point to the new page */ + if (mc->mc_top) { + MDBX_page *parent = mc->mc_pg[mc->mc_top - 1]; + MDBX_node *node = NODEPTR(parent, mc->mc_ki[mc->mc_top - 1]); + SETPGNO(node, pgno); + } else { + mc->mc_db->md_root = pgno; + } + } else if (txn->mt_parent && !IS_SUBP(mp)) { + MDBX_ID2 mid, *dl = txn->mt_rw_dirtylist; + pgno = mp->mp_pgno; + /* If txn has a parent, make sure the page is in our dirty list. 
*/ + if (dl[0].mid) { + unsigned x = mdbx_mid2l_search(dl, pgno); + if (x <= dl[0].mid && dl[x].mid == pgno) { + if (unlikely(mp != dl[x].mptr)) { /* bad cursor? */ + mdbx_error("wrong page 0x%p #%" PRIaPGNO + " in the dirtylist[%d], expecting %p", + dl[x].mptr, pgno, x, mp); + mc->mc_flags &= ~(C_INITIALIZED | C_EOF); + txn->mt_flags |= MDBX_TXN_ERROR; + return MDBX_PROBLEM; + } + return MDBX_SUCCESS; + } + } + + mdbx_debug("clone db %d page %" PRIaPGNO, DDBI(mc), mp->mp_pgno); + mdbx_cassert(mc, dl[0].mid < MDBX_PNL_UM_MAX); + /* No - copy it */ + np = mdbx_page_malloc(txn, 1); + if (unlikely(!np)) + return MDBX_ENOMEM; + mid.mid = pgno; + mid.mptr = np; + rc = mdbx_mid2l_insert(dl, &mid); + mdbx_cassert(mc, rc == 0); + } else { + return MDBX_SUCCESS; + } + + mdbx_page_copy(np, mp, txn->mt_env->me_psize); + np->mp_pgno = pgno; + np->mp_flags |= P_DIRTY; + +done: + /* Adjust cursors pointing to mp */ + mc->mc_pg[mc->mc_top] = np; + m2 = txn->mt_cursors[mc->mc_dbi]; + if (mc->mc_flags & C_SUB) { + for (; m2; m2 = m2->mc_next) { + m3 = &m2->mc_xcursor->mx_cursor; + if (m3->mc_snum < mc->mc_snum) + continue; + if (m3->mc_pg[mc->mc_top] == mp) + m3->mc_pg[mc->mc_top] = np; + } + } else { + for (; m2; m2 = m2->mc_next) { + if (m2->mc_snum < mc->mc_snum) + continue; + if (m2 == mc) + continue; + if (m2->mc_pg[mc->mc_top] == mp) { + m2->mc_pg[mc->mc_top] = np; + if (XCURSOR_INITED(m2) && IS_LEAF(np)) + XCURSOR_REFRESH(m2, np, m2->mc_ki[mc->mc_top]); + } + } + } + return MDBX_SUCCESS; + +fail: + txn->mt_flags |= MDBX_TXN_ERROR; + return rc; +} + +int mdbx_env_sync(MDBX_env *env, int force) { + if (unlikely(!env)) + return MDBX_EINVAL; + + if (unlikely(env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EBADSIGN; + + unsigned flags = env->me_flags & ~MDBX_NOMETASYNC; + if (unlikely(flags & (MDBX_RDONLY | MDBX_FATAL_ERROR))) + return MDBX_EACCESS; + + if (unlikely(!env->me_lck)) + return MDBX_PANIC; + + const bool outside_txn = + (!env->me_txn0 || env->me_txn0->mt_owner != mdbx_thread_self()); + + if (outside_txn) { + int rc = mdbx_txn_lock(env); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + } + + MDBX_meta *head = mdbx_meta_head(env); + if (!META_IS_STEADY(head) || env->me_sync_pending) { + + if (force || (env->me_sync_threshold && + env->me_sync_pending >= env->me_sync_threshold)) + flags &= MDBX_WRITEMAP /* clear flags for full steady sync */; + + if (outside_txn && + env->me_sync_pending > + pgno2bytes(env, 16 /* FIXME: define threshold */) && + (flags & MDBX_NOSYNC) == 0) { + assert(((flags ^ env->me_flags) & MDBX_WRITEMAP) == 0); + const size_t usedbytes = pgno_align2os_bytes(env, head->mm_geo.next); + + mdbx_txn_unlock(env); + + /* LY: pre-sync without holding lock to reduce latency for writer(s) */ + int rc = (flags & MDBX_WRITEMAP) + ? mdbx_msync(&env->me_dxb_mmap, 0, usedbytes, + flags & MDBX_MAPASYNC) + : mdbx_filesync(env->me_fd, false); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + rc = mdbx_txn_lock(env); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + /* LY: head may be changed. 
*/ + head = mdbx_meta_head(env); + } + + if (!META_IS_STEADY(head) || env->me_sync_pending) { + mdbx_debug("meta-head %" PRIaPGNO ", %s, sync_pending %" PRIuPTR, + container_of(head, MDBX_page, mp_data)->mp_pgno, + mdbx_durable_str(head), env->me_sync_pending); + MDBX_meta meta = *head; + int rc = mdbx_sync_locked(env, flags | MDBX_SHRINK_ALLOWED, &meta); + if (unlikely(rc != MDBX_SUCCESS)) { + if (outside_txn) + mdbx_txn_unlock(env); + return rc; + } + } + } + + if (outside_txn) + mdbx_txn_unlock(env); + return MDBX_SUCCESS; +} + +/* Back up parent txn's cursors, then grab the originals for tracking */ +static int mdbx_cursor_shadow(MDBX_txn *src, MDBX_txn *dst) { + MDBX_cursor *mc, *bk; + MDBX_xcursor *mx; + size_t size; + int i; + + for (i = src->mt_numdbs; --i >= 0;) { + if ((mc = src->mt_cursors[i]) != NULL) { + size = sizeof(MDBX_cursor); + if (mc->mc_xcursor) + size += sizeof(MDBX_xcursor); + for (; mc; mc = bk->mc_next) { + bk = malloc(size); + if (unlikely(!bk)) + return MDBX_ENOMEM; + *bk = *mc; + mc->mc_backup = bk; + mc->mc_db = &dst->mt_dbs[i]; + /* Kill pointers into src to reduce abuse: The + * user may not use mc until dst ends. But we need a valid + * txn pointer here for cursor fixups to keep working. */ + mc->mc_txn = dst; + mc->mc_dbflag = &dst->mt_dbflags[i]; + if ((mx = mc->mc_xcursor) != NULL) { + *(MDBX_xcursor *)(bk + 1) = *mx; + mx->mx_cursor.mc_txn = dst; + } + mc->mc_next = dst->mt_cursors[i]; + dst->mt_cursors[i] = mc; + } + } + } + return MDBX_SUCCESS; +} + +/* Close this write txn's cursors, give parent txn's cursors back to parent. + * + * [in] txn the transaction handle. + * [in] merge true to keep changes to parent cursors, false to revert. + * + * Returns 0 on success, non-zero on failure. */ +static void mdbx_cursors_eot(MDBX_txn *txn, unsigned merge) { + MDBX_cursor **cursors = txn->mt_cursors, *mc, *next, *bk; + MDBX_xcursor *mx; + int i; + + for (i = txn->mt_numdbs; --i >= 0;) { + for (mc = cursors[i]; mc; mc = next) { + unsigned stage = mc->mc_signature; + mdbx_ensure(NULL, + stage == MDBX_MC_SIGNATURE || stage == MDBX_MC_WAIT4EOT); + next = mc->mc_next; + if ((bk = mc->mc_backup) != NULL) { + if (merge) { + /* Commit changes to parent txn */ + mc->mc_next = bk->mc_next; + mc->mc_backup = bk->mc_backup; + mc->mc_txn = bk->mc_txn; + mc->mc_db = bk->mc_db; + mc->mc_dbflag = bk->mc_dbflag; + if ((mx = mc->mc_xcursor) != NULL) + mx->mx_cursor.mc_txn = bk->mc_txn; + } else { + /* Abort nested txn */ + *mc = *bk; + if ((mx = mc->mc_xcursor) != NULL) + *mx = *(MDBX_xcursor *)(bk + 1); + } + bk->mc_signature = 0; + free(bk); + } + if (stage == MDBX_MC_WAIT4EOT) { + mc->mc_signature = 0; + free(mc); + } else { + mc->mc_signature = MDBX_MC_READY4CLOSE; + mc->mc_flags = 0 /* reset C_UNTRACK */; + } + } + cursors[i] = NULL; + } +} + +/* Common code for mdbx_txn_begin() and mdbx_txn_renew(). 
*/ +static int mdbx_txn_renew0(MDBX_txn *txn, unsigned flags) { + MDBX_env *env = txn->mt_env; + int rc; + + if (unlikely(env->me_pid != mdbx_getpid())) { + env->me_flags |= MDBX_FATAL_ERROR; + return MDBX_PANIC; + } + + pgno_t upper_pgno = 0; + if (flags & MDBX_TXN_RDONLY) { + txn->mt_flags = MDBX_TXN_RDONLY; + MDBX_reader *r = txn->mt_ro_reader; + if (likely(env->me_flags & MDBX_ENV_TXKEY)) { + mdbx_assert(env, !(env->me_flags & MDBX_NOTLS)); + r = mdbx_thread_rthc_get(env->me_txkey); + if (likely(r)) { + mdbx_assert(env, r->mr_pid == env->me_pid); + mdbx_assert(env, r->mr_tid == mdbx_thread_self()); + } + } else { + mdbx_assert(env, !env->me_lck || (env->me_flags & MDBX_NOTLS)); + } + + if (likely(r)) { + if (unlikely(r->mr_pid != env->me_pid || r->mr_txnid != ~(txnid_t)0)) + return MDBX_BAD_RSLOT; + } else if (env->me_lck) { + unsigned slot, nreaders; + const mdbx_pid_t pid = env->me_pid; + const mdbx_tid_t tid = mdbx_thread_self(); + mdbx_assert(env, env->me_lck->mti_magic_and_version == MDBX_LOCK_MAGIC); + mdbx_assert(env, env->me_lck->mti_os_and_format == MDBX_LOCK_FORMAT); + + rc = mdbx_rdt_lock(env); + if (unlikely(MDBX_IS_ERROR(rc))) + return rc; + rc = MDBX_SUCCESS; + + if (unlikely(env->me_live_reader != pid)) { + rc = mdbx_rpid_set(env); + if (unlikely(rc != MDBX_SUCCESS)) { + mdbx_rdt_unlock(env); + return rc; + } + env->me_live_reader = pid; + } + + while (1) { + nreaders = env->me_lck->mti_numreaders; + for (slot = 0; slot < nreaders; slot++) + if (env->me_lck->mti_readers[slot].mr_pid == 0) + break; + + if (likely(slot < env->me_maxreaders)) + break; + + rc = mdbx_reader_check0(env, true, NULL); + if (rc != MDBX_RESULT_TRUE) { + mdbx_rdt_unlock(env); + return (rc == MDBX_SUCCESS) ? MDBX_READERS_FULL : rc; + } + } + + STATIC_ASSERT(sizeof(MDBX_reader) == MDBX_CACHELINE_SIZE); + STATIC_ASSERT( + offsetof(MDBX_lockinfo, mti_readers) % MDBX_CACHELINE_SIZE == 0); + r = &env->me_lck->mti_readers[slot]; + /* Claim the reader slot, carefully since other code + * uses the reader table un-mutexed: First reset the + * slot, next publish it in mtb.mti_numreaders. After + * that, it is safe for mdbx_env_close() to touch it. + * When it will be closed, we can finally claim it. */ + r->mr_pid = 0; + r->mr_txnid = ~(txnid_t)0; + r->mr_tid = tid; + mdbx_coherent_barrier(); + if (slot == nreaders) + env->me_lck->mti_numreaders = ++nreaders; + if (env->me_close_readers < nreaders) + env->me_close_readers = nreaders; + r->mr_pid = pid; + mdbx_rdt_unlock(env); + + if (likely(env->me_flags & MDBX_ENV_TXKEY)) + mdbx_thread_rthc_set(env->me_txkey, r); + } + + while (1) { + MDBX_meta *const meta = mdbx_meta_head(env); + mdbx_jitter4testing(false); + const txnid_t snap = mdbx_meta_txnid_fluid(env, meta); + mdbx_jitter4testing(false); + if (r) { + r->mr_txnid = snap; + mdbx_jitter4testing(false); + mdbx_assert(env, r->mr_pid == mdbx_getpid()); + mdbx_assert(env, r->mr_tid == mdbx_thread_self()); + mdbx_assert(env, r->mr_txnid == snap); + mdbx_coherent_barrier(); + } + mdbx_jitter4testing(true); + + /* Snap the state from current meta-head */ + txn->mt_txnid = snap; + txn->mt_next_pgno = meta->mm_geo.next; + txn->mt_end_pgno = meta->mm_geo.now; + upper_pgno = meta->mm_geo.upper; + memcpy(txn->mt_dbs, meta->mm_dbs, CORE_DBS * sizeof(MDBX_db)); + txn->mt_canary = meta->mm_canary; + + /* LY: Retry on a race, ITS#7970. 
*/ + mdbx_compiler_barrier(); + if (likely(meta == mdbx_meta_head(env) && + snap == mdbx_meta_txnid_fluid(env, meta) && + snap >= env->me_oldest[0])) { + mdbx_jitter4testing(false); + break; + } + if (env->me_lck) + env->me_lck->mti_reader_finished_flag = true; + } + + if (unlikely(txn->mt_txnid == 0)) { + mdbx_error("environment corrupted by died writer, must shutdown!"); + rc = MDBX_WANNA_RECOVERY; + goto bailout; + } + mdbx_assert(env, txn->mt_txnid >= *env->me_oldest); + txn->mt_ro_reader = r; + txn->mt_dbxs = env->me_dbxs; /* mostly static anyway */ + } else { + /* Not yet touching txn == env->me_txn0, it may be active */ + mdbx_jitter4testing(false); + rc = mdbx_txn_lock(env); + if (unlikely(rc)) + return rc; + + mdbx_jitter4testing(false); + MDBX_meta *meta = mdbx_meta_head(env); + mdbx_jitter4testing(false); + txn->mt_canary = meta->mm_canary; + const txnid_t snap = mdbx_meta_txnid_stable(env, meta); + txn->mt_txnid = snap + 1; + if (unlikely(txn->mt_txnid < snap)) { + mdbx_debug("txnid overflow!"); + rc = MDBX_TXN_FULL; + goto bailout; + } + + txn->mt_flags = flags; + txn->mt_child = NULL; + txn->mt_loose_pages = NULL; + txn->mt_loose_count = 0; + txn->mt_dirtyroom = MDBX_PNL_UM_MAX; + txn->mt_rw_dirtylist = env->me_dirtylist; + txn->mt_rw_dirtylist[0].mid = 0; + txn->mt_befree_pages = env->me_free_pgs; + txn->mt_befree_pages[0] = 0; + txn->mt_spill_pages = NULL; + if (txn->mt_lifo_reclaimed) + txn->mt_lifo_reclaimed[0] = 0; + env->me_txn = txn; + memcpy(txn->mt_dbiseqs, env->me_dbiseqs, env->me_maxdbs * sizeof(unsigned)); + /* Copy the DB info and flags */ + memcpy(txn->mt_dbs, meta->mm_dbs, CORE_DBS * sizeof(MDBX_db)); + /* Moved to here to avoid a data race in read TXNs */ + txn->mt_next_pgno = meta->mm_geo.next; + txn->mt_end_pgno = meta->mm_geo.now; + upper_pgno = meta->mm_geo.upper; + } + + /* Setup db info */ + txn->mt_numdbs = env->me_numdbs; + for (unsigned i = CORE_DBS; i < txn->mt_numdbs; i++) { + unsigned x = env->me_dbflags[i]; + txn->mt_dbs[i].md_flags = x & PERSISTENT_FLAGS; + txn->mt_dbflags[i] = + (x & MDBX_VALID) ? DB_VALID | DB_USRVALID | DB_STALE : 0; + } + txn->mt_dbflags[MAIN_DBI] = DB_VALID | DB_USRVALID; + txn->mt_dbflags[FREE_DBI] = DB_VALID; + + if (unlikely(env->me_flags & MDBX_FATAL_ERROR)) { + mdbx_warning("environment had fatal error, must shutdown!"); + rc = MDBX_PANIC; + } else { + const size_t size = pgno2bytes(env, txn->mt_end_pgno); + if (unlikely(size > env->me_mapsize)) { + if (upper_pgno > MAX_PAGENO || + bytes2pgno(env, pgno2bytes(env, upper_pgno)) != upper_pgno) { + rc = MDBX_MAP_RESIZED; + goto bailout; + } + rc = mdbx_mapresize(env, txn->mt_end_pgno, upper_pgno); + if (rc != MDBX_SUCCESS) + goto bailout; + } + return MDBX_SUCCESS; + } +bailout: + assert(rc != MDBX_SUCCESS); + mdbx_txn_end(txn, MDBX_END_SLOT | MDBX_END_FAIL_BEGIN); + return rc; +} + +int mdbx_txn_renew(MDBX_txn *txn) { + int rc; + + if (unlikely(!txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(!F_ISSET(txn->mt_flags, MDBX_TXN_RDONLY | MDBX_TXN_FINISHED))) + return MDBX_EINVAL; + + rc = mdbx_txn_renew0(txn, MDBX_TXN_RDONLY); + if (rc == MDBX_SUCCESS) { + mdbx_debug("renew txn %" PRIaTXN "%c %p on env %p, root page %" PRIaPGNO + "/%" PRIaPGNO, + txn->mt_txnid, (txn->mt_flags & MDBX_TXN_RDONLY) ? 
'r' : 'w', + (void *)txn, (void *)txn->mt_env, txn->mt_dbs[MAIN_DBI].md_root, + txn->mt_dbs[FREE_DBI].md_root); + } + return rc; +} + +int mdbx_txn_begin(MDBX_env *env, MDBX_txn *parent, unsigned flags, + MDBX_txn **ret) { + MDBX_txn *txn; + MDBX_ntxn *ntxn; + int rc, size, tsize; + + if (unlikely(!env || !ret)) + return MDBX_EINVAL; + + if (unlikely(env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(env->me_pid != mdbx_getpid())) { + env->me_flags |= MDBX_FATAL_ERROR; + return MDBX_PANIC; + } + + if (unlikely(!env->me_map)) + return MDBX_EPERM; + + flags &= MDBX_TXN_BEGIN_FLAGS; + flags |= env->me_flags & MDBX_WRITEMAP; + + if (unlikely(env->me_flags & MDBX_RDONLY & + ~flags)) /* write txn in RDONLY env */ + return MDBX_EACCESS; + + if (parent) { + if (unlikely(parent->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EINVAL; + + if (unlikely(parent->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + /* Nested transactions: Max 1 child, write txns only, no writemap */ + flags |= parent->mt_flags; + if (unlikely(flags & (MDBX_RDONLY | MDBX_WRITEMAP | MDBX_TXN_BLOCKED))) { + return (parent->mt_flags & MDBX_TXN_RDONLY) ? MDBX_EINVAL : MDBX_BAD_TXN; + } + /* Child txns save MDBX_pgstate and use own copy of cursors */ + size = env->me_maxdbs * (sizeof(MDBX_db) + sizeof(MDBX_cursor *) + 1); + size += tsize = sizeof(MDBX_ntxn); + } else if (flags & MDBX_RDONLY) { + size = env->me_maxdbs * (sizeof(MDBX_db) + 1); + size += tsize = sizeof(MDBX_txn); + } else { + /* Reuse preallocated write txn. However, do not touch it until + * mdbx_txn_renew0() succeeds, since it currently may be active. */ + txn = env->me_txn0; + goto renew; + } + if (unlikely((txn = calloc(1, size)) == NULL)) { + mdbx_debug("calloc: %s", "failed"); + return MDBX_ENOMEM; + } + txn->mt_dbxs = env->me_dbxs; /* static */ + txn->mt_dbs = (MDBX_db *)((char *)txn + tsize); + txn->mt_dbflags = (uint8_t *)txn + size - env->me_maxdbs; + txn->mt_flags = flags; + txn->mt_env = env; + + if (parent) { + unsigned i; + txn->mt_cursors = (MDBX_cursor **)(txn->mt_dbs + env->me_maxdbs); + txn->mt_dbiseqs = parent->mt_dbiseqs; + txn->mt_rw_dirtylist = malloc(sizeof(MDBX_ID2) * MDBX_PNL_UM_SIZE); + if (!txn->mt_rw_dirtylist || + !(txn->mt_befree_pages = mdbx_pnl_alloc(MDBX_PNL_UM_MAX))) { + free(txn->mt_rw_dirtylist); + free(txn); + return MDBX_ENOMEM; + } + txn->mt_txnid = parent->mt_txnid; + txn->mt_dirtyroom = parent->mt_dirtyroom; + txn->mt_rw_dirtylist[0].mid = 0; + txn->mt_spill_pages = NULL; + txn->mt_next_pgno = parent->mt_next_pgno; + txn->mt_end_pgno = parent->mt_end_pgno; + parent->mt_flags |= MDBX_TXN_HAS_CHILD; + parent->mt_child = txn; + txn->mt_parent = parent; + txn->mt_numdbs = parent->mt_numdbs; + memcpy(txn->mt_dbs, parent->mt_dbs, txn->mt_numdbs * sizeof(MDBX_db)); + /* Copy parent's mt_dbflags, but clear DB_NEW */ + for (i = 0; i < txn->mt_numdbs; i++) + txn->mt_dbflags[i] = parent->mt_dbflags[i] & ~DB_NEW; + rc = 0; + ntxn = (MDBX_ntxn *)txn; + ntxn->mnt_pgstate = + env->me_pgstate; /* save parent me_reclaimed_pglist & co */ + if (env->me_reclaimed_pglist) { + size = MDBX_PNL_SIZEOF(env->me_reclaimed_pglist); + env->me_reclaimed_pglist = mdbx_pnl_alloc(env->me_reclaimed_pglist[0]); + if (likely(env->me_reclaimed_pglist)) + memcpy(env->me_reclaimed_pglist, ntxn->mnt_pgstate.mf_reclaimed_pglist, + size); + else + rc = MDBX_ENOMEM; + } + if (likely(!rc)) + rc = mdbx_cursor_shadow(parent, txn); + if (unlikely(rc)) + mdbx_txn_end(txn, MDBX_END_FAIL_BEGINCHILD); + } else { /* 
MDBX_RDONLY */ + txn->mt_dbiseqs = env->me_dbiseqs; + renew: + rc = mdbx_txn_renew0(txn, flags); + } + + if (unlikely(rc)) { + if (txn != env->me_txn0) + free(txn); + } else { + txn->mt_owner = mdbx_thread_self(); + txn->mt_signature = MDBX_MT_SIGNATURE; + *ret = txn; + mdbx_debug("begin txn %" PRIaTXN "%c %p on env %p, root page %" PRIaPGNO + "/%" PRIaPGNO, + txn->mt_txnid, (flags & MDBX_RDONLY) ? 'r' : 'w', (void *)txn, + (void *)env, txn->mt_dbs[MAIN_DBI].md_root, + txn->mt_dbs[FREE_DBI].md_root); + } + + return rc; +} + +MDBX_env *mdbx_txn_env(MDBX_txn *txn) { + if (unlikely(!txn || txn->mt_signature != MDBX_MT_SIGNATURE)) + return NULL; + return txn->mt_env; +} + +uint64_t mdbx_txn_id(MDBX_txn *txn) { + if (unlikely(!txn || txn->mt_signature != MDBX_MT_SIGNATURE)) + return ~(txnid_t)0; + return txn->mt_txnid; +} + +/* Export or close DBI handles opened in this txn. */ +static void mdbx_dbis_update(MDBX_txn *txn, int keep) { + MDBX_dbi n = txn->mt_numdbs; + if (n) { + MDBX_env *env = txn->mt_env; + uint8_t *tdbflags = txn->mt_dbflags; + + for (unsigned i = n; --i >= CORE_DBS;) { + if (tdbflags[i] & DB_NEW) { + if (keep) { + env->me_dbflags[i] = txn->mt_dbs[i].md_flags | MDBX_VALID; + } else { + char *ptr = env->me_dbxs[i].md_name.iov_base; + if (ptr) { + env->me_dbxs[i].md_name.iov_base = NULL; + env->me_dbxs[i].md_name.iov_len = 0; + env->me_dbflags[i] = 0; + env->me_dbiseqs[i]++; + free(ptr); + } + } + } + } + if (keep && env->me_numdbs < n) + env->me_numdbs = n; + } +} + +/* End a transaction, except successful commit of a nested transaction. + * May be called twice for readonly txns: First reset it, then abort. + * [in] txn the transaction handle to end + * [in] mode why and how to end the transaction */ +static int mdbx_txn_end(MDBX_txn *txn, unsigned mode) { + MDBX_env *env = txn->mt_env; + static const char *const names[] = MDBX_END_NAMES; + + if (unlikely(txn->mt_env->me_pid != mdbx_getpid())) { + env->me_flags |= MDBX_FATAL_ERROR; + return MDBX_PANIC; + } + + /* Export or close DBI handles opened in this txn */ + mdbx_dbis_update(txn, mode & MDBX_END_UPDATE); + + mdbx_debug("%s txn %" PRIaTXN "%c %p on mdbenv %p, root page %" PRIaPGNO + "/%" PRIaPGNO, + names[mode & MDBX_END_OPMASK], txn->mt_txnid, + (txn->mt_flags & MDBX_TXN_RDONLY) ? 
'r' : 'w', (void *)txn, + (void *)env, txn->mt_dbs[MAIN_DBI].md_root, + txn->mt_dbs[FREE_DBI].md_root); + + if (F_ISSET(txn->mt_flags, MDBX_TXN_RDONLY)) { + if (txn->mt_ro_reader) { + txn->mt_ro_reader->mr_txnid = ~(txnid_t)0; + env->me_lck->mti_reader_finished_flag = true; + if (mode & MDBX_END_SLOT) { + if ((env->me_flags & MDBX_ENV_TXKEY) == 0) + txn->mt_ro_reader->mr_pid = 0; + txn->mt_ro_reader = NULL; + } + } + mdbx_coherent_barrier(); + txn->mt_numdbs = 0; /* prevent further DBI activity */ + txn->mt_flags |= MDBX_TXN_FINISHED; + txn->mt_owner = 0; + } else if (!F_ISSET(txn->mt_flags, MDBX_TXN_FINISHED)) { + pgno_t *pghead = env->me_reclaimed_pglist; + + if (!(mode & MDBX_END_EOTDONE)) /* !(already closed cursors) */ + mdbx_cursors_eot(txn, 0); + if (!(env->me_flags & MDBX_WRITEMAP)) { + mdbx_dlist_free(txn); + } + + if (txn->mt_lifo_reclaimed) { + txn->mt_lifo_reclaimed[0] = 0; + if (txn != env->me_txn0) { + mdbx_txl_free(txn->mt_lifo_reclaimed); + txn->mt_lifo_reclaimed = NULL; + } + } + txn->mt_numdbs = 0; + txn->mt_flags = MDBX_TXN_FINISHED; + + if (!txn->mt_parent) { + mdbx_pnl_shrink(&txn->mt_befree_pages); + env->me_free_pgs = txn->mt_befree_pages; + /* me_pgstate: */ + env->me_reclaimed_pglist = NULL; + env->me_last_reclaimed = 0; + + env->me_txn = NULL; + txn->mt_owner = 0; + txn->mt_signature = 0; + mode = 0; /* txn == env->me_txn0, do not free() it */ + + /* The writer mutex was locked in mdbx_txn_begin. */ + mdbx_txn_unlock(env); + } else { + txn->mt_parent->mt_child = NULL; + txn->mt_parent->mt_flags &= ~MDBX_TXN_HAS_CHILD; + env->me_pgstate = ((MDBX_ntxn *)txn)->mnt_pgstate; + mdbx_pnl_free(txn->mt_befree_pages); + mdbx_pnl_free(txn->mt_spill_pages); + free(txn->mt_rw_dirtylist); + } + + mdbx_pnl_free(pghead); + } + + if (mode & MDBX_END_FREE) { + mdbx_ensure(env, txn != env->me_txn0); + txn->mt_owner = 0; + txn->mt_signature = 0; + free(txn); + } + + return MDBX_SUCCESS; +} + +int mdbx_txn_reset(MDBX_txn *txn) { + if (unlikely(!txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + /* This call is only valid for read-only txns */ + if (unlikely(!(txn->mt_flags & MDBX_TXN_RDONLY))) + return MDBX_EINVAL; + + /* LY: don't close DBI-handles in MDBX mode */ + return mdbx_txn_end(txn, MDBX_END_RESET | MDBX_END_UPDATE); +} + +int mdbx_txn_abort(MDBX_txn *txn) { + if (unlikely(!txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (F_ISSET(txn->mt_flags, MDBX_TXN_RDONLY)) + /* LY: don't close DBI-handles in MDBX mode */ + return mdbx_txn_end(txn, MDBX_END_ABORT | MDBX_END_UPDATE | MDBX_END_SLOT | + MDBX_END_FREE); + + if (txn->mt_child) + mdbx_txn_abort(txn->mt_child); + + return mdbx_txn_end(txn, MDBX_END_ABORT | MDBX_END_SLOT | MDBX_END_FREE); +} + +static __inline int mdbx_backlog_size(MDBX_txn *txn) { + int reclaimed = txn->mt_env->me_reclaimed_pglist + ? txn->mt_env->me_reclaimed_pglist[0] + : 0; + return reclaimed + txn->mt_loose_count + txn->mt_end_pgno - txn->mt_next_pgno; +} + +/* LY: Prepare a backlog of pages to modify FreeDB itself, + * while reclaiming is prohibited. It should be enough to prevent search + * in mdbx_page_alloc() during a deleting, when freeDB tree is unbalanced. 
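+ * The backlog needed is the current freeDB depth plus one extra page for
+ * b-tree rebalancing (two with MDBX_LIFORECLAIM); mdbx_prep_backlog() below
+ * tops it up using mdbx_backlog_size() = reclaimed + loose pages +
+ * (end_pgno - next_pgno).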
*/ +static int mdbx_prep_backlog(MDBX_txn *txn, MDBX_cursor *mc) { + /* LY: extra page(s) for b-tree rebalancing */ + const int extra = (txn->mt_env->me_flags & MDBX_LIFORECLAIM) ? 2 : 1; + + if (mdbx_backlog_size(txn) < mc->mc_db->md_depth + extra) { + int rc = mdbx_cursor_touch(mc); + if (unlikely(rc)) + return rc; + + int backlog; + while (unlikely((backlog = mdbx_backlog_size(txn)) < extra)) { + rc = mdbx_page_alloc(mc, 1, NULL, MDBX_ALLOC_GC); + if (unlikely(rc)) { + if (unlikely(rc != MDBX_NOTFOUND)) + return rc; + break; + } + } + } + + return MDBX_SUCCESS; +} + +/* Save the freelist as of this transaction to the freeDB. + * This changes the freelist. Keep trying until it stabilizes. */ +static int mdbx_freelist_save(MDBX_txn *txn) { + /* env->me_reclaimed_pglist[] can grow and shrink during this call. + * env->me_last_reclaimed and txn->mt_free_pages[] can only grow. + * Page numbers cannot disappear from txn->mt_free_pages[]. */ + MDBX_cursor mc; + MDBX_env *env = txn->mt_env; + int rc, more = 1; + txnid_t cleanup_reclaimed_id = 0, head_id = 0; + pgno_t befree_count = 0; + intptr_t head_room = 0, total_room = 0; + unsigned cleanup_reclaimed_pos = 0, refill_reclaimed_pos = 0; + const bool lifo = (env->me_flags & MDBX_LIFORECLAIM) != 0; + + mdbx_cursor_init(&mc, txn, FREE_DBI, NULL); + + /* MDBX_RESERVE cancels meminit in ovpage malloc (when no WRITEMAP) */ + const intptr_t clean_limit = + (env->me_flags & (MDBX_NOMEMINIT | MDBX_WRITEMAP)) ? SSIZE_MAX + : env->me_maxfree_1pg; + + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); +again_on_freelist_change: + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + while (1) { + /* Come back here after each Put() in case freelist changed */ + MDBX_val key, data; + + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + if (!lifo) { + /* If using records from freeDB which we have not yet deleted, + * now delete them and any we reserved for me_reclaimed_pglist. */ + while (cleanup_reclaimed_id < env->me_last_reclaimed) { + rc = mdbx_cursor_first(&mc, &key, NULL); + if (unlikely(rc)) + goto bailout; + rc = mdbx_prep_backlog(txn, &mc); + if (unlikely(rc)) + goto bailout; + cleanup_reclaimed_id = head_id = *(txnid_t *)key.iov_base; + total_room = head_room = 0; + more = 1; + mdbx_tassert(txn, cleanup_reclaimed_id <= env->me_last_reclaimed); + mc.mc_flags |= C_RECLAIMING; + rc = mdbx_cursor_del(&mc, 0); + mc.mc_flags ^= C_RECLAIMING; + if (unlikely(rc)) + goto bailout; + } + } else if (txn->mt_lifo_reclaimed) { + /* LY: cleanup reclaimed records. */ + while (cleanup_reclaimed_pos < txn->mt_lifo_reclaimed[0]) { + cleanup_reclaimed_id = txn->mt_lifo_reclaimed[++cleanup_reclaimed_pos]; + key.iov_base = &cleanup_reclaimed_id; + key.iov_len = sizeof(cleanup_reclaimed_id); + rc = mdbx_cursor_get(&mc, &key, NULL, MDBX_SET); + if (likely(rc != MDBX_NOTFOUND)) { + if (unlikely(rc)) + goto bailout; + rc = mdbx_prep_backlog(txn, &mc); + if (unlikely(rc)) + goto bailout; + mc.mc_flags |= C_RECLAIMING; + rc = mdbx_cursor_del(&mc, 0); + mc.mc_flags ^= C_RECLAIMING; + if (unlikely(rc)) + goto bailout; + } + } + } + + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + if (txn->mt_loose_pages) { + /* Return loose page numbers to me_reclaimed_pglist, + * though usually none are left at this point. + * The pages themselves remain in dirtylist. 
*/ + if (unlikely(!env->me_reclaimed_pglist) && + !(lifo && env->me_last_reclaimed > 1)) { + /* Put loose page numbers in mt_free_pages, + * since unable to return them to me_reclaimed_pglist. */ + MDBX_page *mp = txn->mt_loose_pages; + if (unlikely((rc = mdbx_pnl_need(&txn->mt_befree_pages, + txn->mt_loose_count)) != 0)) + return rc; + for (; mp; mp = NEXT_LOOSE_PAGE(mp)) + mdbx_pnl_xappend(txn->mt_befree_pages, mp->mp_pgno); + } else { + /* Room for loose pages + temp PNL with same */ + if ((rc = mdbx_pnl_need(&env->me_reclaimed_pglist, + 2 * txn->mt_loose_count + 1)) != 0) + goto bailout; + MDBX_PNL loose = env->me_reclaimed_pglist + + MDBX_PNL_ALLOCLEN(env->me_reclaimed_pglist) - + txn->mt_loose_count; + unsigned count = 0; + for (MDBX_page *mp = txn->mt_loose_pages; mp; mp = NEXT_LOOSE_PAGE(mp)) + loose[++count] = mp->mp_pgno; + loose[0] = count; + mdbx_pnl_sort(loose); + mdbx_pnl_xmerge(env->me_reclaimed_pglist, loose); + } + + txn->mt_loose_pages = NULL; + txn->mt_loose_count = 0; + } + + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + if (env->me_reclaimed_pglist) { + /* Refund suitable pages into "unallocated" space */ + pgno_t tail = txn->mt_next_pgno; + pgno_t *const begin = env->me_reclaimed_pglist + 1; + pgno_t *const end = begin + env->me_reclaimed_pglist[0]; + pgno_t *higest; +#if MDBX_PNL_ASCENDING + for (higest = end; --higest >= begin;) { +#else + for (higest = begin; higest < end; ++higest) { +#endif /* MDBX_PNL sort-order */ + mdbx_tassert(txn, *higest >= NUM_METAS && *higest < tail); + if (*higest != tail - 1) + break; + tail -= 1; + } + if (tail != txn->mt_next_pgno) { +#if MDBX_PNL_ASCENDING + env->me_reclaimed_pglist[0] = (unsigned)(higest + 1 - begin); +#else + env->me_reclaimed_pglist[0] -= (unsigned)(higest - begin); + for (pgno_t *move = begin; higest < end; ++move, ++higest) + *move = *higest; +#endif /* MDBX_PNL sort-order */ + mdbx_info("refunded %" PRIaPGNO " pages: %" PRIaPGNO " -> %" PRIaPGNO, + tail - txn->mt_next_pgno, tail, txn->mt_next_pgno); + txn->mt_next_pgno = tail; + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + } + } + + /* Save the PNL of pages freed by this txn, to a single record */ + if (befree_count < txn->mt_befree_pages[0]) { + if (unlikely(!befree_count)) { + /* Make sure last page of freeDB is touched and on freelist */ + rc = mdbx_page_search(&mc, NULL, MDBX_PS_LAST | MDBX_PS_MODIFY); + if (unlikely(rc && rc != MDBX_NOTFOUND)) + goto bailout; + } + pgno_t *befree_pages = txn->mt_befree_pages; + /* Write to last page of freeDB */ + key.iov_len = sizeof(txn->mt_txnid); + key.iov_base = &txn->mt_txnid; + do { + befree_count = befree_pages[0]; + data.iov_len = MDBX_PNL_SIZEOF(befree_pages); + rc = mdbx_cursor_put(&mc, &key, &data, MDBX_RESERVE); + if (unlikely(rc)) + goto bailout; + /* Retry if mt_free_pages[] grew during the Put() */ + befree_pages = txn->mt_befree_pages; + } while (befree_count < befree_pages[0]); + + mdbx_pnl_sort(befree_pages); + memcpy(data.iov_base, befree_pages, data.iov_len); + + if (mdbx_debug_enabled(MDBX_DBG_EXTRA)) { + unsigned i = (unsigned)befree_pages[0]; + mdbx_debug_extra("PNL write txn %" PRIaTXN " root %" PRIaPGNO + " num %u, PNL", + txn->mt_txnid, txn->mt_dbs[FREE_DBI].md_root, i); + for (; i; i--) + mdbx_debug_extra_print(" %" PRIaPGNO "", befree_pages[i]); + mdbx_debug_extra_print("\n"); + } + continue; + } + + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + const intptr_t rpl_len = + (env->me_reclaimed_pglist ? 
env->me_reclaimed_pglist[0] : 0) + + txn->mt_loose_count; + if (rpl_len && refill_reclaimed_pos == 0) + refill_reclaimed_pos = 1; + + /* Reserve records for me_reclaimed_pglist[]. Split it if multi-page, + * to avoid searching freeDB for a page range. Use keys in + * range [1,me_last_reclaimed]: Smaller than txnid of oldest reader. */ + if (total_room >= rpl_len) { + if (total_room == rpl_len || --more < 0) + break; + } else if (head_room >= (intptr_t)env->me_maxfree_1pg && head_id > 1) { + /* Keep current record (overflow page), add a new one */ + head_id--; + refill_reclaimed_pos++; + head_room = 0; + } + + if (lifo) { + if (refill_reclaimed_pos > + (txn->mt_lifo_reclaimed ? txn->mt_lifo_reclaimed[0] : 0)) { + /* LY: need just a txn-id for save page list. */ + rc = mdbx_page_alloc(&mc, 0, NULL, MDBX_ALLOC_GC | MDBX_ALLOC_KICK); + if (likely(rc == 0)) + /* LY: ok, reclaimed from freedb. */ + continue; + if (unlikely(rc != MDBX_NOTFOUND)) + /* LY: other troubles... */ + goto bailout; + + /* LY: freedb is empty, will look any free txn-id in high2low order. */ + if (unlikely(env->me_last_reclaimed < 1)) { + /* LY: not any txn in the past of freedb. */ + rc = MDBX_MAP_FULL; + goto bailout; + } + + if (unlikely(!txn->mt_lifo_reclaimed)) { + txn->mt_lifo_reclaimed = mdbx_txl_alloc(); + if (unlikely(!txn->mt_lifo_reclaimed)) { + rc = MDBX_ENOMEM; + goto bailout; + } + } + /* LY: append the list. */ + rc = mdbx_txl_append(&txn->mt_lifo_reclaimed, + env->me_last_reclaimed - 1); + if (unlikely(rc)) + goto bailout; + --env->me_last_reclaimed; + /* LY: note that freeDB cleanup is not needed. */ + ++cleanup_reclaimed_pos; + } + mdbx_tassert(txn, txn->mt_lifo_reclaimed != NULL); + head_id = txn->mt_lifo_reclaimed[refill_reclaimed_pos]; + } else { + mdbx_tassert(txn, txn->mt_lifo_reclaimed == NULL); + } + + /* (Re)write {key = head_id, PNL length = head_room} */ + total_room -= head_room; + head_room = rpl_len - total_room; + if (head_room > (intptr_t)env->me_maxfree_1pg && head_id > 1) { + /* Overflow multi-page for part of me_reclaimed_pglist */ + head_room /= (head_id < INT16_MAX) ? (pgno_t)head_id + : INT16_MAX; /* amortize page sizes */ + head_room += env->me_maxfree_1pg - head_room % (env->me_maxfree_1pg + 1); + } else if (head_room < 0) { + /* Rare case, not bothering to delete this record */ + head_room = 0; + continue; + } + key.iov_len = sizeof(head_id); + key.iov_base = &head_id; + data.iov_len = (head_room + 1) * sizeof(pgno_t); + rc = mdbx_cursor_put(&mc, &key, &data, MDBX_RESERVE); + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + if (unlikely(rc)) + goto bailout; + + /* PNL is initially empty, zero out at least the length */ + pgno_t *pgs = (pgno_t *)data.iov_base; + intptr_t i = head_room > clean_limit ? head_room : 0; + do { + pgs[i] = 0; + } while (--i >= 0); + total_room += head_room; + continue; + } + + mdbx_tassert(txn, + cleanup_reclaimed_pos == + (txn->mt_lifo_reclaimed ? 
txn->mt_lifo_reclaimed[0] : 0)); + + /* Fill in the reserved me_reclaimed_pglist records */ + rc = MDBX_SUCCESS; + mdbx_tassert(txn, mdbx_pnl_check(env->me_reclaimed_pglist)); + if (env->me_reclaimed_pglist && env->me_reclaimed_pglist[0]) { + MDBX_val key, data; + key.iov_len = data.iov_len = 0; /* avoid MSVC warning */ + key.iov_base = data.iov_base = NULL; + + size_t rpl_left = env->me_reclaimed_pglist[0]; + pgno_t *rpl_end = env->me_reclaimed_pglist + rpl_left; + if (txn->mt_lifo_reclaimed == 0) { + mdbx_tassert(txn, lifo == 0); + rc = mdbx_cursor_first(&mc, &key, &data); + if (unlikely(rc)) + goto bailout; + } else { + mdbx_tassert(txn, lifo != 0); + } + + while (1) { + txnid_t id; + if (txn->mt_lifo_reclaimed == 0) { + mdbx_tassert(txn, lifo == 0); + id = *(txnid_t *)key.iov_base; + mdbx_tassert(txn, id <= env->me_last_reclaimed); + } else { + mdbx_tassert(txn, lifo != 0); + mdbx_tassert(txn, + refill_reclaimed_pos > 0 && + refill_reclaimed_pos <= txn->mt_lifo_reclaimed[0]); + id = txn->mt_lifo_reclaimed[refill_reclaimed_pos--]; + key.iov_base = &id; + key.iov_len = sizeof(id); + rc = mdbx_cursor_get(&mc, &key, &data, MDBX_SET); + if (unlikely(rc)) + goto bailout; + } + mdbx_tassert( + txn, cleanup_reclaimed_pos == + (txn->mt_lifo_reclaimed ? txn->mt_lifo_reclaimed[0] : 0)); + + mdbx_tassert(txn, data.iov_len >= sizeof(pgno_t) * 2); + size_t chunk_len = (data.iov_len / sizeof(pgno_t)) - 1; + if (chunk_len > rpl_left) + chunk_len = rpl_left; + data.iov_len = (chunk_len + 1) * sizeof(pgno_t); + key.iov_base = &id; + key.iov_len = sizeof(id); + + rpl_end -= chunk_len; + data.iov_base = rpl_end; + pgno_t save = rpl_end[0]; + rpl_end[0] = (pgno_t)chunk_len; + mdbx_tassert(txn, mdbx_pnl_check(rpl_end)); + mc.mc_flags |= C_RECLAIMING; + rc = mdbx_cursor_put(&mc, &key, &data, MDBX_CURRENT); + mc.mc_flags ^= C_RECLAIMING; + mdbx_tassert( + txn, cleanup_reclaimed_pos == + (txn->mt_lifo_reclaimed ? txn->mt_lifo_reclaimed[0] : 0)); + rpl_end[0] = save; + if (unlikely(rc)) + goto bailout; + + rpl_left -= chunk_len; + if (rpl_left == 0) + break; + + if (!lifo) { + rc = mdbx_cursor_next(&mc, &key, &data, MDBX_NEXT); + if (unlikely(rc)) + goto bailout; + } + } + } + +bailout: + if (txn->mt_lifo_reclaimed) { + mdbx_tassert(txn, rc || cleanup_reclaimed_pos == txn->mt_lifo_reclaimed[0]); + if (rc == MDBX_SUCCESS && + cleanup_reclaimed_pos != txn->mt_lifo_reclaimed[0]) { + mdbx_tassert(txn, cleanup_reclaimed_pos < txn->mt_lifo_reclaimed[0]); + /* LY: zeroed cleanup_idx to force cleanup + * and refill created freeDB records. */ + cleanup_reclaimed_pos = 0; + /* LY: restart filling */ + total_room = head_room = refill_reclaimed_pos = 0; + more = 1; + goto again_on_freelist_change; + } + txn->mt_lifo_reclaimed[0] = 0; + if (txn != env->me_txn0) { + mdbx_txl_free(txn->mt_lifo_reclaimed); + txn->mt_lifo_reclaimed = NULL; + } + } + + return rc; +} + +/* Flush (some) dirty pages to the map, after clearing their dirty flag. + * [in] txn the transaction that's being committed + * [in] keep number of initial pages in dirtylist to keep dirty. + * Returns 0 on success, non-zero on failure. 
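+ * With MDBX_WRITEMAP only the dirty flags are cleared; otherwise dirty pages
+ * are gathered into iovec batches of at most MDBX_COMMIT_PAGES entries
+ * (bounded by MAX_WRITE bytes) and written with mdbx_pwritev().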
*/ +static int mdbx_page_flush(MDBX_txn *txn, pgno_t keep) { + MDBX_env *env = txn->mt_env; + MDBX_ID2L dl = txn->mt_rw_dirtylist; + unsigned i, j, pagecount = dl[0].mid; + int rc; + size_t size = 0, pos = 0; + pgno_t pgno = 0; + MDBX_page *dp = NULL; + struct iovec iov[MDBX_COMMIT_PAGES]; + intptr_t wpos = 0, wsize = 0; + size_t next_pos = 1; /* impossible pos, so pos != next_pos */ + int n = 0; + + j = i = keep; + + if (env->me_flags & MDBX_WRITEMAP) { + /* Clear dirty flags */ + while (++i <= pagecount) { + dp = dl[i].mptr; + /* Don't flush this page yet */ + if (dp->mp_flags & (P_LOOSE | P_KEEP)) { + dp->mp_flags &= ~P_KEEP; + dl[++j] = dl[i]; + continue; + } + dp->mp_flags &= ~P_DIRTY; + dp->mp_validator = 0 /* TODO */; + env->me_sync_pending += + IS_OVERFLOW(dp) ? pgno2bytes(env, dp->mp_pages) : env->me_psize; + } + goto done; + } + + /* Write the pages */ + for (;;) { + if (++i <= pagecount) { + dp = dl[i].mptr; + /* Don't flush this page yet */ + if (dp->mp_flags & (P_LOOSE | P_KEEP)) { + dp->mp_flags &= ~P_KEEP; + dl[i].mid = 0; + continue; + } + pgno = dl[i].mid; + /* clear dirty flag */ + dp->mp_flags &= ~P_DIRTY; + dp->mp_validator = 0 /* TODO */; + pos = pgno2bytes(env, pgno); + size = IS_OVERFLOW(dp) ? pgno2bytes(env, dp->mp_pages) : env->me_psize; + env->me_sync_pending += size; + } + /* Write up to MDBX_COMMIT_PAGES dirty pages at a time. */ + if (pos != next_pos || n == MDBX_COMMIT_PAGES || wsize + size > MAX_WRITE) { + if (n) { + /* Write previous page(s) */ + rc = mdbx_pwritev(env->me_fd, iov, n, wpos, wsize); + if (unlikely(rc != MDBX_SUCCESS)) { + mdbx_debug("Write error: %s", strerror(rc)); + return rc; + } + n = 0; + } + if (i > pagecount) + break; + wpos = pos; + wsize = 0; + } + mdbx_debug("committing page %" PRIaPGNO "", pgno); + next_pos = pos + size; + iov[n].iov_len = size; + iov[n].iov_base = (char *)dp; + wsize += size; + n++; + } + + mdbx_invalidate_cache(env->me_map, pgno2bytes(env, txn->mt_next_pgno)); + + for (i = keep; ++i <= pagecount;) { + dp = dl[i].mptr; + /* This is a page we skipped above */ + if (!dl[i].mid) { + dl[++j] = dl[i]; + dl[j].mid = dp->mp_pgno; + continue; + } + mdbx_dpage_free(env, dp); + } + +done: + i--; + txn->mt_dirtyroom += i - j; + dl[0].mid = j; + return MDBX_SUCCESS; +} + +int mdbx_txn_commit(MDBX_txn *txn) { + int rc; + + if (unlikely(txn == NULL)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + MDBX_env *env = txn->mt_env; + if (unlikely(env->me_pid != mdbx_getpid())) { + env->me_flags |= MDBX_FATAL_ERROR; + return MDBX_PANIC; + } + + if (txn->mt_child) { + rc = mdbx_txn_commit(txn->mt_child); + txn->mt_child = NULL; + if (unlikely(rc != MDBX_SUCCESS)) + goto fail; + } + + /* mdbx_txn_end() mode for a commit which writes nothing */ + unsigned end_mode = + MDBX_END_EMPTY_COMMIT | MDBX_END_UPDATE | MDBX_END_SLOT | MDBX_END_FREE; + if (unlikely(F_ISSET(txn->mt_flags, MDBX_TXN_RDONLY))) + goto done; + + if (unlikely(txn->mt_flags & (MDBX_TXN_FINISHED | MDBX_TXN_ERROR))) { + mdbx_debug("error flag is set, can't commit"); + if (txn->mt_parent) + txn->mt_parent->mt_flags |= MDBX_TXN_ERROR; + rc = MDBX_BAD_TXN; + goto fail; + } + + if (txn->mt_parent) { + MDBX_txn *parent = txn->mt_parent; + MDBX_page **lp; + MDBX_ID2L dst, src; + MDBX_PNL pspill; + unsigned i, x, y, len, ps_len; + + /* Append our reclaim list to parent's */ + if (txn->mt_lifo_reclaimed) { + if (parent->mt_lifo_reclaimed) { 
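+      /* Both lists exist: append the child's reclaimed txn-ids to the
+       * parent's list and release the child's copy. */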
+ rc = mdbx_txl_append_list(&parent->mt_lifo_reclaimed, + txn->mt_lifo_reclaimed); + if (unlikely(rc != MDBX_SUCCESS)) + goto fail; + mdbx_txl_free(txn->mt_lifo_reclaimed); + } else + parent->mt_lifo_reclaimed = txn->mt_lifo_reclaimed; + txn->mt_lifo_reclaimed = NULL; + } + + /* Append our free list to parent's */ + rc = mdbx_pnl_append_list(&parent->mt_befree_pages, txn->mt_befree_pages); + if (unlikely(rc != MDBX_SUCCESS)) + goto fail; + mdbx_pnl_free(txn->mt_befree_pages); + /* Failures after this must either undo the changes + * to the parent or set MDBX_TXN_ERROR in the parent. */ + + parent->mt_next_pgno = txn->mt_next_pgno; + parent->mt_end_pgno = txn->mt_end_pgno; + parent->mt_flags = txn->mt_flags; + + /* Merge our cursors into parent's and close them */ + mdbx_cursors_eot(txn, 1); + + /* Update parent's DB table. */ + memcpy(parent->mt_dbs, txn->mt_dbs, txn->mt_numdbs * sizeof(MDBX_db)); + parent->mt_numdbs = txn->mt_numdbs; + parent->mt_dbflags[FREE_DBI] = txn->mt_dbflags[FREE_DBI]; + parent->mt_dbflags[MAIN_DBI] = txn->mt_dbflags[MAIN_DBI]; + for (i = CORE_DBS; i < txn->mt_numdbs; i++) { + /* preserve parent's DB_NEW status */ + parent->mt_dbflags[i] = + txn->mt_dbflags[i] | (parent->mt_dbflags[i] & DB_NEW); + } + + dst = parent->mt_rw_dirtylist; + src = txn->mt_rw_dirtylist; + /* Remove anything in our dirty list from parent's spill list */ + if ((pspill = parent->mt_spill_pages) && (ps_len = pspill[0])) { + x = y = ps_len; + pspill[0] = (pgno_t)-1; + /* Mark our dirty pages as deleted in parent spill list */ + for (i = 0, len = src[0].mid; ++i <= len;) { + pgno_t pn = src[i].mid << 1; + while (pn > pspill[x]) + x--; + if (pn == pspill[x]) { + pspill[x] = 1; + y = --x; + } + } + /* Squash deleted pagenums if we deleted any */ + for (x = y; ++x <= ps_len;) + if (!(pspill[x] & 1)) + pspill[++y] = pspill[x]; + pspill[0] = y; + } + + /* Remove anything in our spill list from parent's dirty list */ + if (txn->mt_spill_pages && txn->mt_spill_pages[0]) { + for (i = 1; i <= txn->mt_spill_pages[0]; i++) { + pgno_t pn = txn->mt_spill_pages[i]; + if (pn & 1) + continue; /* deleted spillpg */ + pn >>= 1; + y = mdbx_mid2l_search(dst, pn); + if (y <= dst[0].mid && dst[y].mid == pn) { + free(dst[y].mptr); + while (y < dst[0].mid) { + dst[y] = dst[y + 1]; + y++; + } + dst[0].mid--; + } + } + } + + /* Find len = length of merging our dirty list with parent's */ + x = dst[0].mid; + dst[0].mid = 0; /* simplify loops */ + if (parent->mt_parent) { + len = x + src[0].mid; + y = mdbx_mid2l_search(src, dst[x].mid + 1) - 1; + for (i = x; y && i; y--) { + pgno_t yp = src[y].mid; + while (yp < dst[i].mid) + i--; + if (yp == dst[i].mid) { + i--; + len--; + } + } + } else { /* Simplify the above for single-ancestor case */ + len = MDBX_PNL_UM_MAX - txn->mt_dirtyroom; + } + /* Merge our dirty list with parent's */ + y = src[0].mid; + for (i = len; y; dst[i--] = src[y--]) { + pgno_t yp = src[y].mid; + while (yp < dst[x].mid) + dst[i--] = dst[x--]; + if (yp == dst[x].mid) + free(dst[x--].mptr); + } + mdbx_tassert(txn, i == x); + dst[0].mid = len; + free(txn->mt_rw_dirtylist); + parent->mt_dirtyroom = txn->mt_dirtyroom; + if (txn->mt_spill_pages) { + if (parent->mt_spill_pages) { + /* TODO: Prevent failure here, so parent does not fail */ + rc = mdbx_pnl_append_list(&parent->mt_spill_pages, txn->mt_spill_pages); + if (unlikely(rc != MDBX_SUCCESS)) + parent->mt_flags |= MDBX_TXN_ERROR; + mdbx_pnl_free(txn->mt_spill_pages); + mdbx_pnl_sort(parent->mt_spill_pages); + } else { + parent->mt_spill_pages = 
txn->mt_spill_pages; + } + } + + /* Append our loose page list to parent's */ + for (lp = &parent->mt_loose_pages; *lp; lp = &NEXT_LOOSE_PAGE(*lp)) + ; + *lp = txn->mt_loose_pages; + parent->mt_loose_count += txn->mt_loose_count; + + parent->mt_child = NULL; + mdbx_pnl_free(((MDBX_ntxn *)txn)->mnt_pgstate.mf_reclaimed_pglist); + txn->mt_signature = 0; + free(txn); + return rc; + } + + if (unlikely(txn != env->me_txn)) { + mdbx_debug("attempt to commit unknown transaction"); + rc = MDBX_EINVAL; + goto fail; + } + + mdbx_cursors_eot(txn, 0); + end_mode |= MDBX_END_EOTDONE; + + if (!txn->mt_rw_dirtylist[0].mid && + !(txn->mt_flags & (MDBX_TXN_DIRTY | MDBX_TXN_SPILLS))) + goto done; + + mdbx_debug("committing txn %" PRIaTXN " %p on mdbenv %p, root page %" PRIaPGNO + "/%" PRIaPGNO, + txn->mt_txnid, (void *)txn, (void *)env, + txn->mt_dbs[MAIN_DBI].md_root, txn->mt_dbs[FREE_DBI].md_root); + + /* Update DB root pointers */ + if (txn->mt_numdbs > CORE_DBS) { + MDBX_cursor mc; + MDBX_dbi i; + MDBX_val data; + data.iov_len = sizeof(MDBX_db); + + mdbx_cursor_init(&mc, txn, MAIN_DBI, NULL); + for (i = CORE_DBS; i < txn->mt_numdbs; i++) { + if (txn->mt_dbflags[i] & DB_DIRTY) { + if (unlikely(TXN_DBI_CHANGED(txn, i))) { + rc = MDBX_BAD_DBI; + goto fail; + } + data.iov_base = &txn->mt_dbs[i]; + rc = mdbx_cursor_put(&mc, &txn->mt_dbxs[i].md_name, &data, F_SUBDATA); + if (unlikely(rc != MDBX_SUCCESS)) + goto fail; + } + } + } + + rc = mdbx_freelist_save(txn); + if (unlikely(rc != MDBX_SUCCESS)) + goto fail; + + mdbx_pnl_free(env->me_reclaimed_pglist); + env->me_reclaimed_pglist = NULL; + mdbx_pnl_shrink(&txn->mt_befree_pages); + + if (mdbx_audit_enabled()) + mdbx_audit(txn); + + rc = mdbx_page_flush(txn, 0); + if (likely(rc == MDBX_SUCCESS)) { + MDBX_meta meta, *head = mdbx_meta_head(env); + + meta.mm_magic_and_version = head->mm_magic_and_version; + meta.mm_extra_flags = head->mm_extra_flags; + meta.mm_validator_id = head->mm_validator_id; + meta.mm_extra_pagehdr = head->mm_extra_pagehdr; + + meta.mm_geo = head->mm_geo; + meta.mm_geo.next = txn->mt_next_pgno; + meta.mm_geo.now = txn->mt_end_pgno; + meta.mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI]; + meta.mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; + meta.mm_canary = txn->mt_canary; + mdbx_meta_set_txnid(env, &meta, txn->mt_txnid); + + rc = mdbx_sync_locked( + env, env->me_flags | txn->mt_flags | MDBX_SHRINK_ALLOWED, &meta); + } + if (unlikely(rc != MDBX_SUCCESS)) + goto fail; + end_mode = MDBX_END_COMMITTED | MDBX_END_UPDATE | MDBX_END_EOTDONE; + +done: + return mdbx_txn_end(txn, end_mode); + +fail: + mdbx_txn_abort(txn); + return rc; +} + +/* Read the environment parameters of a DB environment + * before mapping it into memory. */ +static int __cold mdbx_read_header(MDBX_env *env, MDBX_meta *meta) { + assert(offsetof(MDBX_page, mp_meta) == PAGEHDRSZ); + memset(meta, 0, sizeof(MDBX_meta)); + meta->mm_datasync_sign = MDBX_DATASIGN_WEAK; + int rc = MDBX_CORRUPTED; + + /* Read twice all meta pages so we can find the latest one. */ + unsigned loop_limit = NUM_METAS * 2; + for (unsigned loop_count = 0; loop_count < loop_limit; ++loop_count) { + MDBX_page page; + + /* We don't know the page size on first time. + * So, just guess it. */ + unsigned guess_pagesize = meta->mm_psize; + if (guess_pagesize == 0) + guess_pagesize = + (loop_count > NUM_METAS) ? 
env->me_psize : env->me_os_psize; + + const unsigned meta_number = loop_count % NUM_METAS; + const unsigned offset = guess_pagesize * meta_number; + + unsigned retryleft = 42; + while (1) { + int err = mdbx_pread(env->me_fd, &page, sizeof(page), offset); + if (err != MDBX_SUCCESS) { + mdbx_error("read meta[%u,%u]: %i, %s", offset, (unsigned)sizeof(page), + err, mdbx_strerror(err)); + return err; + } + + MDBX_page again; + err = mdbx_pread(env->me_fd, &again, sizeof(again), offset); + if (err != MDBX_SUCCESS) { + mdbx_error("read meta[%u,%u]: %i, %s", offset, (unsigned)sizeof(again), + err, mdbx_strerror(err)); + return err; + } + + if (memcmp(&page, &again, sizeof(page)) == 0 || --retryleft == 0) + break; + + mdbx_info("meta[%u] was updated, re-read it", meta_number); + } + + if (!retryleft) { + mdbx_error("meta[%u] is too volatile, skip it", meta_number); + continue; + } + + if (page.mp_pgno != meta_number) { + mdbx_error("meta[%u] has invalid pageno %" PRIaPGNO, meta_number, + page.mp_pgno); + return MDBX_INVALID; + } + + if (!F_ISSET(page.mp_flags, P_META)) { + mdbx_error("page #%u not a meta-page", meta_number); + return MDBX_INVALID; + } + + if (page.mp_meta.mm_magic_and_version != MDBX_DATA_MAGIC) { + mdbx_error("meta[%u] has invalid magic/version", meta_number); + return ((page.mp_meta.mm_magic_and_version >> 8) != MDBX_MAGIC) + ? MDBX_INVALID + : MDBX_VERSION_MISMATCH; + } + + if (page.mp_meta.mm_txnid_a != page.mp_meta.mm_txnid_b) { + mdbx_warning("meta[%u] not completely updated, skip it", meta_number); + continue; + } + + /* LY: check signature as a checksum */ + if (META_IS_STEADY(&page.mp_meta) && + page.mp_meta.mm_datasync_sign != mdbx_meta_sign(&page.mp_meta)) { + mdbx_notice("meta[%u] has invalid steady-checksum (0x%" PRIx64 + " != 0x%" PRIx64 "), skip it", + meta_number, page.mp_meta.mm_datasync_sign, + mdbx_meta_sign(&page.mp_meta)); + continue; + } + + /* LY: check pagesize */ + if (!mdbx_is_power2(page.mp_meta.mm_psize) || + page.mp_meta.mm_psize < MIN_PAGESIZE || + page.mp_meta.mm_psize > MAX_PAGESIZE) { + mdbx_notice("meta[%u] has invalid pagesize (%u), skip it", meta_number, + page.mp_meta.mm_psize); + rc = MDBX_VERSION_MISMATCH; + continue; + } + + mdbx_debug("read meta%" PRIaPGNO " = root %" PRIaPGNO "/%" PRIaPGNO + ", geo %" PRIaPGNO "/%" PRIaPGNO "-%" PRIaPGNO "/%" PRIaPGNO + " +%u -%u, txn_id %" PRIaTXN ", %s", + page.mp_pgno, page.mp_meta.mm_dbs[MAIN_DBI].md_root, + page.mp_meta.mm_dbs[FREE_DBI].md_root, page.mp_meta.mm_geo.lower, + page.mp_meta.mm_geo.next, page.mp_meta.mm_geo.now, + page.mp_meta.mm_geo.upper, page.mp_meta.mm_geo.grow, + page.mp_meta.mm_geo.shrink, page.mp_meta.mm_txnid_a, + mdbx_durable_str(&page.mp_meta)); + + /* LY: check min-pages value */ + if (page.mp_meta.mm_geo.lower < MIN_PAGENO || + page.mp_meta.mm_geo.lower > MAX_PAGENO) { + mdbx_notice("meta[%u] has invalid min-pages (%" PRIaPGNO "), skip it", + meta_number, page.mp_meta.mm_geo.lower); + rc = MDBX_INVALID; + continue; + } + + /* LY: check max-pages value */ + if (page.mp_meta.mm_geo.upper < MIN_PAGENO || + page.mp_meta.mm_geo.upper > MAX_PAGENO || + page.mp_meta.mm_geo.upper < page.mp_meta.mm_geo.lower) { + mdbx_notice("meta[%u] has invalid max-pages (%" PRIaPGNO "), skip it", + meta_number, page.mp_meta.mm_geo.upper); + rc = MDBX_INVALID; + continue; + } + + /* LY: check mapsize limits */ + const uint64_t mapsize_min = + page.mp_meta.mm_geo.lower * (uint64_t)page.mp_meta.mm_psize; + const uint64_t mapsize_max = + page.mp_meta.mm_geo.upper * (uint64_t)page.mp_meta.mm_psize; + 
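+    /* Illustration only (hypothetical values): with mm_psize = 4096,
+     * mm_geo.lower = 0x2000 and mm_geo.upper = 0x20000 pages this yields
+     * mapsize_min = 32 MiB and mapsize_max = 512 MiB, both of which must
+     * fall within [MIN_MAPSIZE, MAX_MAPSIZE] as checked below. */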
STATIC_ASSERT(MAX_MAPSIZE < SSIZE_MAX - MAX_PAGESIZE); + STATIC_ASSERT(MIN_MAPSIZE < MAX_MAPSIZE); + if (mapsize_min < MIN_MAPSIZE || mapsize_max > MAX_MAPSIZE) { + mdbx_notice("meta[%u] has invalid min-mapsize (%" PRIu64 "), skip it", + meta_number, mapsize_min); + rc = MDBX_VERSION_MISMATCH; + continue; + } + + STATIC_ASSERT(MIN_MAPSIZE < MAX_MAPSIZE); + if (mapsize_max > MAX_MAPSIZE || + MAX_PAGENO < mdbx_roundup2((size_t)mapsize_max, env->me_os_psize) / + (uint64_t)page.mp_meta.mm_psize) { + mdbx_notice("meta[%u] has too large max-mapsize (%" PRIu64 "), skip it", + meta_number, mapsize_max); + rc = MDBX_TOO_LARGE; + continue; + } + + /* LY: check end_pgno */ + if (page.mp_meta.mm_geo.now < page.mp_meta.mm_geo.lower || + page.mp_meta.mm_geo.now > page.mp_meta.mm_geo.upper) { + mdbx_notice("meta[%u] has invalid end-pageno (%" PRIaPGNO "), skip it", + meta_number, page.mp_meta.mm_geo.now); + rc = MDBX_CORRUPTED; + continue; + } + + /* LY: check last_pgno */ + if (page.mp_meta.mm_geo.next < MIN_PAGENO || + page.mp_meta.mm_geo.next - 1 > MAX_PAGENO) { + mdbx_notice("meta[%u] has invalid next-pageno (%" PRIaPGNO "), skip it", + meta_number, page.mp_meta.mm_geo.next); + rc = MDBX_CORRUPTED; + continue; + } + + if (page.mp_meta.mm_geo.next > page.mp_meta.mm_geo.now) { + mdbx_notice("meta[%u] next-pageno (%" PRIaPGNO + ") is beyond end-pgno (%" PRIaPGNO "), skip it", + meta_number, page.mp_meta.mm_geo.next, + page.mp_meta.mm_geo.now); + rc = MDBX_CORRUPTED; + continue; + } + + /* LY: FreeDB root */ + if (page.mp_meta.mm_dbs[FREE_DBI].md_root == P_INVALID) { + if (page.mp_meta.mm_dbs[FREE_DBI].md_branch_pages || + page.mp_meta.mm_dbs[FREE_DBI].md_depth || + page.mp_meta.mm_dbs[FREE_DBI].md_entries || + page.mp_meta.mm_dbs[FREE_DBI].md_leaf_pages || + page.mp_meta.mm_dbs[FREE_DBI].md_overflow_pages) { + mdbx_notice("meta[%u] has false-empty freedb, skip it", meta_number); + rc = MDBX_CORRUPTED; + continue; + } + } else if (page.mp_meta.mm_dbs[FREE_DBI].md_root >= + page.mp_meta.mm_geo.next) { + mdbx_notice("meta[%u] has invalid freedb-root %" PRIaPGNO ", skip it", + meta_number, page.mp_meta.mm_dbs[FREE_DBI].md_root); + rc = MDBX_CORRUPTED; + continue; + } + + /* LY: MainDB root */ + if (page.mp_meta.mm_dbs[MAIN_DBI].md_root == P_INVALID) { + if (page.mp_meta.mm_dbs[MAIN_DBI].md_branch_pages || + page.mp_meta.mm_dbs[MAIN_DBI].md_depth || + page.mp_meta.mm_dbs[MAIN_DBI].md_entries || + page.mp_meta.mm_dbs[MAIN_DBI].md_leaf_pages || + page.mp_meta.mm_dbs[MAIN_DBI].md_overflow_pages) { + mdbx_notice("meta[%u] has false-empty maindb", meta_number); + rc = MDBX_CORRUPTED; + continue; + } + } else if (page.mp_meta.mm_dbs[MAIN_DBI].md_root >= + page.mp_meta.mm_geo.next) { + mdbx_notice("meta[%u] has invalid maindb-root %" PRIaPGNO ", skip it", + meta_number, page.mp_meta.mm_dbs[MAIN_DBI].md_root); + rc = MDBX_CORRUPTED; + continue; + } + + if (page.mp_meta.mm_txnid_a == 0) { + mdbx_warning("meta[%u] has zero txnid, skip it", meta_number); + continue; + } + + if (mdbx_meta_ot(prefer_steady, env, meta, &page.mp_meta)) { + *meta = page.mp_meta; + if (META_IS_WEAK(meta)) + loop_limit += 1; /* LY: should re-read to hush race with update */ + mdbx_info("latch meta[%u]", meta_number); + } + } + + if (META_IS_WEAK(meta)) { + mdbx_error("no usable meta-pages, database is corrupted"); + return rc; + } + + return MDBX_SUCCESS; +} + +static MDBX_page *__cold mdbx_meta_model(const MDBX_env *env, MDBX_page *model, + unsigned num) { + + mdbx_ensure(env, mdbx_is_power2(env->me_psize)); + mdbx_ensure(env, env->me_psize 
>= MIN_PAGESIZE); + mdbx_ensure(env, env->me_psize <= MAX_PAGESIZE); + mdbx_ensure(env, env->me_dbgeo.lower >= MIN_MAPSIZE); + mdbx_ensure(env, env->me_dbgeo.upper <= MAX_MAPSIZE); + mdbx_ensure(env, env->me_dbgeo.now >= env->me_dbgeo.lower); + mdbx_ensure(env, env->me_dbgeo.now <= env->me_dbgeo.upper); + + memset(model, 0, sizeof(*model)); + model->mp_pgno = num; + model->mp_flags = P_META; + model->mp_meta.mm_magic_and_version = MDBX_DATA_MAGIC; + + model->mp_meta.mm_geo.lower = bytes2pgno(env, env->me_dbgeo.lower); + model->mp_meta.mm_geo.upper = bytes2pgno(env, env->me_dbgeo.upper); + model->mp_meta.mm_geo.grow = (uint16_t)bytes2pgno(env, env->me_dbgeo.grow); + model->mp_meta.mm_geo.shrink = + (uint16_t)bytes2pgno(env, env->me_dbgeo.shrink); + model->mp_meta.mm_geo.now = bytes2pgno(env, env->me_dbgeo.now); + model->mp_meta.mm_geo.next = NUM_METAS; + + mdbx_ensure(env, model->mp_meta.mm_geo.lower >= MIN_PAGENO); + mdbx_ensure(env, model->mp_meta.mm_geo.upper <= MAX_PAGENO); + mdbx_ensure(env, model->mp_meta.mm_geo.now >= model->mp_meta.mm_geo.lower); + mdbx_ensure(env, model->mp_meta.mm_geo.now <= model->mp_meta.mm_geo.upper); + mdbx_ensure(env, model->mp_meta.mm_geo.next >= MIN_PAGENO); + mdbx_ensure(env, model->mp_meta.mm_geo.next <= model->mp_meta.mm_geo.now); + mdbx_ensure(env, model->mp_meta.mm_geo.grow == + bytes2pgno(env, env->me_dbgeo.grow)); + mdbx_ensure(env, model->mp_meta.mm_geo.shrink == + bytes2pgno(env, env->me_dbgeo.shrink)); + + model->mp_meta.mm_psize = env->me_psize; + model->mp_meta.mm_flags = (uint16_t)env->me_flags; + model->mp_meta.mm_flags |= + MDBX_INTEGERKEY; /* this is mm_dbs[FREE_DBI].md_flags */ + model->mp_meta.mm_dbs[FREE_DBI].md_root = P_INVALID; + model->mp_meta.mm_dbs[MAIN_DBI].md_root = P_INVALID; + mdbx_meta_set_txnid(env, &model->mp_meta, MIN_TXNID + num); + model->mp_meta.mm_datasync_sign = mdbx_meta_sign(&model->mp_meta); + return (MDBX_page *)((uint8_t *)model + env->me_psize); +} + +/* Fill in most of the zeroed meta-pages for an empty database environment. + * Return pointer to recenly (head) meta-page. 
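+ * All three meta models are written steady with ascending txnids
+ * (MIN_TXNID + page number); the last one is then downgraded to a weak
+ * datasync signature, so the returned meta #1 is the most recent steady one.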
*/ +static MDBX_page *__cold mdbx_init_metas(const MDBX_env *env, void *buffer) { + MDBX_page *page0 = (MDBX_page *)buffer; + MDBX_page *page1 = mdbx_meta_model(env, page0, 0); + MDBX_page *page2 = mdbx_meta_model(env, page1, 1); + mdbx_meta_model(env, page2, 2); + page2->mp_meta.mm_datasync_sign = MDBX_DATASIGN_WEAK; + mdbx_assert(env, !mdbx_meta_eq(env, &page0->mp_meta, &page1->mp_meta)); + mdbx_assert(env, !mdbx_meta_eq(env, &page1->mp_meta, &page2->mp_meta)); + mdbx_assert(env, !mdbx_meta_eq(env, &page2->mp_meta, &page0->mp_meta)); + return page1; +} + +static int mdbx_sync_locked(MDBX_env *env, unsigned flags, + MDBX_meta *const pending) { + mdbx_assert(env, ((env->me_flags ^ flags) & MDBX_WRITEMAP) == 0); + MDBX_meta *const meta0 = METAPAGE(env, 0); + MDBX_meta *const meta1 = METAPAGE(env, 1); + MDBX_meta *const meta2 = METAPAGE(env, 2); + MDBX_meta *const head = mdbx_meta_head(env); + + mdbx_assert(env, mdbx_meta_eq_mask(env) == 0); + mdbx_assert(env, + pending < METAPAGE(env, 0) || pending > METAPAGE(env, NUM_METAS)); + mdbx_assert(env, (env->me_flags & (MDBX_RDONLY | MDBX_FATAL_ERROR)) == 0); + mdbx_assert(env, !META_IS_STEADY(head) || env->me_sync_pending != 0); + mdbx_assert(env, pending->mm_geo.next <= pending->mm_geo.now); + + const size_t usedbytes = pgno_align2os_bytes(env, pending->mm_geo.next); + if (env->me_sync_threshold && env->me_sync_pending >= env->me_sync_threshold) + flags &= MDBX_WRITEMAP | MDBX_SHRINK_ALLOWED; + + /* LY: step#1 - sync previously written/updated data-pages */ + int rc = MDBX_RESULT_TRUE; + if (env->me_sync_pending && (flags & MDBX_NOSYNC) == 0) { + mdbx_assert(env, ((flags ^ env->me_flags) & MDBX_WRITEMAP) == 0); + MDBX_meta *const steady = mdbx_meta_steady(env); + if (flags & MDBX_WRITEMAP) { + rc = mdbx_msync(&env->me_dxb_mmap, 0, usedbytes, flags & MDBX_MAPASYNC); + if (unlikely(rc != MDBX_SUCCESS)) + goto fail; + if ((flags & MDBX_MAPASYNC) == 0) { + if (unlikely(pending->mm_geo.next > steady->mm_geo.now)) { + rc = mdbx_filesize_sync(env->me_fd); + if (unlikely(rc != MDBX_SUCCESS)) + goto fail; + } + env->me_sync_pending = 0; + } + } else { + rc = mdbx_filesync(env->me_fd, pending->mm_geo.next > steady->mm_geo.now); + if (unlikely(rc != MDBX_SUCCESS)) + goto fail; + env->me_sync_pending = 0; + } + } + +#if defined(_WIN32) || defined(_WIN64) +/* Windows is unable shrinking a mapped file */ +#else + /* LY: check conditions to shrink datafile */ + pgno_t shrink = 0; + if ((flags & MDBX_SHRINK_ALLOWED) && pending->mm_geo.shrink && + pending->mm_geo.now - pending->mm_geo.next > pending->mm_geo.shrink) { + const pgno_t aligner = + pending->mm_geo.grow ? pending->mm_geo.grow : pending->mm_geo.shrink; + const pgno_t aligned = pgno_align2os_pgno( + env, pending->mm_geo.next + aligner - pending->mm_geo.next % aligner); + const pgno_t bottom = + (aligned > pending->mm_geo.lower) ? aligned : pending->mm_geo.lower; + if (pending->mm_geo.now > bottom) { + shrink = pending->mm_geo.now - bottom; + pending->mm_geo.now = bottom; + if (mdbx_meta_txnid_stable(env, head) == pending->mm_txnid_a) + mdbx_meta_set_txnid(env, pending, pending->mm_txnid_a + 1); + } + } +#endif /* not a Windows */ + + /* Steady or Weak */ + if (env->me_sync_pending == 0) { + pending->mm_datasync_sign = mdbx_meta_sign(pending); + } else { + pending->mm_datasync_sign = + (flags & MDBX_UTTERLY_NOSYNC) == MDBX_UTTERLY_NOSYNC + ? 
MDBX_DATASIGN_NONE + : MDBX_DATASIGN_WEAK; + } + + MDBX_meta *target = nullptr; + if (mdbx_meta_txnid_stable(env, head) == pending->mm_txnid_a) { + mdbx_assert(env, memcmp(&head->mm_dbs, &pending->mm_dbs, + sizeof(head->mm_dbs)) == 0); + mdbx_assert(env, memcmp(&head->mm_canary, &pending->mm_canary, + sizeof(head->mm_canary)) == 0); + mdbx_assert(env, memcmp(&head->mm_geo, &pending->mm_geo, + sizeof(pending->mm_geo)) == 0); + if (!META_IS_STEADY(head) && META_IS_STEADY(pending)) + target = head; + else { + mdbx_ensure(env, mdbx_meta_eq(env, head, pending)); + mdbx_debug("skip update meta"); + return MDBX_SUCCESS; + } + } else if (head == meta0) + target = mdbx_meta_ancient(prefer_steady, env, meta1, meta2); + else if (head == meta1) + target = mdbx_meta_ancient(prefer_steady, env, meta0, meta2); + else { + mdbx_assert(env, head == meta2); + target = mdbx_meta_ancient(prefer_steady, env, meta0, meta1); + } + + /* LY: step#2 - update meta-page. */ + mdbx_debug("writing meta%" PRIaPGNO " = root %" PRIaPGNO "/%" PRIaPGNO + ", geo %" PRIaPGNO "/%" PRIaPGNO "-%" PRIaPGNO "/%" PRIaPGNO + " +%u -%u, txn_id %" PRIaTXN ", %s", + container_of(target, MDBX_page, mp_data)->mp_pgno, + pending->mm_dbs[MAIN_DBI].md_root, + pending->mm_dbs[FREE_DBI].md_root, pending->mm_geo.lower, + pending->mm_geo.next, pending->mm_geo.now, pending->mm_geo.upper, + pending->mm_geo.grow, pending->mm_geo.shrink, pending->mm_txnid_a, + mdbx_durable_str(pending)); + + mdbx_debug("meta0: %s, %s, txn_id %" PRIaTXN ", root %" PRIaPGNO + "/%" PRIaPGNO, + (meta0 == head) ? "head" : (meta0 == target) ? "tail" : "stay", + mdbx_durable_str(meta0), mdbx_meta_txnid_fluid(env, meta0), + meta0->mm_dbs[MAIN_DBI].md_root, meta0->mm_dbs[FREE_DBI].md_root); + mdbx_debug("meta1: %s, %s, txn_id %" PRIaTXN ", root %" PRIaPGNO + "/%" PRIaPGNO, + (meta1 == head) ? "head" : (meta1 == target) ? "tail" : "stay", + mdbx_durable_str(meta1), mdbx_meta_txnid_fluid(env, meta1), + meta1->mm_dbs[MAIN_DBI].md_root, meta1->mm_dbs[FREE_DBI].md_root); + mdbx_debug("meta2: %s, %s, txn_id %" PRIaTXN ", root %" PRIaPGNO + "/%" PRIaPGNO, + (meta2 == head) ? "head" : (meta2 == target) ? "tail" : "stay", + mdbx_durable_str(meta2), mdbx_meta_txnid_fluid(env, meta2), + meta2->mm_dbs[MAIN_DBI].md_root, meta2->mm_dbs[FREE_DBI].md_root); + + mdbx_assert(env, !mdbx_meta_eq(env, pending, meta0)); + mdbx_assert(env, !mdbx_meta_eq(env, pending, meta1)); + mdbx_assert(env, !mdbx_meta_eq(env, pending, meta2)); + + mdbx_assert(env, ((env->me_flags ^ flags) & MDBX_WRITEMAP) == 0); + mdbx_ensure(env, + target == head || + mdbx_meta_txnid_stable(env, target) < pending->mm_txnid_a); + if (env->me_flags & MDBX_WRITEMAP) { + mdbx_jitter4testing(true); + if (likely(target != head)) { + /* LY: 'invalidate' the meta. 
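+       * Update protocol: the target is signed weak, then
+       * mdbx_meta_update_begin() and mdbx_meta_update_end() bracket the copy
+       * of geo/dbs/canary with the pending txnid halves (mm_txnid_a /
+       * mm_txnid_b) around a coherence barrier; a reader seeing
+       * mm_txnid_a != mm_txnid_b (as in mdbx_read_header() above) skips the
+       * meta as not completely updated.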
*/ + target->mm_datasync_sign = MDBX_DATASIGN_WEAK; + mdbx_meta_update_begin(env, target, pending->mm_txnid_a); +#ifndef NDEBUG + /* debug: provoke failure to catch a violators */ + memset(target->mm_dbs, 0xCC, + sizeof(target->mm_dbs) + sizeof(target->mm_canary)); + mdbx_jitter4testing(false); +#endif + + /* LY: update info */ + target->mm_geo = pending->mm_geo; + target->mm_dbs[FREE_DBI] = pending->mm_dbs[FREE_DBI]; + target->mm_dbs[MAIN_DBI] = pending->mm_dbs[MAIN_DBI]; + target->mm_canary = pending->mm_canary; + mdbx_jitter4testing(true); + mdbx_coherent_barrier(); + + /* LY: 'commit' the meta */ + mdbx_meta_update_end(env, target, pending->mm_txnid_b); + mdbx_jitter4testing(true); + } else { + /* dangerous case (target == head), only mm_datasync_sign could + * me updated, check assertions once again */ + mdbx_ensure(env, + mdbx_meta_txnid_stable(env, head) == pending->mm_txnid_a && + !META_IS_STEADY(head) && META_IS_STEADY(pending)); + mdbx_ensure(env, memcmp(&head->mm_geo, &pending->mm_geo, + sizeof(head->mm_geo)) == 0); + mdbx_ensure(env, memcmp(&head->mm_dbs, &pending->mm_dbs, + sizeof(head->mm_dbs)) == 0); + mdbx_ensure(env, memcmp(&head->mm_canary, &pending->mm_canary, + sizeof(head->mm_canary)) == 0); + } + target->mm_datasync_sign = pending->mm_datasync_sign; + mdbx_coherent_barrier(); + mdbx_jitter4testing(true); + } else { + rc = mdbx_pwrite(env->me_fd, pending, sizeof(MDBX_meta), + (uint8_t *)target - env->me_map); + if (unlikely(rc != MDBX_SUCCESS)) { + undo: + mdbx_debug("write failed, disk error?"); + /* On a failure, the pagecache still contains the new data. + * Try write some old data back, to prevent it from being used. */ + mdbx_pwrite(env->me_fd, (void *)target, sizeof(MDBX_meta), + (uint8_t *)target - env->me_map); + goto fail; + } + mdbx_invalidate_cache(target, sizeof(MDBX_meta)); + } + + /* LY: step#3 - sync meta-pages. 
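+   * Unless MDBX_NOSYNC/MDBX_NOMETASYNC is set: with MDBX_WRITEMAP only the
+   * OS page(s) holding the meta are msync'ed, otherwise the file is
+   * fsync'ed and a failure falls back to rewriting the previous meta image
+   * (goto undo).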
*/ + mdbx_assert(env, ((env->me_flags ^ flags) & MDBX_WRITEMAP) == 0); + if ((flags & (MDBX_NOSYNC | MDBX_NOMETASYNC)) == 0) { + mdbx_assert(env, ((flags ^ env->me_flags) & MDBX_WRITEMAP) == 0); + if (flags & MDBX_WRITEMAP) { + const size_t offset = + ((uint8_t *)container_of(head, MDBX_page, mp_meta)) - + env->me_dxb_mmap.dxb; + const size_t paged_offset = offset & ~(env->me_os_psize - 1); + const size_t paged_length = mdbx_roundup2( + env->me_psize + offset - paged_offset, env->me_os_psize); + rc = mdbx_msync(&env->me_dxb_mmap, paged_offset, paged_length, + flags & MDBX_MAPASYNC); + if (unlikely(rc != MDBX_SUCCESS)) + goto fail; + } else { + rc = mdbx_filesync(env->me_fd, false); + if (rc != MDBX_SUCCESS) + goto undo; + } + } + +#if defined(_WIN32) || defined(_WIN64) +/* Windows is unable shrinking a mapped file */ +#else + /* LY: shrink datafile if needed */ + if (unlikely(shrink)) { + mdbx_info("shrink to %" PRIaPGNO " pages (-%" PRIaPGNO ")", + pending->mm_geo.now, shrink); + rc = mdbx_mapresize(env, pending->mm_geo.now, pending->mm_geo.upper); + if (MDBX_IS_ERROR(rc)) + goto fail; + } +#endif /* not a Windows */ + + return MDBX_SUCCESS; + +fail: + env->me_flags |= MDBX_FATAL_ERROR; + return rc; +} + +int __cold mdbx_env_get_maxkeysize(MDBX_env *env) { + if (!env || env->me_signature != MDBX_ME_SIGNATURE || !env->me_maxkey_limit) + return -MDBX_EINVAL; + return env->me_maxkey_limit; +} + +#define mdbx_nodemax(pagesize) \ + (((((pagesize)-PAGEHDRSZ) / MDBX_MINKEYS) & -(intptr_t)2) - sizeof(indx_t)) + +#define mdbx_maxkey(nodemax) ((nodemax) - (NODESIZE + sizeof(MDBX_db))) + +#define mdbx_maxfree1pg(pagesize) (((pagesize)-PAGEHDRSZ) / sizeof(pgno_t) - 1) + +int mdbx_get_maxkeysize(size_t pagesize) { + if (pagesize == 0) + pagesize = mdbx_syspagesize(); + + intptr_t nodemax = mdbx_nodemax(pagesize); + if (nodemax < 0) + return -MDBX_EINVAL; + + intptr_t maxkey = mdbx_maxkey(nodemax); + return (maxkey > 0 && maxkey < INT_MAX) ? 
(int)maxkey : -MDBX_EINVAL; +} + +static void __cold mdbx_setup_pagesize(MDBX_env *env, const size_t pagesize) { + STATIC_ASSERT(SSIZE_MAX > MAX_MAPSIZE); + STATIC_ASSERT(MIN_PAGESIZE > sizeof(MDBX_page)); + mdbx_ensure(env, mdbx_is_power2(pagesize)); + mdbx_ensure(env, pagesize >= MIN_PAGESIZE); + mdbx_ensure(env, pagesize <= MAX_PAGESIZE); + env->me_psize = (unsigned)pagesize; + + STATIC_ASSERT(mdbx_maxfree1pg(MIN_PAGESIZE) > 42); + STATIC_ASSERT(mdbx_maxfree1pg(MAX_PAGESIZE) < MDBX_PNL_DB_MAX); + const intptr_t maxfree_1pg = (pagesize - PAGEHDRSZ) / sizeof(pgno_t) - 1; + mdbx_ensure(env, maxfree_1pg > 42 && maxfree_1pg < MDBX_PNL_DB_MAX); + env->me_maxfree_1pg = (unsigned)maxfree_1pg; + + STATIC_ASSERT(mdbx_nodemax(MIN_PAGESIZE) > 42); + STATIC_ASSERT(mdbx_nodemax(MAX_PAGESIZE) < UINT16_MAX); + const intptr_t nodemax = mdbx_nodemax(pagesize); + mdbx_ensure(env, nodemax > 42 && nodemax < UINT16_MAX); + env->me_nodemax = (unsigned)nodemax; + + STATIC_ASSERT(mdbx_maxkey(MIN_PAGESIZE) > 42); + STATIC_ASSERT(mdbx_maxkey(MIN_PAGESIZE) < MIN_PAGESIZE); + STATIC_ASSERT(mdbx_maxkey(MAX_PAGESIZE) > 42); + STATIC_ASSERT(mdbx_maxkey(MAX_PAGESIZE) < MAX_PAGESIZE); + const intptr_t maxkey_limit = mdbx_maxkey(env->me_nodemax); + mdbx_ensure(env, maxkey_limit > 42 && (size_t)maxkey_limit < pagesize); + env->me_maxkey_limit = (unsigned)maxkey_limit; + + env->me_psize2log = mdbx_log2(pagesize); + mdbx_assert(env, pgno2bytes(env, 1) == pagesize); + mdbx_assert(env, bytes2pgno(env, pagesize + pagesize) == 2); +} + +int __cold mdbx_env_create(MDBX_env **penv) { + MDBX_env *env = calloc(1, sizeof(MDBX_env)); + if (!env) + return MDBX_ENOMEM; + + env->me_maxreaders = DEFAULT_READERS; + env->me_maxdbs = env->me_numdbs = CORE_DBS; + env->me_fd = INVALID_HANDLE_VALUE; + env->me_lfd = INVALID_HANDLE_VALUE; + env->me_pid = mdbx_getpid(); + + int rc; + const size_t os_psize = mdbx_syspagesize(); + if (!mdbx_is_power2(os_psize) || os_psize < MIN_PAGESIZE) { + mdbx_error("unsuitable system pagesize %" PRIuPTR, os_psize); + rc = MDBX_INCOMPATIBLE; + goto bailout; + } + env->me_os_psize = (unsigned)os_psize; + mdbx_setup_pagesize(env, env->me_os_psize); + + rc = mdbx_fastmutex_init(&env->me_dbi_lock); + if (unlikely(rc != MDBX_SUCCESS)) + goto bailout; + + VALGRIND_CREATE_MEMPOOL(env, 0, 0); + env->me_signature = MDBX_ME_SIGNATURE; + *penv = env; + return MDBX_SUCCESS; + +bailout: + free(env); + *penv = nullptr; + return rc; +} + +static int __cold mdbx_env_map(MDBX_env *env, size_t usedsize) { + int rc = mdbx_mmap(env->me_flags, &env->me_dxb_mmap, env->me_dbgeo.now, + env->me_dbgeo.upper); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + +#ifdef MADV_DONTFORK + if (madvise(env->me_map, env->me_mapsize, MADV_DONTFORK)) + return errno; +#endif + +#ifdef MADV_NOHUGEPAGE + (void)madvise(env->me_map, env->me_mapsize, MADV_NOHUGEPAGE); +#endif + +#if defined(MADV_DODUMP) && defined(MADV_DONTDUMP) + const size_t meta_length = pgno2bytes(env, NUM_METAS); + (void)madvise(env->me_map, meta_length, MADV_DODUMP); + if (!(env->me_flags & MDBX_PAGEPERTURB)) + (void)madvise(env->me_map + meta_length, env->me_mapsize - meta_length, + MADV_DONTDUMP); +#endif + +#ifdef MADV_REMOVE + if (usedsize && (env->me_flags & MDBX_WRITEMAP)) { + (void)madvise(env->me_map + usedsize, env->me_mapsize - usedsize, + MADV_REMOVE); + } +#else + (void)usedsize; +#endif + +#if defined(MADV_RANDOM) && defined(MADV_WILLNEED) + /* Turn on/off readahead. It's harmful when the DB is larger than RAM. 
*/ + if (madvise(env->me_map, env->me_mapsize, + (env->me_flags & MDBX_NORDAHEAD) ? MADV_RANDOM : MADV_WILLNEED)) + return errno; +#endif + +#ifdef USE_VALGRIND + env->me_valgrind_handle = + VALGRIND_CREATE_BLOCK(env->me_map, env->me_mapsize, "mdbx"); +#endif + + return MDBX_SUCCESS; +} + +LIBMDBX_API int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower, + intptr_t size_now, intptr_t size_upper, + intptr_t growth_step, + intptr_t shrink_threshold, + intptr_t pagesize) { + if (unlikely(!env)) + return MDBX_EINVAL; + + if (unlikely(env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EBADSIGN; + + const bool outside_txn = + (!env->me_txn0 || env->me_txn0->mt_owner != mdbx_thread_self()); + +#if MDBX_DEBUG + if (growth_step < 0) + growth_step = 1; + if (shrink_threshold < 0) + shrink_threshold = 1; +#endif + + int rc = MDBX_PROBLEM; + if (env->me_map) { + /* env already mapped */ + if (!env->me_lck || (env->me_flags & MDBX_RDONLY)) + return MDBX_EACCESS; + + if (outside_txn) { + int err = mdbx_txn_lock(env); + if (unlikely(err != MDBX_SUCCESS)) + return err; + } + MDBX_meta *head = mdbx_meta_head(env); + + if (pagesize < 0) + pagesize = env->me_psize; + if (pagesize != (intptr_t)env->me_psize) { + rc = MDBX_EINVAL; + goto bailout; + } + + if (size_lower < 0) + size_lower = pgno2bytes(env, head->mm_geo.lower); + if (size_now < 0) + size_now = pgno2bytes(env, head->mm_geo.now); + if (size_upper < 0) + size_upper = pgno2bytes(env, head->mm_geo.upper); + if (growth_step < 0) + growth_step = pgno2bytes(env, head->mm_geo.grow); + if (shrink_threshold < 0) + shrink_threshold = pgno2bytes(env, head->mm_geo.shrink); + + const size_t usedbytes = pgno2bytes(env, head->mm_geo.next); + if ((size_t)size_upper < usedbytes) { + rc = MDBX_MAP_FULL; + goto bailout; + } + if ((size_t)size_now < usedbytes) + size_now = usedbytes; +#if defined(_WIN32) || defined(_WIN64) + if ((size_t)size_now < env->me_dbgeo.now || + (size_t)size_upper < env->me_dbgeo.upper) { + /* Windows is unable shrinking a mapped file */ + return ERROR_USER_MAPPED_FILE; + } +#endif /* Windows */ + } else { + /* env NOT yet mapped */ + if (!outside_txn) { + rc = MDBX_PANIC; + goto bailout; + } + + if (pagesize < 0) { + pagesize = env->me_os_psize; + if (pagesize > MAX_PAGESIZE) + pagesize = MAX_PAGESIZE; + mdbx_assert(env, pagesize >= MIN_PAGESIZE); + } + } + + if (pagesize < MIN_PAGESIZE || pagesize > MAX_PAGESIZE || + !mdbx_is_power2(pagesize)) { + rc = MDBX_EINVAL; + goto bailout; + } + + if (size_lower < 0) { + size_lower = MIN_MAPSIZE; + if (MIN_MAPSIZE / pagesize < MIN_PAGENO) + size_lower = MIN_PAGENO * pagesize; + } + + if (size_now < 0) { + size_now = DEFAULT_MAPSIZE; + if (size_now < size_lower) + size_now = size_lower; + } + + if (size_upper < 0) { + if ((size_t)size_now >= MAX_MAPSIZE / 2) + size_upper = MAX_MAPSIZE; + else if (MAX_MAPSIZE != MAX_MAPSIZE32 && + (size_t)size_now >= MAX_MAPSIZE32 / 2) + size_upper = MAX_MAPSIZE32; + else { + size_upper = size_now + size_now; + if ((size_t)size_upper < DEFAULT_MAPSIZE * 2) + size_upper = DEFAULT_MAPSIZE * 2; + } + if ((size_t)size_upper / pagesize > MAX_PAGENO) + size_upper = pagesize * MAX_PAGENO; + } + + if (unlikely(size_lower < MIN_MAPSIZE || size_lower > size_upper)) { + rc = MDBX_EINVAL; + goto bailout; + } + + if ((uint64_t)size_lower / pagesize < MIN_PAGENO) { + rc = MDBX_EINVAL; + goto bailout; + } + + if (unlikely((size_t)size_upper > MAX_MAPSIZE || + (uint64_t)size_upper / pagesize > MAX_PAGENO)) { + rc = MDBX_TOO_LARGE; + goto bailout; + } + + size_lower = 
mdbx_roundup2(size_lower, env->me_os_psize); + size_upper = mdbx_roundup2(size_upper, env->me_os_psize); + size_now = mdbx_roundup2(size_now, env->me_os_psize); + + /* LY: pick a size_upper value that is: + * - a multiple of the system page size + * - without breaking MAX_MAPSIZE or MAX_PAGENO */ + while (unlikely((size_t)size_upper > MAX_MAPSIZE || + (uint64_t)size_upper / pagesize > MAX_PAGENO)) { + if ((size_t)size_upper < env->me_os_psize + MIN_MAPSIZE || + (size_t)size_upper < env->me_os_psize * (MIN_PAGENO + 1)) { + /* paranoia in case of overflow with improbable values */ + rc = MDBX_EINVAL; + goto bailout; + } + size_upper -= env->me_os_psize; + if ((size_t)size_upper > (size_t)size_lower) + size_lower = size_upper; + } + mdbx_assert(env, (size_upper - size_lower) % env->me_os_psize == 0); + + if (size_now < size_lower) + size_now = size_lower; + if (size_now > size_upper) + size_now = size_upper; + + if (growth_step < 0) { + growth_step = ((size_t)(size_upper - size_lower)) / 42; + if (growth_step > size_lower) + growth_step = size_lower; + if (growth_step < 65536) + growth_step = 65536; + if ((size_t)growth_step > MEGABYTE * 16) + growth_step = MEGABYTE * 16; + } + growth_step = mdbx_roundup2(growth_step, env->me_os_psize); + if (bytes2pgno(env, growth_step) > UINT16_MAX) + growth_step = pgno2bytes(env, UINT16_MAX); + + if (shrink_threshold < 0) { + shrink_threshold = growth_step + growth_step; + if (shrink_threshold < growth_step) + shrink_threshold = growth_step; + } + shrink_threshold = mdbx_roundup2(shrink_threshold, env->me_os_psize); + if (bytes2pgno(env, shrink_threshold) > UINT16_MAX) + shrink_threshold = pgno2bytes(env, UINT16_MAX); + + /* save user's geo-params for future open/create */ + env->me_dbgeo.lower = size_lower; + env->me_dbgeo.now = size_now; + env->me_dbgeo.upper = size_upper; + env->me_dbgeo.grow = growth_step; + env->me_dbgeo.shrink = shrink_threshold; + rc = MDBX_SUCCESS; + + if (env->me_map) { + /* apply new params */ + mdbx_assert(env, pagesize == (intptr_t)env->me_psize); + + MDBX_meta *head = mdbx_meta_head(env); + MDBX_meta meta = *head; + meta.mm_geo.lower = bytes2pgno(env, env->me_dbgeo.lower); + meta.mm_geo.now = bytes2pgno(env, env->me_dbgeo.now); + meta.mm_geo.upper = bytes2pgno(env, env->me_dbgeo.upper); + meta.mm_geo.grow = (uint16_t)bytes2pgno(env, env->me_dbgeo.grow); + meta.mm_geo.shrink = (uint16_t)bytes2pgno(env, env->me_dbgeo.shrink); + + mdbx_assert(env, env->me_dbgeo.lower >= MIN_MAPSIZE); + mdbx_assert(env, meta.mm_geo.lower >= MIN_PAGENO); + mdbx_assert(env, env->me_dbgeo.upper <= MAX_MAPSIZE); + mdbx_assert(env, meta.mm_geo.upper <= MAX_PAGENO); + mdbx_assert(env, meta.mm_geo.now >= meta.mm_geo.next); + mdbx_assert(env, env->me_dbgeo.upper >= env->me_dbgeo.lower); + mdbx_assert(env, meta.mm_geo.upper >= meta.mm_geo.now); + mdbx_assert(env, meta.mm_geo.now >= meta.mm_geo.lower); + mdbx_assert(env, meta.mm_geo.grow == bytes2pgno(env, env->me_dbgeo.grow)); + mdbx_assert(env, + meta.mm_geo.shrink == bytes2pgno(env, env->me_dbgeo.shrink)); + + if (memcmp(&meta.mm_geo, &head->mm_geo, sizeof(meta.mm_geo))) { + if (meta.mm_geo.now != head->mm_geo.now || + meta.mm_geo.upper != head->mm_geo.upper) { + rc = mdbx_mapresize(env, meta.mm_geo.now, meta.mm_geo.upper); + if (unlikely(rc != MDBX_SUCCESS)) + goto bailout; + } + mdbx_meta_set_txnid(env, &meta, mdbx_meta_txnid_stable(env, head) + 1); + rc = mdbx_sync_locked(env, env->me_flags, &meta); + } + } else if (pagesize != (intptr_t)env->me_psize) { + mdbx_setup_pagesize(env, pagesize); + } + 
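As a rough usage sketch (the handle name and the sizes below are illustrative assumptions, not values taken from this commit), the negative-parameter handling above means a caller can pass -1 for any argument of mdbx_env_set_geometry() to keep the current or default value:

    MDBX_env *db = NULL;
    int err = mdbx_env_create(&db);
    if (err == MDBX_SUCCESS)
      /* lower 1 MiB, now 16 MiB, upper 512 MiB; -1 keeps the defaults
       * for growth step, shrink threshold and page size */
      err = mdbx_env_set_geometry(db, 1u << 20, 16u << 20, 512u << 20, -1, -1, -1);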
+bailout: + if (env->me_map && outside_txn) + mdbx_txn_unlock(env); + return rc; +} + +int __cold mdbx_env_set_mapsize(MDBX_env *env, size_t size) { + return mdbx_env_set_geometry(env, -1, size, -1, -1, -1, -1); +} + +int __cold mdbx_env_set_maxdbs(MDBX_env *env, MDBX_dbi dbs) { + if (unlikely(dbs > MAX_DBI)) + return MDBX_EINVAL; + + if (unlikely(!env)) + return MDBX_EINVAL; + + if (unlikely(env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(env->me_map)) + return MDBX_EPERM; + + env->me_maxdbs = dbs + CORE_DBS; + return MDBX_SUCCESS; +} + +int __cold mdbx_env_set_maxreaders(MDBX_env *env, unsigned readers) { + if (unlikely(readers < 1 || readers > INT16_MAX)) + return MDBX_EINVAL; + + if (unlikely(!env)) + return MDBX_EINVAL; + + if (unlikely(env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(env->me_map)) + return MDBX_EPERM; + + env->me_maxreaders = readers; + return MDBX_SUCCESS; +} + +int __cold mdbx_env_get_maxreaders(MDBX_env *env, unsigned *readers) { + if (!env || !readers) + return MDBX_EINVAL; + + if (unlikely(env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EBADSIGN; + + *readers = env->me_maxreaders; + return MDBX_SUCCESS; +} + +/* Further setup required for opening an MDBX environment */ +static int __cold mdbx_setup_dxb(MDBX_env *env, int lck_rc) { + MDBX_meta meta; + int rc = MDBX_RESULT_FALSE; + int err = mdbx_read_header(env, &meta); + if (unlikely(err != MDBX_SUCCESS)) { + if (lck_rc != /* lck exclusive */ MDBX_RESULT_TRUE || err != MDBX_ENODATA || + (env->me_flags & MDBX_RDONLY) != 0) + return err; + + mdbx_debug("create new database"); + rc = /* new database */ MDBX_RESULT_TRUE; + + if (!env->me_dbgeo.now) { + /* set defaults if not configured */ + err = mdbx_env_set_mapsize(env, DEFAULT_MAPSIZE); + if (unlikely(err != MDBX_SUCCESS)) + return err; + } + + void *buffer = calloc(NUM_METAS, env->me_psize); + if (!buffer) + return MDBX_ENOMEM; + + meta = mdbx_init_metas(env, buffer)->mp_meta; + err = mdbx_pwrite(env->me_fd, buffer, env->me_psize * NUM_METAS, 0); + free(buffer); + if (unlikely(err != MDBX_SUCCESS)) + return err; + + err = mdbx_ftruncate(env->me_fd, env->me_dbgeo.now); + if (unlikely(err != MDBX_SUCCESS)) + return err; + +#ifndef NDEBUG /* just for checking */ + err = mdbx_read_header(env, &meta); + if (unlikely(err != MDBX_SUCCESS)) + return err; +#endif + } + + mdbx_info("header: root %" PRIaPGNO "/%" PRIaPGNO ", geo %" PRIaPGNO + "/%" PRIaPGNO "-%" PRIaPGNO "/%" PRIaPGNO + " +%u -%u, txn_id %" PRIaTXN ", %s", + meta.mm_dbs[MAIN_DBI].md_root, meta.mm_dbs[FREE_DBI].md_root, + meta.mm_geo.lower, meta.mm_geo.next, meta.mm_geo.now, + meta.mm_geo.upper, meta.mm_geo.grow, meta.mm_geo.shrink, + meta.mm_txnid_a, mdbx_durable_str(&meta)); + + mdbx_setup_pagesize(env, meta.mm_psize); + if ((env->me_flags & MDBX_RDONLY) /* readonly */ + || lck_rc != MDBX_RESULT_TRUE /* not exclusive */) { + /* use present params from db */ + err = mdbx_env_set_geometry( + env, meta.mm_geo.lower * meta.mm_psize, meta.mm_geo.now * meta.mm_psize, + meta.mm_geo.upper * meta.mm_psize, meta.mm_geo.grow * meta.mm_psize, + meta.mm_geo.shrink * meta.mm_psize, meta.mm_psize); + if (unlikely(err != MDBX_SUCCESS)) { + mdbx_error("could not use present dbsize-params from db"); + return MDBX_INCOMPATIBLE; + } + } else if (env->me_dbgeo.now) { + /* silently growth to last used page */ + const size_t used_bytes = pgno2bytes(env, meta.mm_geo.next); + if (env->me_dbgeo.lower < used_bytes) + env->me_dbgeo.lower = used_bytes; + if 
(env->me_dbgeo.now < used_bytes) + env->me_dbgeo.now = used_bytes; + if (env->me_dbgeo.upper < used_bytes) + env->me_dbgeo.upper = used_bytes; + + /* apply preconfigured params, but only if substantial changes: + * - upper or lower limit changes + * - shrink threshold or growth step + * But ignore a change of just the 'now/current' size. */ + if (bytes_align2os_bytes(env, env->me_dbgeo.upper) != + pgno_align2os_bytes(env, meta.mm_geo.upper) || + bytes_align2os_bytes(env, env->me_dbgeo.lower) != + pgno_align2os_bytes(env, meta.mm_geo.lower) || + bytes_align2os_bytes(env, env->me_dbgeo.shrink) != + pgno_align2os_bytes(env, meta.mm_geo.shrink) || + bytes_align2os_bytes(env, env->me_dbgeo.grow) != + pgno_align2os_bytes(env, meta.mm_geo.grow)) { + + if (env->me_dbgeo.shrink && env->me_dbgeo.now > used_bytes) + /* pre-shrink if enabled */ + env->me_dbgeo.now = used_bytes + env->me_dbgeo.shrink - + used_bytes % env->me_dbgeo.shrink; + + err = mdbx_env_set_geometry(env, env->me_dbgeo.lower, env->me_dbgeo.now, + env->me_dbgeo.upper, env->me_dbgeo.grow, + env->me_dbgeo.shrink, meta.mm_psize); + if (unlikely(err != MDBX_SUCCESS)) { + mdbx_error("could not apply preconfigured dbsize-params to db"); + return MDBX_INCOMPATIBLE; + } + + /* update meta fields */ + meta.mm_geo.now = bytes2pgno(env, env->me_dbgeo.now); + meta.mm_geo.lower = bytes2pgno(env, env->me_dbgeo.lower); + meta.mm_geo.upper = bytes2pgno(env, env->me_dbgeo.upper); + meta.mm_geo.grow = (uint16_t)bytes2pgno(env, env->me_dbgeo.grow); + meta.mm_geo.shrink = (uint16_t)bytes2pgno(env, env->me_dbgeo.shrink); + + mdbx_info("amended: root %" PRIaPGNO "/%" PRIaPGNO ", geo %" PRIaPGNO + "/%" PRIaPGNO "-%" PRIaPGNO "/%" PRIaPGNO + " +%u -%u, txn_id %" PRIaTXN ", %s", + meta.mm_dbs[MAIN_DBI].md_root, meta.mm_dbs[FREE_DBI].md_root, + meta.mm_geo.lower, meta.mm_geo.next, meta.mm_geo.now, + meta.mm_geo.upper, meta.mm_geo.grow, meta.mm_geo.shrink, + meta.mm_txnid_a, mdbx_durable_str(&meta)); + } + mdbx_ensure(env, meta.mm_geo.now >= meta.mm_geo.next); + } else { + /* geo-params not pre-configured by user, + * get current values from a meta. 
*/ + env->me_dbgeo.now = pgno2bytes(env, meta.mm_geo.now); + env->me_dbgeo.lower = pgno2bytes(env, meta.mm_geo.lower); + env->me_dbgeo.upper = pgno2bytes(env, meta.mm_geo.upper); + env->me_dbgeo.grow = pgno2bytes(env, meta.mm_geo.grow); + env->me_dbgeo.shrink = pgno2bytes(env, meta.mm_geo.shrink); + } + + uint64_t filesize_before_mmap; + err = mdbx_filesize(env->me_fd, &filesize_before_mmap); + if (unlikely(err != MDBX_SUCCESS)) + return err; + + const size_t expected_bytes = + mdbx_roundup2(pgno2bytes(env, meta.mm_geo.now), env->me_os_psize); + const size_t used_bytes = pgno2bytes(env, meta.mm_geo.next); + mdbx_ensure(env, expected_bytes >= used_bytes); + if (filesize_before_mmap != expected_bytes) { + if (lck_rc != /* lck exclusive */ MDBX_RESULT_TRUE) { + mdbx_info("filesize mismatch (expect %" PRIuPTR "/%" PRIaPGNO + ", have %" PRIu64 "/%" PRIaPGNO "), " + "assume collision in non-exclusive mode", + expected_bytes, bytes2pgno(env, expected_bytes), + filesize_before_mmap, + bytes2pgno(env, (size_t)filesize_before_mmap)); + } else { + mdbx_notice("filesize mismatch (expect %" PRIuPTR "/%" PRIaPGNO + ", have %" PRIu64 "/%" PRIaPGNO ")", + expected_bytes, bytes2pgno(env, expected_bytes), + filesize_before_mmap, + bytes2pgno(env, (size_t)filesize_before_mmap)); + if (filesize_before_mmap < used_bytes) { + mdbx_error("last-page beyond end-of-file (last %" PRIaPGNO + ", have %" PRIaPGNO ")", + meta.mm_geo.next, + bytes2pgno(env, (size_t)filesize_before_mmap)); + return MDBX_CORRUPTED; + } + + if (env->me_flags & MDBX_RDONLY) { + mdbx_notice("ignore filesize mismatch in readonly-mode"); + } else { + mdbx_info("resize datafile to %" PRIu64 " bytes, %" PRIaPGNO " pages", + expected_bytes, bytes2pgno(env, expected_bytes)); + err = mdbx_ftruncate(env->me_fd, expected_bytes); + if (unlikely(err != MDBX_SUCCESS)) { + mdbx_error("error %d, while resize datafile to %" PRIu64 + " bytes, %" PRIaPGNO " pages", + rc, expected_bytes, bytes2pgno(env, expected_bytes)); + return err; + } + filesize_before_mmap = expected_bytes; + } + } + } + + err = mdbx_env_map(env, (lck_rc != /* lck exclusive */ MDBX_RESULT_TRUE) + ? 
0 + : expected_bytes); + if (err != MDBX_SUCCESS) + return err; + + const unsigned meta_clash_mask = mdbx_meta_eq_mask(env); + if (meta_clash_mask) { + mdbx_error("meta-pages are clashed: mask 0x%d", meta_clash_mask); + return MDBX_WANNA_RECOVERY; + } + + while (1) { + MDBX_meta *head = mdbx_meta_head(env); + const txnid_t head_txnid = mdbx_meta_txnid_fluid(env, head); + if (head_txnid == meta.mm_txnid_a) + break; + + if (lck_rc == /* lck exclusive */ MDBX_RESULT_TRUE) { + assert(META_IS_STEADY(&meta) && !META_IS_STEADY(head)); + if (env->me_flags & MDBX_RDONLY) { + mdbx_error("rollback needed: (from head %" PRIaTXN + " to steady %" PRIaTXN "), but unable in read-only mode", + head_txnid, meta.mm_txnid_a); + return MDBX_WANNA_RECOVERY /* LY: could not recover/rollback */; + } + + /* LY: rollback weak checkpoint */ + mdbx_trace("rollback: from %" PRIaTXN ", to %" PRIaTXN, head_txnid, + meta.mm_txnid_a); + mdbx_ensure(env, head_txnid == mdbx_meta_txnid_stable(env, head)); + + if (env->me_flags & MDBX_WRITEMAP) { + head->mm_txnid_a = 0; + head->mm_datasync_sign = MDBX_DATASIGN_WEAK; + head->mm_txnid_b = 0; + const size_t offset = + ((uint8_t *)container_of(head, MDBX_page, mp_meta)) - + env->me_dxb_mmap.dxb; + const size_t paged_offset = offset & ~(env->me_os_psize - 1); + const size_t paged_length = mdbx_roundup2( + env->me_psize + offset - paged_offset, env->me_os_psize); + err = mdbx_msync(&env->me_dxb_mmap, paged_offset, paged_length, false); + } else { + MDBX_meta rollback = *head; + mdbx_meta_set_txnid(env, &rollback, 0); + rollback.mm_datasync_sign = MDBX_DATASIGN_WEAK; + err = mdbx_pwrite(env->me_fd, &rollback, sizeof(MDBX_meta), + (uint8_t *)head - (uint8_t *)env->me_map); + } + if (err) + return err; + + mdbx_invalidate_cache(env->me_map, pgno2bytes(env, NUM_METAS)); + mdbx_ensure(env, 0 == mdbx_meta_txnid_fluid(env, head)); + mdbx_ensure(env, 0 == mdbx_meta_eq_mask(env)); + continue; + } + + if (!env->me_lck) { + /* LY: without-lck (read-only) mode, so it is impossible that another + * process made a weak checkpoint. 
*/ + mdbx_error("without-lck, unable recovery/rollback"); + return MDBX_WANNA_RECOVERY; + } + + /* LY: assume just have a collision with other running process, + * or someone make a weak checkpoint */ + mdbx_info("assume collision or online weak checkpoint"); + break; + } + + const MDBX_meta *head = mdbx_meta_head(env); + if (lck_rc == /* lck exclusive */ MDBX_RESULT_TRUE) { + /* re-check file size after mmap */ + uint64_t filesize_after_mmap; + err = mdbx_filesize(env->me_fd, &filesize_after_mmap); + if (unlikely(err != MDBX_SUCCESS)) + return err; + if (filesize_after_mmap != expected_bytes) { + if (filesize_after_mmap != filesize_before_mmap) + mdbx_info("datafile resized by system to %" PRIu64 " bytes", + filesize_after_mmap); + if (filesize_after_mmap % env->me_os_psize || + filesize_after_mmap > env->me_dbgeo.upper || + filesize_after_mmap < used_bytes) { + mdbx_info("unacceptable/unexpected datafile size %" PRIu64, + filesize_after_mmap); + return MDBX_PROBLEM; + } + if ((env->me_flags & MDBX_RDONLY) == 0) { + meta.mm_geo.now = + bytes2pgno(env, env->me_dbgeo.now = (size_t)filesize_after_mmap); + mdbx_info("update meta-geo to filesize %" PRIuPTR " bytes, %" PRIaPGNO + " pages", + env->me_dbgeo.now, meta.mm_geo.now); + } + } + + if (memcmp(&meta.mm_geo, &head->mm_geo, sizeof(meta.mm_geo))) { + const txnid_t txnid = mdbx_meta_txnid_stable(env, head); + mdbx_info("updating meta.geo: " + "from l%" PRIaPGNO "-n%" PRIaPGNO "-u%" PRIaPGNO + "/s%u-g%u (txn#%" PRIaTXN "), " + "to l%" PRIaPGNO "-n%" PRIaPGNO "-u%" PRIaPGNO + "/s%u-g%u (txn#%" PRIaTXN ")", + head->mm_geo.lower, head->mm_geo.now, head->mm_geo.upper, + head->mm_geo.shrink, head->mm_geo.grow, txnid, + meta.mm_geo.lower, meta.mm_geo.now, meta.mm_geo.upper, + meta.mm_geo.shrink, meta.mm_geo.grow, txnid + 1); + + mdbx_ensure(env, mdbx_meta_eq(env, &meta, head)); + mdbx_meta_set_txnid(env, &meta, txnid + 1); + env->me_sync_pending += env->me_psize; + err = mdbx_sync_locked(env, env->me_flags | MDBX_SHRINK_ALLOWED, &meta); + if (err) { + mdbx_info("error %d, while updating meta.geo: " + "from l%" PRIaPGNO "-n%" PRIaPGNO "-u%" PRIaPGNO + "/s%u-g%u (txn#%" PRIaTXN "), " + "to l%" PRIaPGNO "-n%" PRIaPGNO "-u%" PRIaPGNO + "/s%u-g%u (txn#%" PRIaTXN ")", + err, head->mm_geo.lower, head->mm_geo.now, head->mm_geo.upper, + head->mm_geo.shrink, head->mm_geo.grow, txnid, + meta.mm_geo.lower, meta.mm_geo.now, meta.mm_geo.upper, + meta.mm_geo.shrink, meta.mm_geo.grow, txnid + 1); + return err; + } + } + } + + return rc; +} + +/****************************************************************************/ + +/* Open and/or initialize the lock region for the environment. */ +static int __cold mdbx_setup_lck(MDBX_env *env, char *lck_pathname, + mode_t mode) { + assert(env->me_fd != INVALID_HANDLE_VALUE); + assert(env->me_lfd == INVALID_HANDLE_VALUE); + + int err = mdbx_openfile(lck_pathname, O_RDWR | O_CREAT, mode, &env->me_lfd); + if (err != MDBX_SUCCESS) { + if (err != MDBX_EROFS || (env->me_flags & MDBX_RDONLY) == 0) + return err; + /* LY: without-lck mode (e.g. on read-only filesystem) */ + env->me_lfd = INVALID_HANDLE_VALUE; + env->me_oldest = &env->me_oldest_stub; + env->me_maxreaders = UINT_MAX; + mdbx_debug("lck-setup: %s ", "lockless mode (readonly)"); + return MDBX_SUCCESS; + } + + /* Try to get exclusive lock. If we succeed, then + * nobody is using the lock region and we should initialize it. */ + const int rc = mdbx_lck_seize(env); + if (MDBX_IS_ERROR(rc)) + return rc; + + mdbx_debug("lck-setup: %s ", + (rc == MDBX_RESULT_TRUE) ? 
"exclusive" : "shared"); + + uint64_t size; + err = mdbx_filesize(env->me_lfd, &size); + if (unlikely(err != MDBX_SUCCESS)) + return err; + + if (rc == MDBX_RESULT_TRUE) { + uint64_t wanna = mdbx_roundup2( + (env->me_maxreaders - 1) * sizeof(MDBX_reader) + sizeof(MDBX_lockinfo), + env->me_os_psize); +#ifndef NDEBUG + err = mdbx_ftruncate(env->me_lfd, size = 0); + if (unlikely(err != MDBX_SUCCESS)) + return err; +#endif + mdbx_jitter4testing(false); + + if (size != wanna) { + err = mdbx_ftruncate(env->me_lfd, wanna); + if (unlikely(err != MDBX_SUCCESS)) + return err; + size = wanna; + } + } else if (size > SSIZE_MAX || (size & (env->me_os_psize - 1)) || + size < env->me_os_psize) { + mdbx_notice("lck-file has invalid size %" PRIu64 " bytes", size); + return MDBX_PROBLEM; + } + + const size_t maxreaders = + ((size_t)size - sizeof(MDBX_lockinfo)) / sizeof(MDBX_reader) + 1; + if (maxreaders > UINT16_MAX) { + mdbx_error("lck-size too big (up to %" PRIuPTR " readers)", maxreaders); + return MDBX_PROBLEM; + } + env->me_maxreaders = (unsigned)maxreaders; + + err = mdbx_mmap(MDBX_WRITEMAP, &env->me_lck_mmap, (size_t)size, (size_t)size); + if (unlikely(err != MDBX_SUCCESS)) + return err; + +#ifdef MADV_DODUMP + (void)madvise(env->me_lck, size, MADV_DODUMP); +#endif + +#ifdef MADV_DONTFORK + if (madvise(env->me_lck, size, MADV_DONTFORK) < 0) + return errno; +#endif + +#ifdef MADV_WILLNEED + if (madvise(env->me_lck, size, MADV_WILLNEED) < 0) + return errno; +#endif + +#ifdef MADV_RANDOM + if (madvise(env->me_lck, size, MADV_RANDOM) < 0) + return errno; +#endif + + if (rc == MDBX_RESULT_TRUE) { + /* LY: exlcusive mode, init lck */ + memset(env->me_lck, 0, (size_t)size); + err = mdbx_lck_init(env); + if (err) + return err; + + env->me_lck->mti_magic_and_version = MDBX_LOCK_MAGIC; + env->me_lck->mti_os_and_format = MDBX_LOCK_FORMAT; + } else { + if (env->me_lck->mti_magic_and_version != MDBX_LOCK_MAGIC) { + mdbx_error("lock region has invalid magic/version"); + return ((env->me_lck->mti_magic_and_version >> 8) != MDBX_MAGIC) + ? MDBX_INVALID + : MDBX_VERSION_MISMATCH; + } + if (env->me_lck->mti_os_and_format != MDBX_LOCK_FORMAT) { + mdbx_error("lock region has os/format 0x%" PRIx32 ", expected 0x%" PRIx32, + env->me_lck->mti_os_and_format, MDBX_LOCK_FORMAT); + return MDBX_VERSION_MISMATCH; + } + } + + mdbx_assert(env, !MDBX_IS_ERROR(rc)); + env->me_oldest = &env->me_lck->mti_oldest; + return rc; +} + +/* Only a subset of the mdbx_env flags can be changed + * at runtime. Changing other flags requires closing the + * environment and re-opening it with the new flags. 
*/ +#define CHANGEABLE \ + (MDBX_NOSYNC | MDBX_NOMETASYNC | MDBX_MAPASYNC | MDBX_NOMEMINIT | \ + MDBX_COALESCE | MDBX_PAGEPERTURB) +#define CHANGELESS \ + (MDBX_NOSUBDIR | MDBX_RDONLY | MDBX_WRITEMAP | MDBX_NOTLS | MDBX_NORDAHEAD | \ + MDBX_LIFORECLAIM) + +#if VALID_FLAGS & PERSISTENT_FLAGS & (CHANGEABLE | CHANGELESS) +#error "Persistent DB flags & env flags overlap, but both go in mm_flags" +#endif + +int __cold mdbx_env_open_ex(MDBX_env *env, const char *path, unsigned flags, + mode_t mode, int *exclusive) { + if (unlikely(!env || !path)) + return MDBX_EINVAL; + + if (unlikely(env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EBADSIGN; + + if (env->me_fd != INVALID_HANDLE_VALUE || + (flags & ~(CHANGEABLE | CHANGELESS))) + return MDBX_EINVAL; + + size_t len_full, len = strlen(path); + if (flags & MDBX_NOSUBDIR) { + len_full = len + sizeof(MDBX_LOCK_SUFFIX) + len + 1; + } else { + len_full = len + sizeof(MDBX_LOCKNAME) + len + sizeof(MDBX_DATANAME); + } + char *lck_pathname = malloc(len_full); + if (!lck_pathname) + return MDBX_ENOMEM; + + char *dxb_pathname; + if (flags & MDBX_NOSUBDIR) { + dxb_pathname = lck_pathname + len + sizeof(MDBX_LOCK_SUFFIX); + sprintf(lck_pathname, "%s" MDBX_LOCK_SUFFIX, path); + strcpy(dxb_pathname, path); + } else { + dxb_pathname = lck_pathname + len + sizeof(MDBX_LOCKNAME); + sprintf(lck_pathname, "%s" MDBX_LOCKNAME, path); + sprintf(dxb_pathname, "%s" MDBX_DATANAME, path); + } + + int rc = MDBX_SUCCESS; + flags |= env->me_flags; + if (flags & MDBX_RDONLY) { + /* LY: silently ignore irrelevant flags when + * we're only getting read access */ + flags &= ~(MDBX_WRITEMAP | MDBX_MAPASYNC | MDBX_NOSYNC | MDBX_NOMETASYNC | + MDBX_COALESCE | MDBX_LIFORECLAIM | MDBX_NOMEMINIT); + } else { + if (!((env->me_free_pgs = mdbx_pnl_alloc(MDBX_PNL_UM_MAX)) && + (env->me_dirtylist = calloc(MDBX_PNL_UM_SIZE, sizeof(MDBX_ID2))))) + rc = MDBX_ENOMEM; + } + env->me_flags = flags |= MDBX_ENV_ACTIVE; + if (rc) + goto bailout; + + env->me_path = mdbx_strdup(path); + env->me_dbxs = calloc(env->me_maxdbs, sizeof(MDBX_dbx)); + env->me_dbflags = calloc(env->me_maxdbs, sizeof(uint16_t)); + env->me_dbiseqs = calloc(env->me_maxdbs, sizeof(unsigned)); + if (!(env->me_dbxs && env->me_path && env->me_dbflags && env->me_dbiseqs)) { + rc = MDBX_ENOMEM; + goto bailout; + } + env->me_dbxs[FREE_DBI].md_cmp = mdbx_cmp_int_ai; /* aligned MDBX_INTEGERKEY */ + + int oflags; + if (F_ISSET(flags, MDBX_RDONLY)) + oflags = O_RDONLY; + else + oflags = O_RDWR | O_CREAT; + + rc = mdbx_openfile(dxb_pathname, oflags, mode, &env->me_fd); + if (rc != MDBX_SUCCESS) + goto bailout; + + const int lck_rc = mdbx_setup_lck(env, lck_pathname, mode); + if (MDBX_IS_ERROR(lck_rc)) { + rc = lck_rc; + goto bailout; + } + + const int dxb_rc = mdbx_setup_dxb(env, lck_rc); + if (MDBX_IS_ERROR(dxb_rc)) { + rc = dxb_rc; + goto bailout; + } + + mdbx_debug("opened dbenv %p", (void *)env); + const unsigned mode_flags = + MDBX_WRITEMAP | MDBX_NOSYNC | MDBX_NOMETASYNC | MDBX_MAPASYNC; + if (lck_rc == MDBX_RESULT_TRUE) { + env->me_lck->mti_envmode = env->me_flags & (mode_flags | MDBX_RDONLY); + if (exclusive == NULL || *exclusive < 2) { + /* LY: downgrade lock only if exclusive access not requested. + * in case exclusive==1, just leave value as is. 
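To make the CHANGEABLE/CHANGELESS split concrete, a plausible open/close sequence could look like the sketch below (the path, file mode and flag choice are illustrative assumptions, not taken from this commit):

    /* MDBX_NOSUBDIR is a CHANGELESS flag fixed at open time,
     * MDBX_NOSYNC is a CHANGEABLE one */
    err = mdbx_env_open(db, "profile.dat", MDBX_NOSUBDIR | MDBX_NOSYNC, 0664);
    /* ... use the environment ... */
    mdbx_env_close(db); /* releases the handle created by mdbx_env_create() */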
*/ + rc = mdbx_lck_downgrade(env, true); + mdbx_debug("lck-downgrade-full: rc %i ", rc); + } else { + rc = mdbx_lck_downgrade(env, false); + mdbx_debug("lck-downgrade-partial: rc %i ", rc); + } + if (rc != MDBX_SUCCESS) + goto bailout; + } else { + if (exclusive) { + /* LY: just indicate that is not an exclusive access. */ + *exclusive = 0; + } + if ((env->me_flags & MDBX_RDONLY) == 0) { + while (env->me_lck->mti_envmode == MDBX_RDONLY) { + if (mdbx_atomic_compare_and_swap32(&env->me_lck->mti_envmode, + MDBX_RDONLY, + env->me_flags & mode_flags)) + break; + /* TODO: yield/relax cpu */ + } + if ((env->me_lck->mti_envmode ^ env->me_flags) & mode_flags) { + mdbx_error("current mode/flags incompatible with requested"); + rc = MDBX_INCOMPATIBLE; + goto bailout; + } + } + } + + if (env->me_lck && (env->me_flags & MDBX_NOTLS) == 0) { + rc = mdbx_rthc_alloc(&env->me_txkey, &env->me_lck->mti_readers[0], + &env->me_lck->mti_readers[env->me_maxreaders]); + if (unlikely(rc != MDBX_SUCCESS)) + goto bailout; + env->me_flags |= MDBX_ENV_TXKEY; + } + + if ((flags & MDBX_RDONLY) == 0) { + MDBX_txn *txn; + int tsize = sizeof(MDBX_txn), + size = tsize + + env->me_maxdbs * (sizeof(MDBX_db) + sizeof(MDBX_cursor *) + + sizeof(unsigned) + 1); + if ((env->me_pbuf = calloc(1, env->me_psize)) && (txn = calloc(1, size))) { + txn->mt_dbs = (MDBX_db *)((char *)txn + tsize); + txn->mt_cursors = (MDBX_cursor **)(txn->mt_dbs + env->me_maxdbs); + txn->mt_dbiseqs = (unsigned *)(txn->mt_cursors + env->me_maxdbs); + txn->mt_dbflags = (uint8_t *)(txn->mt_dbiseqs + env->me_maxdbs); + txn->mt_env = env; + txn->mt_dbxs = env->me_dbxs; + txn->mt_flags = MDBX_TXN_FINISHED; + env->me_txn0 = txn; + } else { + rc = MDBX_ENOMEM; + } + } + +#if MDBX_DEBUG + if (rc == MDBX_SUCCESS) { + MDBX_meta *meta = mdbx_meta_head(env); + MDBX_db *db = &meta->mm_dbs[MAIN_DBI]; + + mdbx_debug("opened database version %u, pagesize %u", + (uint8_t)meta->mm_magic_and_version, env->me_psize); + mdbx_debug("using meta page %" PRIaPGNO ", txn %" PRIaTXN "", + container_of(meta, MDBX_page, mp_data)->mp_pgno, + mdbx_meta_txnid_fluid(env, meta)); + mdbx_debug("depth: %u", db->md_depth); + mdbx_debug("entries: %" PRIu64 "", db->md_entries); + mdbx_debug("branch pages: %" PRIaPGNO "", db->md_branch_pages); + mdbx_debug("leaf pages: %" PRIaPGNO "", db->md_leaf_pages); + mdbx_debug("overflow pages: %" PRIaPGNO "", db->md_overflow_pages); + mdbx_debug("root: %" PRIaPGNO "", db->md_root); + } +#endif + +bailout: + if (rc) + mdbx_env_close0(env); + free(lck_pathname); + return rc; +} + +int __cold mdbx_env_open(MDBX_env *env, const char *path, unsigned flags, + mode_t mode) { + return mdbx_env_open_ex(env, path, flags, mode, NULL); +} + +/* Destroy resources from mdbx_env_open(), clear our readers & DBIs */ +static void __cold mdbx_env_close0(MDBX_env *env) { + if (!(env->me_flags & MDBX_ENV_ACTIVE)) + return; + env->me_flags &= ~MDBX_ENV_ACTIVE; + + /* Doing this here since me_dbxs may not exist during mdbx_env_close */ + if (env->me_dbxs) { + for (unsigned i = env->me_maxdbs; --i >= CORE_DBS;) + free(env->me_dbxs[i].md_name.iov_base); + free(env->me_dbxs); + } + + free(env->me_pbuf); + free(env->me_dbiseqs); + free(env->me_dbflags); + free(env->me_path); + free(env->me_dirtylist); + if (env->me_txn0) { + mdbx_txl_free(env->me_txn0->mt_lifo_reclaimed); + free(env->me_txn0); + } + mdbx_pnl_free(env->me_free_pgs); + + if (env->me_flags & MDBX_ENV_TXKEY) { + mdbx_rthc_remove(env->me_txkey); + env->me_flags &= ~MDBX_ENV_TXKEY; + } + + if (env->me_map) { + 
mdbx_munmap(&env->me_dxb_mmap); +#ifdef USE_VALGRIND + VALGRIND_DISCARD(env->me_valgrind_handle); + env->me_valgrind_handle = -1; +#endif + } + if (env->me_fd != INVALID_HANDLE_VALUE) { + (void)mdbx_closefile(env->me_fd); + env->me_fd = INVALID_HANDLE_VALUE; + } + + if (env->me_lck) + mdbx_munmap(&env->me_lck_mmap); + env->me_pid = 0; + env->me_oldest = nullptr; + + mdbx_lck_destroy(env); + if (env->me_lfd != INVALID_HANDLE_VALUE) { + (void)mdbx_closefile(env->me_lfd); + env->me_lfd = INVALID_HANDLE_VALUE; + } +} + +int __cold mdbx_env_close_ex(MDBX_env *env, int dont_sync) { + MDBX_page *dp; + int rc = MDBX_SUCCESS; + + if (unlikely(!env)) + return MDBX_EINVAL; + if (unlikely(env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EBADSIGN; + + if (!dont_sync && !(env->me_flags & MDBX_RDONLY)) + rc = mdbx_env_sync(env, true); + + VALGRIND_DESTROY_MEMPOOL(env); + while ((dp = env->me_dpages) != NULL) { + ASAN_UNPOISON_MEMORY_REGION(&dp->mp_next, sizeof(dp->mp_next)); + VALGRIND_MAKE_MEM_DEFINED(&dp->mp_next, sizeof(dp->mp_next)); + env->me_dpages = dp->mp_next; + free(dp); + } + + mdbx_env_close0(env); + mdbx_ensure(env, mdbx_fastmutex_destroy(&env->me_dbi_lock) == MDBX_SUCCESS); + env->me_signature = 0; + free(env); + + return rc; +} + +void __cold mdbx_env_close(MDBX_env *env) { mdbx_env_close_ex(env, 0); } + +/* Compare two items pointing at aligned unsigned int's. */ +static int __hot mdbx_cmp_int_ai(const MDBX_val *a, const MDBX_val *b) { + mdbx_assert(NULL, a->iov_len == b->iov_len); + mdbx_assert(NULL, 0 == (uintptr_t)a->iov_base % sizeof(int) && + 0 == (uintptr_t)b->iov_base % sizeof(int)); + switch (a->iov_len) { + case 4: + return mdbx_cmp2int(*(uint32_t *)a->iov_base, *(uint32_t *)b->iov_base); + case 8: + return mdbx_cmp2int(*(uint64_t *)a->iov_base, *(uint64_t *)b->iov_base); + default: + mdbx_assert_fail(NULL, "invalid size for INTEGERKEY/INTEGERDUP", mdbx_func_, + __LINE__); + return 0; + } +} + +/* Compare two items pointing at 2-byte aligned unsigned int's. */ +static int __hot mdbx_cmp_int_a2(const MDBX_val *a, const MDBX_val *b) { + mdbx_assert(NULL, a->iov_len == b->iov_len); + mdbx_assert(NULL, 0 == (uintptr_t)a->iov_base % sizeof(uint16_t) && + 0 == (uintptr_t)b->iov_base % sizeof(uint16_t)); +#if UNALIGNED_OK + switch (a->iov_len) { + case 4: + return mdbx_cmp2int(*(uint32_t *)a->iov_base, *(uint32_t *)b->iov_base); + case 8: + return mdbx_cmp2int(*(uint64_t *)a->iov_base, *(uint64_t *)b->iov_base); + default: + mdbx_assert_fail(NULL, "invalid size for INTEGERKEY/INTEGERDUP", mdbx_func_, + __LINE__); + return 0; + } +#else + mdbx_assert(NULL, 0 == a->iov_len % sizeof(uint16_t)); + { + int diff; + const uint16_t *pa, *pb, *end; + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + end = (const uint16_t *)a->iov_base; + pa = (const uint16_t *)((char *)a->iov_base + a->iov_len); + pb = (const uint16_t *)((char *)b->iov_base + a->iov_len); + do { + diff = *--pa - *--pb; +#else /* __BYTE_ORDER__ */ + end = (const uint16_t *)((char *)a->iov_base + a->iov_len); + pa = (const uint16_t *)a->iov_base; + pb = (const uint16_t *)b->iov_base; + do { + diff = *pa++ - *pb++; +#endif /* __BYTE_ORDER__ */ + if (likely(diff != 0)) + break; + } while (pa != end); + return diff; + } +#endif /* UNALIGNED_OK */ +} + +/* Compare two items pointing at unsigneds of unknown alignment. + * + * This is also set as MDBX_INTEGERDUP|MDBX_DUPFIXED's MDBX_dbx.md_dcmp. 
*/ +static int __hot mdbx_cmp_int_ua(const MDBX_val *a, const MDBX_val *b) { + mdbx_assert(NULL, a->iov_len == b->iov_len); +#if UNALIGNED_OK + switch (a->iov_len) { + case 4: + return mdbx_cmp2int(*(uint32_t *)a->iov_base, *(uint32_t *)b->iov_base); + case 8: + return mdbx_cmp2int(*(uint64_t *)a->iov_base, *(uint64_t *)b->iov_base); + default: + mdbx_assert_fail(NULL, "invalid size for INTEGERKEY/INTEGERDUP", mdbx_func_, + __LINE__); + return 0; + } +#else + mdbx_assert(NULL, a->iov_len == sizeof(int) || a->iov_len == sizeof(size_t)); +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + { + int diff; + const uint8_t *pa, *pb; + + pa = (const uint8_t *)a->iov_base + a->iov_len; + pb = (const uint8_t *)b->iov_base + a->iov_len; + + do { + diff = *--pa - *--pb; + if (likely(diff != 0)) + break; + } while (pa != a->iov_base); + return diff; + } +#else /* __BYTE_ORDER__ */ + return memcmp(a->iov_base, b->iov_base, a->iov_len); +#endif /* __BYTE_ORDER__ */ +#endif /* UNALIGNED_OK */ +} + +/* Compare two items lexically */ +static int __hot mdbx_cmp_memn(const MDBX_val *a, const MDBX_val *b) { +/* LY: assumes that length of keys are NOT equal for most cases, + * if no then branch-prediction should mitigate the problem */ +#if 0 + /* LY: without branch instructions on x86, + * but isn't best for equal length of keys */ + int diff_len = mdbx_cmp2int(a->iov_len, b->iov_len); +#else + /* LY: best when length of keys are equal, + * but got a branch-penalty otherwise */ + if (likely(a->iov_len == b->iov_len)) + return memcmp(a->iov_base, b->iov_base, a->iov_len); + int diff_len = (a->iov_len < b->iov_len) ? -1 : 1; +#endif + size_t shortest = (a->iov_len < b->iov_len) ? a->iov_len : b->iov_len; + int diff_data = memcmp(a->iov_base, b->iov_base, shortest); + return likely(diff_data) ? diff_data : diff_len; +} + +/* Compare two items in reverse byte order */ +static int __hot mdbx_cmp_memnr(const MDBX_val *a, const MDBX_val *b) { + const uint8_t *pa, *pb, *end; + + pa = (const uint8_t *)a->iov_base + a->iov_len; + pb = (const uint8_t *)b->iov_base + b->iov_len; + size_t minlen = (a->iov_len < b->iov_len) ? a->iov_len : b->iov_len; + end = pa - minlen; + + while (pa != end) { + int diff = *--pa - *--pb; + if (likely(diff)) + return diff; + } + return mdbx_cmp2int(a->iov_len, b->iov_len); +} + +/* Search for key within a page, using binary search. + * Returns the smallest entry larger or equal to the key. + * If exactp is non-null, stores whether the found entry was an exact match + * in *exactp (1 or 0). + * Updates the cursor index with the index of the found entry. + * If no entry larger or equal to the key is found, returns NULL. */ +static MDBX_node *__hot mdbx_node_search(MDBX_cursor *mc, MDBX_val *key, + int *exactp) { + unsigned i = 0, nkeys; + int low, high; + int rc = 0; + MDBX_page *mp = mc->mc_pg[mc->mc_top]; + MDBX_node *node = NULL; + MDBX_val nodekey; + MDBX_cmp_func *cmp; + DKBUF; + + nkeys = NUMKEYS(mp); + + mdbx_debug("searching %u keys in %s %spage %" PRIaPGNO "", nkeys, + IS_LEAF(mp) ? "leaf" : "branch", IS_SUBP(mp) ? "sub-" : "", + mp->mp_pgno); + + low = IS_LEAF(mp) ? 0 : 1; + high = nkeys - 1; + cmp = mc->mc_dbx->md_cmp; + + /* Branch pages have no data, so if using integer keys, + * alignment is guaranteed. Use faster mdbx_cmp_int_ai. 
+ */ + if (cmp == mdbx_cmp_int_a2 && IS_BRANCH(mp)) + cmp = mdbx_cmp_int_ai; + + if (IS_LEAF2(mp)) { + nodekey.iov_len = mc->mc_db->md_xsize; + node = NODEPTR(mp, 0); /* fake */ + while (low <= high) { + i = (low + high) >> 1; + nodekey.iov_base = LEAF2KEY(mp, i, nodekey.iov_len); + rc = cmp(key, &nodekey); + mdbx_debug("found leaf index %u [%s], rc = %i", i, DKEY(&nodekey), rc); + if (rc == 0) + break; + if (rc > 0) + low = i + 1; + else + high = i - 1; + } + } else { + while (low <= high) { + i = (low + high) >> 1; + + node = NODEPTR(mp, i); + nodekey.iov_len = NODEKSZ(node); + nodekey.iov_base = NODEKEY(node); + + rc = cmp(key, &nodekey); + if (IS_LEAF(mp)) + mdbx_debug("found leaf index %u [%s], rc = %i", i, DKEY(&nodekey), rc); + else + mdbx_debug("found branch index %u [%s -> %" PRIaPGNO "], rc = %i", i, + DKEY(&nodekey), NODEPGNO(node), rc); + if (rc == 0) + break; + if (rc > 0) + low = i + 1; + else + high = i - 1; + } + } + + if (rc > 0) /* Found entry is less than the key. */ + i++; /* Skip to get the smallest entry larger than key. */ + + if (exactp) + *exactp = (rc == 0 && nkeys > 0); + /* store the key index */ + mdbx_cassert(mc, i <= UINT16_MAX); + mc->mc_ki[mc->mc_top] = (indx_t)i; + if (i >= nkeys) + /* There is no entry larger or equal to the key. */ + return NULL; + + /* nodeptr is fake for LEAF2 */ + return IS_LEAF2(mp) ? node : NODEPTR(mp, i); +} + +#if 0 /* unused for now */ +static void mdbx_cursor_adjust(MDBX_cursor *mc, func) { + MDBX_cursor *m2; + + for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2 = m2->mc_next) { + if (m2->mc_pg[m2->mc_top] == mc->mc_pg[mc->mc_top]) { + func(mc, m2); + } + } +} +#endif + +/* Pop a page off the top of the cursor's stack. */ +static void mdbx_cursor_pop(MDBX_cursor *mc) { + if (mc->mc_snum) { + mdbx_debug("popped page %" PRIaPGNO " off db %d cursor %p", + mc->mc_pg[mc->mc_top]->mp_pgno, DDBI(mc), (void *)mc); + + mc->mc_snum--; + if (mc->mc_snum) { + mc->mc_top--; + } else { + mc->mc_flags &= ~C_INITIALIZED; + } + } +} + +/* Push a page onto the top of the cursor's stack. + * Set MDBX_TXN_ERROR on failure. */ +static int mdbx_cursor_push(MDBX_cursor *mc, MDBX_page *mp) { + mdbx_debug("pushing page %" PRIaPGNO " on db %d cursor %p", mp->mp_pgno, + DDBI(mc), (void *)mc); + + if (unlikely(mc->mc_snum >= CURSOR_STACK)) { + mc->mc_txn->mt_flags |= MDBX_TXN_ERROR; + return MDBX_CURSOR_FULL; + } + + mdbx_cassert(mc, mc->mc_snum < UINT16_MAX); + mc->mc_top = mc->mc_snum++; + mc->mc_pg[mc->mc_top] = mp; + mc->mc_ki[mc->mc_top] = 0; + + return MDBX_SUCCESS; +} + +/* Find the address of the page corresponding to a given page number. + * Set MDBX_TXN_ERROR on failure. + * + * [in] mc the cursor accessing the page. + * [in] pgno the page number for the page to retrieve. + * [out] ret address of a pointer where the page's address will be + * stored. + * [out] lvl dirtylist inheritance level of found page. 1=current txn, + * 0=mapped page. + * + * Returns 0 on success, non-zero on failure. */ +static int mdbx_page_get(MDBX_cursor *mc, pgno_t pgno, MDBX_page **ret, + int *lvl) { + MDBX_txn *txn = mc->mc_txn; + MDBX_env *env = txn->mt_env; + MDBX_page *p = NULL; + int level; + + if (!(txn->mt_flags & (MDBX_TXN_RDONLY | MDBX_TXN_WRITEMAP))) { + MDBX_txn *tx2 = txn; + level = 1; + do { + MDBX_ID2L dl = tx2->mt_rw_dirtylist; + unsigned x; + /* Spilled pages were dirtied in this txn and flushed + * because the dirty list got full. Bring this page + * back in from the map (but don't unspill it here, + * leave that unless page_touch happens again). 
*/ + if (tx2->mt_spill_pages) { + pgno_t pn = pgno << 1; + x = mdbx_pnl_search(tx2->mt_spill_pages, pn); + if (x <= tx2->mt_spill_pages[0] && tx2->mt_spill_pages[x] == pn) + goto mapped; + } + if (dl[0].mid) { + unsigned y = mdbx_mid2l_search(dl, pgno); + if (y <= dl[0].mid && dl[y].mid == pgno) { + p = dl[y].mptr; + goto done; + } + } + level++; + } while ((tx2 = tx2->mt_parent) != NULL); + } + + if (unlikely(pgno >= txn->mt_next_pgno)) { + mdbx_debug("page %" PRIaPGNO " not found", pgno); + txn->mt_flags |= MDBX_TXN_ERROR; + return MDBX_PAGE_NOTFOUND; + } + level = 0; + +mapped: + p = pgno2page(env, pgno); +/* TODO: check p->mp_validator here */ + +done: + *ret = p; + if (lvl) + *lvl = level; + return MDBX_SUCCESS; +} + +/* Finish mdbx_page_search() / mdbx_page_search_lowest(). + * The cursor is at the root page, set up the rest of it. */ +static int mdbx_page_search_root(MDBX_cursor *mc, MDBX_val *key, int flags) { + MDBX_page *mp = mc->mc_pg[mc->mc_top]; + int rc; + DKBUF; + + while (IS_BRANCH(mp)) { + MDBX_node *node; + indx_t i; + + mdbx_debug("branch page %" PRIaPGNO " has %u keys", mp->mp_pgno, + NUMKEYS(mp)); + /* Don't assert on branch pages in the FreeDB. We can get here + * while in the process of rebalancing a FreeDB branch page; we must + * let that proceed. ITS#8336 */ + mdbx_cassert(mc, !mc->mc_dbi || NUMKEYS(mp) > 1); + mdbx_debug("found index 0 to page %" PRIaPGNO "", NODEPGNO(NODEPTR(mp, 0))); + + if (flags & (MDBX_PS_FIRST | MDBX_PS_LAST)) { + i = 0; + if (flags & MDBX_PS_LAST) { + i = NUMKEYS(mp) - 1; + /* if already init'd, see if we're already in right place */ + if (mc->mc_flags & C_INITIALIZED) { + if (mc->mc_ki[mc->mc_top] == i) { + mc->mc_top = mc->mc_snum++; + mp = mc->mc_pg[mc->mc_top]; + goto ready; + } + } + } + } else { + int exact; + node = mdbx_node_search(mc, key, &exact); + if (node == NULL) + i = NUMKEYS(mp) - 1; + else { + i = mc->mc_ki[mc->mc_top]; + if (!exact) { + mdbx_cassert(mc, i > 0); + i--; + } + } + mdbx_debug("following index %u for key [%s]", i, DKEY(key)); + } + + mdbx_cassert(mc, i < NUMKEYS(mp)); + node = NODEPTR(mp, i); + + if (unlikely((rc = mdbx_page_get(mc, NODEPGNO(node), &mp, NULL)) != 0)) + return rc; + + mc->mc_ki[mc->mc_top] = i; + if (unlikely(rc = mdbx_cursor_push(mc, mp))) + return rc; + + ready: + if (flags & MDBX_PS_MODIFY) { + if (unlikely((rc = mdbx_page_touch(mc)) != 0)) + return rc; + mp = mc->mc_pg[mc->mc_top]; + } + } + + if (unlikely(!IS_LEAF(mp))) { + mdbx_debug("internal error, index points to a page with 0x%02x flags!?", + mp->mp_flags); + mc->mc_txn->mt_flags |= MDBX_TXN_ERROR; + return MDBX_CORRUPTED; + } + + mdbx_debug("found leaf page %" PRIaPGNO " for key [%s]", mp->mp_pgno, + DKEY(key)); + mc->mc_flags |= C_INITIALIZED; + mc->mc_flags &= ~C_EOF; + + return MDBX_SUCCESS; +} + +/* Search for the lowest key under the current branch page. + * This just bypasses a NUMKEYS check in the current page + * before calling mdbx_page_search_root(), because the callers + * are all in situations where the current page is known to + * be underfilled. */ +static int mdbx_page_search_lowest(MDBX_cursor *mc) { + MDBX_page *mp = mc->mc_pg[mc->mc_top]; + MDBX_node *node = NODEPTR(mp, 0); + int rc; + + if (unlikely((rc = mdbx_page_get(mc, NODEPGNO(node), &mp, NULL)) != 0)) + return rc; + + mc->mc_ki[mc->mc_top] = 0; + if (unlikely(rc = mdbx_cursor_push(mc, mp))) + return rc; + return mdbx_page_search_root(mc, NULL, MDBX_PS_FIRST); +} + +/* Search for the page a given key should be in. 
+ * Push it and its parent pages on the cursor stack. + * + * [in,out] mc the cursor for this operation. + * [in] key the key to search for, or NULL for first/last page. + * [in] flags If MDBX_PS_MODIFY is set, visited pages in the DB + * are touched (updated with new page numbers). + * If MDBX_PS_FIRST or MDBX_PS_LAST is set, find first or last + * leaf. + * This is used by mdbx_cursor_first() and mdbx_cursor_last(). + * If MDBX_PS_ROOTONLY set, just fetch root node, no further + * lookups. + * + * Returns 0 on success, non-zero on failure. */ +static int mdbx_page_search(MDBX_cursor *mc, MDBX_val *key, int flags) { + int rc; + pgno_t root; + + /* Make sure the txn is still viable, then find the root from + * the txn's db table and set it as the root of the cursor's stack. */ + if (unlikely(mc->mc_txn->mt_flags & MDBX_TXN_BLOCKED)) { + mdbx_debug("transaction has failed, must abort"); + return MDBX_BAD_TXN; + } + + mdbx_cassert(mc, mc->mc_txn->mt_txnid >= mc->mc_txn->mt_env->me_oldest[0]); + /* Make sure we're using an up-to-date root */ + if (unlikely(*mc->mc_dbflag & DB_STALE)) { + MDBX_cursor mc2; + if (unlikely(TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi))) + return MDBX_BAD_DBI; + mdbx_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, NULL); + rc = mdbx_page_search(&mc2, &mc->mc_dbx->md_name, 0); + if (rc) + return rc; + { + MDBX_val data; + int exact = 0; + MDBX_node *leaf = mdbx_node_search(&mc2, &mc->mc_dbx->md_name, &exact); + if (!exact) + return MDBX_NOTFOUND; + if (unlikely((leaf->mn_flags & (F_DUPDATA | F_SUBDATA)) != F_SUBDATA)) + return MDBX_INCOMPATIBLE; /* not a named DB */ + rc = mdbx_node_read(&mc2, leaf, &data); + if (rc) + return rc; + + uint16_t md_flags; + memcpy(&md_flags, ((char *)data.iov_base + offsetof(MDBX_db, md_flags)), + sizeof(uint16_t)); + /* The txn may not know this DBI, or another process may + * have dropped and recreated the DB with other flags. */ + if (unlikely((mc->mc_db->md_flags & PERSISTENT_FLAGS) != md_flags)) + return MDBX_INCOMPATIBLE; + memcpy(mc->mc_db, data.iov_base, sizeof(MDBX_db)); + } + *mc->mc_dbflag &= ~DB_STALE; + } + root = mc->mc_db->md_root; + + if (unlikely(root == P_INVALID)) { /* Tree is empty. */ + mdbx_debug("tree is empty"); + return MDBX_NOTFOUND; + } + + mdbx_cassert(mc, mc->mc_txn->mt_txnid >= mc->mc_txn->mt_env->me_oldest[0]); + mdbx_cassert(mc, root >= NUM_METAS); + if (!mc->mc_pg[0] || mc->mc_pg[0]->mp_pgno != root) + if (unlikely((rc = mdbx_page_get(mc, root, &mc->mc_pg[0], NULL)) != 0)) + return rc; + + mc->mc_snum = 1; + mc->mc_top = 0; + + mdbx_debug("db %d root page %" PRIaPGNO " has flags 0x%X", DDBI(mc), root, + mc->mc_pg[0]->mp_flags); + + if (flags & MDBX_PS_MODIFY) { + if (unlikely(rc = mdbx_page_touch(mc))) + return rc; + } + + if (flags & MDBX_PS_ROOTONLY) + return MDBX_SUCCESS; + + return mdbx_page_search_root(mc, key, flags); +} + +static int mdbx_ovpage_free(MDBX_cursor *mc, MDBX_page *mp) { + MDBX_txn *txn = mc->mc_txn; + pgno_t pg = mp->mp_pgno; + unsigned x = 0, ovpages = mp->mp_pages; + MDBX_env *env = txn->mt_env; + MDBX_PNL sl = txn->mt_spill_pages; + pgno_t pn = pg << 1; + int rc; + + mdbx_debug("free ov page %" PRIaPGNO " (%u)", pg, ovpages); + /* If the page is dirty or on the spill list we just acquired it, + * so we should give it back to our current free list, if any. + * Otherwise put it onto the list of pages we freed in this txn. + * + * Won't create me_reclaimed_pglist: me_last_reclaimed must be inited along + * with it. 
+ * Unsupported in nested txns: They would need to hide the page + * range in ancestor txns' dirty and spilled lists. */ + if (env->me_reclaimed_pglist && !txn->mt_parent && + ((mp->mp_flags & P_DIRTY) || + (sl && (x = mdbx_pnl_search(sl, pn)) <= sl[0] && sl[x] == pn))) { + unsigned i, j; + pgno_t *mop; + MDBX_ID2 *dl, ix, iy; + rc = mdbx_pnl_need(&env->me_reclaimed_pglist, ovpages); + if (unlikely(rc)) + return rc; + if (!(mp->mp_flags & P_DIRTY)) { + /* This page is no longer spilled */ + if (x == sl[0]) + sl[0]--; + else + sl[x] |= 1; + goto release; + } + /* Remove from dirty list */ + dl = txn->mt_rw_dirtylist; + x = dl[0].mid--; + for (ix = dl[x]; ix.mptr != mp; ix = iy) { + if (likely(x > 1)) { + x--; + iy = dl[x]; + dl[x] = ix; + } else { + mdbx_cassert(mc, x > 1); + mdbx_error("not found page 0x%p #%" PRIaPGNO " in the dirtylist", mp, + mp->mp_pgno); + j = ++(dl[0].mid); + dl[j] = ix; /* Unsorted. OK when MDBX_TXN_ERROR. */ + txn->mt_flags |= MDBX_TXN_ERROR; + return MDBX_PROBLEM; + } + } + txn->mt_dirtyroom++; + if (!(env->me_flags & MDBX_WRITEMAP)) + mdbx_dpage_free(env, mp); + release: + /* Insert in me_reclaimed_pglist */ + mop = env->me_reclaimed_pglist; + j = mop[0] + ovpages; + for (i = mop[0]; i && mop[i] < pg; i--) + mop[j--] = mop[i]; + while (j > i) + mop[j--] = pg++; + mop[0] += ovpages; + } else { + rc = mdbx_pnl_append_range(&txn->mt_befree_pages, pg, ovpages); + if (unlikely(rc)) + return rc; + } + mc->mc_db->md_overflow_pages -= ovpages; + return 0; +} + +/* Return the data associated with a given node. + * + * [in] mc The cursor for this operation. + * [in] leaf The node being read. + * [out] data Updated to point to the node's data. + * + * Returns 0 on success, non-zero on failure. */ +static __inline int mdbx_node_read(MDBX_cursor *mc, MDBX_node *leaf, + MDBX_val *data) { + MDBX_page *omp; /* overflow page */ + pgno_t pgno; + int rc; + + mdbx_cassert(mc, mc->mc_txn->mt_txnid >= mc->mc_txn->mt_env->me_oldest[0]); + if (!F_ISSET(leaf->mn_flags, F_BIGDATA)) { + data->iov_len = NODEDSZ(leaf); + data->iov_base = NODEDATA(leaf); + return MDBX_SUCCESS; + } + + /* Read overflow data. */ + data->iov_len = NODEDSZ(leaf); + memcpy(&pgno, NODEDATA(leaf), sizeof(pgno)); + if (unlikely((rc = mdbx_page_get(mc, pgno, &omp, NULL)) != 0)) { + mdbx_debug("read overflow page %" PRIaPGNO " failed", pgno); + return rc; + } + data->iov_base = PAGEDATA(omp); + + return MDBX_SUCCESS; +} + +int mdbx_get(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, MDBX_val *data) { + MDBX_cursor mc; + MDBX_xcursor mx; + int exact = 0; + DKBUF; + + mdbx_debug("===> get db %u key [%s]", dbi, DKEY(key)); + + if (unlikely(!key || !data || !txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))) + return MDBX_EINVAL; + + if (unlikely(txn->mt_flags & MDBX_TXN_BLOCKED)) + return MDBX_BAD_TXN; + + mdbx_cursor_init(&mc, txn, dbi, &mx); + return mdbx_cursor_set(&mc, key, data, MDBX_SET, &exact); +} + +/* Find a sibling for a page. + * Replaces the page at the top of the cursor's stack with the specified + * sibling, if one exists. + * + * [in] mc The cursor for this operation. + * [in] move_right Non-zero if the right sibling is requested, + * otherwise the left sibling. + * + * Returns 0 on success, non-zero on failure. 
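A hedged sketch of a point lookup through mdbx_get() as defined above; the transaction and table handles come from mdbx_txn_begin()/mdbx_dbi_open()/mdbx_txn_abort(), which are assumed here and are not part of this hunk:

    MDBX_txn *txn = NULL;
    MDBX_dbi dbi;
    int err = mdbx_txn_begin(db, NULL, MDBX_RDONLY, &txn);  /* assumed helper */
    if (err == MDBX_SUCCESS) {
      MDBX_val key = { (void *)"alice", 5 }, data;
      err = mdbx_dbi_open(txn, NULL, 0, &dbi);               /* assumed helper */
      if (err == MDBX_SUCCESS)
        err = mdbx_get(txn, dbi, &key, &data);  /* MDBX_NOTFOUND if absent */
      mdbx_txn_abort(txn);                      /* enough for a read-only txn */
    }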
*/ +static int mdbx_cursor_sibling(MDBX_cursor *mc, int move_right) { + int rc; + MDBX_node *indx; + MDBX_page *mp; + + mdbx_cassert(mc, mc->mc_txn->mt_txnid >= mc->mc_txn->mt_env->me_oldest[0]); + if (unlikely(mc->mc_snum < 2)) { + return MDBX_NOTFOUND; /* root has no siblings */ + } + + mdbx_cursor_pop(mc); + mdbx_debug("parent page is page %" PRIaPGNO ", index %u", + mc->mc_pg[mc->mc_top]->mp_pgno, mc->mc_ki[mc->mc_top]); + + if (move_right + ? (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mc->mc_pg[mc->mc_top])) + : (mc->mc_ki[mc->mc_top] == 0)) { + mdbx_debug("no more keys left, moving to %s sibling", + move_right ? "right" : "left"); + if (unlikely((rc = mdbx_cursor_sibling(mc, move_right)) != MDBX_SUCCESS)) { + /* undo cursor_pop before returning */ + mc->mc_top++; + mc->mc_snum++; + return rc; + } + } else { + if (move_right) + mc->mc_ki[mc->mc_top]++; + else + mc->mc_ki[mc->mc_top]--; + mdbx_debug("just moving to %s index key %u", move_right ? "right" : "left", + mc->mc_ki[mc->mc_top]); + } + mdbx_cassert(mc, IS_BRANCH(mc->mc_pg[mc->mc_top])); + + indx = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + if (unlikely((rc = mdbx_page_get(mc, NODEPGNO(indx), &mp, NULL)) != 0)) { + /* mc will be inconsistent if caller does mc_snum++ as above */ + mc->mc_flags &= ~(C_INITIALIZED | C_EOF); + return rc; + } + + mdbx_cursor_push(mc, mp); + if (!move_right) + mc->mc_ki[mc->mc_top] = NUMKEYS(mp) - 1; + + return MDBX_SUCCESS; +} + +/* Move the cursor to the next data item. */ +static int mdbx_cursor_next(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, + MDBX_cursor_op op) { + MDBX_page *mp; + MDBX_node *leaf; + int rc; + + if ((mc->mc_flags & C_DEL) && op == MDBX_NEXT_DUP) + return MDBX_NOTFOUND; + + if (!(mc->mc_flags & C_INITIALIZED)) + return mdbx_cursor_first(mc, key, data); + + mp = mc->mc_pg[mc->mc_top]; + if (mc->mc_flags & C_EOF) { + if (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mp)) + return MDBX_NOTFOUND; + mc->mc_flags ^= C_EOF; + } + + if (mc->mc_db->md_flags & MDBX_DUPSORT) { + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + if (op == MDBX_NEXT || op == MDBX_NEXT_DUP) { + rc = + mdbx_cursor_next(&mc->mc_xcursor->mx_cursor, data, NULL, MDBX_NEXT); + if (op != MDBX_NEXT || rc != MDBX_NOTFOUND) { + if (likely(rc == MDBX_SUCCESS)) + MDBX_GET_KEY(leaf, key); + return rc; + } + } + } else { + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED | C_EOF); + if (op == MDBX_NEXT_DUP) + return MDBX_NOTFOUND; + } + } + + mdbx_debug("cursor_next: top page is %" PRIaPGNO " in cursor %p", mp->mp_pgno, + (void *)mc); + if (mc->mc_flags & C_DEL) { + mc->mc_flags ^= C_DEL; + goto skip; + } + + if (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mp)) { + mdbx_debug("=====> move to next sibling page"); + if (unlikely((rc = mdbx_cursor_sibling(mc, 1)) != MDBX_SUCCESS)) { + mc->mc_flags |= C_EOF; + return rc; + } + mp = mc->mc_pg[mc->mc_top]; + mdbx_debug("next page is %" PRIaPGNO ", key index %u", mp->mp_pgno, + mc->mc_ki[mc->mc_top]); + } else + mc->mc_ki[mc->mc_top]++; + +skip: + mdbx_debug("==> cursor points to page %" PRIaPGNO + " with %u keys, key index %u", + mp->mp_pgno, NUMKEYS(mp), mc->mc_ki[mc->mc_top]); + + if (IS_LEAF2(mp)) { + key->iov_len = mc->mc_db->md_xsize; + key->iov_base = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->iov_len); + return MDBX_SUCCESS; + } + + mdbx_cassert(mc, IS_LEAF(mp)); + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdbx_xcursor_init1(mc, leaf); + } + if (data) { + if (unlikely((rc = 
mdbx_node_read(mc, leaf, data)) != MDBX_SUCCESS)) + return rc; + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + rc = mdbx_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + } + } + + MDBX_GET_KEY(leaf, key); + return MDBX_SUCCESS; +} + +/* Move the cursor to the previous data item. */ +static int mdbx_cursor_prev(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, + MDBX_cursor_op op) { + MDBX_page *mp; + MDBX_node *leaf; + int rc; + + if ((mc->mc_flags & C_DEL) && op == MDBX_PREV_DUP) + return MDBX_NOTFOUND; + + if (!(mc->mc_flags & C_INITIALIZED)) { + rc = mdbx_cursor_last(mc, key, data); + if (unlikely(rc)) + return rc; + mc->mc_ki[mc->mc_top]++; + } + + mp = mc->mc_pg[mc->mc_top]; + if ((mc->mc_db->md_flags & MDBX_DUPSORT) && + mc->mc_ki[mc->mc_top] < NUMKEYS(mp)) { + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + if (op == MDBX_PREV || op == MDBX_PREV_DUP) { + rc = + mdbx_cursor_prev(&mc->mc_xcursor->mx_cursor, data, NULL, MDBX_PREV); + if (op != MDBX_PREV || rc != MDBX_NOTFOUND) { + if (likely(rc == MDBX_SUCCESS)) { + MDBX_GET_KEY(leaf, key); + mc->mc_flags &= ~C_EOF; + } + return rc; + } + } + } else { + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED | C_EOF); + if (op == MDBX_PREV_DUP) + return MDBX_NOTFOUND; + } + } + + mdbx_debug("cursor_prev: top page is %" PRIaPGNO " in cursor %p", mp->mp_pgno, + (void *)mc); + + mc->mc_flags &= ~(C_EOF | C_DEL); + + if (mc->mc_ki[mc->mc_top] == 0) { + mdbx_debug("=====> move to prev sibling page"); + if ((rc = mdbx_cursor_sibling(mc, 0)) != MDBX_SUCCESS) { + return rc; + } + mp = mc->mc_pg[mc->mc_top]; + mc->mc_ki[mc->mc_top] = NUMKEYS(mp) - 1; + mdbx_debug("prev page is %" PRIaPGNO ", key index %u", mp->mp_pgno, + mc->mc_ki[mc->mc_top]); + } else + mc->mc_ki[mc->mc_top]--; + + mdbx_debug("==> cursor points to page %" PRIaPGNO + " with %u keys, key index %u", + mp->mp_pgno, NUMKEYS(mp), mc->mc_ki[mc->mc_top]); + + if (IS_LEAF2(mp)) { + key->iov_len = mc->mc_db->md_xsize; + key->iov_base = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->iov_len); + return MDBX_SUCCESS; + } + + mdbx_cassert(mc, IS_LEAF(mp)); + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdbx_xcursor_init1(mc, leaf); + } + if (data) { + if (unlikely((rc = mdbx_node_read(mc, leaf, data)) != MDBX_SUCCESS)) + return rc; + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + rc = mdbx_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + } + } + + MDBX_GET_KEY(leaf, key); + return MDBX_SUCCESS; +} + +/* Set the cursor on a specific data item. 
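In cursor terms, a sketch of the lookup modes (cursor creation via mdbx_cursor_open()/mdbx_cursor_close() is assumed; it is not shown in this hunk): MDBX_SET demands an exact key, while MDBX_SET_RANGE positions on the smallest key greater than or equal to the one supplied, as mdbx_node_search() above promises:

    MDBX_cursor *cur = NULL;
    if (mdbx_cursor_open(txn, dbi, &cur) == MDBX_SUCCESS) {  /* assumed helper */
      MDBX_val key = { (void *)"al", 2 }, data;
      /* lands on "alice" if that is the first key >= "al"; with MDBX_SET the
       * same call would return MDBX_NOTFOUND because "al" itself is absent */
      int err = mdbx_cursor_get(cur, &key, &data, MDBX_SET_RANGE);
      mdbx_cursor_close(cur);
    }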
*/ +static int mdbx_cursor_set(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, + MDBX_cursor_op op, int *exactp) { + int rc; + MDBX_page *mp; + MDBX_node *leaf = NULL; + DKBUF; + + mdbx_cassert(mc, mc->mc_txn->mt_txnid >= mc->mc_txn->mt_env->me_oldest[0]); + if ((mc->mc_db->md_flags & MDBX_INTEGERKEY) && + unlikely(key->iov_len != sizeof(uint32_t) && + key->iov_len != sizeof(uint64_t))) { + mdbx_cassert(mc, !"key-size is invalid for MDBX_INTEGERKEY"); + return MDBX_BAD_VALSIZE; + } + + if (mc->mc_xcursor) + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED | C_EOF); + + /* See if we're already on the right page */ + if (mc->mc_flags & C_INITIALIZED) { + MDBX_val nodekey; + + mp = mc->mc_pg[mc->mc_top]; + if (!NUMKEYS(mp)) { + mc->mc_ki[mc->mc_top] = 0; + return MDBX_NOTFOUND; + } + if (mp->mp_flags & P_LEAF2) { + nodekey.iov_len = mc->mc_db->md_xsize; + nodekey.iov_base = LEAF2KEY(mp, 0, nodekey.iov_len); + } else { + leaf = NODEPTR(mp, 0); + MDBX_GET_KEY2(leaf, nodekey); + } + rc = mc->mc_dbx->md_cmp(key, &nodekey); + if (rc == 0) { + /* Probably happens rarely, but first node on the page + * was the one we wanted. + */ + mc->mc_ki[mc->mc_top] = 0; + if (exactp) + *exactp = 1; + goto set1; + } + if (rc > 0) { + unsigned i; + unsigned nkeys = NUMKEYS(mp); + if (nkeys > 1) { + if (mp->mp_flags & P_LEAF2) { + nodekey.iov_base = LEAF2KEY(mp, nkeys - 1, nodekey.iov_len); + } else { + leaf = NODEPTR(mp, nkeys - 1); + MDBX_GET_KEY2(leaf, nodekey); + } + rc = mc->mc_dbx->md_cmp(key, &nodekey); + if (rc == 0) { + /* last node was the one we wanted */ + mdbx_cassert(mc, nkeys >= 1 && nkeys <= UINT16_MAX + 1); + mc->mc_ki[mc->mc_top] = (indx_t)(nkeys - 1); + if (exactp) + *exactp = 1; + goto set1; + } + if (rc < 0) { + if (mc->mc_ki[mc->mc_top] < NUMKEYS(mp)) { + /* This is definitely the right page, skip search_page */ + if (mp->mp_flags & P_LEAF2) { + nodekey.iov_base = + LEAF2KEY(mp, mc->mc_ki[mc->mc_top], nodekey.iov_len); + } else { + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + MDBX_GET_KEY2(leaf, nodekey); + } + rc = mc->mc_dbx->md_cmp(key, &nodekey); + if (rc == 0) { + /* current node was the one we wanted */ + if (exactp) + *exactp = 1; + goto set1; + } + } + rc = 0; + mc->mc_flags &= ~C_EOF; + goto set2; + } + } + /* If any parents have right-sibs, search. + * Otherwise, there's nothing further. */ + for (i = 0; i < mc->mc_top; i++) + if (mc->mc_ki[i] < NUMKEYS(mc->mc_pg[i]) - 1) + break; + if (i == mc->mc_top) { + /* There are no other pages */ + mdbx_cassert(mc, nkeys <= UINT16_MAX); + mc->mc_ki[mc->mc_top] = (uint16_t)nkeys; + return MDBX_NOTFOUND; + } + } + if (!mc->mc_top) { + /* There are no other pages */ + mc->mc_ki[mc->mc_top] = 0; + if (op == MDBX_SET_RANGE && !exactp) { + rc = 0; + goto set1; + } else + return MDBX_NOTFOUND; + } + } else { + mc->mc_pg[0] = 0; + } + + rc = mdbx_page_search(mc, key, 0); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + mp = mc->mc_pg[mc->mc_top]; + mdbx_cassert(mc, IS_LEAF(mp)); + +set2: + leaf = mdbx_node_search(mc, key, exactp); + if (exactp != NULL && !*exactp) { + /* MDBX_SET specified and not an exact match. 
*/ + return MDBX_NOTFOUND; + } + + if (leaf == NULL) { + mdbx_debug("===> inexact leaf not found, goto sibling"); + if (unlikely((rc = mdbx_cursor_sibling(mc, 1)) != MDBX_SUCCESS)) { + mc->mc_flags |= C_EOF; + return rc; /* no entries matched */ + } + mp = mc->mc_pg[mc->mc_top]; + mdbx_cassert(mc, IS_LEAF(mp)); + leaf = NODEPTR(mp, 0); + } + +set1: + mc->mc_flags |= C_INITIALIZED; + mc->mc_flags &= ~C_EOF; + + if (IS_LEAF2(mp)) { + if (op == MDBX_SET_RANGE || op == MDBX_SET_KEY) { + key->iov_len = mc->mc_db->md_xsize; + key->iov_base = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->iov_len); + } + return MDBX_SUCCESS; + } + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdbx_xcursor_init1(mc, leaf); + } + if (likely(data)) { + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + if (op == MDBX_SET || op == MDBX_SET_KEY || op == MDBX_SET_RANGE) { + rc = mdbx_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); + } else { + int ex2, *ex2p; + if (op == MDBX_GET_BOTH) { + ex2p = &ex2; + ex2 = 0; + } else { + ex2p = NULL; + } + rc = mdbx_cursor_set(&mc->mc_xcursor->mx_cursor, data, NULL, + MDBX_SET_RANGE, ex2p); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + } + } else if (op == MDBX_GET_BOTH || op == MDBX_GET_BOTH_RANGE) { + MDBX_val olddata; + if (unlikely((rc = mdbx_node_read(mc, leaf, &olddata)) != MDBX_SUCCESS)) + return rc; + rc = mc->mc_dbx->md_dcmp(data, &olddata); + if (rc) { + if (op == MDBX_GET_BOTH || rc > 0) + return MDBX_NOTFOUND; + rc = 0; + } + *data = olddata; + } else { + if (mc->mc_xcursor) + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED | C_EOF); + if (unlikely((rc = mdbx_node_read(mc, leaf, data)) != MDBX_SUCCESS)) + return rc; + } + } + + /* The key already matches in all other cases */ + if (op == MDBX_SET_RANGE || op == MDBX_SET_KEY) + MDBX_GET_KEY(leaf, key); + + mdbx_debug("==> cursor placed on key [%s], data [%s]", DKEY(key), DVAL(data)); + return rc; +} + +/* Move the cursor to the first item in the database. */ +static int mdbx_cursor_first(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data) { + int rc; + MDBX_node *leaf; + + if (mc->mc_xcursor) + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED | C_EOF); + + if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { + rc = mdbx_page_search(mc, NULL, MDBX_PS_FIRST); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + } + mdbx_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top])); + + leaf = NODEPTR(mc->mc_pg[mc->mc_top], 0); + mc->mc_flags |= C_INITIALIZED; + mc->mc_flags &= ~C_EOF; + + mc->mc_ki[mc->mc_top] = 0; + + if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { + key->iov_len = mc->mc_db->md_xsize; + key->iov_base = LEAF2KEY(mc->mc_pg[mc->mc_top], 0, key->iov_len); + return MDBX_SUCCESS; + } + + if (likely(data)) { + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdbx_cassert(mc, mc->mc_xcursor != nullptr); + mdbx_xcursor_init1(mc, leaf); + rc = mdbx_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); + if (unlikely(rc)) + return rc; + } else { + if (unlikely((rc = mdbx_node_read(mc, leaf, data)) != MDBX_SUCCESS)) + return rc; + } + } + MDBX_GET_KEY(leaf, key); + return MDBX_SUCCESS; +} + +/* Move the cursor to the last item in the database. 
*/ +static int mdbx_cursor_last(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data) { + int rc; + MDBX_node *leaf; + + if (mc->mc_xcursor) + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED | C_EOF); + + if (likely((mc->mc_flags & (C_EOF | C_DEL)) != C_EOF)) { + if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { + rc = mdbx_page_search(mc, NULL, MDBX_PS_LAST); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + } + mdbx_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top])); + } + + mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]) - 1; + mc->mc_flags |= C_INITIALIZED | C_EOF; + leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + + if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { + key->iov_len = mc->mc_db->md_xsize; + key->iov_base = + LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], key->iov_len); + return MDBX_SUCCESS; + } + + if (likely(data)) { + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdbx_cassert(mc, mc->mc_xcursor != nullptr); + mdbx_xcursor_init1(mc, leaf); + rc = mdbx_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL); + if (unlikely(rc)) + return rc; + } else { + if (unlikely((rc = mdbx_node_read(mc, leaf, data)) != MDBX_SUCCESS)) + return rc; + } + } + + MDBX_GET_KEY(leaf, key); + return MDBX_SUCCESS; +} + +int mdbx_cursor_get(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, + MDBX_cursor_op op) { + int rc; + int exact = 0; + int (*mfunc)(MDBX_cursor * mc, MDBX_val * key, MDBX_val * data); + + if (unlikely(mc == NULL)) + return MDBX_EINVAL; + + if (unlikely(mc->mc_signature != MDBX_MC_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(mc->mc_txn->mt_flags & MDBX_TXN_BLOCKED)) + return MDBX_BAD_TXN; + + mdbx_cassert(mc, mc->mc_txn->mt_txnid >= mc->mc_txn->mt_env->me_oldest[0]); + switch (op) { + case MDBX_GET_CURRENT: { + if (unlikely(!(mc->mc_flags & C_INITIALIZED))) + return MDBX_EINVAL; + MDBX_page *mp = mc->mc_pg[mc->mc_top]; + unsigned nkeys = NUMKEYS(mp); + if (mc->mc_ki[mc->mc_top] >= nkeys) { + mdbx_cassert(mc, nkeys <= UINT16_MAX); + mc->mc_ki[mc->mc_top] = (uint16_t)nkeys; + return MDBX_NOTFOUND; + } + assert(nkeys > 0); + + rc = MDBX_SUCCESS; + if (IS_LEAF2(mp)) { + key->iov_len = mc->mc_db->md_xsize; + key->iov_base = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->iov_len); + } else { + MDBX_node *leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + MDBX_GET_KEY(leaf, key); + if (data) { + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + if (unlikely(!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED))) { + mdbx_xcursor_init1(mc, leaf); + rc = mdbx_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); + if (unlikely(rc)) + return rc; + } + rc = mdbx_cursor_get(&mc->mc_xcursor->mx_cursor, data, NULL, + MDBX_GET_CURRENT); + } else { + rc = mdbx_node_read(mc, leaf, data); + } + if (unlikely(rc)) + return rc; + } + } + break; + } + case MDBX_GET_BOTH: + case MDBX_GET_BOTH_RANGE: + if (unlikely(data == NULL)) + return MDBX_EINVAL; + if (unlikely(mc->mc_xcursor == NULL)) + return MDBX_INCOMPATIBLE; + /* FALLTHRU */ + case MDBX_SET: + case MDBX_SET_KEY: + case MDBX_SET_RANGE: + if (unlikely(key == NULL)) + return MDBX_EINVAL; + rc = mdbx_cursor_set(mc, key, data, op, + op == MDBX_SET_RANGE ? 
NULL : &exact); + break; + case MDBX_GET_MULTIPLE: + if (unlikely(data == NULL || !(mc->mc_flags & C_INITIALIZED))) + return MDBX_EINVAL; + if (unlikely(!(mc->mc_db->md_flags & MDBX_DUPFIXED))) + return MDBX_INCOMPATIBLE; + rc = MDBX_SUCCESS; + if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) || + (mc->mc_xcursor->mx_cursor.mc_flags & C_EOF)) + break; + goto fetchm; + case MDBX_NEXT_MULTIPLE: + if (unlikely(data == NULL)) + return MDBX_EINVAL; + if (unlikely(!(mc->mc_db->md_flags & MDBX_DUPFIXED))) + return MDBX_INCOMPATIBLE; + rc = mdbx_cursor_next(mc, key, data, MDBX_NEXT_DUP); + if (rc == MDBX_SUCCESS) { + if (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { + MDBX_cursor *mx; + fetchm: + mx = &mc->mc_xcursor->mx_cursor; + data->iov_len = NUMKEYS(mx->mc_pg[mx->mc_top]) * mx->mc_db->md_xsize; + data->iov_base = PAGEDATA(mx->mc_pg[mx->mc_top]); + mx->mc_ki[mx->mc_top] = NUMKEYS(mx->mc_pg[mx->mc_top]) - 1; + } else { + rc = MDBX_NOTFOUND; + } + } + break; + case MDBX_PREV_MULTIPLE: + if (data == NULL) + return MDBX_EINVAL; + if (!(mc->mc_db->md_flags & MDBX_DUPFIXED)) + return MDBX_INCOMPATIBLE; + rc = MDBX_SUCCESS; + if (!(mc->mc_flags & C_INITIALIZED)) + rc = mdbx_cursor_last(mc, key, data); + if (rc == MDBX_SUCCESS) { + MDBX_cursor *mx = &mc->mc_xcursor->mx_cursor; + if (mx->mc_flags & C_INITIALIZED) { + rc = mdbx_cursor_sibling(mx, 0); + if (rc == MDBX_SUCCESS) + goto fetchm; + } else { + rc = MDBX_NOTFOUND; + } + } + break; + case MDBX_NEXT: + case MDBX_NEXT_DUP: + case MDBX_NEXT_NODUP: + rc = mdbx_cursor_next(mc, key, data, op); + break; + case MDBX_PREV: + case MDBX_PREV_DUP: + case MDBX_PREV_NODUP: + rc = mdbx_cursor_prev(mc, key, data, op); + break; + case MDBX_FIRST: + rc = mdbx_cursor_first(mc, key, data); + break; + case MDBX_FIRST_DUP: + mfunc = mdbx_cursor_first; + mmove: + if (unlikely(data == NULL || !(mc->mc_flags & C_INITIALIZED))) + return MDBX_EINVAL; + if (unlikely(mc->mc_xcursor == NULL)) + return MDBX_INCOMPATIBLE; + if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top])) { + mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]); + return MDBX_NOTFOUND; + } + { + MDBX_node *leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { + MDBX_GET_KEY(leaf, key); + rc = mdbx_node_read(mc, leaf, data); + break; + } + } + if (unlikely(!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED))) + return MDBX_EINVAL; + rc = mfunc(&mc->mc_xcursor->mx_cursor, data, NULL); + break; + case MDBX_LAST: + rc = mdbx_cursor_last(mc, key, data); + break; + case MDBX_LAST_DUP: + mfunc = mdbx_cursor_last; + goto mmove; + default: + mdbx_debug("unhandled/unimplemented cursor operation %u", op); + return MDBX_EINVAL; + } + + mc->mc_flags &= ~C_DEL; + return rc; +} + +/* Touch all the pages in the cursor stack. Set mc_top. + * Makes sure all the pages are writable, before attempting a write operation. + * [in] mc The cursor to operate on. 
*/ +static int mdbx_cursor_touch(MDBX_cursor *mc) { + int rc = MDBX_SUCCESS; + + if (mc->mc_dbi >= CORE_DBS && !(*mc->mc_dbflag & (DB_DIRTY | DB_DUPDATA))) { + /* Touch DB record of named DB */ + MDBX_cursor mc2; + MDBX_xcursor mcx; + if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi)) + return MDBX_BAD_DBI; + mdbx_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, &mcx); + rc = mdbx_page_search(&mc2, &mc->mc_dbx->md_name, MDBX_PS_MODIFY); + if (unlikely(rc)) + return rc; + *mc->mc_dbflag |= DB_DIRTY; + } + mc->mc_top = 0; + if (mc->mc_snum) { + do { + rc = mdbx_page_touch(mc); + } while (!rc && ++(mc->mc_top) < mc->mc_snum); + mc->mc_top = mc->mc_snum - 1; + } + return rc; +} + +/* Do not spill pages to disk if txn is getting full, may fail instead */ +#define MDBX_NOSPILL 0x8000 + +int mdbx_cursor_put(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, + unsigned flags) { + MDBX_env *env; + MDBX_page *fp, *sub_root = NULL; + uint16_t fp_flags; + MDBX_val xdata, *rdata, dkey, olddata; + MDBX_db dummy; + unsigned mcount = 0, dcount = 0, nospill; + size_t nsize; + int rc = MDBX_SUCCESS, rc2; + unsigned nflags; + DKBUF; + + if (unlikely(mc == NULL || key == NULL)) + return MDBX_EINVAL; + + if (unlikely(mc->mc_signature != MDBX_MC_SIGNATURE)) + return MDBX_EBADSIGN; + + env = mc->mc_txn->mt_env; + + /* Check this first so counter will always be zero on any early failures. */ + if (flags & MDBX_MULTIPLE) { + if (unlikely(!F_ISSET(mc->mc_db->md_flags, MDBX_DUPFIXED))) + return MDBX_INCOMPATIBLE; + if (unlikely(data[1].iov_len >= INT_MAX)) + return MDBX_EINVAL; + dcount = (unsigned)data[1].iov_len; + data[1].iov_len = 0; + } + + if (flags & MDBX_RESERVE) { + if (unlikely(mc->mc_db->md_flags & (MDBX_DUPSORT | MDBX_REVERSEDUP))) + return MDBX_INCOMPATIBLE; + data->iov_base = nullptr; + } + + nospill = flags & MDBX_NOSPILL; + flags &= ~MDBX_NOSPILL; + + if (unlikely(mc->mc_txn->mt_flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED))) + return (mc->mc_txn->mt_flags & MDBX_TXN_RDONLY) ? MDBX_EACCESS + : MDBX_BAD_TXN; + + if (unlikely(key->iov_len > env->me_maxkey_limit)) + return MDBX_BAD_VALSIZE; + + if (unlikely(data->iov_len > ((mc->mc_db->md_flags & MDBX_DUPSORT) + ? env->me_maxkey_limit + : MDBX_MAXDATASIZE))) + return MDBX_BAD_VALSIZE; + + if ((mc->mc_db->md_flags & MDBX_INTEGERKEY) && + unlikely(key->iov_len != sizeof(uint32_t) && + key->iov_len != sizeof(uint64_t))) { + mdbx_cassert(mc, !"key-size is invalid for MDBX_INTEGERKEY"); + return MDBX_BAD_VALSIZE; + } + + if ((mc->mc_db->md_flags & MDBX_INTEGERDUP) && + unlikely(data->iov_len != sizeof(uint32_t) && + data->iov_len != sizeof(uint64_t))) { + mdbx_cassert(mc, !"data-size is invalid MDBX_INTEGERDUP"); + return MDBX_BAD_VALSIZE; + } + + mdbx_debug("==> put db %d key [%s], size %" PRIuPTR + ", data [%s] size %" PRIuPTR, + DDBI(mc), DKEY(key), key ? key->iov_len : 0, + DVAL((flags & MDBX_RESERVE) ? nullptr : data), data->iov_len); + + int dupdata_flag = 0; + if (flags & MDBX_CURRENT) { + /* The MDBX_CURRENT option means that an update of the current record, + * i.e. the one the cursor is positioned on, was requested. Check that + * the supplied key matches the value at the current cursor position. + * It is simpler to call mdbx_cursor_get() here, since handling tables + * with MDBX_DUPSORT also requires the current data size. 
*/ + MDBX_val current_key, current_data; + rc = mdbx_cursor_get(mc, &current_key, &current_data, MDBX_GET_CURRENT); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + if (mc->mc_dbx->md_cmp(key, &current_key) != 0) + return MDBX_EKEYMISMATCH; + + if (F_ISSET(mc->mc_db->md_flags, MDBX_DUPSORT)) { + MDBX_node *leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdbx_cassert(mc, + mc->mc_xcursor != NULL && + (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)); + /* If the key holds more than one value, or if the data size differs, + * then an in-place update is not possible and a delete followed by + * a re-insert is required. */ + if (mc->mc_xcursor->mx_db.md_entries > 1 || + current_data.iov_len != data->iov_len) { + rc = mdbx_cursor_del(mc, 0); + if (rc != MDBX_SUCCESS) + return rc; + flags -= MDBX_CURRENT; + } + } + } + } + + if (mc->mc_db->md_root == P_INVALID) { + /* new database, cursor has nothing to point to */ + mc->mc_snum = 0; + mc->mc_top = 0; + mc->mc_flags &= ~C_INITIALIZED; + rc = MDBX_NO_ROOT; + } else if ((flags & MDBX_CURRENT) == 0) { + int exact = 0; + MDBX_val d2; + if (flags & MDBX_APPEND) { + MDBX_val k2; + rc = mdbx_cursor_last(mc, &k2, &d2); + if (rc == 0) { + rc = mc->mc_dbx->md_cmp(key, &k2); + if (rc > 0) { + rc = MDBX_NOTFOUND; + mc->mc_ki[mc->mc_top]++; + } else { + /* new key is <= last key */ + rc = MDBX_EKEYMISMATCH; + } + } + } else { + rc = mdbx_cursor_set(mc, key, &d2, MDBX_SET, &exact); + } + if ((flags & MDBX_NOOVERWRITE) && rc == 0) { + mdbx_debug("duplicate key [%s]", DKEY(key)); + *data = d2; + return MDBX_KEYEXIST; + } + if (rc && unlikely(rc != MDBX_NOTFOUND)) + return rc; + } + + mc->mc_flags &= ~C_DEL; + + /* Cursor is positioned, check for room in the dirty list */ + if (!nospill) { + if (flags & MDBX_MULTIPLE) { + rdata = &xdata; + xdata.iov_len = data->iov_len * dcount; + } else { + rdata = data; + } + if (unlikely(rc2 = mdbx_page_spill(mc, key, rdata))) + return rc2; + } + + if (rc == MDBX_NO_ROOT) { + MDBX_page *np; + /* new database, write a root leaf page */ + mdbx_debug("allocating new root leaf page"); + if (unlikely(rc2 = mdbx_page_new(mc, P_LEAF, 1, &np))) { + return rc2; + } + assert(np->mp_flags & P_LEAF); + mdbx_cursor_push(mc, np); + mc->mc_db->md_root = np->mp_pgno; + mc->mc_db->md_depth++; + *mc->mc_dbflag |= DB_DIRTY; + if ((mc->mc_db->md_flags & (MDBX_DUPSORT | MDBX_DUPFIXED)) == MDBX_DUPFIXED) + np->mp_flags |= P_LEAF2; + mc->mc_flags |= C_INITIALIZED; + } else { + /* make sure all cursor pages are writable */ + rc2 = mdbx_cursor_touch(mc); + if (unlikely(rc2)) + return rc2; + } + + bool insert_key, insert_data, do_sub = false; + insert_key = insert_data = (rc != MDBX_SUCCESS); + if (insert_key) { + /* The key does not exist */ + mdbx_debug("inserting key at index %i", mc->mc_ki[mc->mc_top]); + if ((mc->mc_db->md_flags & MDBX_DUPSORT) && + LEAFSIZE(key, data) > env->me_nodemax) { + /* Too big for a node, insert in sub-DB. Set up an empty + * "old sub-page" for prep_subDB to expand to a full page. 
*/ + fp_flags = P_LEAF | P_DIRTY; + fp = env->me_pbuf; + fp->mp_leaf2_ksize = (uint16_t)data->iov_len; /* used if MDBX_DUPFIXED */ + fp->mp_lower = fp->mp_upper = 0; + olddata.iov_len = PAGEHDRSZ; + goto prep_subDB; + } + } else { + /* there's only a key anyway, so this is a no-op */ + if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { + char *ptr; + unsigned ksize = mc->mc_db->md_xsize; + if (key->iov_len != ksize) + return MDBX_BAD_VALSIZE; + ptr = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], ksize); + memcpy(ptr, key->iov_base, ksize); + fix_parent: + /* if overwriting slot 0 of leaf, need to + * update branch key if there is a parent page */ + if (mc->mc_top && !mc->mc_ki[mc->mc_top]) { + unsigned dtop = 1; + mc->mc_top--; + /* slot 0 is always an empty key, find real slot */ + while (mc->mc_top && !mc->mc_ki[mc->mc_top]) { + mc->mc_top--; + dtop++; + } + if (mc->mc_ki[mc->mc_top]) + rc2 = mdbx_update_key(mc, key); + else + rc2 = MDBX_SUCCESS; + mdbx_cassert(mc, mc->mc_top + dtop < UINT16_MAX); + mc->mc_top += (uint16_t)dtop; + if (rc2) + return rc2; + } + return MDBX_SUCCESS; + } + + more:; + MDBX_node *leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + olddata.iov_len = NODEDSZ(leaf); + olddata.iov_base = NODEDATA(leaf); + + /* DB has dups? */ + if (F_ISSET(mc->mc_db->md_flags, MDBX_DUPSORT)) { + /* Prepare (sub-)page/sub-DB to accept the new item, if needed. + * fp: old sub-page or a header faking it. + * mp: new (sub-)page. offset: growth in page size. + * xdata: node data with new page or DB. */ + unsigned i, offset = 0; + MDBX_page *mp = fp = xdata.iov_base = env->me_pbuf; + mp->mp_pgno = mc->mc_pg[mc->mc_top]->mp_pgno; + + /* Was a single item before, must convert now */ + if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { + + /* does data match? 
*/ + if (!mc->mc_dbx->md_dcmp(data, &olddata)) { + if (unlikely(flags & (MDBX_NODUPDATA | MDBX_APPENDDUP))) + return MDBX_KEYEXIST; + /* overwrite it */ + goto current; + } + + /* Just overwrite the current item */ + if (flags & MDBX_CURRENT) + goto current; + + /* Back up original data item */ + dupdata_flag = 1; + dkey.iov_len = olddata.iov_len; + dkey.iov_base = memcpy(fp + 1, olddata.iov_base, olddata.iov_len); + + /* Make sub-page header for the dup items, with dummy body */ + fp->mp_flags = P_LEAF | P_DIRTY | P_SUBP; + fp->mp_lower = 0; + xdata.iov_len = PAGEHDRSZ + dkey.iov_len + data->iov_len; + if (mc->mc_db->md_flags & MDBX_DUPFIXED) { + fp->mp_flags |= P_LEAF2; + fp->mp_leaf2_ksize = (uint16_t)data->iov_len; + xdata.iov_len += 2 * data->iov_len; /* leave space for 2 more */ + } else { + xdata.iov_len += 2 * (sizeof(indx_t) + NODESIZE) + + (dkey.iov_len & 1) + (data->iov_len & 1); + } + fp->mp_upper = (uint16_t)(xdata.iov_len - PAGEHDRSZ); + olddata.iov_len = xdata.iov_len; /* pretend olddata is fp */ + } else if (leaf->mn_flags & F_SUBDATA) { + /* Data is on sub-DB, just store it */ + flags |= F_DUPDATA | F_SUBDATA; + goto put_sub; + } else { + /* Data is on sub-page */ + fp = olddata.iov_base; + switch (flags) { + default: + if (!(mc->mc_db->md_flags & MDBX_DUPFIXED)) { + offset = EVEN(NODESIZE + sizeof(indx_t) + data->iov_len); + break; + } + offset = fp->mp_leaf2_ksize; + if (SIZELEFT(fp) < offset) { + offset *= 4; /* space for 4 more */ + break; + } + /* FALLTHRU: Big enough MDBX_DUPFIXED sub-page */ + case MDBX_CURRENT | MDBX_NODUPDATA: + case MDBX_CURRENT: + fp->mp_flags |= P_DIRTY; + fp->mp_pgno = mp->mp_pgno; + mc->mc_xcursor->mx_cursor.mc_pg[0] = fp; + flags |= F_DUPDATA; + goto put_sub; + } + xdata.iov_len = olddata.iov_len + offset; + } + + fp_flags = fp->mp_flags; + if (NODESIZE + NODEKSZ(leaf) + xdata.iov_len > env->me_nodemax) { + /* Too big for a sub-page, convert to sub-DB */ + fp_flags &= ~P_SUBP; + prep_subDB: + dummy.md_xsize = 0; + dummy.md_flags = 0; + if (mc->mc_db->md_flags & MDBX_DUPFIXED) { + fp_flags |= P_LEAF2; + dummy.md_xsize = fp->mp_leaf2_ksize; + dummy.md_flags = MDBX_DUPFIXED; + if (mc->mc_db->md_flags & MDBX_INTEGERDUP) + dummy.md_flags |= MDBX_INTEGERKEY; + } + dummy.md_depth = 1; + dummy.md_branch_pages = 0; + dummy.md_leaf_pages = 1; + dummy.md_overflow_pages = 0; + dummy.md_entries = NUMKEYS(fp); + xdata.iov_len = sizeof(MDBX_db); + xdata.iov_base = &dummy; + if ((rc = mdbx_page_alloc(mc, 1, &mp, MDBX_ALLOC_ALL))) + return rc; + mdbx_cassert(mc, env->me_psize > olddata.iov_len); + offset = env->me_psize - (unsigned)olddata.iov_len; + flags |= F_DUPDATA | F_SUBDATA; + dummy.md_root = mp->mp_pgno; + sub_root = mp; + } + if (mp != fp) { + mp->mp_flags = fp_flags | P_DIRTY; + mp->mp_leaf2_ksize = fp->mp_leaf2_ksize; + mp->mp_lower = fp->mp_lower; + mdbx_cassert(mc, fp->mp_upper + offset <= UINT16_MAX); + mp->mp_upper = (indx_t)(fp->mp_upper + offset); + if (fp_flags & P_LEAF2) { + memcpy(PAGEDATA(mp), PAGEDATA(fp), NUMKEYS(fp) * fp->mp_leaf2_ksize); + } else { + memcpy((char *)mp + mp->mp_upper + PAGEHDRSZ, + (char *)fp + fp->mp_upper + PAGEHDRSZ, + olddata.iov_len - fp->mp_upper - PAGEHDRSZ); + for (i = 0; i < NUMKEYS(fp); i++) { + mdbx_cassert(mc, fp->mp_ptrs[i] + offset <= UINT16_MAX); + mp->mp_ptrs[i] = (indx_t)(fp->mp_ptrs[i] + offset); + } + } + } + + rdata = &xdata; + flags |= F_DUPDATA; + do_sub = true; + if (!insert_key) + mdbx_node_del(mc, 0); + goto new_sub; + } + current: + /* MDBX passes F_SUBDATA in 'flags' to write a DB record */ 
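+ /* A mismatch between the node's F_SUBDATA flag and the caller-supplied + * flags would mix a plain value with a sub-DB record in the same slot, + * so such an overwrite is rejected as incompatible. */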
+ if (unlikely((leaf->mn_flags ^ flags) & F_SUBDATA)) + return MDBX_INCOMPATIBLE; + /* overflow page overwrites need special handling */ + if (F_ISSET(leaf->mn_flags, F_BIGDATA)) { + MDBX_page *omp; + pgno_t pg; + int level, ovpages, dpages = OVPAGES(env, data->iov_len); + + memcpy(&pg, olddata.iov_base, sizeof(pg)); + if (unlikely((rc2 = mdbx_page_get(mc, pg, &omp, &level)) != 0)) + return rc2; + ovpages = omp->mp_pages; + + /* Is the ov page large enough? */ + if (ovpages >= dpages) { + if (!(omp->mp_flags & P_DIRTY) && + (level || (env->me_flags & MDBX_WRITEMAP))) { + rc = mdbx_page_unspill(mc->mc_txn, omp, &omp); + if (unlikely(rc)) + return rc; + level = 0; /* dirty in this txn or clean */ + } + /* Is it dirty? */ + if (omp->mp_flags & P_DIRTY) { + /* yes, overwrite it. Note in this case we don't + * bother to try shrinking the page if the new data + * is smaller than the overflow threshold. */ + if (unlikely(level > 1)) { + /* It is writable only in a parent txn */ + MDBX_page *np = mdbx_page_malloc(mc->mc_txn, ovpages); + MDBX_ID2 id2; + if (unlikely(!np)) + return MDBX_ENOMEM; + id2.mid = pg; + id2.mptr = np; + /* Note - this page is already counted in parent's dirtyroom */ + rc2 = mdbx_mid2l_insert(mc->mc_txn->mt_rw_dirtylist, &id2); + mdbx_cassert(mc, rc2 == 0); + + /* Currently we make the page look as with put() in the + * parent txn, in case the user peeks at MDBX_RESERVEd + * or unused parts. Some users treat ovpages specially. */ + const size_t whole = pgno2bytes(env, ovpages); + /* Skip the part where MDBX will put *data. + * Copy end of page, adjusting alignment so + * compiler may copy words instead of bytes. */ + const size_t off = + (PAGEHDRSZ + data->iov_len) & -(intptr_t)sizeof(size_t); + memcpy((size_t *)((char *)np + off), (size_t *)((char *)omp + off), + whole - off); + memcpy(np, omp, PAGEHDRSZ); /* Copy header of page */ + omp = np; + } + SETDSZ(leaf, data->iov_len); + if (F_ISSET(flags, MDBX_RESERVE)) + data->iov_base = PAGEDATA(omp); + else + memcpy(PAGEDATA(omp), data->iov_base, data->iov_len); + return MDBX_SUCCESS; + } + } + if ((rc2 = mdbx_ovpage_free(mc, omp)) != MDBX_SUCCESS) + return rc2; + } else if (data->iov_len == olddata.iov_len) { + assert(EVEN(key->iov_len) == EVEN(leaf->mn_ksize)); + /* same size, just replace it. Note that we could + * also reuse this node if the new data is smaller, + * but instead we opt to shrink the node in that case. */ + if (F_ISSET(flags, MDBX_RESERVE)) + data->iov_base = olddata.iov_base; + else if (!(mc->mc_flags & C_SUB)) + memcpy(olddata.iov_base, data->iov_base, data->iov_len); + else { + mdbx_cassert(mc, NUMKEYS(mc->mc_pg[mc->mc_top]) == 1); + mdbx_cassert(mc, mc->mc_pg[mc->mc_top]->mp_upper == + mc->mc_pg[mc->mc_top]->mp_lower); + mdbx_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top]) && + !IS_LEAF2(mc->mc_pg[mc->mc_top])); + mdbx_cassert(mc, NODEDSZ(leaf) == 0); + mdbx_cassert(mc, leaf->mn_flags == 0); + mdbx_cassert(mc, key->iov_len < UINT16_MAX); + leaf->mn_ksize = (uint16_t)key->iov_len; + memcpy(NODEKEY(leaf), key->iov_base, key->iov_len); + assert((char *)NODEDATA(leaf) + NODEDSZ(leaf) < + (char *)(mc->mc_pg[mc->mc_top]) + env->me_psize); + goto fix_parent; + } + return MDBX_SUCCESS; + } + mdbx_node_del(mc, 0); + } + + rdata = data; + +new_sub: + nflags = flags & NODE_ADD_FLAGS; + nsize = IS_LEAF2(mc->mc_pg[mc->mc_top]) ? 
key->iov_len + : mdbx_leaf_size(env, key, rdata); + if (SIZELEFT(mc->mc_pg[mc->mc_top]) < nsize) { + if ((flags & (F_DUPDATA | F_SUBDATA)) == F_DUPDATA) + nflags &= ~MDBX_APPEND; /* sub-page may need room to grow */ + if (!insert_key) + nflags |= MDBX_SPLIT_REPLACE; + rc = mdbx_page_split(mc, key, rdata, P_INVALID, nflags); + } else { + /* There is room already in this leaf page. */ + rc = mdbx_node_add(mc, mc->mc_ki[mc->mc_top], key, rdata, 0, nflags); + if (likely(rc == 0)) { + /* Adjust other cursors pointing to mp */ + MDBX_cursor *m2, *m3; + MDBX_dbi dbi = mc->mc_dbi; + unsigned i = mc->mc_top; + MDBX_page *mp = mc->mc_pg[i]; + + for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) { + if (mc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (m3 == mc || m3->mc_snum < mc->mc_snum || m3->mc_pg[i] != mp) + continue; + if (m3->mc_ki[i] >= mc->mc_ki[i] && insert_key) { + m3->mc_ki[i]++; + } + if (XCURSOR_INITED(m3)) + XCURSOR_REFRESH(m3, mp, m3->mc_ki[i]); + } + } + } + + if (likely(rc == MDBX_SUCCESS)) { + /* Now store the actual data in the child DB. Note that we're + * storing the user data in the keys field, so there are strict + * size limits on dupdata. The actual data fields of the child + * DB are all zero size. */ + if (do_sub) { + int xflags; + size_t ecount; + put_sub: + xdata.iov_len = 0; + xdata.iov_base = ""; + MDBX_node *leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + if (flags & MDBX_CURRENT) { + xflags = (flags & MDBX_NODUPDATA) + ? MDBX_CURRENT | MDBX_NOOVERWRITE | MDBX_NOSPILL + : MDBX_CURRENT | MDBX_NOSPILL; + } else { + mdbx_xcursor_init1(mc, leaf); + xflags = (flags & MDBX_NODUPDATA) ? MDBX_NOOVERWRITE | MDBX_NOSPILL + : MDBX_NOSPILL; + } + if (sub_root) + mc->mc_xcursor->mx_cursor.mc_pg[0] = sub_root; + /* converted, write the original data first */ + if (dupdata_flag) { + rc = mdbx_cursor_put(&mc->mc_xcursor->mx_cursor, &dkey, &xdata, xflags); + if (unlikely(rc)) + goto bad_sub; + /* we've done our job */ + dkey.iov_len = 0; + } + if (!(leaf->mn_flags & F_SUBDATA) || sub_root) { + /* Adjust other cursors pointing to mp */ + MDBX_cursor *m2; + MDBX_xcursor *mx = mc->mc_xcursor; + unsigned i = mc->mc_top; + MDBX_page *mp = mc->mc_pg[i]; + int nkeys = NUMKEYS(mp); + + for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2 = m2->mc_next) { + if (m2 == mc || m2->mc_snum < mc->mc_snum) + continue; + if (!(m2->mc_flags & C_INITIALIZED)) + continue; + if (m2->mc_pg[i] == mp) { + if (m2->mc_ki[i] == mc->mc_ki[i]) { + mdbx_xcursor_init2(m2, mx, dupdata_flag); + } else if (!insert_key && m2->mc_ki[i] < nkeys) { + XCURSOR_REFRESH(m2, mp, m2->mc_ki[i]); + } + } + } + } + mdbx_cassert(mc, mc->mc_xcursor->mx_db.md_entries < SIZE_MAX); + ecount = (size_t)mc->mc_xcursor->mx_db.md_entries; + if (flags & MDBX_APPENDDUP) + xflags |= MDBX_APPEND; + rc = mdbx_cursor_put(&mc->mc_xcursor->mx_cursor, data, &xdata, xflags); + if (flags & F_SUBDATA) { + void *db = NODEDATA(leaf); + memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDBX_db)); + } + insert_data = (ecount != (size_t)mc->mc_xcursor->mx_db.md_entries); + } + /* Increment count unless we just replaced an existing item. */ + if (insert_data) + mc->mc_db->md_entries++; + if (insert_key) { + /* Invalidate txn if we created an empty sub-DB */ + if (unlikely(rc)) + goto bad_sub; + /* If we succeeded and the key didn't exist before, + * make sure the cursor is marked valid. 
*/ + mc->mc_flags |= C_INITIALIZED; + } + if (flags & MDBX_MULTIPLE) { + if (!rc) { + mcount++; + /* let caller know how many succeeded, if any */ + data[1].iov_len = mcount; + if (mcount < dcount) { + data[0].iov_base = (char *)data[0].iov_base + data[0].iov_len; + insert_key = insert_data = false; + goto more; + } + } + } + return rc; + bad_sub: + if (unlikely(rc == MDBX_KEYEXIST)) + mdbx_error("unexpected %s", "MDBX_KEYEXIST"); + /* should not happen, we deleted that item */ + rc = MDBX_PROBLEM; + } + mc->mc_txn->mt_flags |= MDBX_TXN_ERROR; + return rc; +} + +int mdbx_cursor_del(MDBX_cursor *mc, unsigned flags) { + MDBX_node *leaf; + MDBX_page *mp; + int rc; + + if (unlikely(!mc)) + return MDBX_EINVAL; + + if (unlikely(mc->mc_signature != MDBX_MC_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(mc->mc_txn->mt_flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED))) + return (mc->mc_txn->mt_flags & MDBX_TXN_RDONLY) ? MDBX_EACCESS + : MDBX_BAD_TXN; + + if (unlikely(!(mc->mc_flags & C_INITIALIZED))) + return MDBX_EINVAL; + + if (unlikely(mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top]))) + return MDBX_NOTFOUND; + + if (unlikely(!(flags & MDBX_NOSPILL) && + (rc = mdbx_page_spill(mc, NULL, NULL)))) + return rc; + + rc = mdbx_cursor_touch(mc); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + mp = mc->mc_pg[mc->mc_top]; + if (IS_LEAF2(mp)) + goto del_key; + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + if (flags & MDBX_NODUPDATA) { + /* mdbx_cursor_del0() will subtract the final entry */ + mc->mc_db->md_entries -= mc->mc_xcursor->mx_db.md_entries - 1; + mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED; + } else { + if (!F_ISSET(leaf->mn_flags, F_SUBDATA)) { + mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); + } + rc = mdbx_cursor_del(&mc->mc_xcursor->mx_cursor, MDBX_NOSPILL); + if (unlikely(rc)) + return rc; + /* If sub-DB still has entries, we're done */ + if (mc->mc_xcursor->mx_db.md_entries) { + if (leaf->mn_flags & F_SUBDATA) { + /* update subDB info */ + void *db = NODEDATA(leaf); + memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDBX_db)); + } else { + MDBX_cursor *m2; + /* shrink fake page */ + mdbx_node_shrink(mp, mc->mc_ki[mc->mc_top]); + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); + /* fix other sub-DB cursors pointed at fake pages on this page */ + for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2 = m2->mc_next) { + if (m2 == mc || m2->mc_snum < mc->mc_snum) + continue; + if (!(m2->mc_flags & C_INITIALIZED)) + continue; + if (m2->mc_pg[mc->mc_top] == mp) { + MDBX_node *n2 = leaf; + if (m2->mc_ki[mc->mc_top] >= NUMKEYS(mp)) + continue; + if (m2->mc_ki[mc->mc_top] != mc->mc_ki[mc->mc_top]) { + n2 = NODEPTR(mp, m2->mc_ki[mc->mc_top]); + if (n2->mn_flags & F_SUBDATA) + continue; + } + m2->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(n2); + } + } + } + mc->mc_db->md_entries--; + return rc; + } else { + mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED; + } + /* otherwise fall thru and delete the sub-DB */ + } + + if (leaf->mn_flags & F_SUBDATA) { + /* add all the child DB's pages to the free list */ + rc = mdbx_drop0(&mc->mc_xcursor->mx_cursor, 0); + if (unlikely(rc)) + goto fail; + } + } + /* MDBX passes F_SUBDATA in 'flags' to delete a DB record */ + else if (unlikely((leaf->mn_flags ^ flags) & F_SUBDATA)) { + rc = MDBX_INCOMPATIBLE; + goto fail; + } + + /* add overflow pages to free list */ + if (F_ISSET(leaf->mn_flags, F_BIGDATA)) { + MDBX_page *omp; + pgno_t pg; + + 
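+ /* For an F_BIGDATA node the leaf payload is only the number of the first + * overflow page; fetch that page so the whole overflow chain can be + * returned to the free list below. */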
memcpy(&pg, NODEDATA(leaf), sizeof(pg)); + if (unlikely((rc = mdbx_page_get(mc, pg, &omp, NULL)) || + (rc = mdbx_ovpage_free(mc, omp)))) + goto fail; + } + +del_key: + return mdbx_cursor_del0(mc); + +fail: + mc->mc_txn->mt_flags |= MDBX_TXN_ERROR; + return rc; +} + +/* Allocate and initialize new pages for a database. + * Set MDBX_TXN_ERROR on failure. + * + * [in] mc a cursor on the database being added to. + * [in] flags flags defining what type of page is being allocated. + * [in] num the number of pages to allocate. This is usually 1, + * unless allocating overflow pages for a large record. + * [out] mp Address of a page, or NULL on failure. + * + * Returns 0 on success, non-zero on failure. */ +static int mdbx_page_new(MDBX_cursor *mc, unsigned flags, unsigned num, + MDBX_page **mp) { + MDBX_page *np; + int rc; + + if (unlikely((rc = mdbx_page_alloc(mc, num, &np, MDBX_ALLOC_ALL)))) + return rc; + mdbx_debug("allocated new page #%" PRIaPGNO ", size %u", np->mp_pgno, + mc->mc_txn->mt_env->me_psize); + np->mp_flags = (uint16_t)(flags | P_DIRTY); + np->mp_lower = 0; + np->mp_upper = (indx_t)(mc->mc_txn->mt_env->me_psize - PAGEHDRSZ); + + if (IS_BRANCH(np)) + mc->mc_db->md_branch_pages++; + else if (IS_LEAF(np)) + mc->mc_db->md_leaf_pages++; + else if (IS_OVERFLOW(np)) { + mc->mc_db->md_overflow_pages += num; + np->mp_pages = num; + } + *mp = np; + + return MDBX_SUCCESS; +} + +/* Calculate the size of a leaf node. + * + * The size depends on the environment's page size; if a data item + * is too large it will be put onto an overflow page and the node + * size will only include the key and not the data. Sizes are always + * rounded up to an even number of bytes, to guarantee 2-byte alignment + * of the MDBX_node headers. + * + * [in] env The environment handle. + * [in] key The key for the node. + * [in] data The data for the node. + * + * Returns The number of bytes needed to store the node. */ +static __inline size_t mdbx_leaf_size(MDBX_env *env, MDBX_val *key, + MDBX_val *data) { + size_t sz; + + sz = LEAFSIZE(key, data); + if (sz > env->me_nodemax) { + /* put on overflow page */ + sz -= data->iov_len - sizeof(pgno_t); + } + + return EVEN(sz + sizeof(indx_t)); +} + +/* Calculate the size of a branch node. + * + * The size should depend on the environment's page size but since + * we currently don't support spilling large keys onto overflow + * pages, it's simply the size of the MDBX_node header plus the + * size of the key. Sizes are always rounded up to an even number + * of bytes, to guarantee 2-byte alignment of the MDBX_node headers. + * + * [in] env The environment handle. + * [in] key The key for the node. + * + * Returns The number of bytes needed to store the node. */ +static __inline size_t mdbx_branch_size(MDBX_env *env, MDBX_val *key) { + size_t sz; + + sz = INDXSIZE(key); + if (unlikely(sz > env->me_nodemax)) { + /* put on overflow page */ + /* not implemented */ + mdbx_assert_fail(env, "INDXSIZE(key) <= env->me_nodemax", __FUNCTION__, + __LINE__); + sz -= key->iov_len - sizeof(pgno_t); + } + + return sz + sizeof(indx_t); +} + +/* Add a node to the page pointed to by the cursor. + * Set MDBX_TXN_ERROR on failure. + * + * [in] mc The cursor for this operation. + * [in] indx The index on the page where the new node should be added. + * [in] key The key for the new node. + * [in] data The data for the new node, if any. + * [in] pgno The page number, if adding a branch node. + * [in] flags Flags for the node. + * + * Returns 0 on success, non-zero on failure. 
Possible errors are: + * + * MDBX_ENOMEM - failed to allocate overflow pages for the node. + * MDBX_PAGE_FULL - there is insufficient room in the page. This error + * should never happen since all callers already calculate + * the page's free space before calling this function. */ +static int mdbx_node_add(MDBX_cursor *mc, unsigned indx, MDBX_val *key, + MDBX_val *data, pgno_t pgno, unsigned flags) { + unsigned i; + size_t node_size = NODESIZE; + intptr_t room; + MDBX_node *node; + MDBX_page *mp = mc->mc_pg[mc->mc_top]; + MDBX_page *ofp = NULL; /* overflow page */ + void *ndata; + DKBUF; + + mdbx_cassert(mc, mp->mp_upper >= mp->mp_lower); + + mdbx_debug("add to %s %spage %" PRIaPGNO " index %i, data size %" PRIuPTR + " key size %" PRIuPTR " [%s]", + IS_LEAF(mp) ? "leaf" : "branch", IS_SUBP(mp) ? "sub-" : "", + mp->mp_pgno, indx, data ? data->iov_len : 0, + key ? key->iov_len : 0, DKEY(key)); + + if (IS_LEAF2(mp)) { + mdbx_cassert(mc, key); + /* Move higher keys up one slot. */ + const int ksize = mc->mc_db->md_xsize; + char *const ptr = LEAF2KEY(mp, indx, ksize); + const int diff = NUMKEYS(mp) - indx; + if (diff > 0) + memmove(ptr + ksize, ptr, diff * ksize); + /* insert new key */ + memcpy(ptr, key->iov_base, ksize); + + /* Just using these for counting */ + mdbx_cassert(mc, UINT16_MAX - mp->mp_lower >= (int)sizeof(indx_t)); + mp->mp_lower += sizeof(indx_t); + mdbx_cassert(mc, mp->mp_upper >= ksize - sizeof(indx_t)); + mp->mp_upper -= (indx_t)(ksize - sizeof(indx_t)); + return MDBX_SUCCESS; + } + + room = (intptr_t)SIZELEFT(mp) - (intptr_t)sizeof(indx_t); + if (key != NULL) + node_size += key->iov_len; + if (IS_LEAF(mp)) { + mdbx_cassert(mc, key && data); + if (unlikely(F_ISSET(flags, F_BIGDATA))) { + /* Data already on overflow page. */ + node_size += sizeof(pgno_t); + } else if (unlikely(node_size + data->iov_len > + mc->mc_txn->mt_env->me_nodemax)) { + pgno_t ovpages = OVPAGES(mc->mc_txn->mt_env, data->iov_len); + int rc; + /* Put data on overflow page. */ + mdbx_debug("data size is %" PRIuPTR ", node would be %" PRIuPTR + ", put data on overflow page", + data->iov_len, node_size + data->iov_len); + node_size = EVEN(node_size + sizeof(pgno_t)); + if ((intptr_t)node_size > room) + goto full; + if ((rc = mdbx_page_new(mc, P_OVERFLOW, ovpages, &ofp))) + return rc; + mdbx_debug("allocated overflow page %" PRIaPGNO "", ofp->mp_pgno); + flags |= F_BIGDATA; + goto update; + } else { + node_size += data->iov_len; + } + } + node_size = EVEN(node_size); + if (unlikely((intptr_t)node_size > room)) + goto full; + +update: + /* Move higher pointers up one slot. */ + for (i = NUMKEYS(mp); i > indx; i--) + mp->mp_ptrs[i] = mp->mp_ptrs[i - 1]; + + /* Adjust free space offsets. */ + size_t ofs = mp->mp_upper - node_size; + mdbx_cassert(mc, ofs >= mp->mp_lower + sizeof(indx_t)); + mdbx_cassert(mc, ofs <= UINT16_MAX); + mp->mp_ptrs[indx] = (uint16_t)ofs; + mp->mp_upper = (uint16_t)ofs; + mp->mp_lower += sizeof(indx_t); + + /* Write the node data. */ + node = NODEPTR(mp, indx); + node->mn_ksize = (key == NULL) ? 
0 : (uint16_t)key->iov_len; + node->mn_flags = (uint16_t)flags; + if (IS_LEAF(mp)) + SETDSZ(node, data->iov_len); + else + SETPGNO(node, pgno); + + if (key) + memcpy(NODEKEY(node), key->iov_base, key->iov_len); + + if (IS_LEAF(mp)) { + ndata = NODEDATA(node); + if (unlikely(ofp == NULL)) { + if (unlikely(F_ISSET(flags, F_BIGDATA))) + memcpy(ndata, data->iov_base, sizeof(pgno_t)); + else if (F_ISSET(flags, MDBX_RESERVE)) + data->iov_base = ndata; + else if (likely(ndata != data->iov_base)) + memcpy(ndata, data->iov_base, data->iov_len); + } else { + memcpy(ndata, &ofp->mp_pgno, sizeof(pgno_t)); + ndata = PAGEDATA(ofp); + if (F_ISSET(flags, MDBX_RESERVE)) + data->iov_base = ndata; + else if (likely(ndata != data->iov_base)) + memcpy(ndata, data->iov_base, data->iov_len); + } + } + + return MDBX_SUCCESS; + +full: + mdbx_debug("not enough room in page %" PRIaPGNO ", got %u ptrs", mp->mp_pgno, + NUMKEYS(mp)); + mdbx_debug("upper-lower = %u - %u = %" PRIiPTR "", mp->mp_upper, mp->mp_lower, + room); + mdbx_debug("node size = %" PRIuPTR "", node_size); + mc->mc_txn->mt_flags |= MDBX_TXN_ERROR; + return MDBX_PAGE_FULL; +} + +/* Delete the specified node from a page. + * [in] mc Cursor pointing to the node to delete. + * [in] ksize The size of a node. Only used if the page is + * part of a MDBX_DUPFIXED database. */ +static void mdbx_node_del(MDBX_cursor *mc, size_t ksize) { + MDBX_page *mp = mc->mc_pg[mc->mc_top]; + indx_t indx = mc->mc_ki[mc->mc_top]; + indx_t i, j, numkeys, ptr; + MDBX_node *node; + char *base; + + mdbx_debug("delete node %u on %s page %" PRIaPGNO "", indx, + IS_LEAF(mp) ? "leaf" : "branch", mp->mp_pgno); + numkeys = NUMKEYS(mp); + mdbx_cassert(mc, indx < numkeys); + + if (IS_LEAF2(mp)) { + mdbx_cassert(mc, ksize >= sizeof(indx_t)); + unsigned diff = numkeys - 1 - indx; + base = LEAF2KEY(mp, indx, ksize); + if (diff) + memmove(base, base + ksize, diff * ksize); + mdbx_cassert(mc, mp->mp_lower >= sizeof(indx_t)); + mp->mp_lower -= sizeof(indx_t); + mdbx_cassert(mc, + (size_t)UINT16_MAX - mp->mp_upper >= ksize - sizeof(indx_t)); + mp->mp_upper += (indx_t)(ksize - sizeof(indx_t)); + return; + } + + node = NODEPTR(mp, indx); + size_t sz = NODESIZE + node->mn_ksize; + if (IS_LEAF(mp)) { + if (F_ISSET(node->mn_flags, F_BIGDATA)) + sz += sizeof(pgno_t); + else + sz += NODEDSZ(node); + } + sz = EVEN(sz); + + ptr = mp->mp_ptrs[indx]; + for (i = j = 0; i < numkeys; i++) { + if (i != indx) { + mp->mp_ptrs[j] = mp->mp_ptrs[i]; + if (mp->mp_ptrs[i] < ptr) { + mdbx_cassert(mc, (size_t)UINT16_MAX - mp->mp_ptrs[j] >= sz); + mp->mp_ptrs[j] += (indx_t)sz; + } + j++; + } + } + + base = (char *)mp + mp->mp_upper + PAGEHDRSZ; + memmove(base + sz, base, ptr - mp->mp_upper); + + mdbx_cassert(mc, mp->mp_lower >= sizeof(indx_t)); + mp->mp_lower -= sizeof(indx_t); + mdbx_cassert(mc, (size_t)UINT16_MAX - mp->mp_upper >= sz); + mp->mp_upper += (indx_t)sz; +} + +/* Compact the main page after deleting a node on a subpage. + * [in] mp The main page to operate on. + * [in] indx The index of the subpage on the main page. 
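+ * The unused space inside the subpage is squeezed out and the recovered + * bytes are handed back to the main page. 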
*/ +static void mdbx_node_shrink(MDBX_page *mp, unsigned indx) { + MDBX_node *node; + MDBX_page *sp, *xp; + char *base; + size_t nsize, delta, len, ptr; + int i; + + node = NODEPTR(mp, indx); + sp = (MDBX_page *)NODEDATA(node); + delta = SIZELEFT(sp); + nsize = NODEDSZ(node) - delta; + + /* Prepare to shift upward, set len = length(subpage part to shift) */ + if (IS_LEAF2(sp)) { + len = nsize; + if (nsize & 1) + return; /* do not make the node uneven-sized */ + } else { + xp = (MDBX_page *)((char *)sp + delta); /* destination subpage */ + for (i = NUMKEYS(sp); --i >= 0;) { + assert(sp->mp_ptrs[i] >= delta); + xp->mp_ptrs[i] = (indx_t)(sp->mp_ptrs[i] - delta); + } + len = PAGEHDRSZ; + } + sp->mp_upper = sp->mp_lower; + sp->mp_pgno = mp->mp_pgno; + SETDSZ(node, nsize); + + /* Shift <lower nodes...initial part of subpage> upward */ + base = (char *)mp + mp->mp_upper + PAGEHDRSZ; + memmove(base + delta, base, (char *)sp + len - base); + + ptr = mp->mp_ptrs[indx]; + for (i = NUMKEYS(mp); --i >= 0;) { + if (mp->mp_ptrs[i] <= ptr) { + assert((size_t)UINT16_MAX - mp->mp_ptrs[i] >= delta); + mp->mp_ptrs[i] += (indx_t)delta; + } + } + assert((size_t)UINT16_MAX - mp->mp_upper >= delta); + mp->mp_upper += (indx_t)delta; +} + +/* Initial setup of a sorted-dups cursor. + * + * Sorted duplicates are implemented as a sub-database for the given key. + * The duplicate data items are actually keys of the sub-database. + * Operations on the duplicate data items are performed using a sub-cursor + * initialized when the sub-database is first accessed. This function does + * the preliminary setup of the sub-cursor, filling in the fields that + * depend only on the parent DB. + * + * [in] mc The main cursor whose sorted-dups cursor is to be initialized. */ +static void mdbx_xcursor_init0(MDBX_cursor *mc) { + MDBX_xcursor *mx = mc->mc_xcursor; + + mx->mx_cursor.mc_xcursor = NULL; + mx->mx_cursor.mc_txn = mc->mc_txn; + mx->mx_cursor.mc_db = &mx->mx_db; + mx->mx_cursor.mc_dbx = &mx->mx_dbx; + mx->mx_cursor.mc_dbi = mc->mc_dbi; + mx->mx_cursor.mc_dbflag = &mx->mx_dbflag; + mx->mx_cursor.mc_snum = 0; + mx->mx_cursor.mc_top = 0; + mx->mx_cursor.mc_flags = C_SUB; + mx->mx_dbx.md_name.iov_len = 0; + mx->mx_dbx.md_name.iov_base = NULL; + mx->mx_dbx.md_cmp = mc->mc_dbx->md_dcmp; + mx->mx_dbx.md_dcmp = NULL; +} + +/* Final setup of a sorted-dups cursor. + * Sets up the fields that depend on the data from the main cursor. + * [in] mc The main cursor whose sorted-dups cursor is to be initialized. + * [in] node The data containing the MDBX_db record for the sorted-dup database. 
+ */ +static void mdbx_xcursor_init1(MDBX_cursor *mc, MDBX_node *node) { + MDBX_xcursor *mx = mc->mc_xcursor; + + mdbx_cassert(mc, mc->mc_txn->mt_txnid >= mc->mc_txn->mt_env->me_oldest[0]); + if (node->mn_flags & F_SUBDATA) { + memcpy(&mx->mx_db, NODEDATA(node), sizeof(MDBX_db)); + mx->mx_cursor.mc_pg[0] = 0; + mx->mx_cursor.mc_snum = 0; + mx->mx_cursor.mc_top = 0; + mx->mx_cursor.mc_flags = C_SUB; + } else { + MDBX_page *fp = NODEDATA(node); + mx->mx_db.md_xsize = 0; + mx->mx_db.md_flags = 0; + mx->mx_db.md_depth = 1; + mx->mx_db.md_branch_pages = 0; + mx->mx_db.md_leaf_pages = 1; + mx->mx_db.md_overflow_pages = 0; + mx->mx_db.md_entries = NUMKEYS(fp); + mx->mx_db.md_root = fp->mp_pgno; + mx->mx_cursor.mc_snum = 1; + mx->mx_cursor.mc_top = 0; + mx->mx_cursor.mc_flags = C_INITIALIZED | C_SUB; + mx->mx_cursor.mc_pg[0] = fp; + mx->mx_cursor.mc_ki[0] = 0; + if (mc->mc_db->md_flags & MDBX_DUPFIXED) { + mx->mx_db.md_flags = MDBX_DUPFIXED; + mx->mx_db.md_xsize = fp->mp_leaf2_ksize; + if (mc->mc_db->md_flags & MDBX_INTEGERDUP) + mx->mx_db.md_flags |= MDBX_INTEGERKEY; + } + } + mdbx_debug("Sub-db -%u root page %" PRIaPGNO "", mx->mx_cursor.mc_dbi, + mx->mx_db.md_root); + mx->mx_dbflag = DB_VALID | DB_USRVALID | DB_DUPDATA; + /* FIXME: #if UINT_MAX < SIZE_MAX + if (mx->mx_dbx.md_cmp == mdbx_cmp_int && mx->mx_db.md_pad == + sizeof(size_t)) + mx->mx_dbx.md_cmp = mdbx_cmp_clong; + #endif */ +} + +/* Fixup a sorted-dups cursor due to underlying update. + * Sets up some fields that depend on the data from the main cursor. + * Almost the same as init1, but skips initialization steps if the + * xcursor had already been used. + * [in] mc The main cursor whose sorted-dups cursor is to be fixed up. + * [in] src_mx The xcursor of an up-to-date cursor. + * [in] new_dupdata True if converting from a non-F_DUPDATA item. */ +static void mdbx_xcursor_init2(MDBX_cursor *mc, MDBX_xcursor *src_mx, + int new_dupdata) { + MDBX_xcursor *mx = mc->mc_xcursor; + + mdbx_cassert(mc, mc->mc_txn->mt_txnid >= mc->mc_txn->mt_env->me_oldest[0]); + if (new_dupdata) { + mx->mx_cursor.mc_snum = 1; + mx->mx_cursor.mc_top = 0; + mx->mx_cursor.mc_flags |= C_INITIALIZED; + mx->mx_cursor.mc_ki[0] = 0; + mx->mx_dbflag = DB_VALID | DB_USRVALID | DB_DUPDATA; + mx->mx_dbx.md_cmp = src_mx->mx_dbx.md_cmp; + } else if (!(mx->mx_cursor.mc_flags & C_INITIALIZED)) { + return; + } + mx->mx_db = src_mx->mx_db; + mx->mx_cursor.mc_pg[0] = src_mx->mx_cursor.mc_pg[0]; + mdbx_debug("Sub-db -%u root page %" PRIaPGNO "", mx->mx_cursor.mc_dbi, + mx->mx_db.md_root); +} + +/* Initialize a cursor for a given transaction and database. 
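+ * For an MDBX_DUPSORT database the caller must also supply an MDBX_xcursor + * to back the nested cursor used for duplicate data items. 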
*/ +static void mdbx_cursor_init(MDBX_cursor *mc, MDBX_txn *txn, MDBX_dbi dbi, + MDBX_xcursor *mx) { + mc->mc_signature = MDBX_MC_SIGNATURE; + mc->mc_next = NULL; + mc->mc_backup = NULL; + mc->mc_dbi = dbi; + mc->mc_txn = txn; + mc->mc_db = &txn->mt_dbs[dbi]; + mc->mc_dbx = &txn->mt_dbxs[dbi]; + mc->mc_dbflag = &txn->mt_dbflags[dbi]; + mc->mc_snum = 0; + mc->mc_top = 0; + mc->mc_pg[0] = 0; + mc->mc_flags = 0; + mc->mc_ki[0] = 0; + mc->mc_xcursor = NULL; + if (txn->mt_dbs[dbi].md_flags & MDBX_DUPSORT) { + mdbx_tassert(txn, mx != NULL); + mx->mx_cursor.mc_signature = MDBX_MC_SIGNATURE; + mc->mc_xcursor = mx; + mdbx_xcursor_init0(mc); + } + mdbx_cassert(mc, mc->mc_txn->mt_txnid >= mc->mc_txn->mt_env->me_oldest[0]); + if (unlikely(*mc->mc_dbflag & DB_STALE)) { + mdbx_page_search(mc, NULL, MDBX_PS_ROOTONLY); + } +} + +int mdbx_cursor_open(MDBX_txn *txn, MDBX_dbi dbi, MDBX_cursor **ret) { + MDBX_cursor *mc; + size_t size = sizeof(MDBX_cursor); + + if (unlikely(!ret || !txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_VALID))) + return MDBX_EINVAL; + + if (unlikely(txn->mt_flags & MDBX_TXN_BLOCKED)) + return MDBX_BAD_TXN; + + if (unlikely(dbi == FREE_DBI && !F_ISSET(txn->mt_flags, MDBX_TXN_RDONLY))) + return MDBX_EINVAL; + + if (txn->mt_dbs[dbi].md_flags & MDBX_DUPSORT) + size += sizeof(MDBX_xcursor); + + if (likely((mc = malloc(size)) != NULL)) { + mdbx_cursor_init(mc, txn, dbi, (MDBX_xcursor *)(mc + 1)); + if (txn->mt_cursors) { + mc->mc_next = txn->mt_cursors[dbi]; + txn->mt_cursors[dbi] = mc; + mc->mc_flags |= C_UNTRACK; + } + } else { + return MDBX_ENOMEM; + } + + *ret = mc; + + return MDBX_SUCCESS; +} + +int mdbx_cursor_renew(MDBX_txn *txn, MDBX_cursor *mc) { + if (unlikely(!mc || !txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(mc->mc_signature != MDBX_MC_SIGNATURE && + mc->mc_signature != MDBX_MC_READY4CLOSE)) + return MDBX_EINVAL; + + if (unlikely(!TXN_DBI_EXIST(txn, mc->mc_dbi, DB_VALID))) + return MDBX_EINVAL; + + if (unlikely(mc->mc_backup)) + return MDBX_EINVAL; + + if (unlikely((mc->mc_flags & C_UNTRACK) || txn->mt_cursors)) { + MDBX_cursor **prev = &mc->mc_txn->mt_cursors[mc->mc_dbi]; + while (*prev && *prev != mc) + prev = &(*prev)->mc_next; + if (*prev == mc) + *prev = mc->mc_next; + mc->mc_signature = MDBX_MC_READY4CLOSE; + } + + if (unlikely(txn->mt_flags & MDBX_TXN_BLOCKED)) + return MDBX_BAD_TXN; + + mdbx_cursor_init(mc, txn, mc->mc_dbi, mc->mc_xcursor); + return MDBX_SUCCESS; +} + +/* Return the count of duplicate data items for the current key */ +int mdbx_cursor_count(MDBX_cursor *mc, size_t *countp) { + if (unlikely(mc == NULL || countp == NULL)) + return MDBX_EINVAL; + + if (unlikely(mc->mc_signature != MDBX_MC_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(mc->mc_txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(mc->mc_txn->mt_flags & MDBX_TXN_BLOCKED)) + return MDBX_BAD_TXN; + + if (unlikely(!(mc->mc_flags & C_INITIALIZED))) + return MDBX_EINVAL; + + if (!mc->mc_snum) { + *countp = 0; + return MDBX_NOTFOUND; + } + + MDBX_page *mp = mc->mc_pg[mc->mc_top]; + if ((mc->mc_flags & C_EOF) && mc->mc_ki[mc->mc_top] >= NUMKEYS(mp)) { + *countp = 0; + return MDBX_NOTFOUND; + } + + 
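+ /* A plain node counts as a single item; for an F_DUPDATA node report the + * number of entries in the nested dup sub-database instead. */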
*countp = 1; + if (mc->mc_xcursor != NULL) { + MDBX_node *leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdbx_cassert(mc, mc->mc_xcursor && (mc->mc_xcursor->mx_cursor.mc_flags & + C_INITIALIZED)); + *countp = unlikely(mc->mc_xcursor->mx_db.md_entries > SIZE_MAX) + ? SIZE_MAX + : (size_t)mc->mc_xcursor->mx_db.md_entries; + } + } + return MDBX_SUCCESS; +} + +void mdbx_cursor_close(MDBX_cursor *mc) { + if (mc) { + mdbx_ensure(NULL, mc->mc_signature == MDBX_MC_SIGNATURE || + mc->mc_signature == MDBX_MC_READY4CLOSE); + if (!mc->mc_backup) { + /* Remove from txn, if tracked. + * A read-only txn (!C_UNTRACK) may have been freed already, + * so do not peek inside it. Only write txns track cursors. */ + if ((mc->mc_flags & C_UNTRACK) && mc->mc_txn->mt_cursors) { + MDBX_cursor **prev = &mc->mc_txn->mt_cursors[mc->mc_dbi]; + while (*prev && *prev != mc) + prev = &(*prev)->mc_next; + if (*prev == mc) + *prev = mc->mc_next; + } + mc->mc_signature = 0; + free(mc); + } else { + /* cursor closed before nested txn ends */ + mdbx_cassert(mc, mc->mc_signature == MDBX_MC_SIGNATURE); + mc->mc_signature = MDBX_MC_WAIT4EOT; + } + } +} + +MDBX_txn *mdbx_cursor_txn(MDBX_cursor *mc) { + if (unlikely(!mc || mc->mc_signature != MDBX_MC_SIGNATURE)) + return NULL; + return mc->mc_txn; +} + +MDBX_dbi mdbx_cursor_dbi(MDBX_cursor *mc) { + if (unlikely(!mc || mc->mc_signature != MDBX_MC_SIGNATURE)) + return UINT_MAX; + return mc->mc_dbi; +} + +/* Replace the key for a branch node with a new key. + * Set MDBX_TXN_ERROR on failure. + * [in] mc Cursor pointing to the node to operate on. + * [in] key The new key to use. + * Returns 0 on success, non-zero on failure. */ +static int mdbx_update_key(MDBX_cursor *mc, MDBX_val *key) { + MDBX_page *mp; + MDBX_node *node; + char *base; + size_t len; + int delta, ksize, oksize; + indx_t ptr, i, numkeys, indx; + DKBUF; + + indx = mc->mc_ki[mc->mc_top]; + mp = mc->mc_pg[mc->mc_top]; + node = NODEPTR(mp, indx); + ptr = mp->mp_ptrs[indx]; + if (MDBX_DEBUG) { + MDBX_val k2; + char kbuf2[DKBUF_MAXKEYSIZE * 2 + 1]; + k2.iov_base = NODEKEY(node); + k2.iov_len = node->mn_ksize; + mdbx_debug("update key %u (ofs %u) [%s] to [%s] on page %" PRIaPGNO "", + indx, ptr, mdbx_dkey(&k2, kbuf2, sizeof(kbuf2)), DKEY(key), + mp->mp_pgno); + } + + /* Sizes must be 2-byte aligned. */ + ksize = EVEN(key->iov_len); + oksize = EVEN(node->mn_ksize); + delta = ksize - oksize; + + /* Shift node contents if EVEN(key length) changed. 
*/ + if (delta) { + if (delta > 0 && SIZELEFT(mp) < delta) { + pgno_t pgno; + /* not enough space left, do a delete and split */ + mdbx_debug("Not enough room, delta = %d, splitting...", delta); + pgno = NODEPGNO(node); + mdbx_node_del(mc, 0); + return mdbx_page_split(mc, key, NULL, pgno, MDBX_SPLIT_REPLACE); + } + + numkeys = NUMKEYS(mp); + for (i = 0; i < numkeys; i++) { + if (mp->mp_ptrs[i] <= ptr) { + mdbx_cassert(mc, mp->mp_ptrs[i] >= delta); + mp->mp_ptrs[i] -= (indx_t)delta; + } + } + + base = (char *)mp + mp->mp_upper + PAGEHDRSZ; + len = ptr - mp->mp_upper + NODESIZE; + memmove(base - delta, base, len); + mdbx_cassert(mc, mp->mp_upper >= delta); + mp->mp_upper -= (indx_t)delta; + + node = NODEPTR(mp, indx); + } + + /* But even if no shift was needed, update ksize */ + if (node->mn_ksize != key->iov_len) + node->mn_ksize = (uint16_t)key->iov_len; + + if (key->iov_len) + memcpy(NODEKEY(node), key->iov_base, key->iov_len); + + return MDBX_SUCCESS; +} + +static void mdbx_cursor_copy(const MDBX_cursor *csrc, MDBX_cursor *cdst); + +/* Perform act while tracking temporary cursor mn */ +#define WITH_CURSOR_TRACKING(mn, act) \ + do { \ + mdbx_cassert(&(mn), \ + mn.mc_txn->mt_cursors != NULL /* must be not rdonly txt */); \ + MDBX_cursor mc_dummy, *tracked, \ + **tp = &(mn).mc_txn->mt_cursors[mn.mc_dbi]; \ + if ((mn).mc_flags & C_SUB) { \ + mc_dummy.mc_flags = C_INITIALIZED; \ + mc_dummy.mc_xcursor = (MDBX_xcursor *)&(mn); \ + tracked = &mc_dummy; \ + } else { \ + tracked = &(mn); \ + } \ + tracked->mc_next = *tp; \ + *tp = tracked; \ + { act; } \ + *tp = tracked->mc_next; \ + } while (0) + +/* Move a node from csrc to cdst. */ +static int mdbx_node_move(MDBX_cursor *csrc, MDBX_cursor *cdst, int fromleft) { + MDBX_node *srcnode; + MDBX_val key, data; + pgno_t srcpg; + MDBX_cursor mn; + int rc; + unsigned flags; + + DKBUF; + + /* Mark src and dst as dirty. 
*/ + if (unlikely((rc = mdbx_page_touch(csrc)) || (rc = mdbx_page_touch(cdst)))) + return rc; + + if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { + key.iov_len = csrc->mc_db->md_xsize; + key.iov_base = LEAF2KEY(csrc->mc_pg[csrc->mc_top], + csrc->mc_ki[csrc->mc_top], key.iov_len); + data.iov_len = 0; + data.iov_base = NULL; + srcpg = 0; + flags = 0; + } else { + srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top]); + mdbx_cassert(csrc, !((size_t)srcnode & 1)); + srcpg = NODEPGNO(srcnode); + flags = srcnode->mn_flags; + if (csrc->mc_ki[csrc->mc_top] == 0 && + IS_BRANCH(csrc->mc_pg[csrc->mc_top])) { + unsigned snum = csrc->mc_snum; + MDBX_node *s2; + /* must find the lowest key below src */ + rc = mdbx_page_search_lowest(csrc); + if (unlikely(rc)) + return rc; + if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { + key.iov_len = csrc->mc_db->md_xsize; + key.iov_base = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.iov_len); + } else { + s2 = NODEPTR(csrc->mc_pg[csrc->mc_top], 0); + key.iov_len = NODEKSZ(s2); + key.iov_base = NODEKEY(s2); + } + mdbx_cassert(csrc, snum >= 1 && snum <= UINT16_MAX); + csrc->mc_snum = (uint16_t)snum--; + csrc->mc_top = (uint16_t)snum; + } else { + key.iov_len = NODEKSZ(srcnode); + key.iov_base = NODEKEY(srcnode); + } + data.iov_len = NODEDSZ(srcnode); + data.iov_base = NODEDATA(srcnode); + } + mn.mc_xcursor = NULL; + if (IS_BRANCH(cdst->mc_pg[cdst->mc_top]) && cdst->mc_ki[cdst->mc_top] == 0) { + unsigned snum = cdst->mc_snum; + MDBX_node *s2; + MDBX_val bkey; + /* must find the lowest key below dst */ + mdbx_cursor_copy(cdst, &mn); + rc = mdbx_page_search_lowest(&mn); + if (unlikely(rc)) + return rc; + if (IS_LEAF2(mn.mc_pg[mn.mc_top])) { + bkey.iov_len = mn.mc_db->md_xsize; + bkey.iov_base = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, bkey.iov_len); + } else { + s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0); + bkey.iov_len = NODEKSZ(s2); + bkey.iov_base = NODEKEY(s2); + } + mdbx_cassert(csrc, snum >= 1 && snum <= UINT16_MAX); + mn.mc_snum = (uint16_t)snum--; + mn.mc_top = (uint16_t)snum; + mn.mc_ki[snum] = 0; + rc = mdbx_update_key(&mn, &bkey); + if (unlikely(rc)) + return rc; + } + + mdbx_debug("moving %s node %u [%s] on page %" PRIaPGNO + " to node %u on page %" PRIaPGNO "", + IS_LEAF(csrc->mc_pg[csrc->mc_top]) ? "leaf" : "branch", + csrc->mc_ki[csrc->mc_top], DKEY(&key), + csrc->mc_pg[csrc->mc_top]->mp_pgno, cdst->mc_ki[cdst->mc_top], + cdst->mc_pg[cdst->mc_top]->mp_pgno); + + /* Add the node to the destination page. */ + rc = + mdbx_node_add(cdst, cdst->mc_ki[cdst->mc_top], &key, &data, srcpg, flags); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + /* Delete the node from the source page. 
*/ + mdbx_node_del(csrc, key.iov_len); + + { + /* Adjust other cursors pointing to mp */ + MDBX_cursor *m2, *m3; + MDBX_dbi dbi = csrc->mc_dbi; + MDBX_page *mpd, *mps; + + mps = csrc->mc_pg[csrc->mc_top]; + /* If we're adding on the left, bump others up */ + if (fromleft) { + mpd = cdst->mc_pg[csrc->mc_top]; + for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) { + if (csrc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top) + continue; + if (m3 != cdst && m3->mc_pg[csrc->mc_top] == mpd && + m3->mc_ki[csrc->mc_top] >= cdst->mc_ki[csrc->mc_top]) { + m3->mc_ki[csrc->mc_top]++; + } + if (m3 != csrc && m3->mc_pg[csrc->mc_top] == mps && + m3->mc_ki[csrc->mc_top] == csrc->mc_ki[csrc->mc_top]) { + m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top]; + m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top]; + m3->mc_ki[csrc->mc_top - 1]++; + } + if (XCURSOR_INITED(m3) && IS_LEAF(mps)) + XCURSOR_REFRESH(m3, m3->mc_pg[csrc->mc_top], m3->mc_ki[csrc->mc_top]); + } + } else + /* Adding on the right, bump others down */ + { + for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) { + if (csrc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (m3 == csrc) + continue; + if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top) + continue; + if (m3->mc_pg[csrc->mc_top] == mps) { + if (!m3->mc_ki[csrc->mc_top]) { + m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top]; + m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top]; + m3->mc_ki[csrc->mc_top - 1]--; + } else { + m3->mc_ki[csrc->mc_top]--; + } + if (XCURSOR_INITED(m3) && IS_LEAF(mps)) + XCURSOR_REFRESH(m3, m3->mc_pg[csrc->mc_top], + m3->mc_ki[csrc->mc_top]); + } + } + } + } + + /* Update the parent separators. 
*/ + if (csrc->mc_ki[csrc->mc_top] == 0) { + if (csrc->mc_ki[csrc->mc_top - 1] != 0) { + if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { + key.iov_base = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.iov_len); + } else { + srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], 0); + key.iov_len = NODEKSZ(srcnode); + key.iov_base = NODEKEY(srcnode); + } + mdbx_debug("update separator for source page %" PRIaPGNO " to [%s]", + csrc->mc_pg[csrc->mc_top]->mp_pgno, DKEY(&key)); + mdbx_cursor_copy(csrc, &mn); + mn.mc_snum--; + mn.mc_top--; + /* We want mdbx_rebalance to find mn when doing fixups */ + WITH_CURSOR_TRACKING(mn, rc = mdbx_update_key(&mn, &key)); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + } + if (IS_BRANCH(csrc->mc_pg[csrc->mc_top])) { + MDBX_val nullkey; + indx_t ix = csrc->mc_ki[csrc->mc_top]; + nullkey.iov_len = 0; + csrc->mc_ki[csrc->mc_top] = 0; + rc = mdbx_update_key(csrc, &nullkey); + csrc->mc_ki[csrc->mc_top] = ix; + mdbx_cassert(csrc, rc == MDBX_SUCCESS); + } + } + + if (cdst->mc_ki[cdst->mc_top] == 0) { + if (cdst->mc_ki[cdst->mc_top - 1] != 0) { + if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { + key.iov_base = LEAF2KEY(cdst->mc_pg[cdst->mc_top], 0, key.iov_len); + } else { + srcnode = NODEPTR(cdst->mc_pg[cdst->mc_top], 0); + key.iov_len = NODEKSZ(srcnode); + key.iov_base = NODEKEY(srcnode); + } + mdbx_debug("update separator for destination page %" PRIaPGNO " to [%s]", + cdst->mc_pg[cdst->mc_top]->mp_pgno, DKEY(&key)); + mdbx_cursor_copy(cdst, &mn); + mn.mc_snum--; + mn.mc_top--; + /* We want mdbx_rebalance to find mn when doing fixups */ + WITH_CURSOR_TRACKING(mn, rc = mdbx_update_key(&mn, &key)); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + } + if (IS_BRANCH(cdst->mc_pg[cdst->mc_top])) { + MDBX_val nullkey; + indx_t ix = cdst->mc_ki[cdst->mc_top]; + nullkey.iov_len = 0; + cdst->mc_ki[cdst->mc_top] = 0; + rc = mdbx_update_key(cdst, &nullkey); + cdst->mc_ki[cdst->mc_top] = ix; + mdbx_cassert(cdst, rc == MDBX_SUCCESS); + } + } + + return MDBX_SUCCESS; +} + +/* Merge one page into another. + * + * The nodes from the page pointed to by csrc will be copied to the page + * pointed to by cdst and then the csrc page will be freed. + * + * [in] csrc Cursor pointing to the source page. + * [in] cdst Cursor pointing to the destination page. + * + * Returns 0 on success, non-zero on failure. */ +static int mdbx_page_merge(MDBX_cursor *csrc, MDBX_cursor *cdst) { + MDBX_page *psrc, *pdst; + MDBX_node *srcnode; + MDBX_val key, data; + unsigned nkeys; + int rc; + unsigned i, j; + + psrc = csrc->mc_pg[csrc->mc_top]; + pdst = cdst->mc_pg[cdst->mc_top]; + + mdbx_debug("merging page %" PRIaPGNO " into %" PRIaPGNO "", psrc->mp_pgno, + pdst->mp_pgno); + + mdbx_cassert(csrc, csrc->mc_snum > 1); /* can't merge root page */ + mdbx_cassert(csrc, cdst->mc_snum > 1); + + /* Mark dst as dirty. */ + if (unlikely(rc = mdbx_page_touch(cdst))) + return rc; + + /* get dst page again now that we've touched it. */ + pdst = cdst->mc_pg[cdst->mc_top]; + + /* Move all nodes from src to dst. 
*/ + j = nkeys = NUMKEYS(pdst); + if (IS_LEAF2(psrc)) { + key.iov_len = csrc->mc_db->md_xsize; + key.iov_base = PAGEDATA(psrc); + for (i = 0; i < NUMKEYS(psrc); i++, j++) { + rc = mdbx_node_add(cdst, j, &key, NULL, 0, 0); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + key.iov_base = (char *)key.iov_base + key.iov_len; + } + } else { + for (i = 0; i < NUMKEYS(psrc); i++, j++) { + srcnode = NODEPTR(psrc, i); + if (i == 0 && IS_BRANCH(psrc)) { + MDBX_cursor mn; + MDBX_node *s2; + mdbx_cursor_copy(csrc, &mn); + mn.mc_xcursor = NULL; + /* must find the lowest key below src */ + rc = mdbx_page_search_lowest(&mn); + if (unlikely(rc)) + return rc; + if (IS_LEAF2(mn.mc_pg[mn.mc_top])) { + key.iov_len = mn.mc_db->md_xsize; + key.iov_base = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, key.iov_len); + } else { + s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0); + key.iov_len = NODEKSZ(s2); + key.iov_base = NODEKEY(s2); + } + } else { + key.iov_len = srcnode->mn_ksize; + key.iov_base = NODEKEY(srcnode); + } + + data.iov_len = NODEDSZ(srcnode); + data.iov_base = NODEDATA(srcnode); + rc = mdbx_node_add(cdst, j, &key, &data, NODEPGNO(srcnode), + srcnode->mn_flags); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + } + } + + mdbx_debug("dst page %" PRIaPGNO " now has %u keys (%.1f%% filled)", + pdst->mp_pgno, NUMKEYS(pdst), + (float)PAGEFILL(cdst->mc_txn->mt_env, pdst) / 10); + + /* Unlink the src page from parent and add to free list. */ + csrc->mc_top--; + mdbx_node_del(csrc, 0); + if (csrc->mc_ki[csrc->mc_top] == 0) { + key.iov_len = 0; + rc = mdbx_update_key(csrc, &key); + if (unlikely(rc)) { + csrc->mc_top++; + return rc; + } + } + csrc->mc_top++; + + psrc = csrc->mc_pg[csrc->mc_top]; + /* If not operating on FreeDB, allow this page to be reused + * in this txn. Otherwise just add to free list. */ + rc = mdbx_page_loose(csrc, psrc); + if (unlikely(rc)) + return rc; + if (IS_LEAF(psrc)) + csrc->mc_db->md_leaf_pages--; + else + csrc->mc_db->md_branch_pages--; + { + /* Adjust other cursors pointing to mp */ + MDBX_cursor *m2, *m3; + MDBX_dbi dbi = csrc->mc_dbi; + unsigned top = csrc->mc_top; + + for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) { + if (csrc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (m3 == csrc) + continue; + if (m3->mc_snum < csrc->mc_snum) + continue; + if (m3->mc_pg[top] == psrc) { + m3->mc_pg[top] = pdst; + mdbx_cassert(m3, nkeys + m3->mc_ki[top] <= UINT16_MAX); + m3->mc_ki[top] += (indx_t)nkeys; + m3->mc_ki[top - 1] = cdst->mc_ki[top - 1]; + } else if (m3->mc_pg[top - 1] == csrc->mc_pg[top - 1] && + m3->mc_ki[top - 1] > csrc->mc_ki[top - 1]) { + m3->mc_ki[top - 1]--; + } + if (XCURSOR_INITED(m3) && IS_LEAF(psrc)) + XCURSOR_REFRESH(m3, m3->mc_pg[top], m3->mc_ki[top]); + } + } + { + unsigned snum = cdst->mc_snum; + uint16_t depth = cdst->mc_db->md_depth; + mdbx_cursor_pop(cdst); + rc = mdbx_rebalance(cdst); + /* Did the tree height change? */ + if (depth != cdst->mc_db->md_depth) + snum += cdst->mc_db->md_depth - depth; + mdbx_cassert(cdst, snum >= 1 && snum <= UINT16_MAX); + cdst->mc_snum = (uint16_t)snum; + cdst->mc_top = (uint16_t)(snum - 1); + } + return rc; +} + +/* Copy the contents of a cursor. + * [in] csrc The cursor to copy from. + * [out] cdst The cursor to copy to. 
*/ +static void mdbx_cursor_copy(const MDBX_cursor *csrc, MDBX_cursor *cdst) { + unsigned i; + + mdbx_cassert(csrc, + csrc->mc_txn->mt_txnid >= csrc->mc_txn->mt_env->me_oldest[0]); + cdst->mc_txn = csrc->mc_txn; + cdst->mc_dbi = csrc->mc_dbi; + cdst->mc_db = csrc->mc_db; + cdst->mc_dbx = csrc->mc_dbx; + cdst->mc_snum = csrc->mc_snum; + cdst->mc_top = csrc->mc_top; + cdst->mc_flags = csrc->mc_flags; + + for (i = 0; i < csrc->mc_snum; i++) { + cdst->mc_pg[i] = csrc->mc_pg[i]; + cdst->mc_ki[i] = csrc->mc_ki[i]; + } +} + +/* Rebalance the tree after a delete operation. + * [in] mc Cursor pointing to the page where rebalancing should begin. + * Returns 0 on success, non-zero on failure. */ +static int mdbx_rebalance(MDBX_cursor *mc) { + MDBX_node *node; + int rc, fromleft; + unsigned ptop, minkeys, thresh; + MDBX_cursor mn; + indx_t oldki; + + if (IS_BRANCH(mc->mc_pg[mc->mc_top])) { + minkeys = 2; + thresh = 1; + } else { + minkeys = 1; + thresh = FILL_THRESHOLD; + } + mdbx_debug("rebalancing %s page %" PRIaPGNO " (has %u keys, %.1f%% full)", + IS_LEAF(mc->mc_pg[mc->mc_top]) ? "leaf" : "branch", + mc->mc_pg[mc->mc_top]->mp_pgno, NUMKEYS(mc->mc_pg[mc->mc_top]), + (float)PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) / 10); + + if (PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) >= thresh && + NUMKEYS(mc->mc_pg[mc->mc_top]) >= minkeys) { + mdbx_debug("no need to rebalance page %" PRIaPGNO ", above fill threshold", + mc->mc_pg[mc->mc_top]->mp_pgno); + return MDBX_SUCCESS; + } + + if (mc->mc_snum < 2) { + MDBX_page *mp = mc->mc_pg[0]; + unsigned nkeys = NUMKEYS(mp); + if (IS_SUBP(mp)) { + mdbx_debug("Can't rebalance a subpage, ignoring"); + return MDBX_SUCCESS; + } + if (nkeys == 0) { + mdbx_debug("tree is completely empty"); + mc->mc_db->md_root = P_INVALID; + mc->mc_db->md_depth = 0; + mc->mc_db->md_leaf_pages = 0; + rc = mdbx_pnl_append(&mc->mc_txn->mt_befree_pages, mp->mp_pgno); + if (unlikely(rc)) + return rc; + /* Adjust cursors pointing to mp */ + mc->mc_snum = 0; + mc->mc_top = 0; + mc->mc_flags &= ~C_INITIALIZED; + { + MDBX_cursor *m2, *m3; + MDBX_dbi dbi = mc->mc_dbi; + + for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) { + if (mc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (!(m3->mc_flags & C_INITIALIZED) || (m3->mc_snum < mc->mc_snum)) + continue; + if (m3->mc_pg[0] == mp) { + m3->mc_snum = 0; + m3->mc_top = 0; + m3->mc_flags &= ~C_INITIALIZED; + } + } + } + } else if (IS_BRANCH(mp) && NUMKEYS(mp) == 1) { + int i; + mdbx_debug("collapsing root page!"); + rc = mdbx_pnl_append(&mc->mc_txn->mt_befree_pages, mp->mp_pgno); + if (unlikely(rc)) + return rc; + mc->mc_db->md_root = NODEPGNO(NODEPTR(mp, 0)); + rc = mdbx_page_get(mc, mc->mc_db->md_root, &mc->mc_pg[0], NULL); + if (unlikely(rc)) + return rc; + mc->mc_db->md_depth--; + mc->mc_db->md_branch_pages--; + mc->mc_ki[0] = mc->mc_ki[1]; + for (i = 1; i < mc->mc_db->md_depth; i++) { + mc->mc_pg[i] = mc->mc_pg[i + 1]; + mc->mc_ki[i] = mc->mc_ki[i + 1]; + } + { + /* Adjust other cursors pointing to mp */ + MDBX_cursor *m2, *m3; + MDBX_dbi dbi = mc->mc_dbi; + + for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) { + if (mc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (m3 == mc) + continue; + if (!(m3->mc_flags & C_INITIALIZED)) + continue; + if (m3->mc_pg[0] == mp) { + for (i = 0; i < mc->mc_db->md_depth; i++) { + m3->mc_pg[i] = m3->mc_pg[i + 1]; + m3->mc_ki[i] = m3->mc_ki[i + 1]; + } + m3->mc_snum--; + m3->mc_top--; + } + } + } + } else { + 
mdbx_debug("root page %" PRIaPGNO + " doesn't need rebalancing (flags 0x%x)", + mp->mp_pgno, mp->mp_flags); + } + return MDBX_SUCCESS; + } + + /* The parent (branch page) must have at least 2 pointers, + * otherwise the tree is invalid. */ + ptop = mc->mc_top - 1; + mdbx_cassert(mc, NUMKEYS(mc->mc_pg[ptop]) > 1); + + /* Leaf page fill factor is below the threshold. + * Try to move keys from left or right neighbor, or + * merge with a neighbor page. */ + + /* Find neighbors. */ + mdbx_cursor_copy(mc, &mn); + mn.mc_xcursor = NULL; + + oldki = mc->mc_ki[mc->mc_top]; + if (mc->mc_ki[ptop] == 0) { + /* We're the leftmost leaf in our parent. */ + mdbx_debug("reading right neighbor"); + mn.mc_ki[ptop]++; + node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]); + rc = mdbx_page_get(mc, NODEPGNO(node), &mn.mc_pg[mn.mc_top], NULL); + if (unlikely(rc)) + return rc; + mn.mc_ki[mn.mc_top] = 0; + mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]); + fromleft = 0; + } else { + /* There is at least one neighbor to the left. */ + mdbx_debug("reading left neighbor"); + mn.mc_ki[ptop]--; + node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]); + rc = mdbx_page_get(mc, NODEPGNO(node), &mn.mc_pg[mn.mc_top], NULL); + if (unlikely(rc)) + return rc; + mn.mc_ki[mn.mc_top] = NUMKEYS(mn.mc_pg[mn.mc_top]) - 1; + mc->mc_ki[mc->mc_top] = 0; + fromleft = 1; + } + + mdbx_debug("found neighbor page %" PRIaPGNO " (%u keys, %.1f%% full)", + mn.mc_pg[mn.mc_top]->mp_pgno, NUMKEYS(mn.mc_pg[mn.mc_top]), + (float)PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) / 10); + + /* If the neighbor page is above threshold and has enough keys, + * move one key from it. Otherwise we should try to merge them. + * (A branch page must never have less than 2 keys.) */ + if (PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) >= thresh && + NUMKEYS(mn.mc_pg[mn.mc_top]) > minkeys) { + rc = mdbx_node_move(&mn, mc, fromleft); + if (fromleft) { + /* if we inserted on left, bump position up */ + oldki++; + } + } else { + if (!fromleft) { + rc = mdbx_page_merge(&mn, mc); + } else { + oldki += NUMKEYS(mn.mc_pg[mn.mc_top]); + mn.mc_ki[mn.mc_top] += mc->mc_ki[mn.mc_top] + 1; + /* We want mdbx_rebalance to find mn when doing fixups */ + WITH_CURSOR_TRACKING(mn, rc = mdbx_page_merge(mc, &mn)); + mdbx_cursor_copy(&mn, mc); + } + mc->mc_flags &= ~C_EOF; + } + mc->mc_ki[mc->mc_top] = oldki; + return rc; +} + +/* Complete a delete operation started by mdbx_cursor_del(). */ +static int mdbx_cursor_del0(MDBX_cursor *mc) { + int rc; + MDBX_page *mp; + indx_t ki; + unsigned nkeys; + MDBX_cursor *m2, *m3; + MDBX_dbi dbi = mc->mc_dbi; + + ki = mc->mc_ki[mc->mc_top]; + mp = mc->mc_pg[mc->mc_top]; + mdbx_node_del(mc, mc->mc_db->md_xsize); + mc->mc_db->md_entries--; + { + /* Adjust other cursors pointing to mp */ + for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) { + m3 = (mc->mc_flags & C_SUB) ? 
&m2->mc_xcursor->mx_cursor : m2; + if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED)) + continue; + if (m3 == mc || m3->mc_snum < mc->mc_snum) + continue; + if (m3->mc_pg[mc->mc_top] == mp) { + if (m3->mc_ki[mc->mc_top] == ki) { + m3->mc_flags |= C_DEL; + if (mc->mc_db->md_flags & MDBX_DUPSORT) { + /* Sub-cursor referred into dataset which is gone */ + m3->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED | C_EOF); + } + continue; + } else if (m3->mc_ki[mc->mc_top] > ki) { + m3->mc_ki[mc->mc_top]--; + } + if (XCURSOR_INITED(m3)) + XCURSOR_REFRESH(m3, m3->mc_pg[mc->mc_top], m3->mc_ki[mc->mc_top]); + } + } + } + rc = mdbx_rebalance(mc); + + if (likely(rc == MDBX_SUCCESS)) { + /* DB is totally empty now, just bail out. + * Other cursors adjustments were already done + * by mdbx_rebalance and aren't needed here. */ + if (!mc->mc_snum) { + mc->mc_flags |= C_DEL | C_EOF; + return rc; + } + + mp = mc->mc_pg[mc->mc_top]; + nkeys = NUMKEYS(mp); + + /* Adjust other cursors pointing to mp */ + for (m2 = mc->mc_txn->mt_cursors[dbi]; !rc && m2; m2 = m2->mc_next) { + m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2; + if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED)) + continue; + if (m3->mc_snum < mc->mc_snum) + continue; + if (m3->mc_pg[mc->mc_top] == mp) { + /* if m3 points past last node in page, find next sibling */ + if (m3->mc_ki[mc->mc_top] >= mc->mc_ki[mc->mc_top]) { + if (m3->mc_ki[mc->mc_top] >= nkeys) { + rc = mdbx_cursor_sibling(m3, 1); + if (rc == MDBX_NOTFOUND) { + m3->mc_flags |= C_EOF; + rc = MDBX_SUCCESS; + continue; + } + } + if (mc->mc_db->md_flags & MDBX_DUPSORT) { + MDBX_node *node = + NODEPTR(m3->mc_pg[m3->mc_top], m3->mc_ki[m3->mc_top]); + /* If this node has dupdata, it may need to be reinited + * because its data has moved. + * If the xcursor was not initd it must be reinited. + * Else if node points to a subDB, nothing is needed. */ + if (node->mn_flags & F_DUPDATA) { + if (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { + if (!(node->mn_flags & F_SUBDATA)) + m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node); + } else { + mdbx_xcursor_init1(m3, node); + m3->mc_xcursor->mx_cursor.mc_flags |= C_DEL; + } + } + } + } + } + } + mc->mc_flags |= C_DEL; + } + + if (unlikely(rc)) + mc->mc_txn->mt_flags |= MDBX_TXN_ERROR; + return rc; +} + +int mdbx_del(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, MDBX_val *data) { + if (unlikely(!key || !txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))) + return MDBX_EINVAL; + + if (unlikely(txn->mt_flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED))) + return (txn->mt_flags & MDBX_TXN_RDONLY) ? 
MDBX_EACCESS : MDBX_BAD_TXN; + + return mdbx_del0(txn, dbi, key, data, 0); +} + +static int mdbx_del0(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, MDBX_val *data, + unsigned flags) { + MDBX_cursor mc; + MDBX_xcursor mx; + MDBX_cursor_op op; + MDBX_val rdata; + int rc, exact = 0; + DKBUF; + + mdbx_debug("====> delete db %u key [%s], data [%s]", dbi, DKEY(key), + DVAL(data)); + + mdbx_cursor_init(&mc, txn, dbi, &mx); + + if (data) { + op = MDBX_GET_BOTH; + rdata = *data; + data = &rdata; + } else { + op = MDBX_SET; + flags |= MDBX_NODUPDATA; + } + rc = mdbx_cursor_set(&mc, key, data, op, &exact); + if (likely(rc == MDBX_SUCCESS)) { + /* let mdbx_page_split know about this cursor if needed: + * delete will trigger a rebalance; if it needs to move + * a node from one page to another, it will have to + * update the parent's separator key(s). If the new sepkey + * is larger than the current one, the parent page may + * run out of space, triggering a split. We need this + * cursor to be consistent until the end of the rebalance. */ + mc.mc_next = txn->mt_cursors[dbi]; + txn->mt_cursors[dbi] = &mc; + rc = mdbx_cursor_del(&mc, flags); + txn->mt_cursors[dbi] = mc.mc_next; + } + return rc; +} + +/* Split a page and insert a new node. + * Set MDBX_TXN_ERROR on failure. + * [in,out] mc Cursor pointing to the page and desired insertion index. + * The cursor will be updated to point to the actual page and index where + * the node got inserted after the split. + * [in] newkey The key for the newly inserted node. + * [in] newdata The data for the newly inserted node. + * [in] newpgno The page number, if the new node is a branch node. + * [in] nflags The NODE_ADD_FLAGS for the new node. + * Returns 0 on success, non-zero on failure. */ +static int mdbx_page_split(MDBX_cursor *mc, MDBX_val *newkey, MDBX_val *newdata, + pgno_t newpgno, unsigned nflags) { + unsigned flags; + int rc = MDBX_SUCCESS, new_root = 0, did_split = 0; + pgno_t pgno = 0; + unsigned i, ptop; + MDBX_env *env = mc->mc_txn->mt_env; + MDBX_node *node; + MDBX_val sepkey, rkey, xdata, *rdata = &xdata; + MDBX_page *copy = NULL; + MDBX_page *rp, *pp; + MDBX_cursor mn; + DKBUF; + + MDBX_page *mp = mc->mc_pg[mc->mc_top]; + unsigned newindx = mc->mc_ki[mc->mc_top]; + unsigned nkeys = NUMKEYS(mp); + + mdbx_debug("-----> splitting %s page %" PRIaPGNO + " and adding [%s] at index %i/%i", + IS_LEAF(mp) ? "leaf" : "branch", mp->mp_pgno, DKEY(newkey), + mc->mc_ki[mc->mc_top], nkeys); + + /* Create a right sibling. */ + if ((rc = mdbx_page_new(mc, mp->mp_flags, 1, &rp))) + return rc; + rp->mp_leaf2_ksize = mp->mp_leaf2_ksize; + mdbx_debug("new right sibling: page %" PRIaPGNO "", rp->mp_pgno); + + /* Usually when splitting the root page, the cursor + * height is 1. But when called from mdbx_update_key, + * the cursor height may be greater because it walks + * up the stack while finding the branch slot to update. */ + if (mc->mc_top < 1) { + if ((rc = mdbx_page_new(mc, P_BRANCH, 1, &pp))) + goto done; + /* shift current top to make room for new parent */ + for (i = mc->mc_snum; i > 0; i--) { + mc->mc_pg[i] = mc->mc_pg[i - 1]; + mc->mc_ki[i] = mc->mc_ki[i - 1]; + } + mc->mc_pg[0] = pp; + mc->mc_ki[0] = 0; + mc->mc_db->md_root = pp->mp_pgno; + mdbx_debug("root split! new root = %" PRIaPGNO "", pp->mp_pgno); + new_root = mc->mc_db->md_depth++; + + /* Add left (implicit) pointer. 
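mdbx_del() above removes either one specific key/data pair (when data is supplied, it is matched with MDBX_GET_BOTH) or, when data is NULL, every item stored under the key; mdbx_del0() routes the request through a tracked cursor so the rebalance/split fixups stay consistent. A minimal usage sketch, assuming an open write transaction; the key literal and helper name are illustrative only:

```c
#include <string.h>
#include "mdbx.h"

/* Sketch: delete everything stored under one key inside a write txn.
 * data == NULL makes mdbx_del() drop all duplicates of the key. */
static int delete_key(MDBX_txn *txn, MDBX_dbi dbi) {
  MDBX_val key;
  key.iov_base = (void *)"nickname";       /* illustrative key */
  key.iov_len = strlen("nickname");
  int rc = mdbx_del(txn, dbi, &key, NULL);
  return (rc == MDBX_NOTFOUND) ? MDBX_SUCCESS : rc;  /* absent key is not an error here */
}
```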
*/ + if (unlikely((rc = mdbx_node_add(mc, 0, NULL, NULL, mp->mp_pgno, 0)) != + MDBX_SUCCESS)) { + /* undo the pre-push */ + mc->mc_pg[0] = mc->mc_pg[1]; + mc->mc_ki[0] = mc->mc_ki[1]; + mc->mc_db->md_root = mp->mp_pgno; + mc->mc_db->md_depth--; + goto done; + } + mc->mc_snum++; + mc->mc_top++; + ptop = 0; + } else { + ptop = mc->mc_top - 1; + mdbx_debug("parent branch page is %" PRIaPGNO "", mc->mc_pg[ptop]->mp_pgno); + } + + mdbx_cursor_copy(mc, &mn); + mn.mc_xcursor = NULL; + mn.mc_pg[mn.mc_top] = rp; + mn.mc_ki[ptop] = mc->mc_ki[ptop] + 1; + + unsigned split_indx; + if (nflags & MDBX_APPEND) { + mn.mc_ki[mn.mc_top] = 0; + sepkey = *newkey; + split_indx = newindx; + nkeys = 0; + } else { + split_indx = (nkeys + 1) / 2; + + if (IS_LEAF2(rp)) { + char *split, *ins; + int x; + unsigned lsize, rsize, ksize; + /* Move half of the keys to the right sibling */ + x = mc->mc_ki[mc->mc_top] - split_indx; + ksize = mc->mc_db->md_xsize; + split = LEAF2KEY(mp, split_indx, ksize); + rsize = (nkeys - split_indx) * ksize; + lsize = (nkeys - split_indx) * sizeof(indx_t); + mdbx_cassert(mc, mp->mp_lower >= lsize); + mp->mp_lower -= (indx_t)lsize; + mdbx_cassert(mc, rp->mp_lower + lsize <= UINT16_MAX); + rp->mp_lower += (indx_t)lsize; + mdbx_cassert(mc, mp->mp_upper + rsize - lsize <= UINT16_MAX); + mp->mp_upper += (indx_t)(rsize - lsize); + mdbx_cassert(mc, rp->mp_upper >= rsize - lsize); + rp->mp_upper -= (indx_t)(rsize - lsize); + sepkey.iov_len = ksize; + if (newindx == split_indx) { + sepkey.iov_base = newkey->iov_base; + } else { + sepkey.iov_base = split; + } + if (x < 0) { + mdbx_cassert(mc, ksize >= sizeof(indx_t)); + ins = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], ksize); + memcpy(rp->mp_ptrs, split, rsize); + sepkey.iov_base = rp->mp_ptrs; + memmove(ins + ksize, ins, (split_indx - mc->mc_ki[mc->mc_top]) * ksize); + memcpy(ins, newkey->iov_base, ksize); + mdbx_cassert(mc, UINT16_MAX - mp->mp_lower >= (int)sizeof(indx_t)); + mp->mp_lower += sizeof(indx_t); + mdbx_cassert(mc, mp->mp_upper >= ksize - sizeof(indx_t)); + mp->mp_upper -= (indx_t)(ksize - sizeof(indx_t)); + } else { + if (x) + memcpy(rp->mp_ptrs, split, x * ksize); + ins = LEAF2KEY(rp, x, ksize); + memcpy(ins, newkey->iov_base, ksize); + memcpy(ins + ksize, split + x * ksize, rsize - x * ksize); + mdbx_cassert(mc, UINT16_MAX - rp->mp_lower >= (int)sizeof(indx_t)); + rp->mp_lower += sizeof(indx_t); + mdbx_cassert(mc, rp->mp_upper >= ksize - sizeof(indx_t)); + rp->mp_upper -= (indx_t)(ksize - sizeof(indx_t)); + mdbx_cassert(mc, x <= UINT16_MAX); + mc->mc_ki[mc->mc_top] = (indx_t)x; + } + } else { + size_t psize, nsize, k; + /* Maximum free space in an empty page */ + unsigned pmax = env->me_psize - PAGEHDRSZ; + if (IS_LEAF(mp)) + nsize = mdbx_leaf_size(env, newkey, newdata); + else + nsize = mdbx_branch_size(env, newkey); + nsize = EVEN(nsize); + + /* grab a page to hold a temporary copy */ + copy = mdbx_page_malloc(mc->mc_txn, 1); + if (unlikely(copy == NULL)) { + rc = MDBX_ENOMEM; + goto done; + } + copy->mp_pgno = mp->mp_pgno; + copy->mp_flags = mp->mp_flags; + copy->mp_lower = 0; + mdbx_cassert(mc, env->me_psize - PAGEHDRSZ <= UINT16_MAX); + copy->mp_upper = (indx_t)(env->me_psize - PAGEHDRSZ); + + /* prepare to insert */ + for (unsigned j = i = 0; i < nkeys; i++) { + if (i == newindx) + copy->mp_ptrs[j++] = 0; + copy->mp_ptrs[j++] = mp->mp_ptrs[i]; + } + + /* When items are relatively large the split point needs + * to be checked, because being off-by-one will make the + * difference between success or failure in mdbx_node_add. 
+ * + * It's also relevant if a page happens to be laid out + * such that one half of its nodes are all "small" and + * the other half of its nodes are "large." If the new + * item is also "large" and falls on the half with + * "large" nodes, it also may not fit. + * + * As a final tweak, if the new item goes on the last + * spot on the page (and thus, onto the new page), bias + * the split so the new page is emptier than the old page. + * This yields better packing during sequential inserts. + */ + int dir; + if (nkeys < 20 || nsize > pmax / 16 || newindx >= nkeys) { + /* Find split point */ + psize = 0; + if (newindx <= split_indx || newindx >= nkeys) { + i = 0; + dir = 1; + k = (newindx >= nkeys) ? nkeys : split_indx + 1 + IS_LEAF(mp); + } else { + i = nkeys; + dir = -1; + k = split_indx - 1; + } + for (; i != k; i += dir) { + if (i == newindx) { + psize += nsize; + node = NULL; + } else { + node = (MDBX_node *)((char *)mp + copy->mp_ptrs[i] + PAGEHDRSZ); + psize += NODESIZE + NODEKSZ(node) + sizeof(indx_t); + if (IS_LEAF(mp)) { + if (F_ISSET(node->mn_flags, F_BIGDATA)) + psize += sizeof(pgno_t); + else + psize += NODEDSZ(node); + } + psize = EVEN(psize); + } + if (psize > pmax || i == k - dir) { + split_indx = i + (dir < 0); + break; + } + } + } + if (split_indx == newindx) { + sepkey.iov_len = newkey->iov_len; + sepkey.iov_base = newkey->iov_base; + } else { + node = + (MDBX_node *)((char *)mp + copy->mp_ptrs[split_indx] + PAGEHDRSZ); + sepkey.iov_len = node->mn_ksize; + sepkey.iov_base = NODEKEY(node); + } + } + } + + mdbx_debug("separator is %d [%s]", split_indx, DKEY(&sepkey)); + + /* Copy separator key to the parent. */ + if (SIZELEFT(mn.mc_pg[ptop]) < mdbx_branch_size(env, &sepkey)) { + int snum = mc->mc_snum; + mn.mc_snum--; + mn.mc_top--; + did_split = 1; + /* We want other splits to find mn when doing fixups */ + WITH_CURSOR_TRACKING( + mn, rc = mdbx_page_split(&mn, &sepkey, NULL, rp->mp_pgno, 0)); + if (unlikely(rc != MDBX_SUCCESS)) + goto done; + + /* root split? */ + if (mc->mc_snum > snum) + ptop++; + + /* Right page might now have changed parent. + * Check if left page also changed parent. */ + if (mn.mc_pg[ptop] != mc->mc_pg[ptop] && + mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) { + for (i = 0; i < ptop; i++) { + mc->mc_pg[i] = mn.mc_pg[i]; + mc->mc_ki[i] = mn.mc_ki[i]; + } + mc->mc_pg[ptop] = mn.mc_pg[ptop]; + if (mn.mc_ki[ptop]) { + mc->mc_ki[ptop] = mn.mc_ki[ptop] - 1; + } else { + /* find right page's left sibling */ + mc->mc_ki[ptop] = mn.mc_ki[ptop]; + rc = mdbx_cursor_sibling(mc, 0); + } + } + } else { + mn.mc_top--; + rc = mdbx_node_add(&mn, mn.mc_ki[ptop], &sepkey, NULL, rp->mp_pgno, 0); + mn.mc_top++; + } + if (unlikely(rc != MDBX_SUCCESS)) { + if (rc == MDBX_NOTFOUND) /* improper mdbx_cursor_sibling() result */ { + mdbx_error("unexpected %s", "MDBX_NOTFOUND"); + rc = MDBX_PROBLEM; + } + goto done; + } + if (nflags & MDBX_APPEND) { + mc->mc_pg[mc->mc_top] = rp; + mc->mc_ki[mc->mc_top] = 0; + rc = mdbx_node_add(mc, 0, newkey, newdata, newpgno, nflags); + if (rc) + goto done; + for (i = 0; i < mc->mc_top; i++) + mc->mc_ki[i] = mn.mc_ki[i]; + } else if (!IS_LEAF2(mp)) { + /* Move nodes */ + mc->mc_pg[mc->mc_top] = rp; + i = split_indx; + indx_t n = 0; + do { + if (i == newindx) { + rkey.iov_base = newkey->iov_base; + rkey.iov_len = newkey->iov_len; + if (IS_LEAF(mp)) { + rdata = newdata; + } else + pgno = newpgno; + flags = nflags; + /* Update index for the new key. 
*/ + mc->mc_ki[mc->mc_top] = n; + } else { + node = (MDBX_node *)((char *)mp + copy->mp_ptrs[i] + PAGEHDRSZ); + rkey.iov_base = NODEKEY(node); + rkey.iov_len = node->mn_ksize; + if (IS_LEAF(mp)) { + xdata.iov_base = NODEDATA(node); + xdata.iov_len = NODEDSZ(node); + rdata = &xdata; + } else + pgno = NODEPGNO(node); + flags = node->mn_flags; + } + + if (!IS_LEAF(mp) && n == 0) { + /* First branch index doesn't need key data. */ + rkey.iov_len = 0; + } + + rc = mdbx_node_add(mc, n, &rkey, rdata, pgno, flags); + if (rc) + goto done; + if (i == nkeys) { + i = 0; + n = 0; + mc->mc_pg[mc->mc_top] = copy; + } else { + i++; + n++; + } + } while (i != split_indx); + + nkeys = NUMKEYS(copy); + for (i = 0; i < nkeys; i++) + mp->mp_ptrs[i] = copy->mp_ptrs[i]; + mp->mp_lower = copy->mp_lower; + mp->mp_upper = copy->mp_upper; + memcpy(NODEPTR(mp, nkeys - 1), NODEPTR(copy, nkeys - 1), + env->me_psize - copy->mp_upper - PAGEHDRSZ); + + /* reset back to original page */ + if (newindx < split_indx) { + mc->mc_pg[mc->mc_top] = mp; + } else { + mc->mc_pg[mc->mc_top] = rp; + mc->mc_ki[ptop]++; + /* Make sure mc_ki is still valid. */ + if (mn.mc_pg[ptop] != mc->mc_pg[ptop] && + mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) { + for (i = 0; i <= ptop; i++) { + mc->mc_pg[i] = mn.mc_pg[i]; + mc->mc_ki[i] = mn.mc_ki[i]; + } + } + } + if (nflags & MDBX_RESERVE) { + node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + if (!(node->mn_flags & F_BIGDATA)) + newdata->iov_base = NODEDATA(node); + } + } else { + if (newindx >= split_indx) { + mc->mc_pg[mc->mc_top] = rp; + mc->mc_ki[ptop]++; + /* Make sure mc_ki is still valid. */ + if (mn.mc_pg[ptop] != mc->mc_pg[ptop] && + mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) { + for (i = 0; i <= ptop; i++) { + mc->mc_pg[i] = mn.mc_pg[i]; + mc->mc_ki[i] = mn.mc_ki[i]; + } + } + } + } + + { + /* Adjust other cursors pointing to mp */ + MDBX_cursor *m2, *m3; + MDBX_dbi dbi = mc->mc_dbi; + nkeys = NUMKEYS(mp); + + for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) { + if (mc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (m3 == mc) + continue; + if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED)) + continue; + if (new_root) { + int k; + /* sub cursors may be on different DB */ + if (m3->mc_pg[0] != mp) + continue; + /* root split */ + for (k = new_root; k >= 0; k--) { + m3->mc_ki[k + 1] = m3->mc_ki[k]; + m3->mc_pg[k + 1] = m3->mc_pg[k]; + } + if (m3->mc_ki[0] >= nkeys) { + m3->mc_ki[0] = 1; + } else { + m3->mc_ki[0] = 0; + } + m3->mc_pg[0] = mc->mc_pg[0]; + m3->mc_snum++; + m3->mc_top++; + } + if (m3->mc_top >= mc->mc_top && m3->mc_pg[mc->mc_top] == mp) { + if (m3->mc_ki[mc->mc_top] >= newindx && !(nflags & MDBX_SPLIT_REPLACE)) + m3->mc_ki[mc->mc_top]++; + if (m3->mc_ki[mc->mc_top] >= nkeys) { + m3->mc_pg[mc->mc_top] = rp; + mdbx_cassert(mc, m3->mc_ki[mc->mc_top] >= nkeys); + m3->mc_ki[mc->mc_top] -= (indx_t)nkeys; + for (i = 0; i < mc->mc_top; i++) { + m3->mc_ki[i] = mn.mc_ki[i]; + m3->mc_pg[i] = mn.mc_pg[i]; + } + } + } else if (!did_split && m3->mc_top >= ptop && + m3->mc_pg[ptop] == mc->mc_pg[ptop] && + m3->mc_ki[ptop] >= mc->mc_ki[ptop]) { + m3->mc_ki[ptop]++; + } + if (XCURSOR_INITED(m3) && IS_LEAF(mp)) + XCURSOR_REFRESH(m3, m3->mc_pg[mc->mc_top], m3->mc_ki[mc->mc_top]); + } + } + mdbx_debug("mp left: %d, rp left: %d", SIZELEFT(mp), SIZELEFT(rp)); + +done: + if (copy) /* tmp page */ + mdbx_page_free(env, copy); + if (unlikely(rc)) + mc->mc_txn->mt_flags |= MDBX_TXN_ERROR; + return rc; +} + +int mdbx_put(MDBX_txn *txn, MDBX_dbi 
dbi, MDBX_val *key, MDBX_val *data, + unsigned flags) { + MDBX_cursor mc; + MDBX_xcursor mx; + + if (unlikely(!key || !data || !txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))) + return MDBX_EINVAL; + + if (unlikely(flags & + ~(MDBX_NOOVERWRITE | MDBX_NODUPDATA | MDBX_RESERVE | + MDBX_APPEND | MDBX_APPENDDUP | MDBX_CURRENT))) + return MDBX_EINVAL; + + if (unlikely(txn->mt_flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED))) + return (txn->mt_flags & MDBX_TXN_RDONLY) ? MDBX_EACCESS : MDBX_BAD_TXN; + + mdbx_cursor_init(&mc, txn, dbi, &mx); + mc.mc_next = txn->mt_cursors[dbi]; + txn->mt_cursors[dbi] = &mc; + + int rc = MDBX_SUCCESS; + /* LY: support for update (explicit overwrite) */ + if (flags & MDBX_CURRENT) { + rc = mdbx_cursor_get(&mc, key, NULL, MDBX_SET); + if (likely(rc == MDBX_SUCCESS) && + (txn->mt_dbs[dbi].md_flags & MDBX_DUPSORT)) { + /* LY: allows update (explicit overwrite) only for unique keys */ + MDBX_node *leaf = NODEPTR(mc.mc_pg[mc.mc_top], mc.mc_ki[mc.mc_top]); + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdbx_tassert(txn, XCURSOR_INITED(&mc) && + mc.mc_xcursor->mx_db.md_entries > 1); + rc = MDBX_EMULTIVAL; + } + } + } + + if (likely(rc == MDBX_SUCCESS)) + rc = mdbx_cursor_put(&mc, key, data, flags); + txn->mt_cursors[dbi] = mc.mc_next; + + return rc; +} + +#ifndef MDBX_WBUF +#define MDBX_WBUF (1024 * 1024) +#endif +#define MDBX_EOF 0x10 /* mdbx_env_copyfd1() is done reading */ + +/* State needed for a double-buffering compacting copy. */ +typedef struct mdbx_copy { + MDBX_env *mc_env; + MDBX_txn *mc_txn; + mdbx_condmutex_t mc_condmutex; + char *mc_wbuf[2]; + char *mc_over[2]; + size_t mc_wlen[2]; + size_t mc_olen[2]; + mdbx_filehandle_t mc_fd; + volatile int mc_error; + pgno_t mc_next_pgno; + short mc_toggle; /* Buffer number in provider */ + short mc_new; /* (0-2 buffers to write) | (MDBX_EOF at end) */ + /* Error code. Never cleared if set. Both threads can set nonzero + * to fail the copy. Not mutex-protected, MDBX expects atomic int. */ +} mdbx_copy; + +/* Dedicated writer thread for compacting copy. */ +static THREAD_RESULT __cold THREAD_CALL mdbx_env_copythr(void *arg) { + mdbx_copy *my = arg; + char *ptr; + int toggle = 0; + int rc; + +#if defined(F_SETNOSIGPIPE) + /* OS X delivers SIGPIPE to the whole process, not the thread that caused it. + * Disable SIGPIPE using platform specific fcntl. */ + int enabled = 1; + if (fcntl(my->mc_fd, F_SETNOSIGPIPE, &enabled)) + my->mc_error = errno; +#endif + +#if defined(SIGPIPE) && !defined(_WIN32) && !defined(_WIN64) + sigset_t set; + sigemptyset(&set); + sigaddset(&set, SIGPIPE); + rc = pthread_sigmask(SIG_BLOCK, &set, NULL); + if (rc != 0) + my->mc_error = rc; +#endif + + mdbx_condmutex_lock(&my->mc_condmutex); + while (!my->mc_error) { + while (!my->mc_new) + mdbx_condmutex_wait(&my->mc_condmutex); + if (my->mc_new == 0 + MDBX_EOF) /* 0 buffers, just EOF */ + break; + size_t wsize = my->mc_wlen[toggle]; + ptr = my->mc_wbuf[toggle]; + again: + if (wsize > 0 && !my->mc_error) { + rc = mdbx_write(my->mc_fd, ptr, wsize); + if (rc != MDBX_SUCCESS) { +#if defined(SIGPIPE) && !defined(_WIN32) && !defined(_WIN64) + if (rc == EPIPE) { + /* Collect the pending SIGPIPE, otherwise (at least OS X) + * gives it to the process on thread-exit (ITS#8504). 
*/ + int tmp; + sigwait(&set, &tmp); + } +#endif + my->mc_error = rc; + } + } + + /* If there's an overflow page tail, write it too */ + if (my->mc_olen[toggle]) { + wsize = my->mc_olen[toggle]; + ptr = my->mc_over[toggle]; + my->mc_olen[toggle] = 0; + goto again; + } + my->mc_wlen[toggle] = 0; + toggle ^= 1; + /* Return the empty buffer to provider */ + my->mc_new--; + mdbx_condmutex_signal(&my->mc_condmutex); + } + mdbx_condmutex_unlock(&my->mc_condmutex); + return (THREAD_RESULT)0; +} + +/* Give buffer and/or MDBX_EOF to writer thread, await unused buffer. + * + * [in] my control structure. + * [in] adjust (1 to hand off 1 buffer) | (MDBX_EOF when ending). */ +static int __cold mdbx_env_cthr_toggle(mdbx_copy *my, int adjust) { + mdbx_condmutex_lock(&my->mc_condmutex); + my->mc_new += (short)adjust; + mdbx_condmutex_signal(&my->mc_condmutex); + while (my->mc_new & 2) /* both buffers in use */ + mdbx_condmutex_wait(&my->mc_condmutex); + mdbx_condmutex_unlock(&my->mc_condmutex); + + my->mc_toggle ^= (adjust & 1); + /* Both threads reset mc_wlen, to be safe from threading errors */ + my->mc_wlen[my->mc_toggle] = 0; + return my->mc_error; +} + +/* Depth-first tree traversal for compacting copy. + * [in] my control structure. + * [in,out] pg database root. + * [in] flags includes F_DUPDATA if it is a sorted-duplicate sub-DB. */ +static int __cold mdbx_env_cwalk(mdbx_copy *my, pgno_t *pg, int flags) { + MDBX_cursor mc; + MDBX_node *ni; + MDBX_page *mo, *mp, *leaf; + char *buf, *ptr; + int rc, toggle; + unsigned i; + + /* Empty DB, nothing to do */ + if (*pg == P_INVALID) + return MDBX_SUCCESS; + + memset(&mc, 0, sizeof(mc)); + mc.mc_snum = 1; + mc.mc_txn = my->mc_txn; + + rc = mdbx_page_get(&mc, *pg, &mc.mc_pg[0], NULL); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + rc = mdbx_page_search_root(&mc, NULL, MDBX_PS_FIRST); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + /* Make cursor pages writable */ + buf = ptr = malloc(pgno2bytes(my->mc_env, mc.mc_snum)); + if (buf == NULL) + return MDBX_ENOMEM; + + for (i = 0; i < mc.mc_top; i++) { + mdbx_page_copy((MDBX_page *)ptr, mc.mc_pg[i], my->mc_env->me_psize); + mc.mc_pg[i] = (MDBX_page *)ptr; + ptr += my->mc_env->me_psize; + } + + /* This is writable space for a leaf page. Usually not needed. 
*/ + leaf = (MDBX_page *)ptr; + + toggle = my->mc_toggle; + while (mc.mc_snum > 0) { + unsigned n; + mp = mc.mc_pg[mc.mc_top]; + n = NUMKEYS(mp); + + if (IS_LEAF(mp)) { + if (!IS_LEAF2(mp) && !(flags & F_DUPDATA)) { + for (i = 0; i < n; i++) { + ni = NODEPTR(mp, i); + if (ni->mn_flags & F_BIGDATA) { + MDBX_page *omp; + + /* Need writable leaf */ + if (mp != leaf) { + mc.mc_pg[mc.mc_top] = leaf; + mdbx_page_copy(leaf, mp, my->mc_env->me_psize); + mp = leaf; + ni = NODEPTR(mp, i); + } + + pgno_t pgno; + memcpy(&pgno, NODEDATA(ni), sizeof(pgno)); + memcpy(NODEDATA(ni), &my->mc_next_pgno, sizeof(pgno_t)); + rc = mdbx_page_get(&mc, pgno, &omp, NULL); + if (unlikely(rc != MDBX_SUCCESS)) + goto done; + if (my->mc_wlen[toggle] >= MDBX_WBUF) { + rc = mdbx_env_cthr_toggle(my, 1); + if (unlikely(rc != MDBX_SUCCESS)) + goto done; + toggle = my->mc_toggle; + } + mo = (MDBX_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]); + memcpy(mo, omp, my->mc_env->me_psize); + mo->mp_pgno = my->mc_next_pgno; + my->mc_next_pgno += omp->mp_pages; + my->mc_wlen[toggle] += my->mc_env->me_psize; + if (omp->mp_pages > 1) { + my->mc_olen[toggle] = pgno2bytes(my->mc_env, omp->mp_pages - 1); + my->mc_over[toggle] = (char *)omp + my->mc_env->me_psize; + rc = mdbx_env_cthr_toggle(my, 1); + if (unlikely(rc != MDBX_SUCCESS)) + goto done; + toggle = my->mc_toggle; + } + } else if (ni->mn_flags & F_SUBDATA) { + MDBX_db db; + + /* Need writable leaf */ + if (mp != leaf) { + mc.mc_pg[mc.mc_top] = leaf; + mdbx_page_copy(leaf, mp, my->mc_env->me_psize); + mp = leaf; + ni = NODEPTR(mp, i); + } + + memcpy(&db, NODEDATA(ni), sizeof(db)); + my->mc_toggle = (short)toggle; + rc = mdbx_env_cwalk(my, &db.md_root, ni->mn_flags & F_DUPDATA); + if (rc) + goto done; + toggle = my->mc_toggle; + memcpy(NODEDATA(ni), &db, sizeof(db)); + } + } + } + } else { + mc.mc_ki[mc.mc_top]++; + if (mc.mc_ki[mc.mc_top] < n) { + pgno_t pgno; + again: + ni = NODEPTR(mp, mc.mc_ki[mc.mc_top]); + pgno = NODEPGNO(ni); + rc = mdbx_page_get(&mc, pgno, &mp, NULL); + if (unlikely(rc != MDBX_SUCCESS)) + goto done; + mc.mc_top++; + mc.mc_snum++; + mc.mc_ki[mc.mc_top] = 0; + if (IS_BRANCH(mp)) { + /* Whenever we advance to a sibling branch page, + * we must proceed all the way down to its first leaf. */ + mdbx_page_copy(mc.mc_pg[mc.mc_top], mp, my->mc_env->me_psize); + goto again; + } else + mc.mc_pg[mc.mc_top] = mp; + continue; + } + } + if (my->mc_wlen[toggle] >= MDBX_WBUF) { + rc = mdbx_env_cthr_toggle(my, 1); + if (unlikely(rc != MDBX_SUCCESS)) + goto done; + toggle = my->mc_toggle; + } + mo = (MDBX_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]); + mdbx_page_copy(mo, mp, my->mc_env->me_psize); + mo->mp_pgno = my->mc_next_pgno++; + my->mc_wlen[toggle] += my->mc_env->me_psize; + if (mc.mc_top) { + /* Update parent if there is one */ + ni = NODEPTR(mc.mc_pg[mc.mc_top - 1], mc.mc_ki[mc.mc_top - 1]); + SETPGNO(ni, mo->mp_pgno); + mdbx_cursor_pop(&mc); + } else { + /* Otherwise we're done */ + *pg = mo->mp_pgno; + break; + } + } +done: + free(buf); + return rc; +} + +/* Copy environment with compaction. 
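mdbx_env_cwalk() above, together with the writer thread and mdbx_env_compact() that follow, implement the double-buffered compacting copy: pages are renumbered sequentially and freed space is dropped while the tree is streamed out. Applications reach this path through the public entry points defined further below. A minimal sketch, assuming an open environment and a destination path chosen by the caller:

```c
#include "mdbx.h"

/* Sketch: write a compacted backup of an open environment.
 * MDBX_CP_COMPACT selects the compacting walk above; passing 0
 * instead takes the as-is copy path (mdbx_env_copy_asis()). */
static int backup_compacted(MDBX_env *env, const char *dst_path) {
  return mdbx_env_copy(env, dst_path, MDBX_CP_COMPACT);
}
```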
*/ +static int __cold mdbx_env_compact(MDBX_env *env, mdbx_filehandle_t fd) { + MDBX_txn *txn = NULL; + mdbx_thread_t thr; + mdbx_copy my; + memset(&my, 0, sizeof(my)); + + int rc = mdbx_condmutex_init(&my.mc_condmutex); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + rc = mdbx_memalign_alloc(env->me_os_psize, MDBX_WBUF * 2, + (void **)&my.mc_wbuf[0]); + if (unlikely(rc != MDBX_SUCCESS)) + goto done; + + memset(my.mc_wbuf[0], 0, MDBX_WBUF * 2); + my.mc_wbuf[1] = my.mc_wbuf[0] + MDBX_WBUF; + my.mc_next_pgno = NUM_METAS; + my.mc_env = env; + my.mc_fd = fd; + rc = mdbx_thread_create(&thr, mdbx_env_copythr, &my); + if (unlikely(rc != MDBX_SUCCESS)) + goto done; + + rc = mdbx_txn_begin(env, NULL, MDBX_RDONLY, &txn); + if (unlikely(rc != MDBX_SUCCESS)) + goto finish; + + MDBX_page *meta = mdbx_init_metas(env, my.mc_wbuf[0]); + + /* Set metapage 1 with current main DB */ + pgno_t new_root, root = txn->mt_dbs[MAIN_DBI].md_root; + if ((new_root = root) != P_INVALID) { + /* Count free pages + freeDB pages. Subtract from last_pg + * to find the new last_pg, which also becomes the new root. */ + pgno_t freecount = 0; + MDBX_cursor mc; + MDBX_val key, data; + + mdbx_cursor_init(&mc, txn, FREE_DBI, NULL); + while ((rc = mdbx_cursor_get(&mc, &key, &data, MDBX_NEXT)) == 0) + freecount += *(pgno_t *)data.iov_base; + if (unlikely(rc != MDBX_NOTFOUND)) + goto finish; + + freecount += txn->mt_dbs[FREE_DBI].md_branch_pages + + txn->mt_dbs[FREE_DBI].md_leaf_pages + + txn->mt_dbs[FREE_DBI].md_overflow_pages; + + new_root = txn->mt_next_pgno - 1 - freecount; + meta->mp_meta.mm_geo.next = meta->mp_meta.mm_geo.now = new_root + 1; + meta->mp_meta.mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; + meta->mp_meta.mm_dbs[MAIN_DBI].md_root = new_root; + } else { + /* When the DB is empty, handle it specially to + * fix any breakage like page leaks from ITS#8174. */ + meta->mp_meta.mm_dbs[MAIN_DBI].md_flags = txn->mt_dbs[MAIN_DBI].md_flags; + } + + /* copy canary sequenses if present */ + if (txn->mt_canary.v) { + meta->mp_meta.mm_canary = txn->mt_canary; + meta->mp_meta.mm_canary.v = mdbx_meta_txnid_stable(env, &meta->mp_meta); + } + + /* update signature */ + meta->mp_meta.mm_datasync_sign = mdbx_meta_sign(&meta->mp_meta); + + my.mc_wlen[0] = pgno2bytes(env, NUM_METAS); + my.mc_txn = txn; + rc = mdbx_env_cwalk(&my, &root, 0); + if (rc == MDBX_SUCCESS && root != new_root) { + mdbx_error("unexpected root %" PRIaPGNO " (%" PRIaPGNO ")", root, new_root); + rc = MDBX_PROBLEM; /* page leak or corrupt DB */ + } + +finish: + if (rc != MDBX_SUCCESS) + my.mc_error = rc; + mdbx_env_cthr_toggle(&my, 1 | MDBX_EOF); + rc = mdbx_thread_join(thr); + mdbx_txn_abort(txn); + +done: + mdbx_memalign_free(my.mc_wbuf[0]); + mdbx_condmutex_destroy(&my.mc_condmutex); + return rc ? rc : my.mc_error; +} + +/* Copy environment as-is. */ +static int __cold mdbx_env_copy_asis(MDBX_env *env, mdbx_filehandle_t fd) { + MDBX_txn *txn = NULL; + + /* Do the lock/unlock of the reader mutex before starting the + * write txn. Otherwise other read txns could block writers. */ + int rc = mdbx_txn_begin(env, NULL, MDBX_RDONLY, &txn); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + /* We must start the actual read txn after blocking writers */ + rc = mdbx_txn_end(txn, MDBX_END_RESET_TMP); + if (unlikely(rc != MDBX_SUCCESS)) + goto bailout; /* FIXME: or just return? 
*/ + + /* Temporarily block writers until we snapshot the meta pages */ + rc = mdbx_txn_lock(env); + if (unlikely(rc != MDBX_SUCCESS)) + goto bailout; + + rc = mdbx_txn_renew0(txn, MDBX_RDONLY); + if (unlikely(rc != MDBX_SUCCESS)) { + mdbx_txn_unlock(env); + goto bailout; + } + + rc = mdbx_write(fd, env->me_map, pgno2bytes(env, NUM_METAS)); + MDBX_meta *const head = mdbx_meta_head(env); + const uint64_t size = + mdbx_roundup2(pgno2bytes(env, head->mm_geo.now), env->me_os_psize); + mdbx_txn_unlock(env); + + if (likely(rc == MDBX_SUCCESS)) + rc = mdbx_write(fd, env->me_map + pgno2bytes(env, NUM_METAS), + pgno2bytes(env, txn->mt_next_pgno - NUM_METAS)); + + if (likely(rc == MDBX_SUCCESS)) + rc = mdbx_ftruncate(fd, size); + +bailout: + mdbx_txn_abort(txn); + return rc; +} + +int __cold mdbx_env_copy2fd(MDBX_env *env, mdbx_filehandle_t fd, + unsigned flags) { + if (flags & MDBX_CP_COMPACT) + return mdbx_env_compact(env, fd); + + return mdbx_env_copy_asis(env, fd); +} + +int __cold mdbx_env_copy(MDBX_env *env, const char *path, unsigned flags) { + char *lck_pathname; + mdbx_filehandle_t newfd = INVALID_HANDLE_VALUE; + + if (env->me_flags & MDBX_NOSUBDIR) { + lck_pathname = (char *)path; + } else { + size_t len = strlen(path); + len += sizeof(MDBX_DATANAME); + lck_pathname = malloc(len); + if (!lck_pathname) + return MDBX_ENOMEM; + sprintf(lck_pathname, "%s" MDBX_DATANAME, path); + } + + /* The destination path must exist, but the destination file must not. + * We don't want the OS to cache the writes, since the source data is + * already in the OS cache. */ + int rc = + mdbx_openfile(lck_pathname, O_WRONLY | O_CREAT | O_EXCL, 0666, &newfd); + if (rc == MDBX_SUCCESS) { + if (env->me_psize >= env->me_os_psize) { +#ifdef F_NOCACHE /* __APPLE__ */ + (void)fcntl(newfd, F_NOCACHE, 1); +#elif defined(O_DIRECT) && defined(F_GETFL) + /* Set O_DIRECT if the file system supports it */ + if ((rc = fcntl(newfd, F_GETFL)) != -1) + (void)fcntl(newfd, F_SETFL, rc | O_DIRECT); +#endif + } + rc = mdbx_env_copy2fd(env, newfd, flags); + } + + if (!(env->me_flags & MDBX_NOSUBDIR)) + free(lck_pathname); + + if (newfd != INVALID_HANDLE_VALUE) { + int err = mdbx_closefile(newfd); + if (rc == MDBX_SUCCESS && err != rc) + rc = err; + } + + return rc; +} + +int __cold mdbx_env_set_flags(MDBX_env *env, unsigned flags, int onoff) { + if (unlikely(flags & ~CHANGEABLE)) + return MDBX_EINVAL; + + int rc = mdbx_txn_lock(env); + if (unlikely(rc)) + return rc; + + if (onoff) + env->me_flags |= flags; + else + env->me_flags &= ~flags; + + mdbx_txn_unlock(env); + return MDBX_SUCCESS; +} + +int __cold mdbx_env_get_flags(MDBX_env *env, unsigned *arg) { + if (unlikely(!env || !arg)) + return MDBX_EINVAL; + + *arg = env->me_flags & (CHANGEABLE | CHANGELESS); + return MDBX_SUCCESS; +} + +int __cold mdbx_env_set_userctx(MDBX_env *env, void *ctx) { + if (unlikely(!env)) + return MDBX_EINVAL; + env->me_userctx = ctx; + return MDBX_SUCCESS; +} + +void *__cold mdbx_env_get_userctx(MDBX_env *env) { + return env ? 
env->me_userctx : NULL; +} + +int __cold mdbx_env_set_assert(MDBX_env *env, MDBX_assert_func *func) { + if (unlikely(!env)) + return MDBX_EINVAL; +#if MDBX_DEBUG + env->me_assert_func = func; + return MDBX_SUCCESS; +#else + (void)func; + return MDBX_ENOSYS; +#endif +} + +int __cold mdbx_env_get_path(MDBX_env *env, const char **arg) { + if (unlikely(!env || !arg)) + return MDBX_EINVAL; + + *arg = env->me_path; + return MDBX_SUCCESS; +} + +int __cold mdbx_env_get_fd(MDBX_env *env, mdbx_filehandle_t *arg) { + if (unlikely(!env || !arg)) + return MDBX_EINVAL; + + *arg = env->me_fd; + return MDBX_SUCCESS; +} + +/* Common code for mdbx_dbi_stat() and mdbx_env_stat(). + * [in] env the environment to operate in. + * [in] db the MDBX_db record containing the stats to return. + * [out] arg the address of an MDBX_stat structure to receive the stats. + * Returns 0, this function always succeeds. */ +static int __cold mdbx_stat0(MDBX_env *env, MDBX_db *db, MDBX_stat *arg) { + arg->ms_psize = env->me_psize; + arg->ms_depth = db->md_depth; + arg->ms_branch_pages = db->md_branch_pages; + arg->ms_leaf_pages = db->md_leaf_pages; + arg->ms_overflow_pages = db->md_overflow_pages; + arg->ms_entries = db->md_entries; + return MDBX_SUCCESS; +} + +int __cold mdbx_env_stat(MDBX_env *env, MDBX_stat *arg, size_t bytes) { + MDBX_meta *meta; + + if (unlikely(env == NULL || arg == NULL)) + return MDBX_EINVAL; + if (unlikely(bytes != sizeof(MDBX_stat))) + return MDBX_EINVAL; + + meta = mdbx_meta_head(env); + return mdbx_stat0(env, &meta->mm_dbs[MAIN_DBI], arg); +} + +int __cold mdbx_env_info(MDBX_env *env, MDBX_envinfo *arg, size_t bytes) { + + if (unlikely(env == NULL || arg == NULL)) + return MDBX_EINVAL; + + if (bytes != sizeof(MDBX_envinfo)) + return MDBX_EINVAL; + + const MDBX_meta *const meta0 = METAPAGE(env, 0); + const MDBX_meta *const meta1 = METAPAGE(env, 1); + const MDBX_meta *const meta2 = METAPAGE(env, 2); + const MDBX_meta *meta; + do { + meta = mdbx_meta_head(env); + arg->mi_recent_txnid = mdbx_meta_txnid_fluid(env, meta); + arg->mi_meta0_txnid = mdbx_meta_txnid_fluid(env, meta0); + arg->mi_meta0_sign = meta0->mm_datasync_sign; + arg->mi_meta1_txnid = mdbx_meta_txnid_fluid(env, meta1); + arg->mi_meta1_sign = meta1->mm_datasync_sign; + arg->mi_meta2_txnid = mdbx_meta_txnid_fluid(env, meta2); + arg->mi_meta2_sign = meta2->mm_datasync_sign; + arg->mi_last_pgno = meta->mm_geo.next - 1; + arg->mi_geo.lower = pgno2bytes(env, meta->mm_geo.lower); + arg->mi_geo.upper = pgno2bytes(env, meta->mm_geo.upper); + arg->mi_geo.current = pgno2bytes(env, meta->mm_geo.now); + arg->mi_geo.shrink = pgno2bytes(env, meta->mm_geo.shrink); + arg->mi_geo.grow = pgno2bytes(env, meta->mm_geo.grow); + arg->mi_mapsize = env->me_mapsize; + mdbx_compiler_barrier(); + } while (unlikely(arg->mi_meta0_txnid != mdbx_meta_txnid_fluid(env, meta0) || + arg->mi_meta0_sign != meta0->mm_datasync_sign || + arg->mi_meta1_txnid != mdbx_meta_txnid_fluid(env, meta1) || + arg->mi_meta1_sign != meta1->mm_datasync_sign || + arg->mi_meta2_txnid != mdbx_meta_txnid_fluid(env, meta2) || + arg->mi_meta2_sign != meta2->mm_datasync_sign || + meta != mdbx_meta_head(env) || + arg->mi_recent_txnid != mdbx_meta_txnid_fluid(env, meta))); + + arg->mi_maxreaders = env->me_maxreaders; + arg->mi_numreaders = env->me_lck ? 
env->me_lck->mti_numreaders : INT32_MAX; + arg->mi_dxb_pagesize = env->me_psize; + arg->mi_sys_pagesize = env->me_os_psize; + + arg->mi_latter_reader_txnid = 0; + if (env->me_lck) { + MDBX_reader *r = env->me_lck->mti_readers; + arg->mi_latter_reader_txnid = arg->mi_recent_txnid; + for (unsigned i = 0; i < arg->mi_numreaders; ++i) { + if (r[i].mr_pid) { + const txnid_t txnid = r[i].mr_txnid; + if (arg->mi_latter_reader_txnid > txnid) + arg->mi_latter_reader_txnid = txnid; + } + } + } + + return MDBX_SUCCESS; +} + +static MDBX_cmp_func *mdbx_default_keycmp(unsigned flags) { + return (flags & MDBX_REVERSEKEY) ? mdbx_cmp_memnr : (flags & MDBX_INTEGERKEY) + ? mdbx_cmp_int_a2 + : mdbx_cmp_memn; +} + +static MDBX_cmp_func *mdbx_default_datacmp(unsigned flags) { + return !(flags & MDBX_DUPSORT) + ? 0 + : ((flags & MDBX_INTEGERDUP) + ? mdbx_cmp_int_ua + : ((flags & MDBX_REVERSEDUP) ? mdbx_cmp_memnr + : mdbx_cmp_memn)); +} + +static int mdbx_dbi_bind(MDBX_txn *txn, const MDBX_dbi dbi, unsigned user_flags, + MDBX_cmp_func *keycmp, MDBX_cmp_func *datacmp) { + /* LY: so, accepting only three cases for the table's flags: + * 1) user_flags and both comparators are zero + * = assume that a by-default mode/flags is requested for reading; + * 2) user_flags exactly the same + * = assume that the target mode/flags are requested properly; + * 3) user_flags differs, but table is empty and MDBX_CREATE is provided + * = assume that a properly create request with custom flags; + */ + if ((user_flags ^ txn->mt_dbs[dbi].md_flags) & PERSISTENT_FLAGS) { + /* flags ara differs, check other conditions */ + if (!user_flags && (!keycmp || keycmp == txn->mt_dbxs[dbi].md_cmp) && + (!datacmp || datacmp == txn->mt_dbxs[dbi].md_dcmp)) { + /* no comparators were provided and flags are zero, + * seems that is case #1 above */ + user_flags = txn->mt_dbs[dbi].md_flags; + } else if ((user_flags & MDBX_CREATE) && txn->mt_dbs[dbi].md_entries == 0) { + if (txn->mt_flags & MDBX_TXN_RDONLY) + return /* FIXME: return extended info */ MDBX_EACCESS; + /* make sure flags changes get committed */ + txn->mt_dbs[dbi].md_flags = user_flags & PERSISTENT_FLAGS; + txn->mt_flags |= MDBX_TXN_DIRTY; + } else { + return /* FIXME: return extended info */ MDBX_INCOMPATIBLE; + } + } + + if (!txn->mt_dbxs[dbi].md_cmp || MDBX_DEBUG) { + if (!keycmp) + keycmp = mdbx_default_keycmp(user_flags); + assert(!txn->mt_dbxs[dbi].md_cmp || txn->mt_dbxs[dbi].md_cmp == keycmp); + txn->mt_dbxs[dbi].md_cmp = keycmp; + } + + if (!txn->mt_dbxs[dbi].md_dcmp || MDBX_DEBUG) { + if (!datacmp) + datacmp = mdbx_default_datacmp(user_flags); + assert(!txn->mt_dbxs[dbi].md_dcmp || txn->mt_dbxs[dbi].md_dcmp == datacmp); + txn->mt_dbxs[dbi].md_dcmp = datacmp; + } + + return MDBX_SUCCESS; +} + +int mdbx_dbi_open_ex(MDBX_txn *txn, const char *table_name, unsigned user_flags, + MDBX_dbi *dbi, MDBX_cmp_func *keycmp, + MDBX_cmp_func *datacmp) { + if (unlikely(!txn || !dbi || (user_flags & ~VALID_FLAGS) != 0)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(txn->mt_flags & MDBX_TXN_BLOCKED)) + return MDBX_BAD_TXN; + + /* main table? 
*/ + if (!table_name) { + *dbi = MAIN_DBI; + return mdbx_dbi_bind(txn, MAIN_DBI, user_flags, keycmp, datacmp); + } + + if (txn->mt_dbxs[MAIN_DBI].md_cmp == NULL) { + txn->mt_dbxs[MAIN_DBI].md_cmp = + mdbx_default_keycmp(txn->mt_dbs[MAIN_DBI].md_flags); + txn->mt_dbxs[MAIN_DBI].md_dcmp = + mdbx_default_datacmp(txn->mt_dbs[MAIN_DBI].md_flags); + } + + /* Is the DB already open? */ + size_t len = strlen(table_name); + MDBX_dbi scan, slot = txn->mt_numdbs; + for (scan = txn->mt_numdbs; --scan >= CORE_DBS;) { + if (!txn->mt_dbxs[scan].md_name.iov_len) { + /* Remember this free slot */ + slot = scan; + continue; + } + if (len == txn->mt_dbxs[scan].md_name.iov_len && + !strncmp(table_name, txn->mt_dbxs[scan].md_name.iov_base, len)) { + *dbi = scan; + return mdbx_dbi_bind(txn, scan, user_flags, keycmp, datacmp); + } + } + + /* Fail, if no free slot and max hit */ + MDBX_env *env = txn->mt_env; + if (unlikely(slot >= env->me_maxdbs)) + return MDBX_DBS_FULL; + + /* Cannot mix named table with some main-table flags */ + if (unlikely(txn->mt_dbs[MAIN_DBI].md_flags & + (MDBX_DUPSORT | MDBX_INTEGERKEY))) + return (user_flags & MDBX_CREATE) ? MDBX_INCOMPATIBLE : MDBX_NOTFOUND; + + /* Find the DB info */ + int exact = 0; + MDBX_val key, data; + key.iov_len = len; + key.iov_base = (void *)table_name; + MDBX_cursor mc; + mdbx_cursor_init(&mc, txn, MAIN_DBI, NULL); + int rc = mdbx_cursor_set(&mc, &key, &data, MDBX_SET, &exact); + if (unlikely(rc != MDBX_SUCCESS)) { + if (rc != MDBX_NOTFOUND || !(user_flags & MDBX_CREATE)) + return rc; + } else { + /* make sure this is actually a table */ + MDBX_node *node = NODEPTR(mc.mc_pg[mc.mc_top], mc.mc_ki[mc.mc_top]); + if (unlikely((node->mn_flags & (F_DUPDATA | F_SUBDATA)) != F_SUBDATA)) + return MDBX_INCOMPATIBLE; + } + + if (rc != MDBX_SUCCESS && unlikely(txn->mt_flags & MDBX_TXN_RDONLY)) + return MDBX_EACCESS; + + /* Done here so we cannot fail after creating a new DB */ + char *namedup = mdbx_strdup(table_name); + if (unlikely(!namedup)) + return MDBX_ENOMEM; + + int err = mdbx_fastmutex_acquire(&env->me_dbi_lock); + if (unlikely(err != MDBX_SUCCESS)) { + free(namedup); + return err; + } + + unsigned dbflag = DB_NEW | DB_VALID | DB_USRVALID; + if (unlikely(rc)) { + /* MDBX_NOTFOUND and MDBX_CREATE: Create new DB */ + assert(rc == MDBX_NOTFOUND); + MDBX_db db_dummy; + memset(&db_dummy, 0, sizeof(db_dummy)); + db_dummy.md_root = P_INVALID; + db_dummy.md_flags = user_flags & PERSISTENT_FLAGS; + data.iov_len = sizeof(db_dummy); + data.iov_base = &db_dummy; + WITH_CURSOR_TRACKING( + mc, + rc = mdbx_cursor_put(&mc, &key, &data, F_SUBDATA | MDBX_NOOVERWRITE)); + + if (unlikely(rc != MDBX_SUCCESS)) + goto bailout; + + dbflag |= DB_DIRTY; + } + + /* Got info, register DBI in this txn */ + txn->mt_dbxs[slot].md_name.iov_base = namedup; + txn->mt_dbxs[slot].md_name.iov_len = len; + txn->mt_dbxs[slot].md_cmp = nullptr; + txn->mt_dbxs[slot].md_dcmp = nullptr; + txn->mt_dbflags[slot] = (uint8_t)dbflag; + txn->mt_dbiseqs[slot] = (env->me_dbiseqs[slot] += 1); + + txn->mt_dbs[slot] = *(MDBX_db *)data.iov_base; + rc = mdbx_dbi_bind(txn, slot, user_flags, keycmp, datacmp); + if (unlikely(rc != MDBX_SUCCESS)) { + assert((dbflag & DB_DIRTY) == 0); + /* cleanup slot */ + txn->mt_dbxs[slot].md_name.iov_base = NULL; + txn->mt_dbxs[slot].md_name.iov_len = 0; + txn->mt_dbflags[slot] = 0; + bailout: + free(namedup); + } else { + *dbi = slot; + if (slot == txn->mt_numdbs) + txn->mt_numdbs++; + } + + mdbx_ensure(env, mdbx_fastmutex_release(&env->me_dbi_lock) == MDBX_SUCCESS); + return rc; +} 
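mdbx_dbi_open_ex() above resolves a named table (or creates it when MDBX_CREATE is given and the transaction is writable), registers the slot in the transaction and binds key/data comparators via mdbx_dbi_bind(); the plain mdbx_dbi_open() wrapper and mdbx_dbi_stat() appear just below. A usage sketch, assuming a write transaction; the table name is illustrative:

```c
#include "mdbx.h"

/* Sketch: open (or create) a named sub-table and read its statistics. */
static int open_events_table(MDBX_txn *txn, MDBX_dbi *dbi) {
  int rc = mdbx_dbi_open(txn, "events", MDBX_CREATE, dbi);
  if (rc != MDBX_SUCCESS)
    return rc;

  MDBX_stat st;
  rc = mdbx_dbi_stat(txn, *dbi, &st, sizeof(st));  /* depth, page counts, entries */
  return rc;
}
```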
+ +int mdbx_dbi_open(MDBX_txn *txn, const char *table_name, unsigned table_flags, + MDBX_dbi *dbi) { + return mdbx_dbi_open_ex(txn, table_name, table_flags, dbi, nullptr, nullptr); +} + +int __cold mdbx_dbi_stat(MDBX_txn *txn, MDBX_dbi dbi, MDBX_stat *arg, + size_t bytes) { + if (unlikely(!arg || !txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_VALID))) + return MDBX_EINVAL; + + if (unlikely(bytes != sizeof(MDBX_stat))) + return MDBX_EINVAL; + + if (unlikely(txn->mt_flags & MDBX_TXN_BLOCKED)) + return MDBX_BAD_TXN; + + if (unlikely(txn->mt_dbflags[dbi] & DB_STALE)) { + MDBX_cursor mc; + MDBX_xcursor mx; + /* Stale, must read the DB's root. cursor_init does it for us. */ + mdbx_cursor_init(&mc, txn, dbi, &mx); + } + return mdbx_stat0(txn->mt_env, &txn->mt_dbs[dbi], arg); +} + +static int mdbx_dbi_close_locked(MDBX_env *env, MDBX_dbi dbi) { + if (unlikely(dbi < CORE_DBS || dbi >= env->me_maxdbs)) + return MDBX_EINVAL; + + char *ptr = env->me_dbxs[dbi].md_name.iov_base; + /* If there was no name, this was already closed */ + if (unlikely(!ptr)) + return MDBX_BAD_DBI; + + env->me_dbxs[dbi].md_name.iov_base = NULL; + env->me_dbxs[dbi].md_name.iov_len = 0; + env->me_dbflags[dbi] = 0; + env->me_dbiseqs[dbi]++; + free(ptr); + return MDBX_SUCCESS; +} + +int mdbx_dbi_close(MDBX_env *env, MDBX_dbi dbi) { + if (unlikely(dbi < CORE_DBS || dbi >= env->me_maxdbs)) + return MDBX_EINVAL; + + int rc = mdbx_fastmutex_acquire(&env->me_dbi_lock); + if (likely(rc == MDBX_SUCCESS)) { + rc = mdbx_dbi_close_locked(env, dbi); + mdbx_ensure(env, mdbx_fastmutex_release(&env->me_dbi_lock) == MDBX_SUCCESS); + } + return rc; +} + +int mdbx_dbi_flags_ex(MDBX_txn *txn, MDBX_dbi dbi, unsigned *flags, + unsigned *state) { + if (unlikely(!txn || !flags || !state)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_VALID))) + return MDBX_EINVAL; + + *flags = txn->mt_dbs[dbi].md_flags & PERSISTENT_FLAGS; + *state = txn->mt_dbflags[dbi] & (DB_NEW | DB_DIRTY | DB_STALE); + + return MDBX_SUCCESS; +} + +int mdbx_dbi_flags(MDBX_txn *txn, MDBX_dbi dbi, unsigned *flags) { + unsigned state; + return mdbx_dbi_flags_ex(txn, dbi, flags, &state); +} + +/* Add all the DB's pages to the free list. + * [in] mc Cursor on the DB to free. + * [in] subs non-Zero to check for sub-DBs in this DB. + * Returns 0 on success, non-zero on failure. */ +static int mdbx_drop0(MDBX_cursor *mc, int subs) { + int rc; + + rc = mdbx_page_search(mc, NULL, MDBX_PS_FIRST); + if (likely(rc == MDBX_SUCCESS)) { + MDBX_txn *txn = mc->mc_txn; + MDBX_node *ni; + MDBX_cursor mx; + unsigned i; + + /* DUPSORT sub-DBs have no ovpages/DBs. Omit scanning leaves. + * This also avoids any P_LEAF2 pages, which have no nodes. + * Also if the DB doesn't have sub-DBs and has no overflow + * pages, omit scanning leaves. 
*/ + if ((mc->mc_flags & C_SUB) || (!subs && !mc->mc_db->md_overflow_pages)) + mdbx_cursor_pop(mc); + + mdbx_cursor_copy(mc, &mx); + while (mc->mc_snum > 0) { + MDBX_page *mp = mc->mc_pg[mc->mc_top]; + unsigned n = NUMKEYS(mp); + if (IS_LEAF(mp)) { + for (i = 0; i < n; i++) { + ni = NODEPTR(mp, i); + if (ni->mn_flags & F_BIGDATA) { + MDBX_page *omp; + pgno_t pg; + memcpy(&pg, NODEDATA(ni), sizeof(pg)); + rc = mdbx_page_get(mc, pg, &omp, NULL); + if (unlikely(rc)) + goto done; + mdbx_cassert(mc, IS_OVERFLOW(omp)); + rc = + mdbx_pnl_append_range(&txn->mt_befree_pages, pg, omp->mp_pages); + if (unlikely(rc)) + goto done; + mc->mc_db->md_overflow_pages -= omp->mp_pages; + if (!mc->mc_db->md_overflow_pages && !subs) + break; + } else if (subs && (ni->mn_flags & F_SUBDATA)) { + mdbx_xcursor_init1(mc, ni); + rc = mdbx_drop0(&mc->mc_xcursor->mx_cursor, 0); + if (unlikely(rc)) + goto done; + } + } + if (!subs && !mc->mc_db->md_overflow_pages) + goto pop; + } else { + if (unlikely((rc = mdbx_pnl_need(&txn->mt_befree_pages, n)) != 0)) + goto done; + for (i = 0; i < n; i++) { + pgno_t pg; + ni = NODEPTR(mp, i); + pg = NODEPGNO(ni); + /* free it */ + mdbx_pnl_xappend(txn->mt_befree_pages, pg); + } + } + if (!mc->mc_top) + break; + mdbx_cassert(mc, i <= UINT16_MAX); + mc->mc_ki[mc->mc_top] = (indx_t)i; + rc = mdbx_cursor_sibling(mc, 1); + if (rc) { + if (unlikely(rc != MDBX_NOTFOUND)) + goto done; + /* no more siblings, go back to beginning + * of previous level. */ + pop: + mdbx_cursor_pop(mc); + mc->mc_ki[0] = 0; + for (i = 1; i < mc->mc_snum; i++) { + mc->mc_ki[i] = 0; + mc->mc_pg[i] = mx.mc_pg[i]; + } + } + } + /* free it */ + rc = mdbx_pnl_append(&txn->mt_befree_pages, mc->mc_db->md_root); + done: + if (unlikely(rc)) + txn->mt_flags |= MDBX_TXN_ERROR; + } else if (rc == MDBX_NOTFOUND) { + rc = MDBX_SUCCESS; + } + mc->mc_flags &= ~C_INITIALIZED; + return rc; +} + +int mdbx_drop(MDBX_txn *txn, MDBX_dbi dbi, int del) { + if (unlikely(1 < (unsigned)del || !txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))) + return MDBX_EINVAL; + + if (unlikely(TXN_DBI_CHANGED(txn, dbi))) + return MDBX_BAD_DBI; + + if (unlikely(F_ISSET(txn->mt_flags, MDBX_TXN_RDONLY))) + return MDBX_EACCESS; + + MDBX_cursor *mc; + int rc = mdbx_cursor_open(txn, dbi, &mc); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + MDBX_env *env = txn->mt_env; + rc = mdbx_fastmutex_acquire(&env->me_dbi_lock); + if (unlikely(rc != MDBX_SUCCESS)) { + mdbx_cursor_close(mc); + return rc; + } + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))) { + rc = MDBX_EINVAL; + goto bailout; + } + + if (unlikely(TXN_DBI_CHANGED(txn, dbi))) { + rc = MDBX_BAD_DBI; + goto bailout; + } + + rc = mdbx_drop0(mc, mc->mc_db->md_flags & MDBX_DUPSORT); + /* Invalidate the dropped DB's cursors */ + for (MDBX_cursor *m2 = txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) + m2->mc_flags &= ~(C_INITIALIZED | C_EOF); + if (unlikely(rc)) + goto bailout; + + /* Can't delete the main DB */ + if (del && dbi >= CORE_DBS) { + rc = mdbx_del0(txn, MAIN_DBI, &mc->mc_dbx->md_name, NULL, F_SUBDATA); + if (likely(!rc)) { + txn->mt_dbflags[dbi] = DB_STALE; + mdbx_dbi_close_locked(env, dbi); + } else { + txn->mt_flags |= MDBX_TXN_ERROR; + } + } else { + /* reset the DB record, mark it dirty */ + txn->mt_dbflags[dbi] |= DB_DIRTY; + txn->mt_dbs[dbi].md_depth = 0; + 
txn->mt_dbs[dbi].md_branch_pages = 0; + txn->mt_dbs[dbi].md_leaf_pages = 0; + txn->mt_dbs[dbi].md_overflow_pages = 0; + txn->mt_dbs[dbi].md_entries = 0; + txn->mt_dbs[dbi].md_root = P_INVALID; + txn->mt_dbs[dbi].md_seq = 0; + + txn->mt_flags |= MDBX_TXN_DIRTY; + } + +bailout: + mdbx_cursor_close(mc); + mdbx_ensure(env, mdbx_fastmutex_release(&env->me_dbi_lock) == MDBX_SUCCESS); + return rc; +} + +int mdbx_set_compare(MDBX_txn *txn, MDBX_dbi dbi, MDBX_cmp_func *cmp) { + if (unlikely(!txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))) + return MDBX_EINVAL; + + txn->mt_dbxs[dbi].md_cmp = cmp; + return MDBX_SUCCESS; +} + +int mdbx_set_dupsort(MDBX_txn *txn, MDBX_dbi dbi, MDBX_cmp_func *cmp) { + if (unlikely(!txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))) + return MDBX_EINVAL; + + txn->mt_dbxs[dbi].md_dcmp = cmp; + return MDBX_SUCCESS; +} + +int __cold mdbx_reader_list(MDBX_env *env, MDBX_msg_func *func, void *ctx) { + char buf[64]; + int rc = 0, first = 1; + + if (unlikely(!env || !func)) + return -MDBX_EINVAL; + + if (unlikely(env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EBADSIGN; + + const MDBX_lockinfo *const lck = env->me_lck; + const unsigned snap_nreaders = lck->mti_numreaders; + for (unsigned i = 0; i < snap_nreaders; i++) { + if (lck->mti_readers[i].mr_pid) { + const txnid_t txnid = lck->mti_readers[i].mr_txnid; + if (txnid == ~(txnid_t)0) + snprintf(buf, sizeof(buf), "%10" PRIuPTR " %" PRIxPTR " -\n", + (uintptr_t)lck->mti_readers[i].mr_pid, + (uintptr_t)lck->mti_readers[i].mr_tid); + else + snprintf(buf, sizeof(buf), "%10" PRIuPTR " %" PRIxPTR " %" PRIaTXN "\n", + (uintptr_t)lck->mti_readers[i].mr_pid, + (uintptr_t)lck->mti_readers[i].mr_tid, txnid); + + if (first) { + first = 0; + rc = func(" pid thread txnid\n", ctx); + if (rc < 0) + break; + } + rc = func(buf, ctx); + if (rc < 0) + break; + } + } + if (first) + rc = func("(no active readers)\n", ctx); + + return rc; +} + +/* Insert pid into list if not already present. + * return -1 if already present. */ +static int __cold mdbx_pid_insert(mdbx_pid_t *ids, mdbx_pid_t pid) { + /* binary search of pid in list */ + unsigned base = 0; + unsigned cursor = 1; + int val = 0; + unsigned n = ids[0]; + + while (n > 0) { + unsigned pivot = n >> 1; + cursor = base + pivot + 1; + val = pid - ids[cursor]; + + if (val < 0) { + n = pivot; + } else if (val > 0) { + base = cursor; + n -= pivot + 1; + } else { + /* found, so it's a duplicate */ + return -1; + } + } + + if (val > 0) + ++cursor; + + ids[0]++; + for (n = ids[0]; n > cursor; n--) + ids[n] = ids[n - 1]; + ids[n] = pid; + return 0; +} + +int __cold mdbx_reader_check(MDBX_env *env, int *dead) { + if (unlikely(!env || env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EINVAL; + if (dead) + *dead = 0; + return mdbx_reader_check0(env, false, dead); +} + +/* Return: + * MDBX_RESULT_TRUE - done and mutex recovered + * MDBX_SUCCESS - done + * Otherwise errcode. 
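/* Usage sketch (illustrative; not from this commit): driving the DBI functions
 * above from application code. mdbx_txn_begin(), mdbx_txn_commit() and
 * mdbx_txn_abort() are assumed from the public API (declared in mdbx.h outside
 * this excerpt); the table name "events" is hypothetical. */
#include "mdbx.h"
#include <stdio.h>

static int purge_events_table(MDBX_env *env) {
  MDBX_txn *txn = NULL;
  int rc = mdbx_txn_begin(env, NULL, 0, &txn); /* read-write transaction */
  if (rc != MDBX_SUCCESS)
    return rc;

  MDBX_dbi dbi;
  rc = mdbx_dbi_open(txn, "events", MDBX_CREATE, &dbi);
  if (rc == MDBX_SUCCESS) {
    MDBX_stat st;
    if (mdbx_dbi_stat(txn, dbi, &st, sizeof(st)) == MDBX_SUCCESS)
      printf("events: %llu entries, tree depth %u\n",
             (unsigned long long)st.ms_entries, st.ms_depth);

    /* del = 0 empties the table but keeps the handle; del = 1 would delete it */
    rc = mdbx_drop(txn, dbi, 0);
  }

  if (rc == MDBX_SUCCESS)
    return mdbx_txn_commit(txn);
  mdbx_txn_abort(txn);
  return rc;
}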
*/ +int __cold mdbx_reader_check0(MDBX_env *env, int rdt_locked, int *dead) { + assert(rdt_locked >= 0); + + if (unlikely(env->me_pid != mdbx_getpid())) { + env->me_flags |= MDBX_FATAL_ERROR; + return MDBX_PANIC; + } + + MDBX_lockinfo *const lck = env->me_lck; + const unsigned snap_nreaders = lck->mti_numreaders; + mdbx_pid_t *pids = alloca((snap_nreaders + 1) * sizeof(mdbx_pid_t)); + pids[0] = 0; + + int rc = MDBX_SUCCESS, count = 0; + for (unsigned i = 0; i < snap_nreaders; i++) { + const mdbx_pid_t pid = lck->mti_readers[i].mr_pid; + if (pid == 0) + continue /* skip empty */; + if (pid == env->me_pid) + continue /* skip self */; + if (mdbx_pid_insert(pids, pid) != 0) + continue /* such pid already processed */; + + int err = mdbx_rpid_check(env, pid); + if (err == MDBX_RESULT_TRUE) + continue /* reader is live */; + + if (err != MDBX_SUCCESS) { + rc = err; + break /* mdbx_rpid_check() failed */; + } + + /* stale reader found */ + if (!rdt_locked) { + err = mdbx_rdt_lock(env); + if (MDBX_IS_ERROR(err)) { + rc = err; + break; + } + + rdt_locked = -1; + if (err == MDBX_RESULT_TRUE) { + /* mutex recovered, the mdbx_mutex_failed() checked all readers */ + rc = MDBX_RESULT_TRUE; + break; + } + + /* a other process may have clean and reused slot, recheck */ + if (lck->mti_readers[i].mr_pid != pid) + continue; + + err = mdbx_rpid_check(env, pid); + if (MDBX_IS_ERROR(err)) { + rc = err; + break; + } + + if (err != MDBX_SUCCESS) + continue /* the race with other process, slot reused */; + } + + /* clean it */ + for (unsigned j = i; j < snap_nreaders; j++) { + if (lck->mti_readers[j].mr_pid == pid) { + mdbx_debug("clear stale reader pid %" PRIuPTR " txn %" PRIaTXN "", + (size_t)pid, lck->mti_readers[j].mr_txnid); + lck->mti_readers[j].mr_pid = 0; + lck->mti_reader_finished_flag = true; + count++; + } + } + } + + if (rdt_locked < 0) + mdbx_rdt_unlock(env); + + if (dead) + *dead = count; + return rc; +} + +int __cold mdbx_setup_debug(int flags, MDBX_debug_func *logger) { + unsigned ret = mdbx_runtime_flags; + mdbx_runtime_flags = flags; + +#ifdef __linux__ + if (flags & MDBX_DBG_DUMP) { + int core_filter_fd = open("/proc/self/coredump_filter", O_TRUNC | O_RDWR); + if (core_filter_fd >= 0) { + char buf[32]; + const unsigned r = pread(core_filter_fd, buf, sizeof(buf), 0); + if (r > 0 && r < sizeof(buf)) { + buf[r] = 0; + unsigned long mask = strtoul(buf, NULL, 16); + if (mask != ULONG_MAX) { + mask |= 1 << 3 /* Dump file-backed shared mappings */; + mask |= 1 << 6 /* Dump shared huge pages */; + mask |= 1 << 8 /* Dump shared DAX pages */; + unsigned w = snprintf(buf, sizeof(buf), "0x%lx\n", mask); + if (w > 0 && w < sizeof(buf)) { + w = pwrite(core_filter_fd, buf, w, 0); + (void)w; + } + } + } + close(core_filter_fd); + } + } +#endif /* __linux__ */ + + mdbx_debug_logger = logger; + return ret; +} + +static txnid_t __cold mdbx_oomkick(MDBX_env *env, const txnid_t laggard) { + mdbx_debug("DB size maxed out"); + + int retry; + for (retry = 0; retry < INT_MAX; ++retry) { + txnid_t oldest = mdbx_reclaiming_detent(env); + mdbx_assert(env, oldest < env->me_txn0->mt_txnid); + mdbx_assert(env, oldest >= laggard); + mdbx_assert(env, oldest >= env->me_oldest[0]); + if (oldest == laggard) + return oldest; + + if (MDBX_IS_ERROR(mdbx_reader_check0(env, false, NULL))) + break; + + MDBX_reader *const rtbl = env->me_lck->mti_readers; + MDBX_reader *asleep = nullptr; + for (int i = env->me_lck->mti_numreaders; --i >= 0;) { + if (rtbl[i].mr_pid) { + mdbx_jitter4testing(true); + const txnid_t snap = rtbl[i].mr_txnid; + 
if (oldest > snap && laggard <= /* ignore pending updates */ snap) { + oldest = snap; + asleep = &rtbl[i]; + } + } + } + + if (laggard < oldest || !asleep) { + if (retry && env->me_oom_func) { + /* LY: notify end of oom-loop */ + const txnid_t gap = oldest - laggard; + env->me_oom_func(env, 0, 0, laggard, + (gap < UINT_MAX) ? (unsigned)gap : UINT_MAX, -retry); + } + mdbx_notice("oom-kick: update oldest %" PRIaTXN " -> %" PRIaTXN, + env->me_oldest[0], oldest); + mdbx_assert(env, env->me_oldest[0] <= oldest); + return env->me_oldest[0] = oldest; + } + + mdbx_tid_t tid; + mdbx_pid_t pid; + int rc; + + if (!env->me_oom_func) + break; + + pid = asleep->mr_pid; + tid = asleep->mr_tid; + if (asleep->mr_txnid != laggard || pid <= 0) + continue; + + const txnid_t gap = + mdbx_meta_txnid_stable(env, mdbx_meta_head(env)) - laggard; + rc = env->me_oom_func(env, pid, tid, laggard, + (gap < UINT_MAX) ? (unsigned)gap : UINT_MAX, retry); + if (rc < 0) + break; + + if (rc) { + asleep->mr_txnid = ~(txnid_t)0; + env->me_lck->mti_reader_finished_flag = true; + if (rc > 1) { + asleep->mr_tid = 0; + asleep->mr_pid = 0; + mdbx_coherent_barrier(); + } + } + } + + if (retry && env->me_oom_func) { + /* LY: notify end of oom-loop */ + env->me_oom_func(env, 0, 0, laggard, 0, -retry); + } + return mdbx_find_oldest(env->me_txn); +} + +int __cold mdbx_env_set_syncbytes(MDBX_env *env, size_t bytes) { + if (unlikely(!env)) + return MDBX_EINVAL; + + if (unlikely(env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EBADSIGN; + + env->me_sync_threshold = bytes; + return env->me_map ? mdbx_env_sync(env, 0) : MDBX_SUCCESS; +} + +int __cold mdbx_env_set_oomfunc(MDBX_env *env, MDBX_oom_func *oomfunc) { + if (unlikely(!env)) + return MDBX_EINVAL; + + if (unlikely(env->me_signature != MDBX_ME_SIGNATURE)) + return MDBX_EBADSIGN; + + env->me_oom_func = oomfunc; + return MDBX_SUCCESS; +} + +MDBX_oom_func *__cold mdbx_env_get_oomfunc(MDBX_env *env) { + return likely(env && env->me_signature == MDBX_ME_SIGNATURE) + ? env->me_oom_func + : NULL; +} + +#ifdef __SANITIZE_THREAD__ +/* LY: avoid tsan-trap by me_txn, mm_last_pg and mt_next_pgno */ +__attribute__((no_sanitize_thread, noinline)) +#endif +int mdbx_txn_straggler(MDBX_txn *txn, int *percent) +{ + if (unlikely(!txn)) + return -MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + MDBX_env *env = txn->mt_env; + if (unlikely((txn->mt_flags & MDBX_RDONLY) == 0)) { + if (percent) + *percent = + (int)((txn->mt_next_pgno * UINT64_C(100) + txn->mt_end_pgno / 2) / + txn->mt_end_pgno); + return -1; + } + + txnid_t recent; + MDBX_meta *meta; + do { + meta = mdbx_meta_head(env); + recent = mdbx_meta_txnid_fluid(env, meta); + if (percent) { + const pgno_t maxpg = meta->mm_geo.now; + *percent = (int)((meta->mm_geo.next * UINT64_C(100) + maxpg / 2) / maxpg); + } + } while (unlikely(recent != mdbx_meta_txnid_fluid(env, meta))); + + txnid_t lag = recent - txn->mt_ro_reader->mr_txnid; + return (lag > INT_MAX) ? INT_MAX : (int)lag; +} + +typedef struct mdbx_walk_ctx { + MDBX_txn *mw_txn; + void *mw_user; + MDBX_pgvisitor_func *mw_visitor; +} mdbx_walk_ctx_t; + +/* Depth-first tree traversal. 
*/ +static int __cold mdbx_env_walk(mdbx_walk_ctx_t *ctx, const char *dbi, + pgno_t pg, int deep) { + MDBX_page *mp; + int rc, i, nkeys; + size_t header_size, unused_size, payload_size, align_bytes; + const char *type; + + if (pg == P_INVALID) + return MDBX_SUCCESS; /* empty db */ + + MDBX_cursor mc; + memset(&mc, 0, sizeof(mc)); + mc.mc_snum = 1; + mc.mc_txn = ctx->mw_txn; + + rc = mdbx_page_get(&mc, pg, &mp, NULL); + if (rc) + return rc; + if (pg != mp->mp_pgno) + return MDBX_CORRUPTED; + + nkeys = NUMKEYS(mp); + header_size = IS_LEAF2(mp) ? PAGEHDRSZ : PAGEHDRSZ + mp->mp_lower; + unused_size = SIZELEFT(mp); + payload_size = 0; + + /* LY: Don't use mask here, e.g bitwise + * (P_BRANCH|P_LEAF|P_LEAF2|P_META|P_OVERFLOW|P_SUBP). + * Pages should not me marked dirty/loose or otherwise. */ + switch (mp->mp_flags) { + case P_BRANCH: + type = "branch"; + if (nkeys < 1) + return MDBX_CORRUPTED; + break; + case P_LEAF: + type = "leaf"; + break; + case P_LEAF | P_SUBP: + type = "dupsort-subleaf"; + break; + case P_LEAF | P_LEAF2: + type = "dupfixed-leaf"; + break; + case P_LEAF | P_LEAF2 | P_SUBP: + type = "dupsort-dupfixed-subleaf"; + break; + case P_META: + case P_OVERFLOW: + default: + return MDBX_CORRUPTED; + } + + for (align_bytes = i = 0; i < nkeys; + align_bytes += ((payload_size + align_bytes) & 1), i++) { + MDBX_node *node; + + if (IS_LEAF2(mp)) { + /* LEAF2 pages have no mp_ptrs[] or node headers */ + payload_size += mp->mp_leaf2_ksize; + continue; + } + + node = NODEPTR(mp, i); + payload_size += NODESIZE + node->mn_ksize; + + if (IS_BRANCH(mp)) { + rc = mdbx_env_walk(ctx, dbi, NODEPGNO(node), deep); + if (rc) + return rc; + continue; + } + + assert(IS_LEAF(mp)); + if (node->mn_flags & F_BIGDATA) { + MDBX_page *omp; + pgno_t *opg; + size_t over_header, over_payload, over_unused; + + payload_size += sizeof(pgno_t); + opg = NODEDATA(node); + rc = mdbx_page_get(&mc, *opg, &omp, NULL); + if (rc) + return rc; + if (*opg != omp->mp_pgno) + return MDBX_CORRUPTED; + /* LY: Don't use mask here, e.g bitwise + * (P_BRANCH|P_LEAF|P_LEAF2|P_META|P_OVERFLOW|P_SUBP). + * Pages should not me marked dirty/loose or otherwise. */ + if (P_OVERFLOW != omp->mp_flags) + return MDBX_CORRUPTED; + + over_header = PAGEHDRSZ; + over_payload = NODEDSZ(node); + over_unused = pgno2bytes(ctx->mw_txn->mt_env, omp->mp_pages) - + over_payload - over_header; + + rc = ctx->mw_visitor(*opg, omp->mp_pages, ctx->mw_user, dbi, + "overflow-data", 1, over_payload, over_header, + over_unused); + if (rc) + return rc; + continue; + } + + payload_size += NODEDSZ(node); + if (node->mn_flags & F_SUBDATA) { + MDBX_db *db = NODEDATA(node); + char *name = NULL; + + if (!(node->mn_flags & F_DUPDATA)) { + name = NODEKEY(node); + ptrdiff_t namelen = (char *)db - name; + name = memcpy(alloca(namelen + 1), name, namelen); + name[namelen] = 0; + } + rc = mdbx_env_walk(ctx, (name && name[0]) ? 
name : dbi, db->md_root, + deep + 1); + if (rc) + return rc; + } + } + + return ctx->mw_visitor(mp->mp_pgno, 1, ctx->mw_user, dbi, type, nkeys, + payload_size, header_size, unused_size + align_bytes); +} + +int __cold mdbx_env_pgwalk(MDBX_txn *txn, MDBX_pgvisitor_func *visitor, + void *user) { + if (unlikely(!txn)) + return MDBX_BAD_TXN; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + mdbx_walk_ctx_t ctx; + ctx.mw_txn = txn; + ctx.mw_user = user; + ctx.mw_visitor = visitor; + + int rc = visitor(0, NUM_METAS, user, "meta", "meta", NUM_METAS, + sizeof(MDBX_meta) * NUM_METAS, PAGEHDRSZ * NUM_METAS, + (txn->mt_env->me_psize - sizeof(MDBX_meta) - PAGEHDRSZ) * + NUM_METAS); + if (!rc) + rc = mdbx_env_walk(&ctx, "free", txn->mt_dbs[FREE_DBI].md_root, 0); + if (!rc) + rc = mdbx_env_walk(&ctx, "main", txn->mt_dbs[MAIN_DBI].md_root, 0); + if (!rc) + rc = visitor(P_INVALID, 0, user, NULL, NULL, 0, 0, 0, 0); + return rc; +} + +int mdbx_canary_put(MDBX_txn *txn, const mdbx_canary *canary) { + if (unlikely(!txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(txn->mt_flags & MDBX_TXN_BLOCKED)) + return MDBX_BAD_TXN; + + if (unlikely(F_ISSET(txn->mt_flags, MDBX_TXN_RDONLY))) + return MDBX_EACCESS; + + if (likely(canary)) { + if (txn->mt_canary.x == canary->x && txn->mt_canary.y == canary->y && + txn->mt_canary.z == canary->z) + return MDBX_SUCCESS; + txn->mt_canary.x = canary->x; + txn->mt_canary.y = canary->y; + txn->mt_canary.z = canary->z; + } + txn->mt_canary.v = txn->mt_txnid; + + if ((txn->mt_flags & MDBX_TXN_DIRTY) == 0) { + MDBX_env *env = txn->mt_env; + txn->mt_flags |= MDBX_TXN_DIRTY; + env->me_sync_pending += env->me_psize; + } + + return MDBX_SUCCESS; +} + +int mdbx_canary_get(MDBX_txn *txn, mdbx_canary *canary) { + if (unlikely(txn == NULL || canary == NULL)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + *canary = txn->mt_canary; + return MDBX_SUCCESS; +} + +int mdbx_cursor_on_first(MDBX_cursor *mc) { + if (unlikely(mc == NULL)) + return MDBX_EINVAL; + + if (unlikely(mc->mc_signature != MDBX_MC_SIGNATURE)) + return MDBX_EBADSIGN; + + if (!(mc->mc_flags & C_INITIALIZED)) + return MDBX_RESULT_FALSE; + + for (unsigned i = 0; i < mc->mc_snum; ++i) { + if (mc->mc_ki[i]) + return MDBX_RESULT_FALSE; + } + + return MDBX_RESULT_TRUE; +} + +int mdbx_cursor_on_last(MDBX_cursor *mc) { + if (unlikely(mc == NULL)) + return MDBX_EINVAL; + + if (unlikely(mc->mc_signature != MDBX_MC_SIGNATURE)) + return MDBX_EBADSIGN; + + if (!(mc->mc_flags & C_INITIALIZED)) + return MDBX_RESULT_FALSE; + + for (unsigned i = 0; i < mc->mc_snum; ++i) { + unsigned nkeys = NUMKEYS(mc->mc_pg[i]); + if (mc->mc_ki[i] < nkeys - 1) + return MDBX_RESULT_FALSE; + } + + return MDBX_RESULT_TRUE; +} + +int mdbx_cursor_eof(MDBX_cursor *mc) { + if (unlikely(mc == NULL)) + return MDBX_EINVAL; + + if (unlikely(mc->mc_signature != MDBX_MC_SIGNATURE)) + return MDBX_EBADSIGN; + + if ((mc->mc_flags & C_INITIALIZED) == 0) + return MDBX_RESULT_TRUE; + + if (mc->mc_snum == 0) + return MDBX_RESULT_TRUE; + + if ((mc->mc_flags & C_EOF) && + mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top])) + return MDBX_RESULT_TRUE; + + 
return MDBX_RESULT_FALSE;
+}
+
+static int mdbx_is_samedata(const MDBX_val *a, const MDBX_val *b) {
+  return a->iov_len == b->iov_len &&
+         memcmp(a->iov_base, b->iov_base, a->iov_len) == 0;
+}
+
+/* Allows updating or deleting an existing record while obtaining the previous
+ * value of the data in old_data. If new_data is NULL the record is deleted,
+ * otherwise it is updated/inserted.
+ *
+ * The current value may reside in an already modified (dirty) page. In that
+ * case the page would be overwritten during the update and the old value
+ * itself lost. Therefore old_data must initially supply an additional buffer
+ * into which the old value can be copied. If the buffer passed in is too
+ * small, the function returns -1 and sets old_data->iov_len to the required
+ * size.
+ *
+ * For non-unique keys a second usage scenario is also possible, in which
+ * old_data selects the particular record (among those with the same key) to
+ * be deleted/updated. To request this scenario, MDBX_CURRENT and
+ * MDBX_NOOVERWRITE must both be set in flags. Exactly this combination was
+ * chosen because it is otherwise meaningless, which makes it possible to
+ * identify a request of this kind.
+ *
+ * The function could be replaced by the corresponding cursor operations after
+ * two improvements (TODO):
+ *  - external allocation of cursors, including on the stack (without malloc);
+ *  - querying the status of a page by its address (knowing about P_DIRTY).
+ */
+int mdbx_replace(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, MDBX_val *new_data,
+                 MDBX_val *old_data, unsigned flags) {
+  if (unlikely(!key || !old_data || !txn || old_data == new_data))
+    return MDBX_EINVAL;
+
+  if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE))
+    return MDBX_EBADSIGN;
+
+  if (unlikely(txn->mt_owner != mdbx_thread_self()))
+    return MDBX_THREAD_MISMATCH;
+
+  if (unlikely(old_data->iov_base == NULL && old_data->iov_len))
+    return MDBX_EINVAL;
+
+  if (unlikely(new_data == NULL && !(flags & MDBX_CURRENT)))
+    return MDBX_EINVAL;
+
+  if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)))
+    return MDBX_EINVAL;
+
+  if (unlikely(flags &
+               ~(MDBX_NOOVERWRITE | MDBX_NODUPDATA | MDBX_RESERVE |
+                 MDBX_APPEND | MDBX_APPENDDUP | MDBX_CURRENT)))
+    return MDBX_EINVAL;
+
+  if (unlikely(txn->mt_flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED)))
+    return (txn->mt_flags & MDBX_TXN_RDONLY) ? MDBX_EACCESS : MDBX_BAD_TXN;
+
+  MDBX_cursor mc;
+  MDBX_xcursor mx;
+  mdbx_cursor_init(&mc, txn, dbi, &mx);
+  mc.mc_next = txn->mt_cursors[dbi];
+  txn->mt_cursors[dbi] = &mc;
+
+  int rc;
+  MDBX_val present_key = *key;
+  if (F_ISSET(flags, MDBX_CURRENT | MDBX_NOOVERWRITE)) {
+    /* old_data holds the value that selects the particular duplicate */
+    if (unlikely(!(txn->mt_dbs[dbi].md_flags & MDBX_DUPSORT))) {
+      rc = MDBX_EINVAL;
+      goto bailout;
+    }
+
+    /* drop the extra bit, it only marked the requested mode */
+    flags -= MDBX_NOOVERWRITE;
+
+    rc = mdbx_cursor_get(&mc, &present_key, old_data, MDBX_GET_BOTH);
+    if (rc != MDBX_SUCCESS)
+      goto bailout;
+
+    if (new_data) {
+      /* updating a particular duplicate */
+      if (mdbx_is_samedata(old_data, new_data))
+        /* if the data matches, there is nothing to do */
+        goto bailout;
+    }
+  } else {
+    /* old_data is a buffer for saving the previous value */
+    if (unlikely(new_data && old_data->iov_base == new_data->iov_base))
+      return MDBX_EINVAL;
+    MDBX_val present_data;
+    rc = mdbx_cursor_get(&mc, &present_key, &present_data, MDBX_SET_KEY);
+    if (unlikely(rc != MDBX_SUCCESS)) {
+      old_data->iov_base = NULL;
+      old_data->iov_len = rc;
+      if (rc != MDBX_NOTFOUND || (flags & MDBX_CURRENT))
+        goto bailout;
+    } else if (flags & MDBX_NOOVERWRITE) {
+      rc = MDBX_KEYEXIST;
+      *old_data = present_data;
+      goto bailout;
+    } else {
+      MDBX_page *page = mc.mc_pg[mc.mc_top];
+      if (txn->mt_dbs[dbi].md_flags & MDBX_DUPSORT) {
+        if (flags & MDBX_CURRENT) {
+          /* for non-unique keys, allow update/delete only if the key has a
+           * single value */
+          MDBX_node *leaf = NODEPTR(page, mc.mc_ki[mc.mc_top]);
+          if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+            mdbx_tassert(txn, XCURSOR_INITED(&mc) &&
+                                  mc.mc_xcursor->mx_db.md_entries > 1);
+            if (mc.mc_xcursor->mx_db.md_entries > 1) {
+              rc = MDBX_EMULTIVAL;
+              goto bailout;
+            }
+          }
+          /* if the data matches, there is nothing to do */
+          if (new_data && mdbx_is_samedata(&present_data, new_data)) {
+            *old_data = *new_data;
+            goto bailout;
+          }
+          /* In the original LMDB the MDBX_CURRENT flag here would replace the
+           * data without regard to the MDBX_DUPSORT ordering, but here this is
+           * acceptable in any case, since we have verified that the key has
+           * only one value. */
+        } else if ((flags & MDBX_NODUPDATA) &&
+                   mdbx_is_samedata(&present_data, new_data)) {
+          /* the data matches and MDBX_NODUPDATA is set */
+          rc = MDBX_KEYEXIST;
+          goto bailout;
+        }
+      } else {
+        /* if the data matches, there is nothing to do */
+        if (new_data && mdbx_is_samedata(&present_data, new_data)) {
+          *old_data = *new_data;
+          goto bailout;
+        }
+        flags |= MDBX_CURRENT;
+      }
+
+      if (page->mp_flags & P_DIRTY) {
+        if (unlikely(old_data->iov_len < present_data.iov_len)) {
+          old_data->iov_base = NULL;
+          old_data->iov_len = present_data.iov_len;
+          rc = MDBX_RESULT_TRUE;
+          goto bailout;
+        }
+        memcpy(old_data->iov_base, present_data.iov_base, present_data.iov_len);
+        old_data->iov_len = present_data.iov_len;
+      } else {
+        *old_data = present_data;
+      }
+    }
+  }
+
+  if (likely(new_data))
+    rc = mdbx_cursor_put(&mc, key, new_data, flags);
+  else
+    rc = mdbx_cursor_del(&mc, 0);
+
+bailout:
+  txn->mt_cursors[dbi] = mc.mc_next;
+  return rc;
+}
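/* Usage sketch (illustrative; not from this commit): updating a record with
 * mdbx_replace() while capturing the previous value. Per the commentary above,
 * when the old value lives in a dirty page and the supplied buffer is too
 * small, the call returns MDBX_RESULT_TRUE (-1) with old_data.iov_len set to
 * the required size, so the caller can retry with a larger buffer. txn/dbi are
 * assumed to be set up by the caller. */
#include "mdbx.h"
#include <stdlib.h>

static int update_keeping_old(MDBX_txn *txn, MDBX_dbi dbi,
                              MDBX_val *key, MDBX_val *new_data) {
  char small[256];
  MDBX_val old_data = { small, sizeof(small) };

  int rc = mdbx_replace(txn, dbi, key, new_data, &old_data, 0);
  if (rc == MDBX_RESULT_TRUE) {
    /* previous value did not fit; old_data.iov_len holds the needed size */
    void *big = malloc(old_data.iov_len);
    if (!big)
      return MDBX_ENOMEM;
    old_data.iov_base = big;
    rc = mdbx_replace(txn, dbi, key, new_data, &old_data, 0);
    /* ... old_data now holds the previous value ... */
    free(big);
  }
  return rc;
}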
+
+int mdbx_get_ex(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, MDBX_val *data,
+                size_t *values_count) {
+  DKBUF;
+  mdbx_debug("===> get db %u key [%s]", dbi, DKEY(key));
+
+  if (unlikely(!key || !data || !txn))
+    return MDBX_EINVAL;
+
+  if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE))
+    return MDBX_EBADSIGN;
+
+  if (unlikely(txn->mt_owner != mdbx_thread_self()))
+    return MDBX_THREAD_MISMATCH;
+
+  if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)))
+    return MDBX_EINVAL;
+
+  if (unlikely(txn->mt_flags & MDBX_TXN_BLOCKED))
+    return MDBX_BAD_TXN;
+
+  MDBX_cursor mc;
+  MDBX_xcursor mx;
+  mdbx_cursor_init(&mc, txn, dbi, &mx);
+
+  int exact = 0;
+  int rc = mdbx_cursor_set(&mc, key, data, MDBX_SET_KEY, &exact);
+  if (unlikely(rc != MDBX_SUCCESS)) {
+    if (rc == MDBX_NOTFOUND && values_count)
+      *values_count = 0;
+    return rc;
+  }
+
+  if (values_count) {
+    *values_count = 1;
+    if (mc.mc_xcursor != NULL) {
+      MDBX_node *leaf = NODEPTR(mc.mc_pg[mc.mc_top], mc.mc_ki[mc.mc_top]);
+      if (F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+        mdbx_tassert(txn, mc.mc_xcursor == &mx &&
+                              (mx.mx_cursor.mc_flags & C_INITIALIZED));
+        *values_count = (sizeof(*values_count) >= sizeof(mx.mx_db.md_entries) ||
+                         mx.mx_db.md_entries <= SIZE_MAX)
+                            ? (size_t)mx.mx_db.md_entries
+                            : SIZE_MAX;
+      }
+    }
+  }
+  return MDBX_SUCCESS;
+}
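/* Usage sketch (illustrative; not from this commit): mdbx_get_ex() works like a
 * plain lookup but also reports how many values are stored under the key,
 * which is mainly interesting for MDBX_DUPSORT tables. */
#include "mdbx.h"
#include <stdio.h>

static void report_key(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key) {
  MDBX_val data;
  size_t values = 0;
  int rc = mdbx_get_ex(txn, dbi, key, &data, &values);
  if (rc == MDBX_SUCCESS)
    printf("first value: %zu bytes, %zu value(s) under this key\n",
           data.iov_len, values);
  else if (rc == MDBX_NOTFOUND)
    printf("key not found\n");
}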
+
+/* Reports whether the given address lies in a "dirty" page of the given write
+ * transaction. Ultimately this makes it possible to avoid redundant copying of
+ * data from NOT-dirty pages.
+ *
+ * "Dirty" pages are those that have already been modified during the write
+ * transaction; any further change may therefore overwrite such pages. Hence
+ * functions that perform modifications must NOT receive, as arguments,
+ * pointers to data located in such pages. "NOT-dirty" pages, in turn, are
+ * copied before being modified.
+ *
+ * In other words, data from "dirty" pages must either be copied before being
+ * passed as arguments for further modifications, or be rejected at the
+ * argument-validation stage.
+ *
+ * Thus the function makes it possible both to avoid redundant copying and to
+ * perform a more thorough validation of the arguments.
+ *
+ * IMPORTANT: The pointer passed in must point to the beginning of the data.
+ * Only then is it guaranteed that the actual page header is physically located
+ * in the same memory page, including for multi-page P_OVERFLOW pages with long
+ * data. */
+int mdbx_is_dirty(const MDBX_txn *txn, const void *ptr) {
+  if (unlikely(!txn))
+    return MDBX_EINVAL;
+
+  if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE))
+    return MDBX_EBADSIGN;
+
+  if (unlikely(txn->mt_owner != mdbx_thread_self()))
+    return MDBX_THREAD_MISMATCH;
+
+  if (unlikely(txn->mt_flags & MDBX_TXN_RDONLY))
+    return MDBX_RESULT_FALSE;
+
+  const MDBX_env *env = txn->mt_env;
+  const uintptr_t mask = ~(uintptr_t)(env->me_psize - 1);
+  const MDBX_page *page = (const MDBX_page *)((uintptr_t)ptr & mask);
+
+  /* LY: Absolute certainty of the result is problematic here, because the
+   * P_DIRTY flag in LMDB may mean something other than what was originally
+   * intended; for details see the logic of mdbx_page_touch().
+   *
+   * Moreover, in the mode WITHOUT WRITEMAP dirty pages are allocated via
+   * malloc(), i.e. they lie outside the mmap range, and then distinguishing a
+   * really dirty page from a pointer to user data would require scanning the
+   * dirtylist, which is expensive.
+   *
+   * Nevertheless, a page is definitely "not dirty" (will not be rewritten
+   * during the transaction) if its address is inside the mmap range and the
+   * page header carries no P_DIRTY flag. */
+  if (env->me_map < (uint8_t *)page) {
+    const size_t usedbytes = pgno2bytes(env, txn->mt_next_pgno);
+    if ((uint8_t *)page < env->me_map + usedbytes) {
+      /* the page is inside the range, look at the flags */
+      return (page->mp_flags & (P_DIRTY | P_LOOSE | P_KEEP))
+                 ? MDBX_RESULT_TRUE
+                 : MDBX_RESULT_FALSE;
+    }
+    /* Hypothetically a situation is possible here where the pointer addresses
+     * something within the mmap but beyond the allocated pages. That is a
+     * grave error which cannot be reached without some major violations.
+     * Therefore this case is not checked, other than by an assert that the
+     * page is outside the mmap range. */
+    mdbx_tassert(txn, (uint8_t *)page >= env->me_map + env->me_mapsize);
+  }
+
+  /* The page is outside the used mmap range, i.e. either an invalid address
+   * was passed to the function, or the address is in a shadow page that was
+   * allocated via malloc().
+   *
+   * For the WRITE_MAP mode such a page is definitely "not dirty", while for
+   * the modes without WRITE_MAP the dirty and spilled page lists of the
+   * transactions (including nested ones) would have to be examined.
+   *
+   * Therefore return false for WRITE_MAP and always true for the other modes.
+   * This logic has several advantages:
+   *  - no time is spent scanning the lists;
+   *  - the result is always safe (it may be a false positive, but never a
+   *    false negative);
+   *  - the result does not depend on the nesting of transactions or on the
+   *    relative position of the given transaction within that nesting. */
+  return (env->me_flags & MDBX_WRITEMAP) ?
MDBX_RESULT_FALSE : MDBX_RESULT_TRUE; +} + +int mdbx_dbi_sequence(MDBX_txn *txn, MDBX_dbi dbi, uint64_t *result, + uint64_t increment) { + if (unlikely(!txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_EBADSIGN; + + if (unlikely(txn->mt_owner != mdbx_thread_self())) + return MDBX_THREAD_MISMATCH; + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))) + return MDBX_EINVAL; + + if (unlikely(TXN_DBI_CHANGED(txn, dbi))) + return MDBX_BAD_DBI; + + MDBX_db *dbs = &txn->mt_dbs[dbi]; + if (likely(result)) + *result = dbs->md_seq; + + if (likely(increment > 0)) { + if (unlikely(txn->mt_flags & MDBX_TXN_BLOCKED)) + return MDBX_BAD_TXN; + + if (unlikely(F_ISSET(txn->mt_flags, MDBX_TXN_RDONLY))) + return MDBX_EACCESS; + + uint64_t new = dbs->md_seq + increment; + if (unlikely(new < increment)) + return MDBX_RESULT_TRUE; + + assert(new > dbs->md_seq); + dbs->md_seq = new; + txn->mt_flags |= MDBX_TXN_DIRTY; + txn->mt_dbflags[dbi] |= DB_DIRTY; + } + + return MDBX_SUCCESS; +} + +/*----------------------------------------------------------------------------*/ +/* attribute support functions for Nexenta */ + +static __inline int mdbx_attr_peek(MDBX_val *data, mdbx_attr_t *attrptr) { + if (unlikely(data->iov_len < sizeof(mdbx_attr_t))) + return MDBX_INCOMPATIBLE; + + if (likely(attrptr != NULL)) + *attrptr = *(mdbx_attr_t *)data->iov_base; + data->iov_len -= sizeof(mdbx_attr_t); + data->iov_base = + likely(data->iov_len > 0) ? ((mdbx_attr_t *)data->iov_base) + 1 : NULL; + + return MDBX_SUCCESS; +} + +static __inline int mdbx_attr_poke(MDBX_val *reserved, MDBX_val *data, + mdbx_attr_t attr, unsigned flags) { + mdbx_attr_t *space = reserved->iov_base; + if (flags & MDBX_RESERVE) { + if (likely(data != NULL)) { + data->iov_base = data->iov_len ? space + 1 : NULL; + } + } else { + *space = attr; + if (likely(data != NULL)) { + memcpy(space + 1, data->iov_base, data->iov_len); + } + } + + return MDBX_SUCCESS; +} + +int mdbx_cursor_get_attr(MDBX_cursor *mc, MDBX_val *key, MDBX_val *data, + mdbx_attr_t *attrptr, MDBX_cursor_op op) { + int rc = mdbx_cursor_get(mc, key, data, op); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + return mdbx_attr_peek(data, attrptr); +} + +int mdbx_get_attr(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, MDBX_val *data, + uint64_t *attrptr) { + int rc = mdbx_get(txn, dbi, key, data); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + return mdbx_attr_peek(data, attrptr); +} + +int mdbx_put_attr(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, MDBX_val *data, + mdbx_attr_t attr, unsigned flags) { + MDBX_val reserve; + reserve.iov_base = NULL; + reserve.iov_len = (data ? data->iov_len : 0) + sizeof(mdbx_attr_t); + + int rc = mdbx_put(txn, dbi, key, &reserve, flags | MDBX_RESERVE); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + return mdbx_attr_poke(&reserve, data, attr, flags); +} + +int mdbx_cursor_put_attr(MDBX_cursor *cursor, MDBX_val *key, MDBX_val *data, + mdbx_attr_t attr, unsigned flags) { + MDBX_val reserve; + reserve.iov_base = NULL; + reserve.iov_len = (data ? 
data->iov_len : 0) + sizeof(mdbx_attr_t); + + int rc = mdbx_cursor_put(cursor, key, &reserve, flags | MDBX_RESERVE); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + return mdbx_attr_poke(&reserve, data, attr, flags); +} + +int mdbx_set_attr(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, MDBX_val *data, + mdbx_attr_t attr) { + if (unlikely(!key || !txn)) + return MDBX_EINVAL; + + if (unlikely(txn->mt_signature != MDBX_MT_SIGNATURE)) + return MDBX_VERSION_MISMATCH; + + if (unlikely(!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))) + return MDBX_EINVAL; + + if (unlikely(txn->mt_flags & (MDBX_TXN_RDONLY | MDBX_TXN_BLOCKED))) + return (txn->mt_flags & MDBX_TXN_RDONLY) ? MDBX_EACCESS : MDBX_BAD_TXN; + + MDBX_cursor mc; + MDBX_xcursor mx; + MDBX_val old_data; + mdbx_cursor_init(&mc, txn, dbi, &mx); + int rc = mdbx_cursor_set(&mc, key, &old_data, MDBX_SET, NULL); + if (unlikely(rc != MDBX_SUCCESS)) { + if (rc == MDBX_NOTFOUND && data) { + mc.mc_next = txn->mt_cursors[dbi]; + txn->mt_cursors[dbi] = &mc; + rc = mdbx_cursor_put_attr(&mc, key, data, attr, 0); + txn->mt_cursors[dbi] = mc.mc_next; + } + return rc; + } + + mdbx_attr_t old_attr = 0; + rc = mdbx_attr_peek(&old_data, &old_attr); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + + if (old_attr == attr && (!data || (data->iov_len == old_data.iov_len && + memcmp(data->iov_base, old_data.iov_base, + old_data.iov_len) == 0))) + return MDBX_SUCCESS; + + mc.mc_next = txn->mt_cursors[dbi]; + txn->mt_cursors[dbi] = &mc; + rc = mdbx_cursor_put_attr(&mc, key, data ? data : &old_data, attr, + MDBX_CURRENT); + txn->mt_cursors[dbi] = mc.mc_next; + return rc; +} + +//---------------------------------------------------------------------------- + +#ifdef __SANITIZE_ADDRESS__ +LIBMDBX_API __attribute__((weak)) const char *__asan_default_options() { + return "symbolize=1:allow_addr2line=1:" +#ifdef _DEBUG + "debug=1:" +#endif /* _DEBUG */ + "report_globals=1:" + "replace_str=1:replace_intrin=1:" + "malloc_context_size=9:" + "detect_leaks=1:" + "check_printf=1:" + "detect_deadlocks=1:" +#ifndef LTO_ENABLED + "check_initialization_order=1:" +#endif + "detect_stack_use_after_return=1:" + "intercept_tls_get_addr=1:" + "decorate_proc_maps=1:" + "abort_on_error=1"; +} +#endif /* __SANITIZE_ADDRESS__ */ diff --git a/plugins/Dbx_mdb/src/mdbx/mdbx.h b/plugins/Dbx_mdb/src/mdbx/mdbx.h new file mode 100644 index 0000000000..4d95ae252a --- /dev/null +++ b/plugins/Dbx_mdb/src/mdbx/mdbx.h @@ -0,0 +1,1815 @@ +/* LICENSE AND COPYRUSTING ***************************************************** + * + * Copyright 2015-2017 Leonid Yuriev <leo@yuriev.ru> + * and other libmdbx authors: please see AUTHORS file. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * <http://www.OpenLDAP.org/license.html>. + * + * --- + * + * This code is derived from "LMDB engine" written by + * Howard Chu (Symas Corporation), which itself derived from btree.c + * written by Martin Hedenfalk. + * + * --- + * + * Portions Copyright 2011-2015 Howard Chu, Symas Corp. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. 
+ * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * <http://www.OpenLDAP.org/license.html>. + * + * --- + * + * Portions Copyright (c) 2009, 2010 Martin Hedenfalk <martin@bzero.se> + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +/* ACKNOWLEDGEMENTS ************************************************************ + * + * Howard Chu (Symas Corporation) - the author of LMDB, + * from which originated the MDBX in 2015. + * + * Martin Hedenfalk <martin@bzero.se> - the author of `btree.c` code, + * which was used for begin development of LMDB. */ + +#pragma once +#ifndef LIBMDBX_H +#define LIBMDBX_H + +/* IMPENDING CHANGES WARNING *************************************************** + * + * Now MDBX is under active development and until November 2017 is expected a + * big change both of API and database format. Unfortunately those update will + * lead to loss of compatibility with previous versions. + * + * The aim of this revolution in providing a clearer robust API and adding new + * features, including the database properties. */ + +/*--------------------------------------------------------------------------*/ + +#ifdef _MSC_VER +#pragma warning(push, 1) +#pragma warning(disable : 4548) /* expression before comma has no effect; \ + expected expression with side - effect */ +#pragma warning(disable : 4530) /* C++ exception handler used, but unwind \ + * semantics are not enabled. Specify /EHsc */ +#pragma warning(disable : 4577) /* 'noexcept' used with no exception handling \ + * mode specified; termination on exception is \ + * not guaranteed. 
Specify /EHsc */ +#endif /* _MSC_VER (warnings) */ + +#include <stdarg.h> +#include <stddef.h> +#include <stdint.h> + +#if defined(_WIN32) || defined(_WIN64) + +#include <windows.h> +#include <winnt.h> +#ifndef __mode_t_defined +typedef unsigned short mode_t; +#endif +typedef HANDLE mdbx_filehandle_t; +typedef DWORD mdbx_pid_t; +typedef DWORD mdbx_tid_t; +#define MDBX_ENODATA ERROR_HANDLE_EOF +#define MDBX_EINVAL ERROR_INVALID_PARAMETER +#define MDBX_EACCESS ERROR_ACCESS_DENIED +#define MDBX_ENOMEM ERROR_OUTOFMEMORY +#define MDBX_EROFS ERROR_FILE_READ_ONLY +#define MDBX_ENOSYS ERROR_NOT_SUPPORTED +#define MDBX_EIO ERROR_WRITE_FAULT +#define MDBX_EPERM ERROR_INVALID_FUNCTION +#define MDBX_EINTR ERROR_CANCELLED + +#else + +#include <errno.h> /* for error codes */ +#include <pthread.h> /* for pthread_t */ +#include <sys/types.h> /* for pid_t */ +#include <sys/uio.h> /* for truct iovec */ +#define HAVE_STRUCT_IOVEC 1 +typedef int mdbx_filehandle_t; +typedef pid_t mdbx_pid_t; +typedef pthread_t mdbx_tid_t; +#define MDBX_ENODATA ENODATA +#define MDBX_EINVAL EINVAL +#define MDBX_EACCESS EACCES +#define MDBX_ENOMEM ENOMEM +#define MDBX_EROFS EROFS +#define MDBX_ENOSYS ENOSYS +#define MDBX_EIO EIO +#define MDBX_EPERM EPERM +#define MDBX_EINTR EINTR +#endif + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +/*--------------------------------------------------------------------------*/ + +#ifndef __has_attribute +#define __has_attribute(x) (0) +#endif + +#ifndef __dll_export +#if defined(_WIN32) || defined(__CYGWIN__) +#if defined(__GNUC__) || __has_attribute(dllexport) +#define __dll_export __attribute__((dllexport)) +#elif defined(_MSC_VER) +#define __dll_export __declspec(dllexport) +#else +#define __dll_export +#endif +#elif defined(__GNUC__) || __has_attribute(visibility) +#define __dll_export __attribute__((visibility("default"))) +#else +#define __dll_export +#endif +#endif /* __dll_export */ + +#ifndef __dll_import +#if defined(_WIN32) || defined(__CYGWIN__) +#if defined(__GNUC__) || __has_attribute(dllimport) +#define __dll_import __attribute__((dllimport)) +#elif defined(_MSC_VER) +#define __dll_import __declspec(dllimport) +#else +#define __dll_import +#endif +#else +#define __dll_import +#endif +#endif /* __dll_import */ + +/*--------------------------------------------------------------------------*/ + +#define MDBX_VERSION_MAJOR 0 +#define MDBX_VERSION_MINOR 0 + +#if defined(LIBMDBX_EXPORTS) +#define LIBMDBX_API __dll_export +#elif defined(LIBMDBX_IMPORTS) +#define LIBMDBX_API __dll_import +#else +#define LIBMDBX_API +#endif /* LIBMDBX_API */ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct mdbx_version_info { + uint8_t major; + uint8_t minor; + uint16_t release; + uint32_t revision; + struct { + const char *datetime; + const char *tree; + const char *commit; + const char *describe; + } git; +} mdbx_version_info; + +typedef struct mdbx_build_info { + const char *datetime; + const char *target; + const char *options; + const char *compiler; + const char *flags; +} mdbx_build_info; + +extern LIBMDBX_API const mdbx_version_info mdbx_version; +extern LIBMDBX_API const mdbx_build_info mdbx_build; + +/* The name of the lock file in the DB environment */ +#define MDBX_LOCKNAME "/mdbx.lck" +/* The name of the data file in the DB environment */ +#define MDBX_DATANAME "/mdbx.dat" +/* The suffix of the lock file when no subdir is used */ +#define MDBX_LOCK_SUFFIX "-lck" + +/* Opaque structure for a database environment. 
+ * + * A DB environment supports multiple databases, all residing in the same + * shared-memory map. */ +typedef struct MDBX_env MDBX_env; + +/* Opaque structure for a transaction handle. + * + * All database operations require a transaction handle. Transactions may be + * read-only or read-write. */ +typedef struct MDBX_txn MDBX_txn; + +/* A handle for an individual database in the DB environment. */ +typedef uint32_t MDBX_dbi; + +/* Opaque structure for navigating through a database */ +typedef struct MDBX_cursor MDBX_cursor; + +/* Generic structure used for passing keys and data in and out + * of the database. + * + * Values returned from the database are valid only until a subsequent + * update operation, or the end of the transaction. Do not modify or + * free them, they commonly point into the database itself. + * + * Key sizes must be between 1 and mdbx_env_get_maxkeysize() inclusive. + * The same applies to data sizes in databases with the MDBX_DUPSORT flag. + * Other data items can in theory be from 0 to 0xffffffff bytes long. */ +#ifndef HAVE_STRUCT_IOVEC +struct iovec { + void *iov_base; + size_t iov_len; +}; +#define HAVE_STRUCT_IOVEC +#endif /* HAVE_STRUCT_IOVEC */ + +typedef struct iovec MDBX_val; + +/* The maximum size of a data item. + * MDBX only store a 32 bit value for node sizes. */ +#define MDBX_MAXDATASIZE INT32_MAX + +/* A callback function used to compare two keys in a database */ +typedef int(MDBX_cmp_func)(const MDBX_val *a, const MDBX_val *b); + +/* Environment Flags */ +/* no environment directory */ +#define MDBX_NOSUBDIR 0x4000u +/* don't fsync after commit */ +#define MDBX_NOSYNC 0x10000u +/* read only */ +#define MDBX_RDONLY 0x20000u +/* don't fsync metapage after commit */ +#define MDBX_NOMETASYNC 0x40000u +/* use writable mmap */ +#define MDBX_WRITEMAP 0x80000u +/* use asynchronous msync when MDBX_WRITEMAP is used */ +#define MDBX_MAPASYNC 0x100000u +/* tie reader locktable slots to MDBX_txn objects instead of to threads */ +#define MDBX_NOTLS 0x200000u +/* don't do any locking, caller must manage their own locks + * WARNING: libmdbx don't support this mode. */ +#define MDBX_NOLOCK__UNSUPPORTED 0x400000u +/* don't do readahead */ +#define MDBX_NORDAHEAD 0x800000u +/* don't initialize malloc'd memory before writing to datafile */ +#define MDBX_NOMEMINIT 0x1000000u +/* aim to coalesce FreeDB records */ +#define MDBX_COALESCE 0x2000000u +/* LIFO policy for reclaiming FreeDB records */ +#define MDBX_LIFORECLAIM 0x4000000u +/* make a steady-sync only on close and explicit env-sync */ +#define MDBX_UTTERLY_NOSYNC (MDBX_NOSYNC | MDBX_MAPASYNC) +/* debuging option, fill/perturb released pages */ +#define MDBX_PAGEPERTURB 0x8000000u + +/* Database Flags */ +/* use reverse string keys */ +#define MDBX_REVERSEKEY 0x02u +/* use sorted duplicates */ +#define MDBX_DUPSORT 0x04u +/* numeric keys in native byte order, either uint32_t or uint64_t. + * The keys must all be of the same size. */ +#define MDBX_INTEGERKEY 0x08u +/* with MDBX_DUPSORT, sorted dup items have fixed size */ +#define MDBX_DUPFIXED 0x10u +/* with MDBX_DUPSORT, dups are MDBX_INTEGERKEY-style integers */ +#define MDBX_INTEGERDUP 0x20u +/* with MDBX_DUPSORT, use reverse string dups */ +#define MDBX_REVERSEDUP 0x40u +/* create DB if not already existing */ +#define MDBX_CREATE 0x40000u + +/* Write Flags */ +/* For put: Don't write if the key already exists. */ +#define MDBX_NOOVERWRITE 0x10u +/* Only for MDBX_DUPSORT + * For put: don't write if the key and data pair already exist. 
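/* Usage sketch (illustrative; not from this commit): an MDBX_val is a plain
 * struct iovec, i.e. a pointer plus a length, so building keys and data is
 * trivial. mdbx_put()/mdbx_get() are assumed from the public API (they are
 * used by the attribute helpers earlier in mdbx.c; their prototypes lie
 * outside this excerpt). Values returned from the database stay valid only
 * until the end of the transaction, hence the copy before returning. */
#include "mdbx.h"
#include <stdint.h>
#include <string.h>

static int roundtrip(MDBX_txn *txn, MDBX_dbi dbi) {
  const char *name = "answer";
  uint32_t value = 42;

  MDBX_val key = { (void *)name, strlen(name) };
  MDBX_val data = { &value, sizeof(value) };

  int rc = mdbx_put(txn, dbi, &key, &data, 0 /* or e.g. MDBX_NOOVERWRITE */);
  if (rc != MDBX_SUCCESS)
    return rc;

  MDBX_val out;
  rc = mdbx_get(txn, dbi, &key, &out);
  if (rc == MDBX_SUCCESS && out.iov_len == sizeof(value))
    memcpy(&value, out.iov_base, sizeof(value)); /* copy out before txn ends */
  return rc;
}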
+ * For mdbx_cursor_del: remove all duplicate data items. */ +#define MDBX_NODUPDATA 0x20u +/* For mdbx_cursor_put: overwrite the current key/data pair + * MDBX allows this flag for mdbx_put() for explicit overwrite/update without + * insertion. */ +#define MDBX_CURRENT 0x40u +/* For put: Just reserve space for data, don't copy it. Return a + * pointer to the reserved space. */ +#define MDBX_RESERVE 0x10000u +/* Data is being appended, don't split full pages. */ +#define MDBX_APPEND 0x20000u +/* Duplicate data is being appended, don't split full pages. */ +#define MDBX_APPENDDUP 0x40000u +/* Store multiple data items in one call. Only for MDBX_DUPFIXED. */ +#define MDBX_MULTIPLE 0x80000u + +/* Copy Flags */ +/* Compacting copy: Omit free space from copy, and renumber all + * pages sequentially. */ +#define MDBX_CP_COMPACT 1u + +/* Cursor Get operations. + * + * This is the set of all operations for retrieving data + * using a cursor. */ +typedef enum MDBX_cursor_op { + MDBX_FIRST, /* Position at first key/data item */ + MDBX_FIRST_DUP, /* MDBX_DUPSORT-only: Position at first data item + * of current key. */ + MDBX_GET_BOTH, /* MDBX_DUPSORT-only: Position at key/data pair. */ + MDBX_GET_BOTH_RANGE, /* MDBX_DUPSORT-only: position at key, nearest data. */ + MDBX_GET_CURRENT, /* Return key/data at current cursor position */ + MDBX_GET_MULTIPLE, /* MDBX_DUPFIXED-only: Return key and up to a page of + * duplicate data items from current cursor position. + * Move cursor to prepare for MDBX_NEXT_MULTIPLE.*/ + MDBX_LAST, /* Position at last key/data item */ + MDBX_LAST_DUP, /* MDBX_DUPSORT-only: Position at last data item + * of current key. */ + MDBX_NEXT, /* Position at next data item */ + MDBX_NEXT_DUP, /* MDBX_DUPSORT-only: Position at next data item + * of current key. */ + MDBX_NEXT_MULTIPLE, /* MDBX_DUPFIXED-only: Return key and up to a page of + * duplicate data items from next cursor position. + * Move cursor to prepare for MDBX_NEXT_MULTIPLE. */ + MDBX_NEXT_NODUP, /* Position at first data item of next key */ + MDBX_PREV, /* Position at previous data item */ + MDBX_PREV_DUP, /* MDBX_DUPSORT-only: Position at previous data item + * of current key. */ + MDBX_PREV_NODUP, /* Position at last data item of previous key */ + MDBX_SET, /* Position at specified key */ + MDBX_SET_KEY, /* Position at specified key, return both key and data */ + MDBX_SET_RANGE, /* Position at first key greater than or equal to + * specified key. */ + MDBX_PREV_MULTIPLE /* MDBX_DUPFIXED-only: Position at previous page and + * return key and up to a page of duplicate data items. 
*/ +} MDBX_cursor_op; + +/* Return Codes + * BerkeleyDB uses -30800 to -30999, we'll go under them */ + +/* Successful result */ +#define MDBX_SUCCESS 0 +#define MDBX_RESULT_FALSE MDBX_SUCCESS +#define MDBX_RESULT_TRUE (-1) + +/* key/data pair already exists */ +#define MDBX_KEYEXIST (-30799) +/* key/data pair not found (EOF) */ +#define MDBX_NOTFOUND (-30798) +/* Requested page not found - this usually indicates corruption */ +#define MDBX_PAGE_NOTFOUND (-30797) +/* Located page was wrong type */ +#define MDBX_CORRUPTED (-30796) +/* Update of meta page failed or environment had fatal error */ +#define MDBX_PANIC (-30795) +/* DB file version mismatch with libmdbx */ +#define MDBX_VERSION_MISMATCH (-30794) +/* File is not a valid MDBX file */ +#define MDBX_INVALID (-30793) +/* Environment mapsize reached */ +#define MDBX_MAP_FULL (-30792) +/* Environment maxdbs reached */ +#define MDBX_DBS_FULL (-30791) +/* Environment maxreaders reached */ +#define MDBX_READERS_FULL (-30790) +/* Txn has too many dirty pages */ +#define MDBX_TXN_FULL (-30788) +/* Cursor stack too deep - internal error */ +#define MDBX_CURSOR_FULL (-30787) +/* Page has not enough space - internal error */ +#define MDBX_PAGE_FULL (-30786) +/* Database contents grew beyond environment mapsize */ +#define MDBX_MAP_RESIZED (-30785) +/* Operation and DB incompatible, or DB type changed. This can mean: + * - The operation expects an MDBX_DUPSORT / MDBX_DUPFIXED database. + * - Opening a named DB when the unnamed DB has MDBX_DUPSORT/MDBX_INTEGERKEY. + * - Accessing a data record as a database, or vice versa. + * - The database was dropped and recreated with different flags. */ +#define MDBX_INCOMPATIBLE (-30784) +/* Invalid reuse of reader locktable slot */ +#define MDBX_BAD_RSLOT (-30783) +/* Transaction must abort, has a child, or is invalid */ +#define MDBX_BAD_TXN (-30782) +/* Unsupported size of key/DB name/data, or wrong DUPFIXED size */ +#define MDBX_BAD_VALSIZE (-30781) +/* The specified DBI was changed unexpectedly */ +#define MDBX_BAD_DBI (-30780) +/* Unexpected problem - txn should abort */ +#define MDBX_PROBLEM (-30779) +/* The last defined error code */ +#define MDBX_LAST_ERRCODE MDBX_PROBLEM + +/* The mdbx_put() or mdbx_replace() was called for key, + that has more that one associated value. */ +#define MDBX_EMULTIVAL (-30421) + +/* Bad signature of a runtime object(s), this can mean: + * - memory corruption or double-free; + * - ABI version mismatch (rare case); */ +#define MDBX_EBADSIGN (-30420) + +/* Database should be recovered, but this could NOT be done automatically + * right now (e.g. in readonly mode and so forth). */ +#define MDBX_WANNA_RECOVERY (-30419) + +/* The given key value is mismatched to the current cursor position, + * when mdbx_cursor_put() called with MDBX_CURRENT option. */ +#define MDBX_EKEYMISMATCH (-30418) + +/* Database is too large for current system, + * e.g. could NOT be mapped into RAM. */ +#define MDBX_TOO_LARGE (-30417) + +/* A thread has attempted to use a not owned object, + * e.g. a transaction that started by another thread. */ +#define MDBX_THREAD_MISMATCH (-30416) + +/* Statistics for a database in the environment */ +typedef struct MDBX_stat { + uint32_t ms_psize; /* Size of a database page. + * This is currently the same for all databases. 
*/ + uint32_t ms_depth; /* Depth (height) of the B-tree */ + uint64_t ms_branch_pages; /* Number of internal (non-leaf) pages */ + uint64_t ms_leaf_pages; /* Number of leaf pages */ + uint64_t ms_overflow_pages; /* Number of overflow pages */ + uint64_t ms_entries; /* Number of data items */ +} MDBX_stat; + +/* Information about the environment */ +typedef struct MDBX_envinfo { + struct { + uint64_t lower; /* lower limit for datafile size */ + uint64_t upper; /* upper limit for datafile size */ + uint64_t current; /* current datafile size */ + uint64_t shrink; /* shrink theshold for datafile */ + uint64_t grow; /* growth step for datafile */ + } mi_geo; + uint64_t mi_mapsize; /* Size of the data memory map */ + uint64_t mi_last_pgno; /* ID of the last used page */ + uint64_t mi_recent_txnid; /* ID of the last committed transaction */ + uint64_t mi_latter_reader_txnid; /* ID of the last reader transaction */ + uint64_t mi_meta0_txnid, mi_meta0_sign; + uint64_t mi_meta1_txnid, mi_meta1_sign; + uint64_t mi_meta2_txnid, mi_meta2_sign; + uint32_t mi_maxreaders; /* max reader slots in the environment */ + uint32_t mi_numreaders; /* max reader slots used in the environment */ + uint32_t mi_dxb_pagesize; /* database pagesize */ + uint32_t mi_sys_pagesize; /* system pagesize */ +} MDBX_envinfo; + +/* Return a string describing a given error code. + * + * This function is a superset of the ANSI C X3.159-1989 (ANSI C) strerror(3) + * function. If the error code is greater than or equal to 0, then the string + * returned by the system function strerror(3) is returned. If the error code + * is less than 0, an error string corresponding to the MDBX library error is + * returned. See errors for a list of MDBX-specific error codes. + * + * [in] err The error code + * + * Returns "error message" The description of the error */ +LIBMDBX_API const char *mdbx_strerror(int errnum); +LIBMDBX_API const char *mdbx_strerror_r(int errnum, char *buf, size_t buflen); + +/* Create an MDBX environment handle. + * + * This function allocates memory for a MDBX_env structure. To release + * the allocated memory and discard the handle, call mdbx_env_close(). + * Before the handle may be used, it must be opened using mdbx_env_open(). + * Various other options may also need to be set before opening the handle, + * e.g. mdbx_env_set_mapsize(), mdbx_env_set_maxreaders(), + * mdbx_env_set_maxdbs(), depending on usage requirements. + * + * [out] env The address where the new handle will be stored + * + * Returns A non-zero error value on failure and 0 on success. */ +LIBMDBX_API int mdbx_env_create(MDBX_env **penv); + +/* Open an environment handle. + * + * If this function fails, mdbx_env_close() must be called to discard + * the MDBX_env handle. + * + * [in] env An environment handle returned by mdbx_env_create() + * [in] path The directory in which the database files reside. + * This directory must already exist and be writable. + * [in] flags Special options for this environment. This parameter + * must be set to 0 or by bitwise OR'ing together one + * or more of the values described here. + * + * Flags set by mdbx_env_set_flags() are also used: + * - MDBX_NOSUBDIR + * By default, MDBX creates its environment in a directory whose + * pathname is given in path, and creates its data and lock files + * under that directory. With this option, path is used as-is for + * the database main data file. The database lock file is the path + * with "-lock" appended. + * + * - MDBX_RDONLY + * Open the environment in read-only mode. 
No write operations will + * be allowed. MDBX will still modify the lock file - except on + * read-only filesystems, where MDBX does not use locks. + * + * - MDBX_WRITEMAP + * Use a writeable memory map unless MDBX_RDONLY is set. This uses fewer + * mallocs but loses protection from application bugs like wild pointer + * writes and other bad updates into the database. + * This may be slightly faster for DBs that fit entirely in RAM, + * but is slower for DBs larger than RAM. + * Incompatible with nested transactions. + * Do not mix processes with and without MDBX_WRITEMAP on the same + * environment. This can defeat durability (mdbx_env_sync etc). + * + * - MDBX_NOMETASYNC + * Flush system buffers to disk only once per transaction, omit the + * metadata flush. Defer that until the system flushes files to disk, + * or next non-MDBX_RDONLY commit or mdbx_env_sync(). This optimization + * maintains database integrity, but a system crash may undo the last + * committed transaction. I.e. it preserves the ACI (atomicity, + * consistency, isolation) but not D (durability) database property. + * This flag may be changed at any time using mdbx_env_set_flags(). + * + * - MDBX_NOSYNC + * Don't flush system buffers to disk when committing a transaction. + * This optimization means a system crash can corrupt the database or + * lose the last transactions if buffers are not yet flushed to disk. + * The risk is governed by how often the system flushes dirty buffers + * to disk and how often mdbx_env_sync() is called. However, if the + * filesystem preserves write order and the MDBX_WRITEMAP and/or + * MDBX_LIFORECLAIM flags are not used, transactions exhibit ACI + * (atomicity, consistency, isolation) properties and only lose D + * (durability). I.e. database integrity is maintained, but a system + * crash may undo the final transactions. + * + * Note that (MDBX_NOSYNC | MDBX_WRITEMAP) leaves the system with no + * hint for when to write transactions to disk. + * Therefore the (MDBX_MAPASYNC | MDBX_WRITEMAP) may be preferable. + * This flag may be changed at any time using mdbx_env_set_flags(). + * + * - MDBX_UTTERLY_NOSYNC (internally MDBX_NOSYNC | MDBX_MAPASYNC) + * FIXME: TODO + * + * - MDBX_MAPASYNC + * When using MDBX_WRITEMAP, use asynchronous flushes to disk. As with + * MDBX_NOSYNC, a system crash can then corrupt the database or lose + * the last transactions. Calling mdbx_env_sync() ensures on-disk + * database integrity until next commit. This flag may be changed at + * any time using mdbx_env_set_flags(). + * + * - MDBX_NOTLS + * Don't use Thread-Local Storage. Tie reader locktable slots to + * MDBX_txn objects instead of to threads. I.e. mdbx_txn_reset() keeps + * the slot reseved for the MDBX_txn object. A thread may use parallel + * read-only transactions. A read-only transaction may span threads if + * the user synchronizes its use. Applications that multiplex many + * user threads over individual OS threads need this option. Such an + * application must also serialize the write transactions in an OS + * thread, since MDBX's write locking is unaware of the user threads. + * + * - MDBX_NOLOCK (don't supported by MDBX) + * Don't do any locking. If concurrent access is anticipated, the + * caller must manage all concurrency itself. For proper operation + * the caller must enforce single-writer semantics, and must ensure + * that no readers are using old transactions while a writer is + * active. 
The simplest approach is to use an exclusive lock so that + * no readers may be active at all when a writer begins. + * + * - MDBX_NORDAHEAD + * Turn off readahead. Most operating systems perform readahead on + * read requests by default. This option turns it off if the OS + * supports it. Turning it off may help random read performance + * when the DB is larger than RAM and system RAM is full. + * + * - MDBX_NOMEMINIT + * Don't initialize malloc'd memory before writing to unused spaces + * in the data file. By default, memory for pages written to the data + * file is obtained using malloc. While these pages may be reused in + * subsequent transactions, freshly malloc'd pages will be initialized + * to zeroes before use. This avoids persisting leftover data from other + * code (that used the heap and subsequently freed the memory) into the + * data file. Note that many other system libraries may allocate and free + * memory from the heap for arbitrary uses. E.g., stdio may use the heap + * for file I/O buffers. This initialization step has a modest performance + * cost so some applications may want to disable it using this flag. This + * option can be a problem for applications which handle sensitive data + * like passwords, and it makes memory checkers like Valgrind noisy. This + * flag is not needed with MDBX_WRITEMAP, which writes directly to the + * mmap instead of using malloc for pages. The initialization is also + * skipped if MDBX_RESERVE is used; the caller is expected to overwrite + * all of the memory that was reserved in that case. This flag may be + * changed at any time using mdbx_env_set_flags(). + * + * - MDBX_COALESCE + * Aim to coalesce records while reclaiming FreeDB. This flag may be + * changed at any time using mdbx_env_set_flags(). + * FIXME: TODO + * + * - MDBX_LIFORECLAIM + * LIFO policy for reclaiming FreeDB records. This significantly reduce + * write IPOs in case MDBX_NOSYNC with periodically checkpoints. + * FIXME: TODO + * + * [in] mode The UNIX permissions to set on created files. + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_VERSION_MISMATCH - the version of the MDBX library doesn't match the + * version that created the database environment. + * - MDBX_INVALID - the environment file headers are corrupted. + * - MDBX_ENOENT - the directory specified by the path parameter + * doesn't exist. + * - MDBX_EACCES - the user didn't have permission to access + * the environment files. + * - MDBX_EAGAIN - the environment was locked by another process. */ +LIBMDBX_API int mdbx_env_open(MDBX_env *env, const char *path, unsigned flags, + mode_t mode); +LIBMDBX_API int mdbx_env_open_ex(MDBX_env *env, const char *path, + unsigned flags, mode_t mode, int *exclusive); + +/* Copy an MDBX environment to the specified path, with options. + * + * This function may be used to make a backup of an existing environment. + * No lockfile is created, since it gets recreated at need. + * NOTE: This call can trigger significant file size growth if run in + * parallel with write transactions, because it employs a read-only + * transaction. See long-lived transactions under "Caveats" section. + * + * [in] env An environment handle returned by mdbx_env_create(). It must + * have already been opened successfully. + * [in] path The directory in which the copy will reside. This directory + * must already exist and be writable but must otherwise be empty. + * [in] flags Special options for this operation. 
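/* Usage sketch (illustrative; not from this commit): a minimal create/open/close
 * sequence for the environment API declared above. mdbx_env_set_maxdbs() is
 * assumed from the public API (it is mentioned above but declared outside this
 * excerpt); the path, flags and limits are arbitrary examples. */
#include "mdbx.h"
#include <stdio.h>

static int open_and_inspect(const char *path) {
  MDBX_env *env = NULL;
  int rc = mdbx_env_create(&env);
  if (rc != MDBX_SUCCESS)
    return rc;

  rc = mdbx_env_set_maxdbs(env, 8); /* number of named tables we plan to open */
  if (rc == MDBX_SUCCESS)
    rc = mdbx_env_open(env, path, MDBX_NOSUBDIR | MDBX_COALESCE, 0644);

  if (rc != MDBX_SUCCESS) {
    fprintf(stderr, "opening %s failed: %s\n", path, mdbx_strerror(rc));
    mdbx_env_close(env); /* discard the handle, as required on failure */
    return rc;
  }

  MDBX_envinfo info;
  if (mdbx_env_info(env, &info, sizeof(info)) == MDBX_SUCCESS)
    printf("mapsize %llu bytes, last pgno %llu\n",
           (unsigned long long)info.mi_mapsize,
           (unsigned long long)info.mi_last_pgno);

  mdbx_env_close(env);
  return MDBX_SUCCESS;
}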
This parameter must be set + * to 0 or by bitwise OR'ing together one or more of the values + * described here: + * + * - MDBX_CP_COMPACT + * Perform compaction while copying: omit free pages and sequentially + * renumber all pages in output. This option consumes a little more + * CPU for processing, but may run more quickly than the default, since + * it skips free pages. + * + * NOTE: Currently it fails if the environment has suffered a page leak. + * + * Returns A non-zero error value on failure and 0 on success. */ +LIBMDBX_API int mdbx_env_copy(MDBX_env *env, const char *path, unsigned flags); + +/* Copy an MDBX environment to the specified file descriptor, + * with options. + * + * This function may be used to make a backup of an existing environment. + * No lockfile is created, since it gets recreated as needed. See + * mdbx_env_copy() for further details. + * + * NOTE: This call can trigger significant file size growth if run in + * parallel with write transactions, because it employs a read-only + * transaction. See long-lived transactions under the "Caveats" section. + * + * [in] env An environment handle returned by mdbx_env_create(). It must + * have already been opened successfully. + * [in] fd The file descriptor to write the copy to. It must have already + * been opened for write access. + * [in] flags Special options for this operation. See mdbx_env_copy() for + * options. + * + * Returns A non-zero error value on failure and 0 on success. */ +LIBMDBX_API int mdbx_env_copy2fd(MDBX_env *env, mdbx_filehandle_t fd, + unsigned flags); + +/* Return statistics about the MDBX environment. + * + * [in] env An environment handle returned by mdbx_env_create() + * [out] stat The address of an MDBX_stat structure where the statistics + * will be copied */ +LIBMDBX_API int mdbx_env_stat(MDBX_env *env, MDBX_stat *stat, size_t bytes); + +/* Return information about the MDBX environment. + * + * [in] env An environment handle returned by mdbx_env_create() + * [out] info The address of an MDBX_envinfo structure + * where the information will be copied */ +LIBMDBX_API int mdbx_env_info(MDBX_env *env, MDBX_envinfo *info, size_t bytes); + +/* Flush the data buffers to disk. + * + * Data is always written to disk when mdbx_txn_commit() is called, + * but the operating system may keep it buffered. MDBX always flushes + * the OS buffers upon commit as well, unless the environment was + * opened with MDBX_NOSYNC, or in part with MDBX_NOMETASYNC. This call is + * not valid if the environment was opened with MDBX_RDONLY. + * + * [in] env An environment handle returned by mdbx_env_create() + * [in] force If non-zero, force a synchronous flush. Otherwise if the + * environment has the MDBX_NOSYNC flag set the flushes will be + * omitted, and with MDBX_MAPASYNC they will be asynchronous. + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EACCES - the environment is read-only. + * - MDBX_EINVAL - an invalid parameter was specified. + * - MDBX_EIO - an error occurred during synchronization. */ +LIBMDBX_API int mdbx_env_sync(MDBX_env *env, int force); + +/* Close the environment and release the memory map. + * + * Only a single thread may call this function. All transactions, databases, + * and cursors must already be closed before calling this function. Attempts + * to use any such handles after calling this function will cause a SIGSEGV. + * The environment handle will be freed and must not be used again after this + * call. 
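+ *
+ * A minimal illustrative sketch of the typical lifecycle (a hedged example;
+ * the "./example-db" path and 0664 mode are placeholders, and error handling
+ * is trimmed):
+ *
+ *   MDBX_env *env = NULL;
+ *   int rc = mdbx_env_create(&env);
+ *   if (rc == MDBX_SUCCESS) {
+ *     rc = mdbx_env_open(env, "./example-db", 0, 0664);
+ *     // ... begin/commit transactions here while rc == MDBX_SUCCESS ...
+ *     mdbx_env_close(env);  // also required after a failed open
+ *   }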
+ * + * [in] env An environment handle returned by mdbx_env_create() + * [in] dont_sync A don't-sync flag: if non-zero, the last checkpoint (meta-page + * update) will be kept "as is" and may still be "weak" in the + * NOSYNC/MAPASYNC modes. Such a "weak" checkpoint will be + * ignored on the next opening, and transactions since the + * last non-weak checkpoint (meta-page update) will be rolled back + * to guarantee consistency. */ +LIBMDBX_API void mdbx_env_close(MDBX_env *env); + +/* Set environment flags. + * + * This may be used to set some flags in addition to those from + * mdbx_env_open(), or to unset these flags. If several threads + * change the flags at the same time, the result is undefined. + * + * [in] env An environment handle returned by mdbx_env_create() + * [in] flags The flags to change, bitwise OR'ed together + * [in] onoff A non-zero value sets the flags, zero clears them. + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_env_set_flags(MDBX_env *env, unsigned flags, int onoff); + +/* Get environment flags. + * + * [in] env An environment handle returned by mdbx_env_create() + * [out] flags The address of an integer to store the flags + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_env_get_flags(MDBX_env *env, unsigned *flags); + +/* Return the path that was used in mdbx_env_open(). + * + * [in] env An environment handle returned by mdbx_env_create() + * [out] path Address of a string pointer to contain the path. + * This is the actual string in the environment, not a copy. + * It should not be altered in any way. + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_env_get_path(MDBX_env *env, const char **path); + +/* Return the file descriptor for the given environment. + * + * NOTE: All MDBX file descriptors have FD_CLOEXEC and + * cannot be used after exec() and/or fork(). + * + * [in] env An environment handle returned by mdbx_env_create() + * [out] fd Address of an int to contain the descriptor. + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_env_get_fd(MDBX_env *env, mdbx_filehandle_t *fd); + +/* Set the size of the memory map to use for this environment. + * + * The size should be a multiple of the OS page size. The default is + * 10485760 bytes. The size of the memory map is also the maximum size + * of the database. The value should be chosen as large as possible, + * to accommodate future growth of the database. + * This function should be called after mdbx_env_create() and before + * mdbx_env_open(). It may be called at later times if no transactions + * are active in this process. Note that the library does not check for + * this condition; the caller must ensure it explicitly. + * + * The new size takes effect immediately for the current process but + * will not be persisted to any others until a write transaction has been + * committed by the current process. Also, only mapsize increases are + * persisted into the environment. An ordering sketch is shown just below. 
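+ *
+ * An illustrative ordering sketch (the size, path and mode are arbitrary
+ * placeholders, not recommendations): the map size is configured between
+ * mdbx_env_create() and mdbx_env_open():
+ *
+ *   MDBX_env *env = NULL;
+ *   mdbx_env_create(&env);
+ *   mdbx_env_set_mapsize(env, (size_t)1 << 30);  // e.g. a 1 GiB map
+ *   mdbx_env_open(env, "./example-db", 0, 0664); // placeholder path/mode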
+ * + * If the mapsize is increased by another process, and data has grown + * beyond the range of the current mapsize, mdbx_txn_begin() will + * return MDBX_MAP_RESIZED. This function may be called with a size + * of zero to adopt the new size. + * + * Any attempt to set a size smaller than the space already consumed by the + * environment will be silently changed to the current size of the used space. + * + * [in] env An environment handle returned by mdbx_env_create() + * [in] size The size in bytes + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified, + * or the environment has an active write transaction. */ +LIBMDBX_API int mdbx_env_set_mapsize(MDBX_env *env, size_t size); +LIBMDBX_API int mdbx_env_set_geometry(MDBX_env *env, intptr_t size_lower, + intptr_t size_now, intptr_t size_upper, + intptr_t growth_step, + intptr_t shrink_threshold, + intptr_t pagesize); + +/* Set the maximum number of threads/reader slots for the environment. + * + * This defines the number of slots in the lock table that is used to track + * readers in the the environment. The default is 61. + * Starting a read-only transaction normally ties a lock table slot to the + * current thread until the environment closes or the thread exits. If + * MDBX_NOTLS is in use, mdbx_txn_begin() instead ties the slot to the + * MDBX_txn object until it or the MDBX_env object is destroyed. + * This function may only be called after mdbx_env_create() and before + * mdbx_env_open(). + * + * [in] env An environment handle returned by mdbx_env_create() + * [in] readers The maximum number of reader lock table slots + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified, + * or the environment is already open. */ +LIBMDBX_API int mdbx_env_set_maxreaders(MDBX_env *env, unsigned readers); + +/* Get the maximum number of threads/reader slots for the environment. + * + * [in] env An environment handle returned by mdbx_env_create() + * [out] readers Address of an integer to store the number of readers + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_env_get_maxreaders(MDBX_env *env, unsigned *readers); + +/* Set the maximum number of named databases for the environment. + * + * This function is only needed if multiple databases will be used in the + * environment. Simpler applications that use the environment as a single + * unnamed database can ignore this option. + * This function may only be called after mdbx_env_create() and before + * mdbx_env_open(). + * + * Currently a moderate number of slots are cheap but a huge number gets + * expensive: 7-120 words per transaction, and every mdbx_dbi_open() + * does a linear search of the opened slots. + * + * [in] env An environment handle returned by mdbx_env_create() + * [in] dbs The maximum number of databases + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified, + * or the environment is already open. */ +LIBMDBX_API int mdbx_env_set_maxdbs(MDBX_env *env, MDBX_dbi dbs); + +/* Get the maximum size of keys and MDBX_DUPSORT data we can write. + * + * [in] env An environment handle returned by mdbx_env_create() + * + * Returns The maximum size of a key we can write. 
*/ +LIBMDBX_API int mdbx_env_get_maxkeysize(MDBX_env *env); +LIBMDBX_API int mdbx_get_maxkeysize(size_t pagesize); + +/* Set application information associated with the MDBX_env. + * + * [in] env An environment handle returned by mdbx_env_create() + * [in] ctx An arbitrary pointer for whatever the application needs. + * + * Returns A non-zero error value on failure and 0 on success. */ +LIBMDBX_API int mdbx_env_set_userctx(MDBX_env *env, void *ctx); + +/* Get the application information associated with the MDBX_env. + * + * [in] env An environment handle returned by mdbx_env_create() + * Returns The pointer set by mdbx_env_set_userctx(). */ +LIBMDBX_API void *mdbx_env_get_userctx(MDBX_env *env); + +/* A callback function for most MDBX assert() failures, + * called before printing the message and aborting. + * + * [in] env An environment handle returned by mdbx_env_create(). + * [in] msg The assertion message, not including newline. */ +typedef void MDBX_assert_func(const MDBX_env *env, const char *msg, + const char *function, unsigned line); + +/* Set or reset the assert() callback of the environment. + * + * Disabled if libmdbx is buillt with MDBX_DEBUG=0. + * NOTE: This hack should become obsolete as mdbx's error handling matures. + * + * [in] env An environment handle returned by mdbx_env_create(). + * [in] func An MDBX_assert_func function, or 0. + * + * Returns A non-zero error value on failure and 0 on success. */ +LIBMDBX_API int mdbx_env_set_assert(MDBX_env *env, MDBX_assert_func *func); + +/* Create a transaction for use with the environment. + * + * The transaction handle may be discarded using mdbx_txn_abort() + * or mdbx_txn_commit(). + * NOTE: A transaction and its cursors must only be used by a single + * thread, and a thread may only have a single transaction at a time. + * If MDBX_NOTLS is in use, this does not apply to read-only transactions. + * NOTE: Cursors may not span transactions. + * + * [in] env An environment handle returned by mdbx_env_create() + * [in] parent If this parameter is non-NULL, the new transaction will be + * a nested transaction, with the transaction indicated by parent + * as its parent. Transactions may be nested to any level. + * A parent transaction and its cursors may not issue any other + * operations than mdbx_txn_commit and mdbx_txn_abort while it + * has active child transactions. + * [in] flags Special options for this transaction. This parameter + * must be set to 0 or by bitwise OR'ing together one or more + * of the values described here. + * + * - MDBX_RDONLY + * This transaction will not perform any write operations. + * + * [out] txn Address where the new MDBX_txn handle will be stored + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_PANIC - a fatal error occurred earlier and the environment + * must be shut down. + * - MDBX_MAP_RESIZED - another process wrote data beyond this MDBX_env's + * mapsize and this environment's map must be resized + * as well. See mdbx_env_set_mapsize(). + * - MDBX_READERS_FULL - a read-only transaction was requested and the reader + * lock table is full. See mdbx_env_set_maxreaders(). + * - MDBX_ENOMEM - out of memory. */ +LIBMDBX_API int mdbx_txn_begin(MDBX_env *env, MDBX_txn *parent, unsigned flags, + MDBX_txn **txn); + +/* Returns the transaction's MDBX_env + * + * [in] txn A transaction handle returned by mdbx_txn_begin() */ +LIBMDBX_API MDBX_env *mdbx_txn_env(MDBX_txn *txn); + +/* Return the transaction's ID. 
+ * + * This returns the identifier associated with this transaction. For a + * read-only transaction, this corresponds to the snapshot being read; + * concurrent readers will frequently have the same transaction ID. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * + * Returns A transaction ID, valid if input is an active transaction. */ +LIBMDBX_API uint64_t mdbx_txn_id(MDBX_txn *txn); + +/* Commit all the operations of a transaction into the database. + * + * The transaction handle is freed. It and its cursors must not be used + * again after this call, except with mdbx_cursor_renew(). + * + * A cursor must be closed explicitly always, before + * or after its transaction ends. It can be reused with + * mdbx_cursor_renew() before finally closing it. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified. + * - MDBX_ENOSPC - no more disk space. + * - MDBX_EIO - a low-level I/O error occurred while writing. + * - MDBX_ENOMEM - out of memory. */ +LIBMDBX_API int mdbx_txn_commit(MDBX_txn *txn); + +/* Abandon all the operations of the transaction instead of saving them. + * + * The transaction handle is freed. It and its cursors must not be used + * again after this call, except with mdbx_cursor_renew(). + * + * A cursor must be closed explicitly always, before or after its transaction + * ends. It can be reused with mdbx_cursor_renew() before finally closing it. + * + * [in] txn A transaction handle returned by mdbx_txn_begin(). */ +LIBMDBX_API int mdbx_txn_abort(MDBX_txn *txn); + +/* Reset a read-only transaction. + * + * Abort the transaction like mdbx_txn_abort(), but keep the transaction + * handle. Therefore mdbx_txn_renew() may reuse the handle. This saves + * allocation overhead if the process will start a new read-only transaction + * soon, and also locking overhead if MDBX_NOTLS is in use. The reader table + * lock is released, but the table slot stays tied to its thread or + * MDBX_txn. Use mdbx_txn_abort() to discard a reset handle, and to free + * its lock table slot if MDBX_NOTLS is in use. + * + * Cursors opened within the transaction must not be used + * again after this call, except with mdbx_cursor_renew(). + * + * Reader locks generally don't interfere with writers, but they keep old + * versions of database pages allocated. Thus they prevent the old pages + * from being reused when writers commit new data, and so under heavy load + * the database size may grow much more rapidly than otherwise. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() */ +LIBMDBX_API int mdbx_txn_reset(MDBX_txn *txn); + +/* Renew a read-only transaction. + * + * This acquires a new reader lock for a transaction handle that had been + * released by mdbx_txn_reset(). It must be called before a reset transaction + * may be used again. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_PANIC - a fatal error occurred earlier and the environment + * must be shut down. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_txn_renew(MDBX_txn *txn); + +/* Open a table in the environment. + * + * A table handle denotes the name and parameters of a table, independently + * of whether such a table exists. 
The table handle may be discarded by + * calling mdbx_dbi_close(). The old table handle is returned if the table + * was already open. The handle may only be closed once. + * + * The table handle will be private to the current transaction until + * the transaction is successfully committed. If the transaction is + * aborted the handle will be closed automatically. + * After a successful commit the handle will reside in the shared + * environment, and may be used by other transactions. + * + * This function must not be called from multiple concurrent + * transactions in the same process. A transaction that uses + * this function must finish (either commit or abort) before + * any other transaction in the process may use this function. + * + * To use named table (with name != NULL), mdbx_env_set_maxdbs() + * must be called before opening the environment. Table names are + * keys in the internal unnamed table, and may be read but not written. + * + * [in] txn transaction handle returned by mdbx_txn_begin() + * [in] name The name of the table to open. If only a single + * table is needed in the environment, this value may be NULL. + * [in] flags Special options for this table. This parameter must be set + * to 0 or by bitwise OR'ing together one or more of the values + * described here: + * - MDBX_REVERSEKEY + * Keys are strings to be compared in reverse order, from the end + * of the strings to the beginning. By default, Keys are treated as + * strings and compared from beginning to end. + * - MDBX_DUPSORT + * Duplicate keys may be used in the table. Or, from another point of + * view, keys may have multiple data items, stored in sorted order. By + * default keys must be unique and may have only a single data item. + * - MDBX_INTEGERKEY + * Keys are binary integers in native byte order, either uin32_t or + * uint64_t, and will be sorted as such. The keys must all be of the + * same size. + * - MDBX_DUPFIXED + * This flag may only be used in combination with MDBX_DUPSORT. This + * option tells the library that the data items for this database are + * all the same size, which allows further optimizations in storage and + * retrieval. When all data items are the same size, the MDBX_GET_MULTIPLE, + * MDBX_NEXT_MULTIPLE and MDBX_PREV_MULTIPLE cursor operations may be used + * to retrieve multiple items at once. + * - MDBX_INTEGERDUP + * This option specifies that duplicate data items are binary integers, + * similar to MDBX_INTEGERKEY keys. + * - MDBX_REVERSEDUP + * This option specifies that duplicate data items should be compared as + * strings in reverse order (the comparison is performed in the direction + * from the last byte to the first). + * - MDBX_CREATE + * Create the named database if it doesn't exist. This option is not + * allowed in a read-only transaction or a read-only environment. + * + * [out] dbi Address where the new MDBX_dbi handle will be stored + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_NOTFOUND - the specified database doesn't exist in the + * environment and MDBX_CREATE was not specified. + * - MDBX_DBS_FULL - too many databases have been opened. + * See mdbx_env_set_maxdbs(). */ +LIBMDBX_API int mdbx_dbi_open_ex(MDBX_txn *txn, const char *name, + unsigned flags, MDBX_dbi *dbi, + MDBX_cmp_func *keycmp, MDBX_cmp_func *datacmp); +LIBMDBX_API int mdbx_dbi_open(MDBX_txn *txn, const char *name, unsigned flags, + MDBX_dbi *dbi); + +/* Retrieve statistics for a database. 
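+ *
+ * For illustration only (a hedged sketch; txn and dbi are assumed to have
+ * been obtained via mdbx_txn_begin() and mdbx_dbi_open() as described above):
+ *
+ *   MDBX_stat st;
+ *   if (mdbx_dbi_stat(txn, dbi, &st, sizeof(st)) == MDBX_SUCCESS) {
+ *     // st.ms_entries holds the number of key/data pairs in the table
+ *   }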
+ * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [in] dbi A database handle returned by mdbx_dbi_open() + * [out] stat The address of an MDBX_stat structure where the statistics + * will be copied + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_dbi_stat(MDBX_txn *txn, MDBX_dbi dbi, MDBX_stat *stat, + size_t bytes); + +/* Retrieve the DB flags for a database handle. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [in] dbi A database handle returned by mdbx_dbi_open() + * [out] flags Address where the flags will be returned. + * [out] state Address where the state will be returned. + * + * Returns A non-zero error value on failure and 0 on success. */ +#define MDBX_TBL_DIRTY 0x01 /* DB was written in this txn */ +#define MDBX_TBL_STALE 0x02 /* Named-DB record is older than txnID */ +#define MDBX_TBL_NEW 0x04 /* Named-DB handle opened in this txn */ +LIBMDBX_API int mdbx_dbi_flags_ex(MDBX_txn *txn, MDBX_dbi dbi, unsigned *flags, + unsigned *state); +LIBMDBX_API int mdbx_dbi_flags(MDBX_txn *txn, MDBX_dbi dbi, unsigned *flags); + +/* Close a database handle. Normally unnecessary. + * + * Use with care: + * FIXME: This call is not mutex protected. Handles should only be closed by + * a single thread, and only if no other threads are going to reference + * the database handle or one of its cursors any further. Do not close + * a handle if an existing transaction has modified its database. + * Doing so can cause misbehavior from database corruption to errors + * like MDBX_BAD_VALSIZE (since the DB name is gone). + * + * Closing a database handle is not necessary, but lets mdbx_dbi_open() + * reuse the handle value. Usually it's better to set a bigger + * mdbx_env_set_maxdbs(), unless that value would be large. + * + * [in] env An environment handle returned by mdbx_env_create() + * [in] dbi A database handle returned by mdbx_dbi_open() + */ +LIBMDBX_API int mdbx_dbi_close(MDBX_env *env, MDBX_dbi dbi); + +/* Empty or delete+close a database. + * + * See mdbx_dbi_close() for restrictions about closing the DB handle. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [in] dbi A database handle returned by mdbx_dbi_open() + * [in] del 0 to empty the DB, 1 to delete it from the environment + * and close the DB handle. + * + * Returns A non-zero error value on failure and 0 on success. */ +LIBMDBX_API int mdbx_drop(MDBX_txn *txn, MDBX_dbi dbi, int del); + +/* Get items from a database. + * + * This function retrieves key/data pairs from the database. The address + * and length of the data associated with the specified key are returned + * in the structure to which data refers. + * If the database supports duplicate keys (MDBX_DUPSORT) then the + * first data item for the key will be returned. Retrieval of other + * items requires the use of mdbx_cursor_get(). + * + * NOTE: The memory pointed to by the returned values is owned by the + * database. The caller need not dispose of the memory, and may not + * modify it in any way. For values returned in a read-only transaction + * any modification attempts will cause a SIGSEGV. + * + * NOTE: Values returned from the database are valid only until a + * subsequent update operation, or the end of the transaction. 
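+ *
+ * A minimal read sketch (illustrative; env and dbi are assumed to be open
+ * already, and the key bytes are placeholders):
+ *
+ *   MDBX_txn *txn = NULL;
+ *   if (mdbx_txn_begin(env, NULL, MDBX_RDONLY, &txn) == MDBX_SUCCESS) {
+ *     MDBX_val key = { (void *)"alpha", 5 }, data;
+ *     if (mdbx_get(txn, dbi, &key, &data) == MDBX_SUCCESS) {
+ *       // data.iov_base / data.iov_len point into the database pages
+ *     }
+ *     mdbx_txn_abort(txn);  // read-only txn: abort releases the snapshot
+ *   }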
+ * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [in] dbi A database handle returned by mdbx_dbi_open() + * [in] key The key to search for in the database + * [in,out] data The data corresponding to the key + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_NOTFOUND - the key was not in the database. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_get(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, + MDBX_val *data); + +/* Store items into a database. + * + * This function stores key/data pairs in the database. The default behavior + * is to enter the new key/data pair, replacing any previously existing key + * if duplicates are disallowed, or adding a duplicate data item if + * duplicates are allowed (MDBX_DUPSORT). + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [in] dbi A database handle returned by mdbx_dbi_open() + * [in] key The key to store in the database + * [in,out] data The data to store + * [in] flags Special options for this operation. This parameter must be + * set to 0 or by bitwise OR'ing together one or more of the + * values described here. + * + * - MDBX_NODUPDATA + * Enter the new key/data pair only if it does not already appear + * in the database. This flag may only be specified if the database + * was opened with MDBX_DUPSORT. The function will return MDBX_KEYEXIST + * if the key/data pair already appears in the database. + * + * - MDBX_NOOVERWRITE + * Enter the new key/data pair only if the key does not already appear + * in the database. The function will return MDBX_KEYEXIST if the key + * already appears in the database, even if the database supports + * duplicates (MDBX_DUPSORT). The data parameter will be set to point + * to the existing item. + * + * - MDBX_CURRENT + * Update a single existing entry, but do not add new ones. The function + * will return MDBX_NOTFOUND if the given key does not exist in the + * database, or MDBX_EMULTIVAL if there are duplicates for the given key. + * + * - MDBX_RESERVE + * Reserve space for data of the given size, but don't copy the given + * data. Instead, return a pointer to the reserved space, which the + * caller can fill in later - before the next update operation or the + * transaction ends. This saves an extra memcpy if the data is being + * generated later. MDBX does nothing else with this memory; the caller + * is expected to modify all of the space requested. This flag must not + * be specified if the database was opened with MDBX_DUPSORT. + * + * - MDBX_APPEND + * Append the given key/data pair to the end of the database. This option + * allows fast bulk loading when keys are already known to be in the + * correct order. Loading unsorted keys with this flag will cause + * a MDBX_EKEYMISMATCH error. + * + * - MDBX_APPENDDUP + * As above, but for sorted dup data. + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_KEYEXIST + * - MDBX_MAP_FULL - the database is full, see mdbx_env_set_mapsize(). + * - MDBX_TXN_FULL - the transaction has too many dirty pages. + * - MDBX_EACCES - an attempt was made to write in a read-only transaction. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_put(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, + MDBX_val *data, unsigned flags); + +/* Delete items from a database. + * + * This function removes key/data pairs from the database; a short + * illustrative sketch follows below. 
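+ *
+ * For illustration (a hedged sketch; the key bytes are placeholders and a
+ * write transaction txn is assumed to be already begun):
+ *
+ *   MDBX_val key = { (void *)"alpha", 5 };
+ *   int rc = mdbx_del(txn, dbi, &key, NULL);  // NULL data: delete the key
+ *   // rc == MDBX_NOTFOUND when the key is absent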
+ * + * The data parameter is NOT ignored regardless the database does + * support sorted duplicate data items or not. If the data parameter + * is non-NULL only the matching data item will be deleted. + * + * This function will return MDBX_NOTFOUND if the specified key/data + * pair is not in the database. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [in] dbi A database handle returned by mdbx_dbi_open() + * [in] key The key to delete from the database + * [in] data The data to delete + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EACCES - an attempt was made to write in a read-only transaction. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_del(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, + MDBX_val *data); + +/* Create a cursor handle. + * + * A cursor is associated with a specific transaction and database. + * A cursor cannot be used when its database handle is closed. Nor + * when its transaction has ended, except with mdbx_cursor_renew(). + * It can be discarded with mdbx_cursor_close(). + * + * A cursor must be closed explicitly always, before + * or after its transaction ends. It can be reused with + * mdbx_cursor_renew() before finally closing it. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [in] dbi A database handle returned by mdbx_dbi_open() + * [out] cursor Address where the new MDBX_cursor handle will be stored + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_cursor_open(MDBX_txn *txn, MDBX_dbi dbi, + MDBX_cursor **cursor); + +/* Close a cursor handle. + * + * The cursor handle will be freed and must not be used again after this call. + * Its transaction must still be live if it is a write-transaction. + * + * [in] cursor A cursor handle returned by mdbx_cursor_open() */ +LIBMDBX_API void mdbx_cursor_close(MDBX_cursor *cursor); + +/* Renew a cursor handle. + * + * A cursor is associated with a specific transaction and database. + * Cursors that are only used in read-only transactions may be re-used, + * to avoid unnecessary malloc/free overhead. The cursor may be associated + * with a new read-only transaction, and referencing the same database handle + * as it was created with. + * + * This may be done whether the previous transaction is live or dead. + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [in] cursor A cursor handle returned by mdbx_cursor_open() + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_cursor_renew(MDBX_txn *txn, MDBX_cursor *cursor); + +/* Return the cursor's transaction handle. + * + * [in] cursor A cursor handle returned by mdbx_cursor_open() */ +LIBMDBX_API MDBX_txn *mdbx_cursor_txn(MDBX_cursor *cursor); + +/* Return the cursor's database handle. + * + * [in] cursor A cursor handle returned by mdbx_cursor_open() */ +LIBMDBX_API MDBX_dbi mdbx_cursor_dbi(MDBX_cursor *cursor); + +/* Retrieve by cursor. + * + * This function retrieves key/data pairs from the database. The address and + * length of the key are returned in the object to which key refers (except + * for the case of the MDBX_SET option, in which the key object is unchanged), + * and the address and length of the data are returned in the object to which + * data refers. 
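+ *
+ * A typical full-scan sketch (illustrative; txn and dbi are assumed open):
+ *
+ *   MDBX_cursor *cur = NULL;
+ *   if (mdbx_cursor_open(txn, dbi, &cur) == MDBX_SUCCESS) {
+ *     MDBX_val key, data;
+ *     int rc = mdbx_cursor_get(cur, &key, &data, MDBX_FIRST);
+ *     while (rc == MDBX_SUCCESS) {
+ *       // key/data describe the current pair
+ *       rc = mdbx_cursor_get(cur, &key, &data, MDBX_NEXT);
+ *     }
+ *     mdbx_cursor_close(cur);
+ *   }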
See mdbx_get() for restrictions on using the output values. + * + * [in] cursor A cursor handle returned by mdbx_cursor_open() + * [in,out] key The key for a retrieved item + * [in,out] data The data of a retrieved item + * [in] op A cursor operation MDBX_cursor_op + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_NOTFOUND - no matching key found. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_cursor_get(MDBX_cursor *cursor, MDBX_val *key, + MDBX_val *data, MDBX_cursor_op op); + +/* Store by cursor. + * + * This function stores key/data pairs into the database. The cursor is + * positioned at the new item, or on failure usually near it. + * + * [in] cursor A cursor handle returned by mdbx_cursor_open() + * [in] key The key operated on. + * [in] data The data operated on. + * [in] flags Options for this operation. This parameter + * must be set to 0 or one of the values described here: + * + * - MDBX_CURRENT + * Replace the item at the current cursor position. The key parameter + * must still be provided, and must match it, otherwise the function + * return MDBX_EKEYMISMATCH. + * + * NOTE: MDBX unlike LMDB allows you to change the size of the data and + * automatically handles reordering for sorted duplicates (MDBX_DUPSORT). + * + * - MDBX_NODUPDATA + * Enter the new key/data pair only if it does not already appear in the + * database. This flag may only be specified if the database was opened + * with MDBX_DUPSORT. The function will return MDBX_KEYEXIST if the + * key/data pair already appears in the database. + * + * - MDBX_NOOVERWRITE + * Enter the new key/data pair only if the key does not already appear + * in the database. The function will return MDBX_KEYEXIST if the key + * already appears in the database, even if the database supports + * duplicates (MDBX_DUPSORT). + * + * - MDBX_RESERVE + * Reserve space for data of the given size, but don't copy the given + * data. Instead, return a pointer to the reserved space, which the + * caller can fill in later - before the next update operation or the + * transaction ends. This saves an extra memcpy if the data is being + * generated later. This flag must not be specified if the database + * was opened with MDBX_DUPSORT. + * + * - MDBX_APPEND + * Append the given key/data pair to the end of the database. No key + * comparisons are performed. This option allows fast bulk loading when + * keys are already known to be in the correct order. Loading unsorted + * keys with this flag will cause a MDBX_KEYEXIST error. + * + * - MDBX_APPENDDUP + * As above, but for sorted dup data. + * + * - MDBX_MULTIPLE + * Store multiple contiguous data elements in a single request. This flag + * may only be specified if the database was opened with MDBX_DUPFIXED. + * The data argument must be an array of two MDBX_vals. The iov_len of the + * first MDBX_val must be the size of a single data element. The iov_base + * of the first MDBX_val must point to the beginning of the array of + * contiguous data elements. The iov_len of the second MDBX_val must be + * the count of the number of data elements to store. On return this + * field will be set to the count of the number of elements actually + * written. The iov_base of the second MDBX_val is unused. + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EKEYMISMATCH + * - MDBX_MAP_FULL - the database is full, see mdbx_env_set_mapsize(). 
+ * - MDBX_TXN_FULL - the transaction has too many dirty pages. + * - MDBX_EACCES - an attempt was made to write in a read-only transaction. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_cursor_put(MDBX_cursor *cursor, MDBX_val *key, + MDBX_val *data, unsigned flags); + +/* Delete current key/data pair + * + * This function deletes the key/data pair to which the cursor refers. + * + * [in] cursor A cursor handle returned by mdbx_cursor_open() + * [in] flags Options for this operation. This parameter must be set to 0 + * or one of the values described here. + * + * - MDBX_NODUPDATA + * Delete all of the data items for the current key. This flag may only + * be specified if the database was opened with MDBX_DUPSORT. + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EACCES - an attempt was made to write in a read-only transaction. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_cursor_del(MDBX_cursor *cursor, unsigned flags); + +/* Return count of duplicates for current key. + * + * This call is only valid on databases that support sorted duplicate data + * items MDBX_DUPSORT. + * + * [in] cursor A cursor handle returned by mdbx_cursor_open() + * [out] countp Address where the count will be stored + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EINVAL - cursor is not initialized, or an invalid parameter + * was specified. */ +LIBMDBX_API int mdbx_cursor_count(MDBX_cursor *cursor, size_t *countp); + +/* Compare two data items according to a particular database. + * + * This returns a comparison as if the two data items were keys in the + * specified database. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [in] dbi A database handle returned by mdbx_dbi_open() + * [in] a The first item to compare + * [in] b The second item to compare + * + * Returns < 0 if a < b, 0 if a == b, > 0 if a > b */ +LIBMDBX_API int mdbx_cmp(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *a, + const MDBX_val *b); + +/* Compare two data items according to a particular database. + * + * This returns a comparison as if the two items were data items of + * the specified database. The database must have the MDBX_DUPSORT flag. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [in] dbi A database handle returned by mdbx_dbi_open() + * [in] a The first item to compare + * [in] b The second item to compare + * + * Returns < 0 if a < b, 0 if a == b, > 0 if a > b */ +LIBMDBX_API int mdbx_dcmp(MDBX_txn *txn, MDBX_dbi dbi, const MDBX_val *a, + const MDBX_val *b); + +/* A callback function used to print a message from the library. + * + * [in] msg The string to be printed. + * [in] ctx An arbitrary context pointer for the callback. + * + * Returns < 0 on failure, >= 0 on success. */ +typedef int(MDBX_msg_func)(const char *msg, void *ctx); + +/* Dump the entries in the reader lock table. + * + * [in] env An environment handle returned by mdbx_env_create() + * [in] func A MDBX_msg_func function + * [in] ctx Anything the message function needs + * + * Returns < 0 on failure, >= 0 on success. */ +LIBMDBX_API int mdbx_reader_list(MDBX_env *env, MDBX_msg_func *func, void *ctx); + +/* Check for stale entries in the reader lock table. + * + * [in] env An environment handle returned by mdbx_env_create() + * [out] dead Number of stale slots that were cleared + * + * Returns 0 on success, non-zero on failure. 
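+ *
+ * For illustration (a hedged sketch, e.g. after an unclean shutdown of a
+ * previous process):
+ *
+ *   int dead = 0;
+ *   if (mdbx_reader_check(env, &dead) == MDBX_SUCCESS && dead > 0) {
+ *     // 'dead' stale reader slots were cleared
+ *   }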
*/ +LIBMDBX_API int mdbx_reader_check(MDBX_env *env, int *dead); + +LIBMDBX_API char *mdbx_dkey(const MDBX_val *key, char *const buf, + const size_t bufsize); + +LIBMDBX_API int mdbx_env_close_ex(MDBX_env *env, int dont_sync); + +/* Set a threshold to force flushing the data buffers to disk, even when the + * MDBX_NOSYNC, MDBX_NOMETASYNC and MDBX_MAPASYNC flags are set + * in the environment. + * + * Data is always written to disk when mdbx_txn_commit() is called, + * but the operating system may keep it buffered. MDBX always flushes + * the OS buffers upon commit as well, unless the environment was + * opened with MDBX_NOSYNC, or in part with MDBX_NOMETASYNC. + * + * The default is 0, which means no threshold is checked and no additional + * flush will be made. + * + * [in] env An environment handle returned by mdbx_env_create() + * [in] bytes The threshold, as a size in bytes of accumulated changes, at + * which a synchronous flush will be made. + * + * Returns A non-zero error value on failure and 0 on success. */ +LIBMDBX_API int mdbx_env_set_syncbytes(MDBX_env *env, size_t bytes); + +/* Returns the reading lag for the given transaction. + * + * Returns information to estimate how far the given read-only + * transaction is lagging behind the actual head. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [out] percent Percentage of page allocation in the database. + * + * Returns The number of transactions committed after the given one was + * started for reading, or -1 on failure. */ +LIBMDBX_API int mdbx_txn_straggler(MDBX_txn *txn, int *percent); + +/* A callback function for killing laggard readers, which may also simply + * wait for them. Called in case of an MDBX_MAP_FULL error. + * + * [in] env An environment handle returned by mdbx_env_create(). + * [in] pid pid of the reader process. + * [in] tid thread_id of the reader thread. + * [in] txn Transaction number on which stalled. + * [in] gap A lag from the last committed txn. + * [in] retry A retry number; less than zero notifies the end of the OOM-loop. + * + * Returns -1 on failure (reader is not killed), + * 0 should wait or retry, + * 1 drop reader txn-lock (reading-txn was aborted), + * >1 drop reader registration (reader process was killed). */ +typedef int(MDBX_oom_func)(MDBX_env *env, int pid, mdbx_tid_t tid, uint64_t txn, + unsigned gap, int retry); + +/* Set the OOM callback. + * + * The callback will be called only in the out-of-pages case, to kill laggard + * readers and so allow reclaiming of the FreeDB. + * + * [in] env An environment handle returned by mdbx_env_create(). + * [in] oomfunc An MDBX_oom_func function or NULL to disable. + * + * Returns A non-zero error value on failure and 0 on success. */ +LIBMDBX_API int mdbx_env_set_oomfunc(MDBX_env *env, MDBX_oom_func *oom_func); + +/* Get the current oom_func callback. + * + * The callback will be called only in the out-of-pages case, to kill laggard + * readers and so allow reclaiming of the FreeDB. + * + * [in] env An environment handle returned by mdbx_env_create(). + * + * Returns An MDBX_oom_func function, or NULL if disabled; a short callback + * sketch follows below. 
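+ *
+ * An illustrative callback sketch (the always-wait policy below is just an
+ * example, not a recommendation):
+ *
+ *   static int example_oom_func(MDBX_env *env, int pid, mdbx_tid_t tid,
+ *                               uint64_t txn, unsigned gap, int retry) {
+ *     (void)env; (void)pid; (void)tid; (void)txn; (void)gap; (void)retry;
+ *     return 0;  // 0 = "should wait or retry", per MDBX_oom_func above
+ *   }
+ *   // installed via mdbx_env_set_oomfunc(env, example_oom_func);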
*/ +LIBMDBX_API MDBX_oom_func *mdbx_env_get_oomfunc(MDBX_env *env); + +#define MDBX_DBG_ASSERT 1 +#define MDBX_DBG_PRINT 2 +#define MDBX_DBG_TRACE 4 +#define MDBX_DBG_EXTRA 8 +#define MDBX_DBG_AUDIT 16 +#define MDBX_DBG_JITTER 32 +#define MDBX_DBG_DUMP 64 + +typedef void MDBX_debug_func(int type, const char *function, int line, + const char *msg, va_list args); + +LIBMDBX_API int mdbx_setup_debug(int flags, MDBX_debug_func *logger); + +typedef int MDBX_pgvisitor_func(uint64_t pgno, unsigned pgnumber, void *ctx, + const char *dbi, const char *type, + size_t nentries, size_t payload_bytes, + size_t header_bytes, size_t unused_bytes); +LIBMDBX_API int mdbx_env_pgwalk(MDBX_txn *txn, MDBX_pgvisitor_func *visitor, + void *ctx); + +typedef struct mdbx_canary { uint64_t x, y, z, v; } mdbx_canary; + +LIBMDBX_API int mdbx_canary_put(MDBX_txn *txn, const mdbx_canary *canary); +LIBMDBX_API int mdbx_canary_get(MDBX_txn *txn, mdbx_canary *canary); + +/* Returns: + * - MDBX_RESULT_TRUE + * when no more data available or cursor not positioned; + * - MDBX_RESULT_FALSE + * when data available; + * - Otherwise the error code. */ +LIBMDBX_API int mdbx_cursor_eof(MDBX_cursor *mc); + +/* Returns: MDBX_RESULT_TRUE, MDBX_RESULT_FALSE or Error code. */ +LIBMDBX_API int mdbx_cursor_on_first(MDBX_cursor *mc); + +/* Returns: MDBX_RESULT_TRUE, MDBX_RESULT_FALSE or Error code. */ +LIBMDBX_API int mdbx_cursor_on_last(MDBX_cursor *mc); + +LIBMDBX_API int mdbx_replace(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, + MDBX_val *new_data, MDBX_val *old_data, + unsigned flags); +/* Same as mdbx_get(), but: + * 1) if values_count is not NULL, then returns the count + * of multi-values/duplicates for a given key. + * 2) updates the key for pointing to the actual key's data inside DB. */ +LIBMDBX_API int mdbx_get_ex(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, + MDBX_val *data, size_t *values_count); + +LIBMDBX_API int mdbx_is_dirty(const MDBX_txn *txn, const void *ptr); + +LIBMDBX_API int mdbx_dbi_sequence(MDBX_txn *txn, MDBX_dbi dbi, uint64_t *result, + uint64_t increment); + +/*----------------------------------------------------------------------------*/ +/* attribute support functions for Nexenta */ +typedef uint_fast64_t mdbx_attr_t; + +/* Store by cursor with attribute. + * + * This function stores key/data pairs into the database. The cursor is + * positioned at the new item, or on failure usually near it. + * + * NOTE: Internally based on MDBX_RESERVE feature, + * therefore doesn't support MDBX_DUPSORT. + * + * [in] cursor A cursor handle returned by mdbx_cursor_open() + * [in] key The key operated on. + * [in] data The data operated on. + * [in] attr The attribute. + * [in] flags Options for this operation. This parameter must be set to 0 + * or one of the values described here: + * + * - MDBX_CURRENT + * Replace the item at the current cursor position. The key parameter + * must still be provided, and must match it, otherwise the function + * return MDBX_EKEYMISMATCH. + * + * - MDBX_APPEND + * Append the given key/data pair to the end of the database. No key + * comparisons are performed. This option allows fast bulk loading when + * keys are already known to be in the correct order. Loading unsorted + * keys with this flag will cause a MDBX_KEYEXIST error. + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_EKEYMISMATCH + * - MDBX_MAP_FULL - the database is full, see mdbx_env_set_mapsize(). + * - MDBX_TXN_FULL - the transaction has too many dirty pages. 
+ * - MDBX_EACCES - an attempt was made to write in a read-only transaction. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_cursor_put_attr(MDBX_cursor *cursor, MDBX_val *key, + MDBX_val *data, mdbx_attr_t attr, + unsigned flags); + +/* Store items and attributes into a database. + * + * This function stores key/data pairs in the database. The default behavior + * is to enter the new key/data pair, replacing any previously existing key + * if duplicates are disallowed. + * + * NOTE: Internally based on MDBX_RESERVE feature, + * therefore doesn't support MDBX_DUPSORT. + * + * [in] txn A transaction handle returned by mdbx_txn_begin(). + * [in] dbi A database handle returned by mdbx_dbi_open(). + * [in] key The key to store in the database. + * [in] attr The attribute to store in the database. + * [in,out] data The data to store. + * [in] flags Special options for this operation. This parameter must be + * set to 0 or by bitwise OR'ing together one or more of the + * values described here: + * + * - MDBX_NOOVERWRITE + * Enter the new key/data pair only if the key does not already appear + * in the database. The function will return MDBX_KEYEXIST if the key + * already appears in the database. The data parameter will be set to + * point to the existing item. + * + * - MDBX_CURRENT + * Update an single existing entry, but not add new ones. The function + * will return MDBX_NOTFOUND if the given key not exist in the database. + * Or the MDBX_EMULTIVAL in case duplicates for the given key. + * + * - MDBX_APPEND + * Append the given key/data pair to the end of the database. This option + * allows fast bulk loading when keys are already known to be in the + * correct order. Loading unsorted keys with this flag will cause + * a MDBX_EKEYMISMATCH error. + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_KEYEXIST + * - MDBX_MAP_FULL - the database is full, see mdbx_env_set_mapsize(). + * - MDBX_TXN_FULL - the transaction has too many dirty pages. + * - MDBX_EACCES - an attempt was made to write in a read-only transaction. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_put_attr(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, + MDBX_val *data, mdbx_attr_t attr, unsigned flags); + +/* Set items attribute from a database. + * + * This function stores key/data pairs attribute to the database. + * + * NOTE: Internally based on MDBX_RESERVE feature, + * therefore doesn't support MDBX_DUPSORT. + * + * [in] txn A transaction handle returned by mdbx_txn_begin(). + * [in] dbi A database handle returned by mdbx_dbi_open(). + * [in] key The key to search for in the database. + * [in] data The data to be stored or NULL to save previous value. + * [in] attr The attribute to be stored. + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_NOTFOUND - the key-value pair was not in the database. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_set_attr(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, + MDBX_val *data, mdbx_attr_t attr); + +/* Get items attribute from a database cursor. + * + * This function retrieves key/data pairs from the database. The address and + * length of the key are returned in the object to which key refers (except + * for the case of the MDBX_SET option, in which the key object is unchanged), + * and the address and length of the data are returned in the object to which + * data refers. 
See mdbx_get() for restrictions on using the output values. + * + * [in] cursor A cursor handle returned by mdbx_cursor_open() + * [in,out] key The key for a retrieved item + * [in,out] data The data of a retrieved item + * [in] op A cursor operation MDBX_cursor_op + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_NOTFOUND - no matching key found. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_cursor_get_attr(MDBX_cursor *mc, MDBX_val *key, + MDBX_val *data, mdbx_attr_t *attrptr, + MDBX_cursor_op op); + +/* Get items attribute from a database. + * + * This function retrieves key/data pairs from the database. The address + * and length of the data associated with the specified key are returned + * in the structure to which data refers. + * If the database supports duplicate keys (MDBX_DUPSORT) then the + * first data item for the key will be returned. Retrieval of other + * items requires the use of mdbx_cursor_get(). + * + * NOTE: The memory pointed to by the returned values is owned by the + * database. The caller need not dispose of the memory, and may not + * modify it in any way. For values returned in a read-only transaction + * any modification attempts will cause a SIGSEGV. + * + * NOTE: Values returned from the database are valid only until a + * subsequent update operation, or the end of the transaction. + * + * [in] txn A transaction handle returned by mdbx_txn_begin() + * [in] dbi A database handle returned by mdbx_dbi_open() + * [in] key The key to search for in the database + * [in,out] data The data corresponding to the key + * + * Returns A non-zero error value on failure and 0 on success, some + * possible errors are: + * - MDBX_NOTFOUND - the key was not in the database. + * - MDBX_EINVAL - an invalid parameter was specified. */ +LIBMDBX_API int mdbx_get_attr(MDBX_txn *txn, MDBX_dbi dbi, MDBX_val *key, + MDBX_val *data, mdbx_attr_t *attrptr); + +#ifdef __cplusplus +} +#endif + +#endif /* LIBMDBX_H */ diff --git a/plugins/Dbx_mdb/src/mdbx/osal.c b/plugins/Dbx_mdb/src/mdbx/osal.c new file mode 100644 index 0000000000..d9026cfadf --- /dev/null +++ b/plugins/Dbx_mdb/src/mdbx/osal.c @@ -0,0 +1,1002 @@ +/* https://en.wikipedia.org/wiki/Operating_system_abstraction_layer */ + +/* + * Copyright 2015-2017 Leonid Yuriev <leo@yuriev.ru> + * and other libmdbx authors: please see AUTHORS file. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * <http://www.OpenLDAP.org/license.html>. + */ + +#include "./bits.h" + +#if defined(_WIN32) || defined(_WIN64) +#include <winternl.h> + +static int waitstatus2errcode(DWORD result) { + switch (result) { + case WAIT_OBJECT_0: + return MDBX_SUCCESS; + case WAIT_FAILED: + return GetLastError(); + case WAIT_ABANDONED: + return ERROR_ABANDONED_WAIT_0; + case WAIT_IO_COMPLETION: + return ERROR_USER_APC; + case WAIT_TIMEOUT: + return ERROR_TIMEOUT; + default: + return ERROR_UNHANDLED_ERROR; + } +} + +/* Map a result from an NTAPI call to WIN32 error code. */ +static int ntstatus2errcode(NTSTATUS status) { + DWORD dummy; + OVERLAPPED ov; + memset(&ov, 0, sizeof(ov)); + ov.Internal = status; + return GetOverlappedResult(NULL, &ov, &dummy, FALSE) ? 
MDBX_SUCCESS + : GetLastError(); +} + +/* We use native NT APIs to setup the memory map, so that we can + * let the DB file grow incrementally instead of always preallocating + * the full size. These APIs are defined in <wdm.h> and <ntifs.h> + * but those headers are meant for driver-level development and + * conflict with the regular user-level headers, so we explicitly + * declare them here. Using these APIs also means we must link to + * ntdll.dll, which is not linked by default in user code. */ +#pragma comment(lib, "ntdll.lib") + +extern NTSTATUS NTAPI NtCreateSection( + OUT PHANDLE SectionHandle, IN ACCESS_MASK DesiredAccess, + IN OPTIONAL POBJECT_ATTRIBUTES ObjectAttributes, + IN OPTIONAL PLARGE_INTEGER MaximumSize, IN ULONG SectionPageProtection, + IN ULONG AllocationAttributes, IN OPTIONAL HANDLE FileHandle); + +extern NTSTATUS NTAPI NtExtendSection(IN HANDLE SectionHandle, + IN PLARGE_INTEGER NewSectionSize); + +typedef enum _SECTION_INHERIT { ViewShare = 1, ViewUnmap = 2 } SECTION_INHERIT; + +extern NTSTATUS NTAPI NtMapViewOfSection( + IN HANDLE SectionHandle, IN HANDLE ProcessHandle, IN OUT PVOID *BaseAddress, + IN ULONG_PTR ZeroBits, IN SIZE_T CommitSize, + IN OUT OPTIONAL PLARGE_INTEGER SectionOffset, IN OUT PSIZE_T ViewSize, + IN SECTION_INHERIT InheritDisposition, IN ULONG AllocationType, + IN ULONG Win32Protect); + +extern NTSTATUS NTAPI NtUnmapViewOfSection(IN HANDLE ProcessHandle, + IN OPTIONAL PVOID BaseAddress); + +extern NTSTATUS NTAPI NtClose(HANDLE Handle); + +extern NTSTATUS NTAPI NtAllocateVirtualMemory( + IN HANDLE ProcessHandle, IN OUT PVOID *BaseAddress, IN ULONG ZeroBits, + IN OUT PULONG RegionSize, IN ULONG AllocationType, IN ULONG Protect); + +extern NTSTATUS NTAPI NtFreeVirtualMemory(IN HANDLE ProcessHandle, + IN PVOID *BaseAddress, + IN OUT PULONG RegionSize, + IN ULONG FreeType); + +#ifndef FILE_PROVIDER_CURRENT_VERSION +typedef struct _FILE_PROVIDER_EXTERNAL_INFO_V1 { + ULONG Version; + ULONG Algorithm; + ULONG Flags; +} FILE_PROVIDER_EXTERNAL_INFO_V1, *PFILE_PROVIDER_EXTERNAL_INFO_V1; +#endif + +#ifndef STATUS_OBJECT_NOT_EXTERNALLY_BACKED +#define STATUS_OBJECT_NOT_EXTERNALLY_BACKED ((NTSTATUS)0xC000046DL) +#endif +#ifndef STATUS_INVALID_DEVICE_REQUEST +#define STATUS_INVALID_DEVICE_REQUEST ((NTSTATUS)0xC0000010L) +#endif + +extern NTSTATUS +NtFsControlFile(IN HANDLE FileHandle, IN OUT HANDLE Event, + IN OUT PVOID /* PIO_APC_ROUTINE */ ApcRoutine, + IN OUT PVOID ApcContext, OUT PIO_STATUS_BLOCK IoStatusBlock, + IN ULONG FsControlCode, IN OUT PVOID InputBuffer, + IN ULONG InputBufferLength, OUT OPTIONAL PVOID OutputBuffer, + IN ULONG OutputBufferLength); + +#endif /* _WIN32 || _WIN64 */ + +/*----------------------------------------------------------------------------*/ + +#ifndef _MSC_VER +/* Prototype should match libc runtime. 
ISO POSIX (2003) & LSB 3.1 */ +__nothrow __noreturn void __assert_fail(const char *assertion, const char *file, + unsigned line, const char *function); +#else +__extern_C __declspec(dllimport) void __cdecl _assert(char const *message, + char const *filename, + unsigned line); +#endif /* _MSC_VER */ + +#ifndef mdbx_assert_fail +void __cold mdbx_assert_fail(const MDBX_env *env, const char *msg, + const char *func, int line) { +#if MDBX_DEBUG + if (env && env->me_assert_func) { + env->me_assert_func(env, msg, func, line); + return; + } +#else + (void)env; +#endif /* MDBX_DEBUG */ + + if (mdbx_debug_logger) + mdbx_debug_log(MDBX_DBG_ASSERT, func, line, "assert: %s\n", msg); +#ifndef _MSC_VER + __assert_fail(msg, "mdbx", line, func); +#else + _assert(msg, func, line); +#endif /* _MSC_VER */ +} +#endif /* mdbx_assert_fail */ + +__cold void mdbx_panic(const char *fmt, ...) { + va_list ap; + va_start(ap, fmt); +#ifdef _MSC_VER + if (IsDebuggerPresent()) { + OutputDebugString("\r\n" FIXME "\r\n"); + FatalExit(ERROR_UNHANDLED_ERROR); + } +#elif _XOPEN_SOURCE >= 700 || _POSIX_C_SOURCE >= 200809L || \ + (__GLIBC_PREREQ(1, 0) && !__GLIBC_PREREQ(2, 10) && defined(_GNU_SOURCE)) + vdprintf(STDERR_FILENO, fmt, ap); +#else +#error FIXME +#endif + va_end(ap); + abort(); +} + +/*----------------------------------------------------------------------------*/ + +#ifndef mdbx_asprintf +int mdbx_asprintf(char **strp, const char *fmt, ...) { + va_list ap, ones; + + va_start(ap, fmt); + va_copy(ones, ap); +#ifdef _MSC_VER + int needed = _vscprintf(fmt, ap); +#elif defined(vsnprintf) || defined(_BSD_SOURCE) || _XOPEN_SOURCE >= 500 || \ + defined(_ISOC99_SOURCE) || _POSIX_C_SOURCE >= 200112L + int needed = vsnprintf(nullptr, 0, fmt, ap); +#else +#error FIXME +#endif + va_end(ap); + + if (unlikely(needed < 0 || needed >= INT_MAX)) { + *strp = nullptr; + va_end(ones); + return needed; + } + + *strp = malloc(needed + 1); + if (unlikely(*strp == nullptr)) { + va_end(ones); + SetLastError(MDBX_ENOMEM); + return -1; + } + +#if defined(vsnprintf) || defined(_BSD_SOURCE) || _XOPEN_SOURCE >= 500 || \ + defined(_ISOC99_SOURCE) || _POSIX_C_SOURCE >= 200112L + int actual = vsnprintf(*strp, needed + 1, fmt, ones); +#else +#error FIXME +#endif + va_end(ones); + + assert(actual == needed); + if (unlikely(actual < 0)) { + free(*strp); + *strp = nullptr; + } + return actual; +} +#endif /* mdbx_asprintf */ + +#ifndef mdbx_memalign_alloc +int mdbx_memalign_alloc(size_t alignment, size_t bytes, void **result) { +#if _MSC_VER + *result = _aligned_malloc(bytes, alignment); + return *result ? MDBX_SUCCESS : MDBX_ENOMEM /* ERROR_OUTOFMEMORY */; +#elif __GLIBC_PREREQ(2, 16) || __STDC_VERSION__ >= 201112L + *result = memalign(alignment, bytes); + return *result ? 
MDBX_SUCCESS : errno; +#elif _POSIX_VERSION >= 200112L + *result = nullptr; + return posix_memalign(result, alignment, bytes); +#else +#error FIXME +#endif +} +#endif /* mdbx_memalign_alloc */ + +#ifndef mdbx_memalign_free +void mdbx_memalign_free(void *ptr) { +#if _MSC_VER + _aligned_free(ptr); +#else + free(ptr); +#endif +} +#endif /* mdbx_memalign_free */ + +/*----------------------------------------------------------------------------*/ + +int mdbx_condmutex_init(mdbx_condmutex_t *condmutex) { +#if defined(_WIN32) || defined(_WIN64) + int rc = MDBX_SUCCESS; + condmutex->event = NULL; + condmutex->mutex = CreateMutex(NULL, FALSE, NULL); + if (!condmutex->mutex) + return GetLastError(); + + condmutex->event = CreateEvent(NULL, FALSE, FALSE, NULL); + if (!condmutex->event) { + rc = GetLastError(); + (void)CloseHandle(condmutex->mutex); + condmutex->mutex = NULL; + } + return rc; +#else + memset(condmutex, 0, sizeof(mdbx_condmutex_t)); + int rc = pthread_mutex_init(&condmutex->mutex, NULL); + if (rc == 0) { + rc = pthread_cond_init(&condmutex->cond, NULL); + if (rc != 0) + (void)pthread_mutex_destroy(&condmutex->mutex); + } + return rc; +#endif +} + +static bool is_allzeros(const void *ptr, size_t bytes) { + const uint8_t *u8 = ptr; + for (size_t i = 0; i < bytes; ++i) + if (u8[i] != 0) + return false; + return true; +} + +int mdbx_condmutex_destroy(mdbx_condmutex_t *condmutex) { + int rc = MDBX_EINVAL; +#if defined(_WIN32) || defined(_WIN64) + if (condmutex->event) { + rc = CloseHandle(condmutex->event) ? MDBX_SUCCESS : GetLastError(); + if (rc == MDBX_SUCCESS) + condmutex->event = NULL; + } + if (condmutex->mutex) { + rc = CloseHandle(condmutex->mutex) ? MDBX_SUCCESS : GetLastError(); + if (rc == MDBX_SUCCESS) + condmutex->mutex = NULL; + } +#else + if (!is_allzeros(&condmutex->cond, sizeof(condmutex->cond))) { + rc = pthread_cond_destroy(&condmutex->cond); + if (rc == 0) + memset(&condmutex->cond, 0, sizeof(condmutex->cond)); + } + if (!is_allzeros(&condmutex->mutex, sizeof(condmutex->mutex))) { + rc = pthread_mutex_destroy(&condmutex->mutex); + if (rc == 0) + memset(&condmutex->mutex, 0, sizeof(condmutex->mutex)); + } +#endif + return rc; +} + +int mdbx_condmutex_lock(mdbx_condmutex_t *condmutex) { +#if defined(_WIN32) || defined(_WIN64) + DWORD code = WaitForSingleObject(condmutex->mutex, INFINITE); + return waitstatus2errcode(code); +#else + return pthread_mutex_lock(&condmutex->mutex); +#endif +} + +int mdbx_condmutex_unlock(mdbx_condmutex_t *condmutex) { +#if defined(_WIN32) || defined(_WIN64) + return ReleaseMutex(condmutex->mutex) ? MDBX_SUCCESS : GetLastError(); +#else + return pthread_mutex_unlock(&condmutex->mutex); +#endif +} + +int mdbx_condmutex_signal(mdbx_condmutex_t *condmutex) { +#if defined(_WIN32) || defined(_WIN64) + return SetEvent(condmutex->event) ? 
MDBX_SUCCESS : GetLastError(); +#else + return pthread_cond_signal(&condmutex->cond); +#endif +} + +int mdbx_condmutex_wait(mdbx_condmutex_t *condmutex) { +#if defined(_WIN32) || defined(_WIN64) + DWORD code = + SignalObjectAndWait(condmutex->mutex, condmutex->event, INFINITE, FALSE); + if (code == WAIT_OBJECT_0) + code = WaitForSingleObject(condmutex->mutex, INFINITE); + return waitstatus2errcode(code); +#else + return pthread_cond_wait(&condmutex->cond, &condmutex->mutex); +#endif +} + +/*----------------------------------------------------------------------------*/ + +int mdbx_fastmutex_init(mdbx_fastmutex_t *fastmutex) { +#if defined(_WIN32) || defined(_WIN64) + InitializeCriticalSection(fastmutex); + return MDBX_SUCCESS; +#else + return pthread_mutex_init(fastmutex, NULL); +#endif +} + +int mdbx_fastmutex_destroy(mdbx_fastmutex_t *fastmutex) { +#if defined(_WIN32) || defined(_WIN64) + DeleteCriticalSection(fastmutex); + return MDBX_SUCCESS; +#else + return pthread_mutex_destroy(fastmutex); +#endif +} + +int mdbx_fastmutex_acquire(mdbx_fastmutex_t *fastmutex) { +#if defined(_WIN32) || defined(_WIN64) + EnterCriticalSection(fastmutex); + return MDBX_SUCCESS; +#else + return pthread_mutex_lock(fastmutex); +#endif +} + +int mdbx_fastmutex_release(mdbx_fastmutex_t *fastmutex) { +#if defined(_WIN32) || defined(_WIN64) + LeaveCriticalSection(fastmutex); + return MDBX_SUCCESS; +#else + return pthread_mutex_unlock(fastmutex); +#endif +} + +/*----------------------------------------------------------------------------*/ + +int mdbx_openfile(const char *pathname, int flags, mode_t mode, + mdbx_filehandle_t *fd) { + *fd = INVALID_HANDLE_VALUE; +#if defined(_WIN32) || defined(_WIN64) + (void)mode; + + DWORD DesiredAccess; + DWORD ShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE; + DWORD FlagsAndAttributes = FILE_ATTRIBUTE_NORMAL; + switch (flags & (O_RDONLY | O_WRONLY | O_RDWR)) { + default: + return ERROR_INVALID_PARAMETER; + case O_RDONLY: + DesiredAccess = GENERIC_READ; + break; + case O_WRONLY: /* assume for MDBX_env_copy() and friends output */ + DesiredAccess = GENERIC_WRITE; + ShareMode = 0; + FlagsAndAttributes |= FILE_FLAG_NO_BUFFERING | FILE_FLAG_WRITE_THROUGH; + break; + case O_RDWR: + DesiredAccess = GENERIC_READ | GENERIC_WRITE; + break; + } + + DWORD CreationDisposition; + switch (flags & (O_EXCL | O_CREAT)) { + default: + return ERROR_INVALID_PARAMETER; + case 0: + CreationDisposition = OPEN_EXISTING; + break; + case O_EXCL | O_CREAT: + CreationDisposition = CREATE_NEW; + FlagsAndAttributes |= FILE_ATTRIBUTE_NOT_CONTENT_INDEXED; + break; + case O_CREAT: + CreationDisposition = OPEN_ALWAYS; + FlagsAndAttributes |= FILE_ATTRIBUTE_NOT_CONTENT_INDEXED; + break; + } + + *fd = CreateFileA(pathname, DesiredAccess, ShareMode, NULL, + CreationDisposition, FlagsAndAttributes, NULL); + + if (*fd == INVALID_HANDLE_VALUE) + return GetLastError(); + if ((flags & O_CREAT) && GetLastError() != ERROR_ALREADY_EXISTS) { + /* set FILE_ATTRIBUTE_NOT_CONTENT_INDEXED for new file */ + DWORD FileAttributes = GetFileAttributesA(pathname); + if (FileAttributes == INVALID_FILE_ATTRIBUTES || + !SetFileAttributesA(pathname, FileAttributes | + FILE_ATTRIBUTE_NOT_CONTENT_INDEXED)) { + int rc = GetLastError(); + CloseHandle(*fd); + *fd = INVALID_HANDLE_VALUE; + return rc; + } + } +#else + +#ifdef O_CLOEXEC + flags |= O_CLOEXEC; +#endif + *fd = open(pathname, flags, mode); + if (*fd < 0) + return errno; +#if defined(FD_CLOEXEC) && defined(F_GETFD) + flags = fcntl(*fd, F_GETFD); + if (flags >= 0) + (void)fcntl(*fd, 
F_SETFD, flags | FD_CLOEXEC); +#endif +#endif + return MDBX_SUCCESS; +} + +int mdbx_closefile(mdbx_filehandle_t fd) { +#if defined(_WIN32) || defined(_WIN64) + return CloseHandle(fd) ? MDBX_SUCCESS : GetLastError(); +#else + return (close(fd) == 0) ? MDBX_SUCCESS : errno; +#endif +} + +int mdbx_pread(mdbx_filehandle_t fd, void *buf, size_t bytes, uint64_t offset) { + if (bytes > MAX_WRITE) + return MDBX_EINVAL; +#if defined(_WIN32) || defined(_WIN64) + + OVERLAPPED ov; + ov.hEvent = 0; + ov.Offset = (DWORD)offset; + ov.OffsetHigh = HIGH_DWORD(offset); + + DWORD read = 0; + if (unlikely(!ReadFile(fd, buf, (DWORD)bytes, &read, &ov))) { + int rc = GetLastError(); + return (rc == MDBX_SUCCESS) ? /* paranoia */ ERROR_READ_FAULT : rc; + } +#else + STATIC_ASSERT_MSG(sizeof(off_t) >= sizeof(size_t), + "libmdbx requires 64-bit file I/O on 64-bit systems"); + intptr_t read = pread(fd, buf, bytes, offset); + if (read < 0) { + int rc = errno; + return (rc == MDBX_SUCCESS) ? /* paranoia */ MDBX_EIO : rc; + } +#endif + return (bytes == (size_t)read) ? MDBX_SUCCESS : MDBX_ENODATA; +} + +int mdbx_pwrite(mdbx_filehandle_t fd, const void *buf, size_t bytes, + uint64_t offset) { +#if defined(_WIN32) || defined(_WIN64) + if (bytes > MAX_WRITE) + return ERROR_INVALID_PARAMETER; + + OVERLAPPED ov; + ov.hEvent = 0; + ov.Offset = (DWORD)offset; + ov.OffsetHigh = HIGH_DWORD(offset); + + DWORD written; + if (likely(WriteFile(fd, buf, (DWORD)bytes, &written, &ov))) + return (bytes == written) ? MDBX_SUCCESS : MDBX_EIO /* ERROR_WRITE_FAULT */; + return GetLastError(); +#else + int rc; + intptr_t written; + do { + STATIC_ASSERT_MSG(sizeof(off_t) >= sizeof(size_t), + "libmdbx requires 64-bit file I/O on 64-bit systems"); + written = pwrite(fd, buf, bytes, offset); + if (likely(bytes == (size_t)written)) + return MDBX_SUCCESS; + rc = errno; + } while (rc == EINTR); + return (written < 0) ? rc : MDBX_EIO /* Use which error code (ENOSPC)? */; +#endif +} + +int mdbx_pwritev(mdbx_filehandle_t fd, struct iovec *iov, int iovcnt, + uint64_t offset, size_t expected_written) { +#if defined(_WIN32) || defined(_WIN64) + size_t written = 0; + for (int i = 0; i < iovcnt; ++i) { + int rc = mdbx_pwrite(fd, iov[i].iov_base, iov[i].iov_len, offset); + if (unlikely(rc != MDBX_SUCCESS)) + return rc; + written += iov[i].iov_len; + offset += iov[i].iov_len; + } + return (expected_written == written) ? MDBX_SUCCESS + : MDBX_EIO /* ERROR_WRITE_FAULT */; +#else + int rc; + intptr_t written; + do { + STATIC_ASSERT_MSG(sizeof(off_t) >= sizeof(size_t), + "libmdbx requires 64-bit file I/O on 64-bit systems"); + written = pwritev(fd, iov, iovcnt, offset); + if (likely(expected_written == (size_t)written)) + return MDBX_SUCCESS; + rc = errno; + } while (rc == EINTR); + return (written < 0) ? rc : MDBX_EIO /* Use which error code? */; +#endif +} + +int mdbx_write(mdbx_filehandle_t fd, const void *buf, size_t bytes) { +#ifdef SIGPIPE + sigset_t set, old; + sigemptyset(&set); + sigaddset(&set, SIGPIPE); + int rc = pthread_sigmask(SIG_BLOCK, &set, &old); + if (rc != 0) + return rc; +#endif + + const char *ptr = buf; + for (;;) { + size_t chunk = (MAX_WRITE < bytes) ? 
MAX_WRITE : bytes; +#if defined(_WIN32) || defined(_WIN64) + DWORD written; + if (unlikely(!WriteFile(fd, ptr, (DWORD)chunk, &written, NULL))) + return GetLastError(); +#else + intptr_t written = write(fd, ptr, chunk); + if (written < 0) { + int rc = errno; +#ifdef SIGPIPE + if (rc == EPIPE) { + /* Collect the pending SIGPIPE, otherwise at least OS X + * gives it to the process on thread-exit (ITS#8504). */ + int tmp; + sigwait(&set, &tmp); + written = 0; + continue; + } + pthread_sigmask(SIG_SETMASK, &old, NULL); +#endif + return rc; + } +#endif + if (likely(bytes == (size_t)written)) { +#ifdef SIGPIPE + pthread_sigmask(SIG_SETMASK, &old, NULL); +#endif + return MDBX_SUCCESS; + } + ptr += written; + bytes -= written; + } +} + +int mdbx_filesync(mdbx_filehandle_t fd, bool filesize_changed) { +#if defined(_WIN32) || defined(_WIN64) + (void)filesize_changed; + return FlushFileBuffers(fd) ? MDBX_SUCCESS : GetLastError(); +#elif __GLIBC_PREREQ(2, 16) || _BSD_SOURCE || _XOPEN_SOURCE || \ + (__GLIBC_PREREQ(2, 8) && _POSIX_C_SOURCE >= 200112L) + for (;;) { +/* LY: It is no reason to use fdatasync() here, even in case + * no such bug in a kernel. Because "no-bug" mean that a kernel + * internally do nearly the same, e.g. fdatasync() == fsync() + * when no-kernel-bug and file size was changed. + * + * So, this code is always safe and without appreciable + * performance degradation. + * + * For more info about of a corresponding fdatasync() bug + * see http://www.spinics.net/lists/linux-ext4/msg33714.html */ +#if _POSIX_C_SOURCE >= 199309L || _XOPEN_SOURCE >= 500 || \ + defined(_POSIX_SYNCHRONIZED_IO) + if (!filesize_changed && fdatasync(fd) == 0) + return MDBX_SUCCESS; +#else + (void)filesize_changed; +#endif + if (fsync(fd) == 0) + return MDBX_SUCCESS; + int rc = errno; + if (rc != EINTR) + return rc; + } +#else +#error FIXME +#endif +} + +int mdbx_filesize_sync(mdbx_filehandle_t fd) { +#if defined(_WIN32) || defined(_WIN64) + (void)fd; + /* Nothing on Windows (i.e. newer 100% steady) */ + return MDBX_SUCCESS; +#else + for (;;) { + if (fsync(fd) == 0) + return MDBX_SUCCESS; + int rc = errno; + if (rc != EINTR) + return rc; + } +#endif +} + +int mdbx_filesize(mdbx_filehandle_t fd, uint64_t *length) { +#if defined(_WIN32) || defined(_WIN64) + BY_HANDLE_FILE_INFORMATION info; + if (!GetFileInformationByHandle(fd, &info)) + return GetLastError(); + *length = info.nFileSizeLow | (uint64_t)info.nFileSizeHigh << 32; +#else + struct stat st; + + STATIC_ASSERT_MSG(sizeof(off_t) <= sizeof(uint64_t), + "libmdbx requires 64-bit file I/O on 64-bit systems"); + if (fstat(fd, &st)) + return errno; + + *length = st.st_size; +#endif + return MDBX_SUCCESS; +} + +int mdbx_ftruncate(mdbx_filehandle_t fd, uint64_t length) { +#if defined(_WIN32) || defined(_WIN64) + LARGE_INTEGER li; + li.QuadPart = length; + return (SetFilePointerEx(fd, li, NULL, FILE_BEGIN) && SetEndOfFile(fd)) + ? MDBX_SUCCESS + : GetLastError(); +#else + STATIC_ASSERT_MSG(sizeof(off_t) >= sizeof(size_t), + "libmdbx requires 64-bit file I/O on 64-bit systems"); + return ftruncate(fd, length) == 0 ? MDBX_SUCCESS : errno; +#endif +} + +/*----------------------------------------------------------------------------*/ + +int mdbx_thread_key_create(mdbx_thread_key_t *key) { +#if defined(_WIN32) || defined(_WIN64) + *key = TlsAlloc(); + return (*key != TLS_OUT_OF_INDEXES) ? 
MDBX_SUCCESS : GetLastError(); +#else + return pthread_key_create(key, mdbx_rthc_dtor); +#endif +} + +void mdbx_thread_key_delete(mdbx_thread_key_t key) { +#if defined(_WIN32) || defined(_WIN64) + mdbx_ensure(NULL, TlsFree(key)); +#else + mdbx_ensure(NULL, pthread_key_delete(key) == 0); +#endif +} + +void *mdbx_thread_rthc_get(mdbx_thread_key_t key) { +#if defined(_WIN32) || defined(_WIN64) + return TlsGetValue(key); +#else + return pthread_getspecific(key); +#endif +} + +void mdbx_thread_rthc_set(mdbx_thread_key_t key, const void *value) { +#if defined(_WIN32) || defined(_WIN64) + mdbx_ensure(NULL, TlsSetValue(key, (void *)value)); +#else + mdbx_ensure(NULL, pthread_setspecific(key, value) == 0); +#endif +} + +int mdbx_thread_create(mdbx_thread_t *thread, + THREAD_RESULT(THREAD_CALL *start_routine)(void *), + void *arg) { +#if defined(_WIN32) || defined(_WIN64) + *thread = CreateThread(NULL, 0, start_routine, arg, 0, NULL); + return *thread ? MDBX_SUCCESS : GetLastError(); +#else + return pthread_create(thread, NULL, start_routine, arg); +#endif +} + +int mdbx_thread_join(mdbx_thread_t thread) { +#if defined(_WIN32) || defined(_WIN64) + DWORD code = WaitForSingleObject(thread, INFINITE); + return waitstatus2errcode(code); +#else + void *unused_retval = &unused_retval; + return pthread_join(thread, &unused_retval); +#endif +} + +/*----------------------------------------------------------------------------*/ + +int mdbx_msync(mdbx_mmap_t *map, size_t offset, size_t length, int async) { + uint8_t *ptr = (uint8_t *)map->address + offset; +#if defined(_WIN32) || defined(_WIN64) + if (FlushViewOfFile(ptr, length) && (async || FlushFileBuffers(map->fd))) + return MDBX_SUCCESS; + return GetLastError(); +#else + const int mode = async ? MS_ASYNC : MS_SYNC; + return (msync(ptr, length, mode) == 0) ? MDBX_SUCCESS : errno; +#endif +} + +int mdbx_mmap(int flags, mdbx_mmap_t *map, size_t must, size_t limit) { + assert(must <= limit); +#if defined(_WIN32) || defined(_WIN64) + map->length = 0; + map->current = 0; + map->section = NULL; + map->address = nullptr; + + if (GetFileType(map->fd) != FILE_TYPE_DISK) + return ERROR_FILE_OFFLINE; + + FILE_REMOTE_PROTOCOL_INFO RemoteProtocolInfo; + if (GetFileInformationByHandleEx(map->fd, FileRemoteProtocolInfo, + &RemoteProtocolInfo, + sizeof(RemoteProtocolInfo))) { + if ((RemoteProtocolInfo.Flags & (REMOTE_PROTOCOL_INFO_FLAG_LOOPBACK | + REMOTE_PROTOCOL_INFO_FLAG_OFFLINE)) != + REMOTE_PROTOCOL_INFO_FLAG_LOOPBACK) + return ERROR_FILE_OFFLINE; + } + + NTSTATUS rc; +#ifdef _WIN64 + struct { + WOF_EXTERNAL_INFO wof_info; + union { + WIM_PROVIDER_EXTERNAL_INFO wim_info; + FILE_PROVIDER_EXTERNAL_INFO_V1 file_info; + }; + size_t reserved_for_microsoft_madness[42]; + } GetExternalBacking_OutputBuffer; + IO_STATUS_BLOCK StatusBlock; + rc = NtFsControlFile(map->fd, NULL, NULL, NULL, &StatusBlock, + FSCTL_GET_EXTERNAL_BACKING, NULL, 0, + &GetExternalBacking_OutputBuffer, + sizeof(GetExternalBacking_OutputBuffer)); + if (rc != STATUS_OBJECT_NOT_EXTERNALLY_BACKED && + rc != STATUS_INVALID_DEVICE_REQUEST) + return NT_SUCCESS(rc) ? 
ERROR_FILE_OFFLINE : ntstatus2errcode(rc); +#endif + + WCHAR PathBuffer[INT16_MAX]; + DWORD VolumeSerialNumber, FileSystemFlags; + if (!GetVolumeInformationByHandleW(map->fd, PathBuffer, INT16_MAX, + &VolumeSerialNumber, NULL, + &FileSystemFlags, NULL, 0)) + return GetLastError(); + + if ((flags & MDBX_RDONLY) == 0) { + if (FileSystemFlags & (FILE_SEQUENTIAL_WRITE_ONCE | FILE_READ_ONLY_VOLUME | + FILE_VOLUME_IS_COMPRESSED)) + return ERROR_FILE_OFFLINE; + } + + if (!GetFinalPathNameByHandleW(map->fd, PathBuffer, INT16_MAX, + FILE_NAME_NORMALIZED | VOLUME_NAME_NT)) + return GetLastError(); + + if (_wcsnicmp(PathBuffer, L"\\Device\\Mup\\", 12) == 0) + return ERROR_FILE_OFFLINE; + + if (GetFinalPathNameByHandleW(map->fd, PathBuffer, INT16_MAX, + FILE_NAME_NORMALIZED | VOLUME_NAME_DOS)) { + UINT DriveType = GetDriveTypeW(PathBuffer); + if (DriveType == DRIVE_NO_ROOT_DIR && + wcsncmp(PathBuffer, L"\\\\?\\", 4) == 0 && + wcsncmp(PathBuffer + 5, L":\\", 2) == 0) { + PathBuffer[7] = 0; + DriveType = GetDriveTypeW(PathBuffer + 4); + } + switch (DriveType) { + case DRIVE_CDROM: + if (flags & MDBX_RDONLY) + break; + // fall through + case DRIVE_UNKNOWN: + case DRIVE_NO_ROOT_DIR: + case DRIVE_REMOTE: + default: + return ERROR_FILE_OFFLINE; + case DRIVE_REMOVABLE: + case DRIVE_FIXED: + case DRIVE_RAMDISK: + break; + } + } + + rc = NtCreateSection( + &map->section, + /* DesiredAccess */ SECTION_MAP_READ | SECTION_EXTEND_SIZE | + ((flags & MDBX_WRITEMAP) ? SECTION_MAP_WRITE : 0), + /* ObjectAttributes */ NULL, /* MaximumSize */ NULL, + /* SectionPageProtection */ (flags & MDBX_RDONLY) ? PAGE_READONLY + : PAGE_READWRITE, + /* AllocationAttributes */ SEC_RESERVE, map->fd); + + if (!NT_SUCCESS(rc)) + return ntstatus2errcode(rc); + + map->address = nullptr; + SIZE_T ViewSize = (flags & MDBX_RDONLY) ? must : limit; + rc = NtMapViewOfSection( + map->section, GetCurrentProcess(), &map->address, + /* ZeroBits */ 0, + /* CommitSize */ must, + /* SectionOffset */ NULL, &ViewSize, + /* InheritDisposition */ ViewUnmap, + /* AllocationType */ (flags & MDBX_RDONLY) ? 0 : MEM_RESERVE, + /* Win32Protect */ (flags & MDBX_WRITEMAP) ? PAGE_READWRITE + : PAGE_READONLY); + + if (!NT_SUCCESS(rc)) { + NtClose(map->section); + map->section = 0; + map->address = nullptr; + return ntstatus2errcode(rc); + } + assert(map->address != MAP_FAILED); + + uint64_t filesize; + rc = mdbx_filesize(map->fd, &filesize); + if (rc != MDBX_SUCCESS) { + NtClose(map->section); + NtUnmapViewOfSection(GetCurrentProcess(), map->address); + map->section = 0; + map->address = nullptr; + return rc; + } + + map->current = (must < filesize) ? must : (size_t)filesize; + map->length = ViewSize; + return MDBX_SUCCESS; +#else + (void)must; + map->address = mmap( + NULL, limit, (flags & MDBX_WRITEMAP) ? 
PROT_READ | PROT_WRITE : PROT_READ, + MAP_SHARED, map->fd, 0); + if (likely(map->address != MAP_FAILED)) { + map->length = limit; + return MDBX_SUCCESS; + } + map->length = 0; + map->address = nullptr; + return errno; +#endif +} + +int mdbx_munmap(mdbx_mmap_t *map) { +#if defined(_WIN32) || defined(_WIN64) + if (map->section) + NtClose(map->section); + NTSTATUS rc = NtUnmapViewOfSection(GetCurrentProcess(), map->address); + if (!NT_SUCCESS(rc)) + ntstatus2errcode(rc); + map->length = 0; + map->current = 0; + map->address = nullptr; +#else + if (unlikely(munmap(map->address, map->length))) + return errno; + map->length = 0; + map->address = nullptr; +#endif + return MDBX_SUCCESS; +} + +int mdbx_mresize(int flags, mdbx_mmap_t *map, size_t atleast, size_t limit) { + assert(atleast <= limit); +#if defined(_WIN32) || defined(_WIN64) + if (limit < map->length) { + /* Windows is unable shrinking a mapped section */ + return ERROR_USER_MAPPED_FILE; + } + if (limit > map->length) { + /* extend */ + LARGE_INTEGER new_size; + new_size.QuadPart = limit; + NTSTATUS rc = NtExtendSection(map->section, &new_size); + if (!NT_SUCCESS(rc)) + return ntstatus2errcode(rc); + map->length = limit; + } + if (atleast < map->current) { + /* Windows is unable shrinking a mapped file */ + uint8_t *ptr = (uint8_t *)map->address + atleast; + if (!VirtualFree(ptr, map->current - atleast, MEM_DECOMMIT)) + return MDBX_RESULT_TRUE; + + map->current = atleast; + int rc = mdbx_ftruncate(map->fd, atleast); + return (rc != MDBX_SUCCESS) ? MDBX_RESULT_TRUE : rc; + } + if (atleast > map->current) { + /* growth */ + uint8_t *ptr = (uint8_t *)map->address + map->current; + if (ptr != + VirtualAlloc(ptr, atleast - map->current, MEM_COMMIT, + (flags & MDBX_WRITEMAP) ? PAGE_READWRITE : PAGE_READONLY)) + return GetLastError(); + map->current = atleast; + } + + uint64_t filesize; + int rc = mdbx_filesize(map->fd, &filesize); + if (rc != MDBX_SUCCESS) + return rc; + if (filesize < atleast) { + rc = mdbx_ftruncate(map->fd, atleast); + if (rc != MDBX_SUCCESS) + return rc; + } + return MDBX_SUCCESS; +#else + (void)flags; + if (limit != map->length) { + void *ptr = mremap(map->address, map->length, limit, MREMAP_MAYMOVE); + if (ptr == MAP_FAILED) + return errno; + map->address = ptr; + map->length = limit; + } + return mdbx_ftruncate(map->fd, atleast); +#endif +} + +/*----------------------------------------------------------------------------*/ + +__cold void mdbx_osal_jitter(bool tiny) { + for (;;) { +#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ + defined(__x86_64__) + const unsigned salt = 277u * (unsigned)__rdtsc(); +#else + const unsigned salt = rand(); +#endif + + const unsigned coin = salt % (tiny ? 29u : 43u); + if (coin < 43 / 3) + break; +#if defined(_WIN32) || defined(_WIN64) + SwitchToThread(); + if (coin > 43 * 2 / 3) + Sleep(1); +#else + sched_yield(); + if (coin > 43 * 2 / 3) + usleep(coin); +#endif + } +} diff --git a/plugins/Dbx_mdb/src/mdbx/osal.h b/plugins/Dbx_mdb/src/mdbx/osal.h new file mode 100644 index 0000000000..4216a5346a --- /dev/null +++ b/plugins/Dbx_mdb/src/mdbx/osal.h @@ -0,0 +1,645 @@ +/* https://en.wikipedia.org/wiki/Operating_system_abstraction_layer */ + +/* + * Copyright 2015-2017 Leonid Yuriev <leo@yuriev.ru> + * and other libmdbx authors: please see AUTHORS file. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. 
+ * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * <http://www.OpenLDAP.org/license.html>. + */ + +#pragma once + +/*----------------------------------------------------------------------------*/ +/* Microsoft compiler generates a lot of warning for self includes... */ + +#ifdef _MSC_VER +#pragma warning(push, 1) +#pragma warning(disable : 4548) /* expression before comma has no effect; \ + expected expression with side - effect */ +#pragma warning(disable : 4530) /* C++ exception handler used, but unwind \ + * semantics are not enabled. Specify /EHsc */ +#pragma warning(disable : 4577) /* 'noexcept' used with no exception handling \ + * mode specified; termination on exception is \ + * not guaranteed. Specify /EHsc */ +#if !defined(_CRT_SECURE_NO_WARNINGS) +#define _CRT_SECURE_NO_WARNINGS +#endif +#endif /* _MSC_VER (warnings) */ + +/*----------------------------------------------------------------------------*/ +/* C99 includes */ + +#include <inttypes.h> +#include <stddef.h> +#include <stdint.h> +#include <stdlib.h> + +#include <assert.h> +#include <fcntl.h> +#include <limits.h> +#include <malloc.h> +#include <stdio.h> +#include <string.h> +#include <time.h> + +#ifndef _POSIX_C_SOURCE +#ifdef _POSIX_SOURCE +#define _POSIX_C_SOURCE 1 +#else +#define _POSIX_C_SOURCE 0 +#endif +#endif + +#ifndef _XOPEN_SOURCE +#define _XOPEN_SOURCE 0 +#endif + +/*----------------------------------------------------------------------------*/ +/* Systems includes */ + +#if defined(_WIN32) || defined(_WIN64) +#include <windows.h> +#include <winnt.h> +#define HAVE_SYS_STAT_H +#define HAVE_SYS_TYPES_H +typedef HANDLE mdbx_thread_t; +typedef unsigned mdbx_thread_key_t; +#define MDBX_OSAL_SECTION HANDLE +#define MAP_FAILED NULL +#define HIGH_DWORD(v) ((DWORD)((sizeof(v) > 4) ? 
((uint64_t)(v) >> 32) : 0)) +#define THREAD_CALL WINAPI +#define THREAD_RESULT DWORD +typedef struct { + HANDLE mutex; + HANDLE event; +} mdbx_condmutex_t; +typedef CRITICAL_SECTION mdbx_fastmutex_t; +#else +#include <pthread.h> +#include <signal.h> +#include <sys/file.h> +#include <sys/mman.h> +#include <sys/param.h> +#include <sys/stat.h> +#include <sys/uio.h> +#include <unistd.h> +typedef pthread_t mdbx_thread_t; +typedef pthread_key_t mdbx_thread_key_t; +#define INVALID_HANDLE_VALUE (-1) +#define THREAD_CALL +#define THREAD_RESULT void * +typedef struct { + pthread_mutex_t mutex; + pthread_cond_t cond; +} mdbx_condmutex_t; +typedef pthread_mutex_t mdbx_fastmutex_t; +#endif /* Platform */ + +#ifndef SSIZE_MAX +#define SSIZE_MAX INTPTR_MAX +#endif + +#ifdef HAVE_SYS_STAT_H +#include <sys/stat.h> +#endif +#ifdef HAVE_SYS_TYPES_H +#include <sys/types.h> +#endif +#ifdef HAVE_SYS_FILE_H +#include <sys/file.h> +#endif + +#if !defined(UNALIGNED_OK) +#if defined(__i386) || defined(__x86_64__) || defined(_M_IX86) || \ + defined(_M_X64) || defined(i386) || defined(_X86_) || defined(__i386__) || \ + defined(_X86_64_) +#define UNALIGNED_OK 1 +#else +#define UNALIGNED_OK 0 +#endif +#endif /* UNALIGNED_OK */ + +#if (-6 & 5) || CHAR_BIT != 8 || UINT_MAX < 0xffffffff || ULONG_MAX % 0xFFFF +#error \ + "Sanity checking failed: Two's complement, reasonably sized integer types" +#endif + +/*----------------------------------------------------------------------------*/ +/* Compiler's includes for builtins/intrinsics */ + +#ifdef _MSC_VER + +#if _MSC_FULL_VER < 190024215 +#if _MSC_FULL_VER < 180040629 && defined(_M_IX86) +#error Please use Visual Studio 2015 (MSC 19.0) or newer for 32-bit target. +#else +#pragma message( \ + "It is recommended to use Visual Studio 2015 (MSC 19.0) or newer.") +#endif +#endif + +#include <intrin.h> + +#elif __GNUC_PREREQ(4, 4) || defined(__clang__) +#if defined(__i386__) || defined(__x86_64__) +#include <cpuid.h> +#include <x86intrin.h> +#endif +#elif defined(__INTEL_COMPILER) +#include <intrin.h> +#elif defined(__SUNPRO_C) || defined(__sun) || defined(sun) +#include <mbarrier.h> +#elif (defined(_HPUX_SOURCE) || defined(__hpux) || defined(__HP_aCC)) && \ + (defined(HP_IA64) || defined(__ia64)) +#include <machine/sys/inline.h> +#elif defined(__IBMC__) && defined(__powerpc) +#include <atomic.h> +#elif defined(_AIX) +#include <builtins.h> +#include <sys/atomic_op.h> +#elif (defined(__osf__) && defined(__DECC)) || defined(__alpha) +#include <c_asm.h> +#include <machine/builtins.h> +#elif defined(__MWERKS__) +/* CodeWarrior - troubles ? */ +#pragma gcc_extensions +#elif defined(__SNC__) +/* Sony PS3 - troubles ? 
*/ +#else +#error Unknown C compiler, please use GNU C 5.x or newer +#endif /* Compiler */ + +/*----------------------------------------------------------------------------*/ +/* Byteorder */ + +#if !defined(__BYTE_ORDER__) || !defined(__ORDER_LITTLE_ENDIAN__) || \ + !defined(__ORDER_BIG_ENDIAN__) + +#if defined(HAVE_ENDIAN_H) +#include <endian.h> +#elif defined(HAVE_SYS_PARAM_H) +#include <sys/param.h> /* for endianness */ +#elif defined(HAVE_NETINET_IN_H) && defined(HAVE_RESOLV_H) +#include <netinet/in.h> +#include <resolv.h> /* defines BYTE_ORDER on HPUX and Solaris */ +#endif + +#if defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) +#define __ORDER_LITTLE_ENDIAN__ __LITTLE_ENDIAN +#define __ORDER_BIG_ENDIAN__ __BIG_ENDIAN +#define __BYTE_ORDER__ __BYTE_ORDER +#else +#define __ORDER_LITTLE_ENDIAN__ 1234 +#define __ORDER_BIG_ENDIAN__ 4321 +#if defined(__LITTLE_ENDIAN__) || defined(_LITTLE_ENDIAN) || \ + defined(__ARMEL__) || defined(__THUMBEL__) || defined(__AARCH64EL__) || \ + defined(__MIPSEL__) || defined(_MIPSEL) || defined(__MIPSEL) || \ + defined(__i386) || defined(__x86_64__) || defined(_M_IX86) || \ + defined(_M_X64) || defined(i386) || defined(_X86_) || defined(__i386__) || \ + defined(_X86_64_) || defined(_M_ARM) || defined(_M_ARM64) || \ + defined(__e2k__) +#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__ +#elif defined(__BIG_ENDIAN__) || defined(_BIG_ENDIAN) || defined(__ARMEB__) || \ + defined(__THUMBEB__) || defined(__AARCH64EB__) || defined(__MIPSEB__) || \ + defined(_MIPSEB) || defined(__MIPSEB) || defined(_M_IA64) +#define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__ +#else +#error __BYTE_ORDER__ should be defined. +#endif +#endif +#endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */ + +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ && \ + __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ +#error Unsupported byte order. +#endif + +/*----------------------------------------------------------------------------*/ +/* Memory/Compiler barriers, cache coherence */ + +static __inline void mdbx_compiler_barrier(void) { +#if defined(__clang__) || defined(__GNUC__) + __asm__ __volatile__("" ::: "memory"); +#elif defined(_MSC_VER) + _ReadWriteBarrier(); +#elif defined(__INTEL_COMPILER) /* LY: Intel Compiler may mimic GCC and MSC */ + __memory_barrier(); + if (type > MDBX_BARRIER_COMPILER) +#if defined(__ia64__) || defined(__ia64) || defined(_M_IA64) + __mf(); +#elif defined(__i386__) || defined(__x86_64__) + _mm_mfence(); +#else +#error "Unknown target for Intel Compiler, please report to us." +#endif +#elif defined(__SUNPRO_C) || defined(__sun) || defined(sun) + __compiler_barrier(); +#elif (defined(_HPUX_SOURCE) || defined(__hpux) || defined(__HP_aCC)) && \ + (defined(HP_IA64) || defined(__ia64)) + _Asm_sched_fence(/* LY: no-arg meaning 'all expect ALU', e.g. 0x3D3D */); +#elif defined(_AIX) || defined(__ppc__) || defined(__powerpc__) || \ + defined(__ppc64__) || defined(__powerpc64__) + __fence(); +#else +#error "Could not guess the kind of compiler, please report to us." 
+#endif +} + +static __inline void mdbx_memory_barrier(void) { +#if __has_extension(c_atomic) || __has_extension(cxx_atomic) + __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); +#elif defined(__ATOMIC_SEQ_CST) + __atomic_thread_fence(__ATOMIC_SEQ_CST); +#elif defined(__clang__) || defined(__GNUC__) + __sync_synchronize(); +#elif defined(_MSC_VER) + MemoryBarrier(); +#elif defined(__INTEL_COMPILER) /* LY: Intel Compiler may mimic GCC and MSC */ +#if defined(__ia64__) || defined(__ia64) || defined(_M_IA64) + __mf(); +#elif defined(__i386__) || defined(__x86_64__) + _mm_mfence(); +#else +#error "Unknown target for Intel Compiler, please report to us." +#endif +#elif defined(__SUNPRO_C) || defined(__sun) || defined(sun) + __machine_rw_barrier(); +#elif (defined(_HPUX_SOURCE) || defined(__hpux) || defined(__HP_aCC)) && \ + (defined(HP_IA64) || defined(__ia64)) + _Asm_mf(); +#elif defined(_AIX) || defined(__ppc__) || defined(__powerpc__) || \ + defined(__ppc64__) || defined(__powerpc64__) + __lwsync(); +#else +#error "Could not guess the kind of compiler, please report to us." +#endif +} + +/*----------------------------------------------------------------------------*/ +/* Cache coherence and invalidation */ + +#if defined(__i386__) || defined(__x86_64__) || defined(_M_AMD64) || \ + defined(_M_IX86) || defined(__i386) || defined(__amd64) || \ + defined(i386) || defined(__x86_64) || defined(_AMD64_) || defined(_M_X64) +#define MDBX_CACHE_IS_COHERENT 1 +#elif defined(__hppa) || defined(__hppa__) +#define MDBX_CACHE_IS_COHERENT 1 +#endif + +#ifndef MDBX_CACHE_IS_COHERENT +#define MDBX_CACHE_IS_COHERENT 0 +#endif + +#ifndef MDBX_CACHELINE_SIZE +#if defined(SYSTEM_CACHE_ALIGNMENT_SIZE) +#define MDBX_CACHELINE_SIZE SYSTEM_CACHE_ALIGNMENT_SIZE +#elif defined(__ia64__) || defined(__ia64) || defined(_M_IA64) +#define MDBX_CACHELINE_SIZE 128 +#else +#define MDBX_CACHELINE_SIZE 64 +#endif +#endif /* MDBX_CACHELINE_SIZE */ + +#ifndef __cache_aligned +#define __cache_aligned __aligned(MDBX_CACHELINE_SIZE) +#endif + +#if MDBX_CACHE_IS_COHERENT +#define mdbx_coherent_barrier() mdbx_compiler_barrier() +#else +#define mdbx_coherent_barrier() mdbx_memory_barrier() +#endif + +#if defined(__mips) && defined(__linux) +/* Only MIPS has explicit cache control */ +#include <asm/cachectl.h> +#endif + +static __inline void mdbx_invalidate_cache(void *addr, size_t nbytes) { + mdbx_coherent_barrier(); +#if defined(__mips) && defined(__linux) + /* MIPS has cache coherency issues. + * Note: for any nbytes >= on-chip cache size, entire is flushed. */ + cacheflush(addr, nbytes, DCACHE); +#elif defined(_M_MRX000) || defined(_MIPS_) +#error "Sorry, cacheflush() for MIPS not implemented" +#else + /* LY: assume no relevant mmap/dcache issues. */ + (void)addr; + (void)nbytes; +#endif +} + +/*----------------------------------------------------------------------------*/ +/* libc compatibility stuff */ + +#ifndef mdbx_assert_fail +void mdbx_assert_fail(const MDBX_env *env, const char *msg, const char *func, + int line); +#endif /* mdbx_assert_fail */ + +#if __GLIBC_PREREQ(2, 1) +#define mdbx_asprintf asprintf +#else +int mdbx_asprintf(char **strp, const char *fmt, ...); +#endif + +#ifdef _MSC_VER + +#ifndef snprintf +#define snprintf(buffer, buffer_size, format, ...) 
\ + _snprintf_s(buffer, buffer_size, _TRUNCATE, format, __VA_ARGS__) +#endif /* snprintf */ + +#ifndef vsnprintf +#define vsnprintf(buffer, buffer_size, format, args) \ + _vsnprintf_s(buffer, buffer_size, _TRUNCATE, format, args) +#endif /* vsnprintf */ + +#ifdef _ASSERTE +#undef assert +#define assert _ASSERTE +#endif + +#endif /* _MSC_VER */ + +/*----------------------------------------------------------------------------*/ +/* OS abstraction layer stuff */ + +/* max bytes to write in one call */ +#define MAX_WRITE UINT32_C(0x3fff0000) + +/* Get the size of a memory page for the system. + * This is the basic size that the platform's memory manager uses, and is + * fundamental to the use of memory-mapped files. */ +static __inline size_t mdbx_syspagesize(void) { +#if defined(_WIN32) || defined(_WIN64) + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwPageSize; +#else + return sysconf(_SC_PAGE_SIZE); +#endif +} + +static __inline char *mdbx_strdup(const char *str) { +#ifdef _MSC_VER + return _strdup(str); +#else + return strdup(str); +#endif +} + +static __inline int mdbx_get_errno(void) { +#if defined(_WIN32) || defined(_WIN64) + DWORD rc = GetLastError(); +#else + int rc = errno; +#endif + return rc; +} + +int mdbx_memalign_alloc(size_t alignment, size_t bytes, void **result); +void mdbx_memalign_free(void *ptr); + +int mdbx_condmutex_init(mdbx_condmutex_t *condmutex); +int mdbx_condmutex_lock(mdbx_condmutex_t *condmutex); +int mdbx_condmutex_unlock(mdbx_condmutex_t *condmutex); +int mdbx_condmutex_signal(mdbx_condmutex_t *condmutex); +int mdbx_condmutex_wait(mdbx_condmutex_t *condmutex); +int mdbx_condmutex_destroy(mdbx_condmutex_t *condmutex); + +int mdbx_fastmutex_init(mdbx_fastmutex_t *fastmutex); +int mdbx_fastmutex_acquire(mdbx_fastmutex_t *fastmutex); +int mdbx_fastmutex_release(mdbx_fastmutex_t *fastmutex); +int mdbx_fastmutex_destroy(mdbx_fastmutex_t *fastmutex); + +int mdbx_pwritev(mdbx_filehandle_t fd, struct iovec *iov, int iovcnt, + uint64_t offset, size_t expected_written); +int mdbx_pread(mdbx_filehandle_t fd, void *buf, size_t count, uint64_t offset); +int mdbx_pwrite(mdbx_filehandle_t fd, const void *buf, size_t count, + uint64_t offset); +int mdbx_write(mdbx_filehandle_t fd, const void *buf, size_t count); + +int mdbx_thread_create(mdbx_thread_t *thread, + THREAD_RESULT(THREAD_CALL *start_routine)(void *), + void *arg); +int mdbx_thread_join(mdbx_thread_t thread); +int mdbx_thread_key_create(mdbx_thread_key_t *key); +void mdbx_thread_key_delete(mdbx_thread_key_t key); +void *mdbx_thread_rthc_get(mdbx_thread_key_t key); +void mdbx_thread_rthc_set(mdbx_thread_key_t key, const void *value); + +int mdbx_filesync(mdbx_filehandle_t fd, bool fullsync); +int mdbx_filesize_sync(mdbx_filehandle_t fd); +int mdbx_ftruncate(mdbx_filehandle_t fd, uint64_t length); +int mdbx_filesize(mdbx_filehandle_t fd, uint64_t *length); +int mdbx_openfile(const char *pathname, int flags, mode_t mode, + mdbx_filehandle_t *fd); +int mdbx_closefile(mdbx_filehandle_t fd); + +typedef struct mdbx_mmap_param { + union { + void *address; + uint8_t *dxb; + struct MDBX_lockinfo *lck; + }; + mdbx_filehandle_t fd; + size_t length; /* mapping length, but NOT a size of file or DB */ +#if defined(_WIN32) || defined(_WIN64) + size_t current; /* mapped region size, e.g. 
file and DB */ +#endif +#ifdef MDBX_OSAL_SECTION + MDBX_OSAL_SECTION section; +#endif +} mdbx_mmap_t; + +int mdbx_mmap(int flags, mdbx_mmap_t *map, size_t must, size_t limit); +int mdbx_munmap(mdbx_mmap_t *map); +int mdbx_mresize(int flags, mdbx_mmap_t *map, size_t current, size_t wanna); +int mdbx_msync(mdbx_mmap_t *map, size_t offset, size_t length, int async); + +static __inline mdbx_pid_t mdbx_getpid(void) { +#if defined(_WIN32) || defined(_WIN64) + return GetCurrentProcessId(); +#else + return getpid(); +#endif +} + +static __inline mdbx_tid_t mdbx_thread_self(void) { +#if defined(_WIN32) || defined(_WIN64) + return GetCurrentThreadId(); +#else + return pthread_self(); +#endif +} + +void mdbx_osal_jitter(bool tiny); + +/*----------------------------------------------------------------------------*/ +/* lck stuff */ + +#if defined(_WIN32) || defined(_WIN64) +#undef MDBX_OSAL_LOCK +#define MDBX_OSAL_LOCK_SIGN UINT32_C(0xF10C) +#else +#define MDBX_OSAL_LOCK pthread_mutex_t +#define MDBX_OSAL_LOCK_SIGN UINT32_C(0x8017) +#endif + +int mdbx_lck_init(MDBX_env *env); + +int mdbx_lck_seize(MDBX_env *env); +int mdbx_lck_downgrade(MDBX_env *env, bool complete); +int mdbx_lck_upgrade(MDBX_env *env); +void mdbx_lck_destroy(MDBX_env *env); + +int mdbx_rdt_lock(MDBX_env *env); +void mdbx_rdt_unlock(MDBX_env *env); + +int mdbx_txn_lock(MDBX_env *env); +void mdbx_txn_unlock(MDBX_env *env); + +int mdbx_rpid_set(MDBX_env *env); +int mdbx_rpid_clear(MDBX_env *env); + +/* Checks reader by pid. + * + * Returns: + * MDBX_RESULT_TRUE, if pid is live (unable to acquire lock) + * MDBX_RESULT_FALSE, if pid is dead (lock acquired) + * or otherwise the errcode. */ +int mdbx_rpid_check(MDBX_env *env, mdbx_pid_t pid); + +/*----------------------------------------------------------------------------*/ +/* Atomics */ + +#if !defined(__cplusplus) && (__STDC_VERSION__ >= 201112L) && \ + !defined(__STDC_NO_ATOMICS__) && \ + (__GNUC_PREREQ(4, 9) || __CLANG_PREREQ(3, 8) || \ + !(defined(__GNUC__) || defined(__clang__))) +#include <stdatomic.h> +#elif defined(__GNUC__) || defined(__clang__) +/* LY: nothing required */ +#elif defined(_MSC_VER) +#pragma warning(disable : 4163) /* 'xyz': not available as an intrinsic */ +#pragma warning(disable : 4133) /* 'function': incompatible types - from \ + 'size_t' to 'LONGLONG' */ +#pragma warning(disable : 4244) /* 'return': conversion from 'LONGLONG' to \ + 'std::size_t', possible loss of data */ +#pragma warning(disable : 4267) /* 'function': conversion from 'size_t' to \ + 'long', possible loss of data */ +#pragma intrinsic(_InterlockedExchangeAdd, _InterlockedCompareExchange) +#pragma intrinsic(_InterlockedExchangeAdd64, _InterlockedCompareExchange64) +#elif defined(__APPLE__) +#include <libkern/OSAtomic.h> +#else +#error FIXME atomic-ops +#endif + +static __inline uint32_t mdbx_atomic_add32(volatile uint32_t *p, uint32_t v) { +#if !defined(__cplusplus) && defined(ATOMIC_VAR_INIT) + assert(atomic_is_lock_free(p)); + return atomic_fetch_add((_Atomic uint32_t *)p, v); +#elif defined(__GNUC__) || defined(__clang__) + return __sync_fetch_and_add(p, v); +#else +#ifdef _MSC_VER + return _InterlockedExchangeAdd(p, v); +#endif +#ifdef __APPLE__ + return OSAtomicAdd32(v, (volatile int32_t *)p); +#endif +#endif +} + +static __inline uint64_t mdbx_atomic_add64(volatile uint64_t *p, uint64_t v) { +#if !defined(__cplusplus) && defined(ATOMIC_VAR_INIT) + assert(atomic_is_lock_free(p)); + return atomic_fetch_add((_Atomic uint64_t *)p, v); +#elif defined(__GNUC__) || defined(__clang__) + return 
__sync_fetch_and_add(p, v); +#else +#ifdef _MSC_VER + return _InterlockedExchangeAdd64((volatile int64_t *)p, v); +#endif +#ifdef __APPLE__ + return OSAtomicAdd64(v, (volatile int64_t *)p); +#endif +#endif +} + +#define mdbx_atomic_sub32(p, v) mdbx_atomic_add32(p, 0 - (v)) +#define mdbx_atomic_sub64(p, v) mdbx_atomic_add64(p, 0 - (v)) + +static __inline bool mdbx_atomic_compare_and_swap32(volatile uint32_t *p, + uint32_t c, uint32_t v) { +#if !defined(__cplusplus) && defined(ATOMIC_VAR_INIT) + assert(atomic_is_lock_free(p)); + return atomic_compare_exchange_strong((_Atomic uint32_t *)p, &c, v); +#elif defined(__GNUC__) || defined(__clang__) + return __sync_bool_compare_and_swap(p, c, v); +#else +#ifdef _MSC_VER + return c == _InterlockedCompareExchange(p, v, c); +#endif +#ifdef __APPLE__ + return c == OSAtomicCompareAndSwap32Barrier(c, v, (volatile int32_t *)p); +#endif +#endif +} + +static __inline bool mdbx_atomic_compare_and_swap64(volatile uint64_t *p, + uint64_t c, uint64_t v) { +#if !defined(__cplusplus) && defined(ATOMIC_VAR_INIT) + assert(atomic_is_lock_free(p)); + return atomic_compare_exchange_strong((_Atomic uint64_t *)p, &c, v); +#elif defined(__GNUC__) || defined(__clang__) + return __sync_bool_compare_and_swap(p, c, v); +#else +#ifdef _MSC_VER + return c == _InterlockedCompareExchange64((volatile int64_t *)p, v, c); +#endif +#ifdef __APPLE__ + return c == OSAtomicCompareAndSwap64Barrier(c, v, (volatile uint64_t *)p); +#endif +#endif +} + +/*----------------------------------------------------------------------------*/ + +#if defined(_MSC_VER) && _MSC_VER >= 1900 && _MSC_VER < 1920 +/* LY: MSVC 2015/2017 has buggy/inconsistent PRIuPTR/PRIxPTR macros + * for internal format-args checker. */ +#undef PRIuPTR +#undef PRIiPTR +#undef PRIdPTR +#undef PRIxPTR +#define PRIuPTR "Iu" +#define PRIiPTR "Ii" +#define PRIdPTR "Id" +#define PRIxPTR "Ix" +#define PRIuSIZE "zu" +#define PRIiSIZE "zi" +#define PRIdSIZE "zd" +#define PRIxSIZE "zx" +#endif /* fix PRI*PTR for _MSC_VER */ + +#ifndef PRIuSIZE +#define PRIuSIZE PRIuPTR +#define PRIiSIZE PRIiPTR +#define PRIdSIZE PRIdPTR +#define PRIxSIZE PRIxPTR +#endif /* PRI*SIZE macros for MSVC */ + +#ifdef _MSC_VER +#pragma warning(pop) +#endif diff --git a/plugins/Dbx_mdb/src/mdbx/version.c b/plugins/Dbx_mdb/src/mdbx/version.c new file mode 100644 index 0000000000..ef4f99088d --- /dev/null +++ b/plugins/Dbx_mdb/src/mdbx/version.c @@ -0,0 +1,34 @@ +/* + * Copyright 2015-2017 Leonid Yuriev <leo@yuriev.ru> + * and other libmdbx authors: please see AUTHORS file. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * <http://www.OpenLDAP.org/license.html>. + */ + +#include "./bits.h" + +#if MDBX_VERSION_MAJOR != 0 || MDBX_VERSION_MINOR != 0 +#error "API version mismatch!" 
+#endif
+
+#define MDBX_VERSION_RELEASE 0
+#define MDBX_VERSION_REVISION 0
+
+/*LIBMDBX_EXPORTS*/ const mdbx_version_info mdbx_version = {
+    MDBX_VERSION_MAJOR,
+    MDBX_VERSION_MINOR,
+    MDBX_VERSION_RELEASE,
+    MDBX_VERSION_REVISION,
+    {"@MDBX_GIT_TIMESTAMP@", "@MDBX_GIT_TREE@", "@MDBX_GIT_COMMIT@",
+     "@MDBX_GIT_DESCRIBE@"}};
+
+/*LIBMDBX_EXPORTS*/ const mdbx_build_info mdbx_build = {
+    "@MDBX_BUILD_TIMESTAMP@", "@MDBX_BUILD_TAGRET@", "@MDBX_BUILD_OPTIONS@",
+    "@MDBX_BUILD_COMPILER@", "@MDBX_BUILD_FLAGS@"};
diff --git a/plugins/Dbx_mdb/src/stdafx.h b/plugins/Dbx_mdb/src/stdafx.h
index 958847ee5e..1fc3afc599 100644
--- a/plugins/Dbx_mdb/src/stdafx.h
+++ b/plugins/Dbx_mdb/src/stdafx.h
@@ -43,7 +43,7 @@ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <m_netlib.h>
#include <m_gui.h>
-#include "lmdb/lmdb.h"
+#include "mdbx/mdbx.h"
#ifndef thread_local
# define thread_local __declspec(thread)
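(Editor's note: the thread_local fallback kept in the context above maps the C++11 keyword onto MSVC's __declspec(thread) storage class on toolsets without native support, so per-thread state can still be declared with the portable spelling. A minimal illustration with a hypothetical variable that is not part of the patch:

    // Expands to __declspec(thread) on older MSVC and stays native thread_local
    // elsewhere; each thread sees its own zero-initialized copy.
    thread_local int g_perThreadCounter = 0;
)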
@@ -52,58 +52,58 @@ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
class txn_ptr
{
- MDB_txn *m_txn;
+ MDBX_txn *m_txn;
public:
- __forceinline txn_ptr(MDB_env *pEnv)
+ __forceinline txn_ptr(MDBX_env *pEnv)
{
- mdb_txn_begin(pEnv, NULL, 0, &m_txn);
+ mdbx_txn_begin(pEnv, NULL, 0, &m_txn);
}
__forceinline ~txn_ptr()
{
if (m_txn)
- mdb_txn_abort(m_txn);
+ mdbx_txn_abort(m_txn);
}
- __forceinline operator MDB_txn*() const { return m_txn; }
+ __forceinline operator MDBX_txn*() const { return m_txn; }
__forceinline int commit()
{
- MDB_txn *tmp = m_txn;
+ MDBX_txn *tmp = m_txn;
m_txn = nullptr;
- return mdb_txn_commit(tmp);
+ return mdbx_txn_commit(tmp);
}
__forceinline void abort()
{
- mdb_txn_abort(m_txn);
+ mdbx_txn_abort(m_txn);
m_txn = NULL;
}
};
-struct CMDB_txn_ro
+struct CMDBX_txn_ro
{
- MDB_txn *m_txn;
+ MDBX_txn *m_txn;
bool bIsActive;
mir_cs cs;
- __forceinline CMDB_txn_ro() : m_txn(nullptr), bIsActive(false) {}
+ __forceinline CMDBX_txn_ro() : m_txn(nullptr), bIsActive(false) {}
- __forceinline operator MDB_txn* () { return m_txn; }
- __forceinline MDB_txn** operator &() { return &m_txn; }
+ __forceinline operator MDBX_txn* () { return m_txn; }
+ __forceinline MDBX_txn** operator &() { return &m_txn; }
};
class txn_ptr_ro
{
- CMDB_txn_ro &m_txn;
+ CMDBX_txn_ro &m_txn;
bool bNeedReset;
mir_cslock lock;
public:
- __forceinline txn_ptr_ro(CMDB_txn_ro &txn) : m_txn(txn), bNeedReset(!txn.bIsActive), lock(m_txn.cs)
+ __forceinline txn_ptr_ro(CMDBX_txn_ro &txn) : m_txn(txn), bNeedReset(!txn.bIsActive), lock(m_txn.cs)
{
if (bNeedReset)
{
- mdb_txn_renew(m_txn);
+ mdbx_txn_renew(m_txn);
m_txn.bIsActive = true;
}
}
@@ -111,48 +111,48 @@ public:
{
if (bNeedReset)
{
- mdb_txn_reset(m_txn);
+ mdbx_txn_reset(m_txn);
m_txn.bIsActive = false;
}
}
- __forceinline operator MDB_txn*() const { return m_txn; }
+ __forceinline operator MDBX_txn*() const { return m_txn; }
};
class cursor_ptr
{
- MDB_cursor *m_cursor;
+ MDBX_cursor *m_cursor;
public:
- __forceinline cursor_ptr(MDB_txn *_txn, MDB_dbi _dbi)
+ __forceinline cursor_ptr(MDBX_txn *_txn, MDBX_dbi _dbi)
{
- if (mdb_cursor_open(_txn, _dbi, &m_cursor) != MDB_SUCCESS)
+ if (mdbx_cursor_open(_txn, _dbi, &m_cursor) != MDBX_SUCCESS)
m_cursor = NULL;
}
__forceinline ~cursor_ptr()
{
if (m_cursor)
- mdb_cursor_close(m_cursor);
+ mdbx_cursor_close(m_cursor);
}
- __forceinline operator MDB_cursor*() const { return m_cursor; }
+ __forceinline operator MDBX_cursor*() const { return m_cursor; }
};
class cursor_ptr_ro
{
- MDB_cursor *m_cursor;
+ MDBX_cursor *m_cursor;
public:
- __forceinline cursor_ptr_ro(MDB_cursor *cursor) : m_cursor(cursor)
+ __forceinline cursor_ptr_ro(MDBX_cursor *cursor) : m_cursor(cursor)
{
- mdb_cursor_renew(mdb_cursor_txn(m_cursor), m_cursor);
+ mdbx_cursor_renew(mdbx_cursor_txn(m_cursor), m_cursor);
}
- __forceinline operator MDB_cursor*() const { return m_cursor; }
+ __forceinline operator MDBX_cursor*() const { return m_cursor; }
};
-#define MDB_CHECK(A,B) \
+#define MDBX_CHECK(A,B) \
switch (A) { \
- case MDB_SUCCESS: break; \
- case MDB_MAP_FULL: continue; \
+ case MDBX_SUCCESS: break; \
+ case MDBX_MAP_FULL: continue; \
default: return (B); }
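(Editor's note: the renamed helpers above keep the behavior of their LMDB predecessors: txn_ptr aborts an uncommitted write transaction when it leaves scope, cursor_ptr closes its cursor, and MDBX_CHECK breaks on MDBX_SUCCESS, retries the surrounding loop on MDBX_MAP_FULL, and otherwise returns the supplied error value. A minimal usage sketch follows; the env/dbi handles, the record layout and the GrowMap() helper are illustrative assumptions, not code from this patch:

    // Hypothetical sketch: one write retried through MDBX_CHECK, assuming an
    // already opened MDBX_env* and MDBX_dbi. GrowMap() stands in for whatever
    // enlarges the mapping when mdbx_put() reports MDBX_MAP_FULL.
    static void GrowMap() { /* placeholder: re-map the database with a larger size */ }

    static int WriteRecord(MDBX_env *env, MDBX_dbi dbi, uint32_t id, int value)
    {
        MDBX_val key = { &id, sizeof(id) };        // iov_base first, then iov_len
        MDBX_val data = { &value, sizeof(value) };

        for (;; GrowMap()) {                       // re-run the body after growing the map
            txn_ptr txn(env);                      // begins a write transaction; aborts if never committed
            MDBX_CHECK(mdbx_put(txn, dbi, &key, &data, 0), 1);
            if (txn.commit() == MDBX_SUCCESS)
                return 0;
        }
    }

The read path composes the same way with txn_ptr_ro and cursor_ptr_ro, which renew the cached read-only transaction on entry and reset it again on exit when they were the ones that activated it.)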
diff --git a/tools/build_scripts/z1_ReBuild_Full.bat b/tools/build_scripts/z1_ReBuild_Full.bat
index e237da8e1f..5e9a7b1038 100644
--- a/tools/build_scripts/z1_ReBuild_Full.bat
+++ b/tools/build_scripts/z1_ReBuild_Full.bat
@@ -118,7 +118,6 @@ call :checksum Icons dll
copy /V /Y ..\..\docs\mirandaboot.ini
copy /V /Y ..\..\redist\x%tp%\DbChecker.bat
-copy /V /Y ..\..\redist\x%tp%\"%comp%"\*.dll
copy /V /Y ..\..\redist\x%tp%\bass\*.dll "Plugins\BASS"
popd