From 7c6a377c0b9fa42a21cd5c740244a706a2e2f382 Mon Sep 17 00:00:00 2001 From: Cory McWilliams Date: Sat, 12 Mar 2016 18:50:43 +0000 Subject: [PATCH] sandboxos => tildefriends git-svn-id: https://www.unprompted.com/svn/projects/tildefriends/trunk@3157 ed5197a5-7fde-0310-b194-c3ffbd925b24 --- COPYING | 46 + LICENSE | 661 + README.md | 32 + SConstruct | 102 + core/agplv3-88x31.png | Bin 0 -> 1883 bytes core/auth.html | 17 + core/auth.js | 209 + core/bCrypt.js | 533 + core/client.js | 403 + core/core.js | 413 + core/edit.html | 28 + core/editor.js | 92 + core/favicon.png | Bin 0 -> 320 bytes core/form.js | 33 + core/httpd.js | 286 + core/index.html | 31 + core/isaac.js | 239 + core/network.js | 102 + core/style.css | 181 + core/terminal.js | 290 + data/wiki/development | 62 + data/wiki/index | 65 + deps/liblmdb/.gitignore | 23 + deps/liblmdb/CHANGES | 182 + deps/liblmdb/COPYRIGHT | 20 + deps/liblmdb/Doxyfile | 1631 +++ deps/liblmdb/LICENSE | 47 + deps/liblmdb/Makefile | 111 + deps/liblmdb/lmdb.h | 1584 +++ deps/liblmdb/mdb.c | 10011 ++++++++++++++++ deps/liblmdb/mdb_copy.1 | 54 + deps/liblmdb/mdb_copy.c | 82 + deps/liblmdb/mdb_dump.1 | 75 + deps/liblmdb/mdb_dump.c | 317 + deps/liblmdb/mdb_load.1 | 77 + deps/liblmdb/mdb_load.c | 456 + deps/liblmdb/mdb_stat.1 | 64 + deps/liblmdb/mdb_stat.c | 263 + deps/liblmdb/midl.c | 358 + deps/liblmdb/midl.h | 185 + deps/liblmdb/mtest.c | 177 + deps/liblmdb/mtest2.c | 124 + deps/liblmdb/mtest3.c | 133 + deps/liblmdb/mtest4.c | 168 + deps/liblmdb/mtest5.c | 135 + deps/liblmdb/mtest6.c | 141 + deps/liblmdb/sample-bdb.txt | 73 + deps/liblmdb/sample-mdb.txt | 62 + deps/liblmdb/tooltag | 22 + deps/win32/unistd.h | 8 + packages/cory/about/about.js | 57 + .../cory/administration/administration.js | 137 + packages/cory/bbs/bbs.js | 315 + packages/cory/documentation/documentation.js | 93 + packages/cory/index/index.js | 53 + packages/cory/mmoturtle/mmoturtle.js | 124 + packages/cory/smtp/smtp.js | 74 + packages/cory/todo/todo.js | 204 + packages/cory/turtle/turtle.js | 41 + packages/cory/xmpp/xmpp.js | 952 ++ src/Database.cpp | 190 + src/Database.h | 44 + src/File.cpp | 129 + src/File.h | 19 + src/Mutex.cpp | 32 + src/Mutex.h | 26 + src/PacketStream.cpp | 85 + src/PacketStream.h | 34 + src/Serialize.cpp | 210 + src/Serialize.h | 47 + src/Socket.cpp | 653 + src/Socket.h | 90 + src/Task.cpp | 749 ++ src/Task.h | 163 + src/TaskStub.cpp | 254 + src/TaskStub.h | 63 + src/TaskTryCatch.cpp | 57 + src/TaskTryCatch.h | 18 + src/Tls.cpp | 1126 ++ src/Tls.h | 50 + src/TlsContextWrapper.cpp | 115 + src/TlsContextWrapper.h | 42 + src/main.cpp | 75 + tests/01-nop | 7 + tests/02-valgrind | 7 + tests/03-child | 19 + tests/04-promise | 27 + tests/05-promise-remote-throw | 26 + tests/06-restartTask | 80 + tests/07-promise-remote-reject | 29 + tests/08-database | 34 + tests/09-this | 9 + tools/run-tests | 36 + tools/update-deps | 118 + 94 files changed, 27121 insertions(+) create mode 100644 COPYING create mode 100644 LICENSE create mode 100644 README.md create mode 100644 SConstruct create mode 100644 core/agplv3-88x31.png create mode 100644 core/auth.html create mode 100644 core/auth.js create mode 100644 core/bCrypt.js create mode 100644 core/client.js create mode 100644 core/core.js create mode 100644 core/edit.html create mode 100644 core/editor.js create mode 100644 core/favicon.png create mode 100644 core/form.js create mode 100644 core/httpd.js create mode 100644 core/index.html create mode 100644 core/isaac.js create mode 100644 core/network.js create mode 100644 core/style.css 
create mode 100644 core/terminal.js create mode 100644 data/wiki/development create mode 100644 data/wiki/index create mode 100644 deps/liblmdb/.gitignore create mode 100644 deps/liblmdb/CHANGES create mode 100644 deps/liblmdb/COPYRIGHT create mode 100644 deps/liblmdb/Doxyfile create mode 100644 deps/liblmdb/LICENSE create mode 100644 deps/liblmdb/Makefile create mode 100644 deps/liblmdb/lmdb.h create mode 100644 deps/liblmdb/mdb.c create mode 100644 deps/liblmdb/mdb_copy.1 create mode 100644 deps/liblmdb/mdb_copy.c create mode 100644 deps/liblmdb/mdb_dump.1 create mode 100644 deps/liblmdb/mdb_dump.c create mode 100644 deps/liblmdb/mdb_load.1 create mode 100644 deps/liblmdb/mdb_load.c create mode 100644 deps/liblmdb/mdb_stat.1 create mode 100644 deps/liblmdb/mdb_stat.c create mode 100644 deps/liblmdb/midl.c create mode 100644 deps/liblmdb/midl.h create mode 100644 deps/liblmdb/mtest.c create mode 100644 deps/liblmdb/mtest2.c create mode 100644 deps/liblmdb/mtest3.c create mode 100644 deps/liblmdb/mtest4.c create mode 100644 deps/liblmdb/mtest5.c create mode 100644 deps/liblmdb/mtest6.c create mode 100644 deps/liblmdb/sample-bdb.txt create mode 100644 deps/liblmdb/sample-mdb.txt create mode 100644 deps/liblmdb/tooltag create mode 100644 deps/win32/unistd.h create mode 100644 packages/cory/about/about.js create mode 100644 packages/cory/administration/administration.js create mode 100644 packages/cory/bbs/bbs.js create mode 100644 packages/cory/documentation/documentation.js create mode 100644 packages/cory/index/index.js create mode 100644 packages/cory/mmoturtle/mmoturtle.js create mode 100644 packages/cory/smtp/smtp.js create mode 100644 packages/cory/todo/todo.js create mode 100644 packages/cory/turtle/turtle.js create mode 100644 packages/cory/xmpp/xmpp.js create mode 100644 src/Database.cpp create mode 100644 src/Database.h create mode 100644 src/File.cpp create mode 100644 src/File.h create mode 100644 src/Mutex.cpp create mode 100644 src/Mutex.h create mode 100644 src/PacketStream.cpp create mode 100644 src/PacketStream.h create mode 100644 src/Serialize.cpp create mode 100644 src/Serialize.h create mode 100644 src/Socket.cpp create mode 100644 src/Socket.h create mode 100644 src/Task.cpp create mode 100644 src/Task.h create mode 100644 src/TaskStub.cpp create mode 100644 src/TaskStub.h create mode 100644 src/TaskTryCatch.cpp create mode 100644 src/TaskTryCatch.h create mode 100644 src/Tls.cpp create mode 100644 src/Tls.h create mode 100644 src/TlsContextWrapper.cpp create mode 100644 src/TlsContextWrapper.h create mode 100644 src/main.cpp create mode 100755 tests/01-nop create mode 100755 tests/02-valgrind create mode 100755 tests/03-child create mode 100755 tests/04-promise create mode 100755 tests/05-promise-remote-throw create mode 100755 tests/06-restartTask create mode 100755 tests/07-promise-remote-reject create mode 100755 tests/08-database create mode 100755 tests/09-this create mode 100755 tools/run-tests create mode 100755 tools/update-deps diff --git a/COPYING b/COPYING new file mode 100644 index 00000000..9fd87c7f --- /dev/null +++ b/COPYING @@ -0,0 +1,46 @@ +sandboxos - An operating system for the web. +Copyright (C) 2014 Cory McWilliams + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as +published by the Free Software Foundation, either version 3 of the +License, or (at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . + +Additional permission under GNU GPL version 3 section 7 + +If you modify this Program, or any covered work, by linking or combining it +with V8 (or a modified version of that library), containing parts covered by +the terms of the New BSD License, the licensors of this Program grant you +additional permission to convey the resulting work. {Corresponding Source for +a non-source form of such a combination shall include the source code for the +parts of V8 used as well as that of the covered work.} + +If you modify this Program, or any covered work, by linking or combining it +with libuv (or a modified version of that library), containing parts covered by +the terms of the MIT License, the licensors of this Program grant you +additional permission to convey the resulting work. {Corresponding Source for +a non-source form of such a combination shall include the source code for the +parts of libuv used as well as that of the covered work.} + +If you modify this Program, or any covered work, by linking or combining it +with liblmdb (or a modified version of that library), containing parts covered +by the terms of the OpenLDAP Public License, the licensors of this Program +grant you additional permission to convey the resulting work. {Corresponding +Source for a non-source form of such a combination shall include the source +code for the parts of liblmdb used as well as that of the covered work.} + +If you modify this Program, or any covered work, by linking or combining it +with javascript-bcrypt (or a modified version of that library), containing +parts covered by the terms of the New BSD License, the licensors of this +Program grant you additional permission to convey the resulting work. +{Corresponding Source for a non-source form of such a combination shall include +the source code for the parts of javascript-bcrypt used as well as that of the +covered work.} diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..dba13ed2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/README.md b/README.md new file mode 100644 index 00000000..deda69a4 --- /dev/null +++ b/README.md @@ -0,0 +1,32 @@ +# SandboxOS +SandboxOS is a program that aims to securely host pure JavaScript web applications. + +## Goals +1. Make it easy to run all sorts of servers and web applications. +2. Provide a security model that is easy to understand and protects your data. +3. Make creating and sharing web applications accessible to anyone from a web interface. 
+ +## Building +SandboxOS is [routinely](https://www.unprompted.com/projects/build/sandboxos) built on Linux, Windows, and OS X. + +1. Get and build [Google V8](https://code.google.com/p/v8-wiki/wiki/UsingGit) (latest 3.30). +2. Get and build [libuv](https://github.com/libuv/libuv) (latest 1.0). +3. Run: + ``` + scons uv=path/to/libuv v8=path/to/v8 + ``` + +## Running +Running the built sandbox executable will start a web server. This is a good starting point: . + +To grant users administrator privileges, create a JSON file named data/auth/permissions.js containing a list of users and their permissions, like this: +``` +{ + "cory": ["administrator"] +} +``` + +This is a work in progress. Everything else can be managed entirely from the web interface. + +## License +All code unless otherwise noted in [COPYING](https://github.com/unprompted/sandboxos/blob/master/COPYING) is provided under the [Affero GPL 3.0](https://github.com/unprompted/sandboxos/blob/master/LICENSE) license. diff --git a/SConstruct b/SConstruct new file mode 100644 index 00000000..24daf136 --- /dev/null +++ b/SConstruct @@ -0,0 +1,102 @@ +#!/usr/bin/python + +import os +import sys + +options = Variables('options.cache', ARGUMENTS) +options.AddVariables(PathVariable('uv', 'Location of libuv', '../sys/libuv')) +options.AddVariables(PathVariable('v8', 'Location of v8', '../sys/v8')) +options.AddVariables(BoolVariable('package', 'Build a package', False)) + +VariantDir('build/src', 'src', duplicate=0) +VariantDir('build/deps', 'deps', duplicate=0) +kwargs = {} +if sys.platform == 'darwin': + kwargs['CXX'] = 'clang++' + +env = Environment(options=options, tools=['default', 'packaging'], **kwargs) +options.Save('options.cache', env) +Help(options.GenerateHelpText(env)) + +v8 = env['v8'] +uv = env['uv'] +env.Append(CPPPATH=[ + os.path.join(v8, 'include'), + v8, + os.path.join(uv, 'include'), + os.path.join('deps', 'liblmdb'), +]) +if sys.platform == 'win32': + env.Append(LIBS=['v8_base_0', 'v8_base_1', 'v8_base_2', 'v8_base_3', 'v8_libbase', 'v8_libplatform', 'v8_nosnapshot', 'icui18n', 'icuuc', 'libuv', 'advapi32', 'winmm', 'wsock32', 'ws2_32', 'psapi', 'iphlpapi']) + env.Append(CXXFLAGS=['/EHsc', '/MT', '/Zi', '/Gy']) + env.Append(CFLAGS=['/EHsc', '/MT', '/Zi', '/Gy']) + env.Append(LIBPATH=[ + os.path.join(v8, 'build/Release/lib'), + os.path.join(uv, 'Release/lib'), + ]) + env.Append(LINKFLAGS=['/RELEASE', '/OPT:REF', '/OPT:ICF']) +elif sys.platform == 'darwin': + env.Append(LIBS=['v8_base', 'v8_libbase', 'v8_libplatform', 'v8_nosnapshot', 'icui18n', 'icuuc', 'icudata', 'pthread', 'uv']) + env.Append(CXXFLAGS=['--std=c++11', '-g', '-Wall', '-stdlib=libstdc++']) + env.Append(CFLAGS=['-g', '-Wall']) + env.Append(LINKFLAGS=['-g', '-stdlib=libstdc++']) + env.Append(LIBPATH=[ + os.path.join(v8, 'xcodebuild/Release'), + os.path.join(uv, 'build/Release'), + ]) +else: + env.Append(LIBS=['v8_base', 'v8_libbase', 'v8_libplatform', 'v8_nosnapshot', 'icui18n', 'icuuc', 'icudata', 'pthread', 'uv', 'rt', 'dl']) + env.Append(CXXFLAGS=['--std=c++0x', '-g', '-Wall']) + env.Append(CFLAGS=['-g', '-Wall']) + env.Append(LINKFLAGS=['-g']) + env.Append(LIBPATH=[ + os.path.join(v8, 'out/native/obj.target/third_party/icu'), + os.path.join(v8, 'out/native/obj.target/tools/gyp'), + os.path.join(uv, 'out/Debug/obj.target'), + ]) + +ldapEnv = env.Clone() +if sys.platform == 'win32': + ldapEnv.Append(CPPPATH=['deps/win32']) +lmdb = ldapEnv.Library('build/lmdb', [ + 'build/deps/liblmdb/mdb.c', + 'build/deps/liblmdb/midl.c', +]) +env.Append(LIBS=[lmdb]) + 
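The Running instructions above describe the data/auth/permissions.js map of user names to permission lists. For reference, core/auth.js (added later in this patch) expands that map into per-user flags in getPermissionsForUser; the standalone JavaScript sketch below mirrors that lookup. The sample map and user name are illustrative only.

```
// Sketch only: expand the permissions map described in README.md into
// per-user flags, mirroring getPermissionsForUser() in core/auth.js.
function permissionsForUser(allPermissions, userName) {
	var flags = {};
	var list = allPermissions[userName] || [];
	for (var i = 0; i < list.length; i++) {
		flags[list[i]] = true;
	}
	return flags;
}

// Example using the README's sample map.
var permissions = {"cory": ["administrator"]};
permissionsForUser(permissions, "cory"); // => { administrator: true }
```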
+if sys.platform == 'linux2': + env.Append(LIBS=['crypto', 'ssl']) + +source = [s for s in Glob('build/src/*.cpp') if not os.path.basename(str(s)).startswith("SecureSocket_")] +if sys.platform == 'darwin': + env.Append(FRAMEWORKS=['CoreFoundation', 'Security']) +elif sys.platform == 'win32': + env.Append(LIBS=['Crypt32']) +env.Program('sandboxos', source) + +def listAllFiles(root): + for root, dirs, files in os.walk(root): + for f in files: + if not f.startswith('.'): + yield os.path.join(root, f) + hidden = [d for d in dirs if d.startswith('.')] + for d in hidden: + dirs.remove(d) + +if env['package'] and sys.platform == 'win32': + files = [ + 'COPYING', + 'LICENSE', + 'SConstruct', + 'sandboxos.exe', + ] + files += listAllFiles('src') + files += listAllFiles('packages') + files += listAllFiles('core') + env.Package( + NAME='SandboxOS', + target='dist/SandboxOS-win32.zip', + PACKAGETYPE='zip', + PACKAGEROOT='SandboxOS-win32', + source=files + ) diff --git a/core/agplv3-88x31.png b/core/agplv3-88x31.png new file mode 100644 index 0000000000000000000000000000000000000000..7a472a0d8f3dca824af43755da32020507ae3a69 GIT binary patch literal 1883 zcmV-h2c-CkP)Px#32;bRa{vGWCjbBfCjq_^R0se700(qQO+^RT0tgi!FnpsB!~g&Q24YJ`L;(K) z{{a7>y{D4^00!GhL_t(&-tC$RY*s}O#|sp>t0@RH$`M3?qQO!`swg240Y$7Lq)?2A z<`WPjLJ1;BOOT?}h7^jTP&q{95U5-ckN`!D+(8Zt=#A{%7I_Oky6S& zu=N)9e-Pw@s~`tVfI=S>ioXYJ0Poop+!Wj>#bklKCEVvZZ~*)cj)D{@Fv4MV*8m4> z3hr0R6`TY%l~7+hzz^Us_y>#!RXv)IBQ^zR6k#BLgGt~ckjc7hU?Z4mVK&$;`dH&2 ztN4Z!RkFFFP9IApli_ zohGF`c$j^tm6#{`%rRVdMzsH;Y@8@>G01Nf2k?2&jpZSl+#1%W7D2gRgAw48sM`jr ziG?x%tmMucX;E-GNU^Qnu097fkk?HHQ6Q89T*UfXmg~M}yIO63f#ob$;pwc)Pu33L z+APo4|PqnEwpVFtI?lr(rpU#H%Pk2zC@u^&8(&~6{ z(Q-g7n}QcO+#rL8SR~!9Q-XA1kCkj3%WrCqdlG5N_Ltb!kL9YMu7UcXJM#cpl)cNc zy@!bTRMBMSux_6Z#j0Q_Q9RU!-05KkwBdp3)i<664jI}`26v%Q zaEdj!;6QpCI$B20qOs-h`7a_90mVX=!z~fv3e96#$#P{&fT0sX`MH+%b=th%k6KLzV%R>2 z@|{^u@G!yo!rNGD=sRCa8Yv&VjdlPRb=AINIMf<|J)oVNa%+fyVJ}TWBfiyeyZQ4x z>4WxDDY}-!y{f6GO+-oItpCnyXztMWSCs3;l;si^6X?Z*NYz7PDmM!31D!%VocK>ngg4Ikxq2{blB=n^}C_SE<}6Z~05uMISf+lPCYhfcISSIZK+ez-MaIE9;o! z2-Ds(rn#hDRbKiHlaDK21APX5v<4Fv1+Vlm_-7dLO%$(D^0bIeZn2)s7pGJ9B+XF( zyw6PBS>|}J@}1XN`k>c%50%b5?c-rw3g(Xub8W@@j%FYf%WPV0mk2G6QQB~8D$;Vp z@?1$?27ZCIS%tWxl;P(<`|>SD0uQ&~E2lz+w}bhLr$7DDB@7ha;`Q-#=98H%Q~frh zC)15ielIODbYp1=y_#>z{B9S z{8X+Sm4I2!wB%4Kz6J9e>S27zw+4@cvGij4@=e1iA7kBFOvP3O<7sQV7QLhY6E0r; zz+hzaZ7^`)$}8k4o+hHDV^m}YxBEoq0}j$gX43nZ#$?KNCcYCHW-kzlmPDTk8a0MT zi=bHbKCKVU!8lN*cw6A+X?dfJa^94?wJ5eeNMyQkh6*4`=JTJMtLa7=<|^%ajHXca z^-X)AzRKAJwu1x^Ub5d;-Hg`imRVW`p5?b=+2FYP7=+)D?FREeKi4q#&hPg9{R?~S VPG}_jwHp8c002ovPDHLkV1iNPinag% literal 0 HcmV?d00001 diff --git a/core/auth.html b/core/auth.html new file mode 100644 index 00000000..5c52a2f4 --- /dev/null +++ b/core/auth.html @@ -0,0 +1,17 @@ + + + Auth + + + + + + +

Login

+
$(SESSION)
+ + diff --git a/core/auth.js b/core/auth.js new file mode 100644 index 00000000..b51b64fa --- /dev/null +++ b/core/auth.js @@ -0,0 +1,209 @@ +"use strict"; +var kAccountsFile = "data/auth/accounts.json"; + +var gAccounts = {}; +var gTokens = {}; + +var bCryptLib = require('bCrypt'); +bCrypt = new bCryptLib.bCrypt(); + +var form = require('form'); + +File.makeDirectory("data"); +File.makeDirectory("data/auth"); +File.makeDirectory("data/auth/db"); +var gDatabase = new Database("data/auth/db"); + +try { + gAccounts = JSON.parse(File.readFile(kAccountsFile)); +} catch (error) { +} + +function readSession(session) { + var result = session ? gDatabase.get("session_" + session) : null; + + if (result) { + result = JSON.parse(result); + + let kRefreshInterval = 1 * 60 * 60 * 1000; + let now = Date.now(); + if (!result.lastAccess || result.lastAccess < now - kRefreshInterval) { + result.lastAccess = now; + writeSession(session, result); + } + } + + return result; +} + +function writeSession(session, value) { + gDatabase.set("session_" + session, JSON.stringify(value)); +} + +function removeSession(session, value) { + gDatabase.remove("session_" + session); +} + +function newSession() { + var alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; + var result = ""; + for (var i = 0; i < 32; i++) { + result += alphabet.charAt(Math.floor(Math.random() * alphabet.length)); + } + return result; +} + +function verifyPassword(password, hash) { + return bCrypt.hashpw(password, hash) == hash; +} + +function hashPassword(password) { + var salt = bCrypt.gensalt(12); + return bCrypt.hashpw(password, salt); +} + +function noAdministrator() { + return !gGlobalSettings || !gGlobalSettings.permissions || !Object.keys(gGlobalSettings.permissions).some(function(name) { + return gGlobalSettings.permissions[name].indexOf("administration") != -1; + }); +} + +function makeAdministrator(name) { + if (!gGlobalSettings.permissions) { + gGlobalSettings.permissions = {}; + } + if (!gGlobalSettings.permissions[name]) { + gGlobalSettings.permissions[name] = []; + } + if (gGlobalSettings.permissions[name].indexOf("administration") == -1) { + gGlobalSettings.permissions[name].push("administration"); + } + setGlobalSettings(gGlobalSettings); +} + +function authHandler(request, response) { + var session = getCookies(request.headers).session; + if (request.uri == "/login") { + var sessionIsNew = false; + var loginError; + + var formData = form.decodeForm(request.query); + + if (request.method == "POST" || formData.submit) { + session = newSession(); + sessionIsNew = true; + formData = form.decodeForm(request.body, formData); + if (formData.submit == "Login") { + if (formData.register == "1") { + if (!gAccounts[formData.name] && + formData.password == formData.confirm) { + gAccounts[formData.name] = {password: hashPassword(formData.password)}; + writeSession(session, {name: formData.name}); + File.writeFile(kAccountsFile, JSON.stringify(gAccounts)); + if (noAdministrator()) { + makeAdministrator(formData.name); + } + } else { + loginError = "Error registering account."; + } + } else { + if (gAccounts[formData.name] && + verifyPassword(formData.password, gAccounts[formData.name].password)) { + writeSession(session, {name: formData.name}); + if (noAdministrator()) { + makeAdministrator(formData.name); + } + } else { + loginError = "Invalid username or password."; + } + } + } else { + // Proceed as Guest + writeSession(session, {name: "guest"}); + } + } + + var cookie = "session=" + session + "; path=/; 
Max-Age=604800"; + var entry = readSession(session); + if (entry && formData.return) { + response.writeHead(303, {"Location": formData.return, "Set-Cookie": cookie}); + response.end(); + } else { + var html = File.readFile("core/auth.html"); + var contents = ""; + + if (entry) { + if (sessionIsNew) { + contents += '
Welcome back, ' + entry.name + '.
\n'; + } else { + contents += '
You are already logged in, ' + entry.name + '.
\n'; + } + contents += '\n'; + } else { + contents += '
\n'; + if (loginError) { + contents += "

" + loginError + "

\n"; + } + contents += '
Halt. Who goes there?
\n' + contents += '
\n'; + contents += '
\n' + if (noAdministrator()) { + contents += '
There is currently no administrator. You will be made administrator.
\n'; + } + contents += '
\n'; + contents += '
\n'; + contents += '\n'; + contents += '
\n'; + contents += '
\n'; + contents += '
'; + contents += '
- or -
'; + contents += '
\n'; + contents += '\n'; + contents += '
\n'; + contents += '
\n'; + contents += '
'; + } + var text = html.replace("$(SESSION)", contents); + response.writeHead(200, {"Content-Type": "text/html; charset=utf-6", "Set-Cookie": cookie, "Content-Length": text.length}); + response.end(text); + } + } else if (request.uri == "/login/logout") { + removeSession(session); + response.writeHead(303, {"Set-Cookie": "session=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT", "Location": "/login" + (request.query ? "?" + request.query : "")}); + response.end(); + } else { + response.writeHead(200, {"Content-Type": "text/plain; charset=utf-8", "Connection": "close"}); + response.end("Hello, " + request.client.peerName + "."); + } +} + +function getPermissions(session) { + var permissions; + var entry = readSession(session); + if (entry) { + permissions = getPermissionsForUser(entry.name); + permissions.authenticated = entry.name !== "guest"; + } + return permissions || {}; +} + +function getPermissionsForUser(userName) { + var permissions = {}; + if (gGlobalSettings && gGlobalSettings.permissions && gGlobalSettings.permissions[userName]) { + for (var i in gGlobalSettings.permissions[userName]) { + permissions[gGlobalSettings.permissions[userName][i]] = true; + } + } + return permissions; +} + +function query(headers) { + var session = getCookies(headers).session; + var entry; + if (entry = readSession(session)) { + return {session: entry, permissions: getPermissions(session)}; + } +} + +exports.handler = authHandler; +exports.query = query; diff --git a/core/bCrypt.js b/core/bCrypt.js new file mode 100644 index 00000000..bcd8ec25 --- /dev/null +++ b/core/bCrypt.js @@ -0,0 +1,533 @@ +var isaac = require('isaac'); + +function bCrypt() { + this.GENSALT_DEFAULT_LOG2_ROUNDS = 10; + this.BCRYPT_SALT_LEN = 16; + this.BLOWFISH_NUM_ROUNDS = 16; + this.MAX_EXECUTION_TIME = 100; + this.P_orig = [0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, + 0x299f31d0, 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, + 0xbe5466cf, 0x34e90c6c, 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, + 0xb5470917, 0x9216d5d9, 0x8979fb1b]; + this.S_orig = [0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, + 0x6a267e96, 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, + 0x0801f2e2, 0x858efc16, 0x636920d8, 0x71574e69, 0xa458fea3, + 0xf4933d7e, 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee, + 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, 0xc5d1b023, + 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, + 0x55605c60, 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, + 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce, 0xa15486af, + 0x7c72e993, 0xb3ee1411, 0x636fbc2a, 0x2ba9c55d, 0x741831f6, + 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, 0x7a325381, + 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, + 0xe98575b1, 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, + 0x0f6d6ff3, 0x83f44239, 0x2e0b4482, 0xa4842004, 0x69c8f04a, + 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0, + 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, 0x6eef0b6c, + 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, + 0x3b8b5ebe, 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, + 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d, 0x37d0d724, + 0xd00a1248, 0xdb0fead3, 0x49f1c09b, 0x075372c9, 0x80991b7b, + 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, + 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, + 0x9b30952c, 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, + 0x660f2807, 0x192e4bb3, 0xc0cba857, 0x45c8740f, 0xd20b5f39, + 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279, + 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, 0x3c7516df, + 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, + 0xdf1769db, 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, + 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, + 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, 0x9a53e479, 0xb6f84565, + 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, + 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, + 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, + 0x8888b812, 0x900df01c, 0x4fad5ea0, 0x688fc31c, 0xd1cff191, + 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1, + 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, 0xb4a84fe0, + 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, + 0xfb9d35cf, 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, + 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af, 0x2464369b, + 0xf009b91e, 0x5563911d, 0x59dfa6aa, 0x78c14389, 0xd95a537f, + 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, 0x11c81968, + 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, + 0x571be91f, 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, + 0xff34052e, 0xc5855664, 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, + 0x6e85076a, 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, + 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266, 0xecaa8c71, + 0x699a17ff, 0x5664526c, 0xc2b19ee1, 0x193602a5, 0x75094c29, + 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, + 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, + 0x3ebaefc9, 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, + 0xb79c5305, 0xaa500737, 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, + 0x5716f2b8, 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff, + 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, 0xd19113f9, + 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, + 0xa4751e41, 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, + 0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf, 0x2cb81290, + 0x24977c79, 0x5679b072, 0xbcaf89af, 0xde9a771f, 0xd9930810, + 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, 0x501adde6, + 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, + 0x3215d908, 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, + 0x50940002, 0x133ae4dd, 0x71dff89e, 0x10314e55, 0x81ac77d6, + 0x5f11199b, 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509, + 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, 0x86e34570, + 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, + 0x9c10b36a, 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, + 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960, 0x5223a708, + 0xf71312b6, 0xebadfe6e, 0xeac31f66, 0xe3bc4595, 0xa67bc883, + 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, 0x65582185, + 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, + 0xeb61bd96, 
0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, + 0xd59e9e0b, 0xcbaade14, 0xeecc86bc, 0x60622ca7, 0x9cab5cab, + 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50, + 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, 0x9b540b19, + 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, + 0x7858ba99, 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, + 0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, + 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, 0x5d4a14d9, 0xe864b7e3, + 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, + 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, + 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, + 0x47848a0b, 0x5692b285, 0x095bbf00, 0xad19489d, 0x1462b174, + 0x23820e00, 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061, + 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, 0x7cde3759, + 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, + 0x800bcadc, 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, + 0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340, 0xc5c43465, + 0x713e38d8, 0x3d28f89e, 0xf16dff20, 0x153e21e7, 0x8fb03d4a, + 0xe6e39f2b, 0xdb83adf7, 0xe93d5a68, 0x948140f7, 0xf64c261c, + 0x94692934, 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068, + 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, 0x1e39f62e, + 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, + 0x31cb8504, 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, + 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, + 0xd7486900, 0x680ec0a4, 0x27a18dee, 0x4f3ffea2, 0xe887ad8c, + 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, 0xce78a399, + 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, + 0xdd5b4332, 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, + 0x454056ac, 0xba489527, 0x55533a3a, 0x20838d87, 0xfe6ba9b7, + 0xd096954b, 0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33, + 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, 0xfdf8e802, + 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, + 0x5d886e17, 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, + 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, + 0x02e1329e, 0xaf664fd1, 0xcad18115, 0x6b2395e0, 0x333e92e1, + 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, + 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, + 0x992eff74, 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, + 0x991be14c, 0xdb6e6b0d, 0xc67b5510, 0x6d672c37, 0x2765d43b, + 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b, + 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, 0xbb132f88, + 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, + 0x782ef11c, 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, + 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, + 0x0a121386, 0xd90cec6e, 0xd5abea2a, 0x64af674e, 0xda86a85f, + 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, 0x60787bf8, + 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, + 0xbde8ae24, 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, + 0xf474ef38, 0x8789bdc2, 
0x5366f9c3, 0xc8b38e74, 0xb475f255, + 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2, + 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, 0xb90bace1, + 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, + 0x1d6efe10, 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, + 0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, + 0xa70683fa, 0xa002b5c4, 0x0de6d027, 0x9af88c27, 0x773f8641, + 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, 0x006058aa, + 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, + 0x4b7c0188, 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, + 0x1ac15bb4, 0xd39eb8fc, 0xed545578, 0x08fca5b5, 0xd83d7cd3, + 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c, + 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, 0xd79a3234, + 0x92638212, 0x670efa8e, 0x406000e0, 0x3a39ce37, 0xd3faf5cf, + 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, 0xd3822740, + 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, + 0xbc946e79, 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, + 0xd5730a1d, 0x4cd04dc6, 0x2939bbdb, 0xa9ba4650, 0xac9526e8, + 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22, + 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, 0x83c061ba, + 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, + 0x77fa0a59, 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, + 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, + 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, 0x1f9f25cf, 0xadf2b89b, + 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, 0x47b0acfd, + 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, + 0x88f46dba, 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, + 0x97271aec, 0xa93a072a, 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, + 0x26dcf319, 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb, + 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, 0x4de81751, + 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, + 0x6413e680, 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, + 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, + 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, 0xdda26a7e, 0x3a59ff45, + 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, 0x8d6612ae, + 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, + 0x4eb4e2cc, 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, + 0x06b89fb4, 0xce6ea048, 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, + 0x277227f8, 0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd, + 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, 0xe01cc87e, + 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, + 0xe0b12b4f, 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, + 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525, 0xfae59361, + 0xceb69ceb, 0xc2a86459, 0x12baa8d1, 0xb6c1075e, 0xe3056a0c, + 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, + 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, + 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, + 0xce6279cf, 0xcd3e7e6f, 0x1618b166, 0xfd2c1d05, 0x848fd2c5, + 0xf6fb2299, 0xf523f357, 0xa6327623, 
0x93a83531, 0x56cccd02, + 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, 0xde966292, + 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, + 0x35bdd2f6, 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, + 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0, 0xba38209c, + 0xf746ce76, 0x77afa1c5, 0x20756060, 0x85cbfe4e, 0x8ae88dd8, + 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, + 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6]; + this.bf_crypt_ciphertext = [0x4f727068, 0x65616e42, 0x65686f6c, 0x64657253, + 0x63727944, 0x6f756274]; + this.base64_code = ['.', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', + 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', + 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', + 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', + '9']; + this.index_64 = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, -1, -1, -1, -1, -1, -1, -1, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, -1, -1, -1, -1, -1, -1, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, -1, -1, -1, -1, -1]; + this.P; + this.S; + this.lr; + this.offp; +}; +bCrypt.prototype.getByte = function(c) { + var ret = 0; + try { + var b = c.charCodeAt(0); + } catch (err) { + b = c; + } + if (b > 127) { + return -128 + (b % 128); + } else { + return b; + } +}; +bCrypt.prototype.encode_base64 = function(d, len) { + var off = 0; + var rs = []; + var c1; + var c2; + if (len <= 0 || len > d.length) + throw "Invalid len"; + while (off < len) { + c1 = d[off++] & 0xff; + rs.push(this.base64_code[(c1 >> 2) & 0x3f]); + c1 = (c1 & 0x03) << 4; + if (off >= len) { + rs.push(this.base64_code[c1 & 0x3f]); + break; + } + c2 = d[off++] & 0xff; + c1 |= (c2 >> 4) & 0x0f; + rs.push(this.base64_code[c1 & 0x3f]); + c1 = (c2 & 0x0f) << 2; + if (off >= len) { + rs.push(this.base64_code[c1 & 0x3f]); + break; + } + c2 = d[off++] & 0xff; + c1 |= (c2 >> 6) & 0x03; + rs.push(this.base64_code[c1 & 0x3f]); + rs.push(this.base64_code[c2 & 0x3f]); + } + return rs.join(''); +}; +bCrypt.prototype.char64 = function(x) { + var code = x.charCodeAt(0); + if (code < 0 || code > this.index_64.length) { + return -1; + } + return this.index_64[code]; +}; +bCrypt.prototype.decode_base64 = function(s, maxolen) { + var off = 0; + var slen = s.length; + var olen = 0; + var rs = []; + var c1, c2, c3, c4, o; + if (maxolen <= 0) + throw "Invalid maxolen"; + while (off < slen - 1 && olen < maxolen) { + c1 = this.char64(s.charAt(off++)); + c2 = this.char64(s.charAt(off++)); + if (c1 == -1 || c2 == -1) { + break; + } + o = this.getByte(c1 << 2); + o |= (c2 & 0x30) >> 4; + rs.push(String.fromCharCode(o)); + if (++olen >= maxolen || off >= slen) { + break; + } + c3 = this.char64(s.charAt(off++)); + if (c3 == -1) { + break; + } + o = this.getByte((c2 & 0x0f) << 4); + o |= (c3 & 0x3c) >> 2; + rs.push(String.fromCharCode(o)); + if (++olen >= maxolen || off >= slen) { + break; + } + c4 = this.char64(s.charAt(off++)); + o = this.getByte((c3 & 0x03) << 6); + o |= c4; + rs.push(String.fromCharCode(o)); + ++olen; + } + var ret = 
[]; + for (off = 0; off < olen; off++) { + ret.push(this.getByte(rs[off])); + } + return ret; +}; +bCrypt.prototype.encipher = function(lr, off) { + var i; + var n; + var l = lr[off]; + var r = lr[off + 1]; + + l ^= this.P[0]; + for (i = 0; i <= this.BLOWFISH_NUM_ROUNDS - 2;) { + // Feistel substitution on left word + n = this.S[(l >> 24) & 0xff]; + n += this.S[0x100 | ((l >> 16) & 0xff)]; + n ^= this.S[0x200 | ((l >> 8) & 0xff)]; + n += this.S[0x300 | (l & 0xff)]; + r ^= n ^ this.P[++i]; + + // Feistel substitution on right word + n = this.S[(r >> 24) & 0xff]; + n += this.S[0x100 | ((r >> 16) & 0xff)]; + n ^= this.S[0x200 | ((r >> 8) & 0xff)]; + n += this.S[0x300 | (r & 0xff)]; + l ^= n ^ this.P[++i]; + } + lr[off] = r ^ this.P[this.BLOWFISH_NUM_ROUNDS + 1]; + lr[off + 1] = l; +}; +bCrypt.prototype.streamtoword = function(data, offp) { + var i; + var word = 0; + var off = offp; + for (i = 0; i < 4; i++) { + word = (word << 8) | (data[off] & 0xff); + off = (off + 1) % data.length; + } + this.offp = off; + return word; +}; +bCrypt.prototype.init_key = function() { + this.P = this.P_orig.slice(); + this.S = this.S_orig.slice(); +}; +bCrypt.prototype.key = function(key) { + var i; + this.offp = 0; + var lr = new Array(0x00000000, 0x00000000); + var plen = this.P.length; + var slen = this.S.length; + + for (i = 0; i < plen; i++) { + this.P[i] = this.P[i] ^ this.streamtoword(key, this.offp); + } + for (i = 0; i < plen; i += 2) { + this.encipher(lr, 0); + this.P[i] = lr[0]; + this.P[i + 1] = lr[1]; + } + + for (i = 0; i < slen; i += 2) { + this.encipher(lr, 0); + this.S[i] = lr[0]; + this.S[i + 1] = lr[1]; + } +}; +bCrypt.prototype.ekskey = function(data, key) { + var i; + this.offp = 0; + var lr = new Array(0x00000000, 0x00000000); + var plen = this.P.length; + var slen = this.S.length; + + for (i = 0; i < plen; i++) + this.P[i] = this.P[i] ^ this.streamtoword(key, this.offp); + this.offp = 0; + for (i = 0; i < plen; i += 2) { + lr[0] ^= this.streamtoword(data, this.offp); + lr[1] ^= this.streamtoword(data, this.offp); + this.encipher(lr, 0); + this.P[i] = lr[0]; + this.P[i + 1] = lr[1]; + } + for (i = 0; i < slen; i += 2) { + lr[0] ^= this.streamtoword(data, this.offp); + lr[1] ^= this.streamtoword(data, this.offp); + this.encipher(lr, 0); + this.S[i] = lr[0]; + this.S[i + 1] = lr[1]; + } +}; + +bCrypt.prototype.crypt_raw = function(password, salt, log_rounds) { + var rounds; + var j; + var cdata = this.bf_crypt_ciphertext.slice(); + var clen = cdata.length; + var one_percent; + + if (log_rounds < 4 || log_rounds > 31) + throw "Bad number of rounds"; + if (salt.length != this.BCRYPT_SALT_LEN) + throw "Bad salt length"; + + rounds = 1 << log_rounds; + one_percent = Math.floor(rounds / 100) + 1; + this.init_key(); + this.ekskey(salt, password); + + var obj = this; + var i = 0; + for (; i < rounds;) { + i = i + 1; + obj.key(password); + obj.key(salt); + } + + for (i = 0; i < 64; i++) { + for (j = 0; j < (clen >> 1); j++) { + obj.encipher(cdata, j << 1); + } + } + var ret = []; + for (i = 0; i < clen; i++) { + ret.push(obj.getByte((cdata[i] >> 24) & 0xff)); + ret.push(obj.getByte((cdata[i] >> 16) & 0xff)); + ret.push(obj.getByte((cdata[i] >> 8) & 0xff)); + ret.push(obj.getByte(cdata[i] & 0xff)); + } + return ret; +} + +bCrypt.prototype.hashpw = function(password, salt) { + var real_salt; + var passwordb = []; + var saltb = []; + var hashed = []; + var minor = String.fromCharCode(0); + var rounds = 0; + var off = 0; + + if (!progress){ + var progress = function() {}; + } + + if (salt.charAt(0) != 
'$' || salt.charAt(1) != '2') + throw "Invalid salt version"; + if (salt.charAt(2) == '$') + off = 3; + else { + minor = salt.charAt(2); + if (minor != 'a' || salt.charAt(3) != '$') + throw "Invalid salt revision"; + off = 4; + } + + // Extract number of rounds + if (salt.charAt(off + 2) > '$') + throw "Missing salt rounds"; + var r1 = parseInt(salt.substring(off, off + 1)) * 10; + var r2 = parseInt(salt.substring(off + 1, off + 2)); + rounds = r1 + r2; + real_salt = salt.substring(off + 3, off + 25); + password = password + (minor >= 'a' ? "\000" : ""); + for (var r = 0; r < password.length; r++) { + passwordb.push(this.getByte(password.charAt(r))); + } + saltb = this.decode_base64(real_salt, this.BCRYPT_SALT_LEN); + var obj = this; + var hashed = this.crypt_raw(passwordb, saltb, rounds); + var rs = []; + rs.push("$2"); + if (minor >= 'a') + rs.push(minor); + rs.push("$"); + if (rounds < 10) + rs.push("0"); + rs.push(rounds.toString()); + rs.push("$"); + rs.push(obj.encode_base64(saltb, saltb.length)); + rs.push(obj.encode_base64(hashed, obj.bf_crypt_ciphertext.length * 4 - 1)); + return rs.join(''); +}; + +bCrypt.prototype.gensalt = function(rounds) { + var iteration_count = rounds; + if (iteration_count < 4 || iteration_count > 31) { + iteration_count = this.GENSALT_DEFAULT_LOG2_ROUNDS; + } + var output = []; + output.push("$2a$"); + if (iteration_count < 10) + output.push("0"); + output.push(iteration_count.toString()); + output.push('$'); + var s1 = []; + for (var r = 0; r < this.BCRYPT_SALT_LEN; r++){ + s1.push(Math.abs(isaac.rand())); + } + output.push(this.encode_base64(s1,this.BCRYPT_SALT_LEN)) + return output.join(''); +}; + +bCrypt.prototype.ready = function(){ + return true; +}; + +exports.bCrypt = bCrypt; diff --git a/core/client.js b/core/client.js new file mode 100644 index 00000000..ad5098ef --- /dev/null +++ b/core/client.js @@ -0,0 +1,403 @@ +"use strict"; + +var gHaveIndex = -1; +var gSessionId; +var gCredentials; +var gErrorCount = 0; + +function enter(event) { + if (event.keyCode == 13) { + send(); + event.preventDefault(); + } else if (event.keyCode == 186 + && !event.metaKey + && !event.altKey + && !event.ctrlKey + && !event.shiftKey) { + var value = $("#input").val(); + if (value && value[value.length - 1] == '\\') { + $("#input").val(value.substring(0, value.length - 1) + ";"); + event.preventDefault(); + } else { + storeTarget(value); + $("#input").val(""); + event.preventDefault(); + } + } +} + +function url() { + var hash = window.location.href.indexOf('#'); + var question = window.location.href.indexOf('?'); + var end = hash != -1 ? hash : question; + return end != -1 ? 
window.location.href.substring(0, end) : window.location.href; +} + +function storeTarget(target) { + $("#target").text(target || ""); +} + +function split(container, children) { + if (container) { + while (container.firstChild) { + container.removeChild(container.firstChild); + } + } + if (children) { + for (var i = 0; i < children.length; i++) { + if (children[i].name) { + var node = document.createElement("div"); + node.setAttribute("id", "terminal_" + children[i].name); + var grow = children[i].grow || "1"; + var shrink = children[i].shrink || "1"; + var basis = children[i].basis || "auto"; + node.setAttribute("style", "flex: " + grow + " " + shrink + " " + basis); + node.setAttribute("class", "terminal"); + container.appendChild(node); + } else if (children[i].type) { + node = document.createElement("div"); + if (children[i].type == "horizontal") { + node.setAttribute("class", "hbox"); + } else if (children[i].type == "vertical") { + node.setAttribute("class", "vbox"); + } + container.appendChild(node); + split(node, children[i].children); + } + } + } +} + +function receive() { + $.ajax({ + url: url() + "/receive?sessionId=" + gSessionId, + method: "POST", + data: gHaveIndex.toString(), + dataType: "json", + }).then(function(data) { + for (var i in data.lines) { + var line = data.lines[i]; + + var target = document.getElementsByClassName("terminal")[0].id; + if (line && line.terminal) { + if (document.getElementById("terminal_" + line.terminal)) { + target = "terminal_" + line.terminal; + } + line = line.value; + } + if (line && line.action == "ping") { + // PONG + } else if (line && line.action == "session") { + gSessionId = line.session.sessionId; + gCredentials = line.session.credentials; + updateLogin(); + } else if (line && line[0] && line[0].action == "ready") { + if (window.location.hash) { + send({event: "hashChange", hash: window.location.hash}); + } + } else if (line && line[0] && line[0].action == "notify") { + new Notification(line[0].title, line[0].options); + } else if (line && line[0] && line[0].action == "title") { + window.document.title = line[0].value; + } else if (line && line[0] && line[0].action == "prompt") { + var prompt = document.getElementById("prompt"); + while (prompt.firstChild) { + prompt.removeChild(prompt.firstChild); + } + prompt.appendChild(document.createTextNode(line[0].value)); + } else if (line && line[0] && line[0].action == "password") { + var prompt = document.getElementById("input"); + prompt.setAttribute("type", line[0].value ? 
"password" : "text"); + } else if (line && line[0] && line[0].action == "hash") { + window.location.hash = line[0].value; + } else if (line && line[0] && line[0].action == "update") { + document.getElementById("update").setAttribute("Style", "display: inline"); + } else if (line && line[0] && line[0].action == "split") { + split(document.getElementById("terminals"), line[0].options); + } else if (line && line[0] && line[0].action == "postMessageToIframe") { + var iframe = document.getElementById("iframe_" + line[0].name); + if (iframe) { + iframe.contentWindow.postMessage(line[0].message, "*"); + } + } else { + print(document.getElementById(target), line); + } + } + if ("index" in data) { + gHaveIndex = data.index; + } + receive(); + if (gErrorCount) { + document.getElementById("status").setAttribute("style", "display: none"); + } + gErrorCount = 0; + }).fail(function(xhr, message, error) { + var node = document.getElementById("status"); + while (node.firstChild) { + node.removeChild(node.firstChild); + } + node.appendChild(document.createTextNode("ERROR: " + JSON.stringify([message, error]))); + node.setAttribute("style", "display: inline; color: #dc322f"); + if (gErrorCount < 60) { + setTimeout(receive, 1000); + } else { + setTimeout(receive, 60 * 1000); + } + gErrorCount++; + }); +} + +function autoNewLine(terminal) { + terminal.appendChild(document.createElement("br")); +} + +function print(terminal, data) { + autoNewLine(terminal); + printStructured(terminal, data); + autoScroll(terminal); +} + +function printStructured(container, data) { + if (typeof data == "string") { + container.appendChild(document.createTextNode(data)); + } else if (data && data[0] !== undefined) { + for (var i in data) { + printStructured(container, data[i]); + } + } else if (data && data.action == "clear") { + while (container.firstChild) { + container.removeChild(container.firstChild); + } + } else if (data) { + var node; + if (data.href) { + node = document.createElement("a"); + node.setAttribute("href", data.href); + node.setAttribute("target", "_blank"); + } else if (data.iframe) { + node = document.createElement("iframe"); + node.setAttribute("srcdoc", data.iframe); + node.setAttribute("sandbox", "allow-forms allow-scripts"); + node.setAttribute("width", data.width || 320); + node.setAttribute("height", data.height || 240); + if (data.name) { + node.setAttribute("id", "iframe_" + data.name); + } + } else if (data.image) { + node = document.createElement("img"); + node.setAttribute("src", data.image); + } else { + node = document.createElement("span"); + } + if (data.style) { + node.setAttribute("style", data.style); + } + if (data.class) { + node.setAttribute("class", data.class); + } + var value = data.value || data.href || data.command || ""; + if (!value && data.message && data.stackTrace) { + printStructured(node, data.message); + node.appendChild(document.createElement("br")); + printStructured(node, data.fileName + ":" + data.lineNumber + ":"); + node.appendChild(document.createElement("br")); + if (data.stackTrace.length) { + for (var i = 0; i < data.stackTrace.length; i++) { + printStructured(node, data.stackTrace[i]); + node.appendChild(document.createElement("br")); + } + } else { + printStructured(node, data.sourceLine); + } + } else if (value === undefined) { + printStructured(node, JSON.stringify(value)); + } else { + printStructured(node, value); + } + if (data.command) { + node.dataset.command = data.command; + node.onclick = commandClick; + node.setAttribute("class", "command"); + } + 
container.appendChild(node); + } else { + printStructured(container, JSON.stringify(data)); + } +} + +function commandClick() { + send(this.dataset.command); + $("#input").focus(); +} + +function autoScroll(terminal) { + terminal.scrollTop = terminal.scrollHeight - terminal.clientHeight; +} + +function send(command) { + var value = command; + if (!command) { + var target = $("#target").text(); + var prefix = target ? target + " " : ""; + value = prefix + $("#input").val(); + $("#input").val(""); + } + $.ajax({ + url: url() + "/send?sessionId=" + gSessionId, + method: "POST", + data: JSON.stringify(value), + dataType: "text", + }).fail(function(xhr, status, error) { + var node = document.getElementById("status"); + while (node.firstChild) { + node.removeChild(node.firstChild); + } + node.appendChild(document.createTextNode("Send failed: " + JSON.stringify([status, error]))); + node.setAttribute("style", "display: inline; color: #dc322f"); + }); +} + +function updateLogin() { + var login = document.getElementById("login"); + while (login.firstChild) { + login.removeChild(login.firstChild); + } + + var a = document.createElement("a"); + if (gCredentials && gCredentials.session) { + a.appendChild(document.createTextNode("logout " + gCredentials.session.name)); + a.setAttribute("href", "/login/logout?return=" + encodeURIComponent(url())); + } else if (window.location.href.indexOf("?guest=1") != -1) { + window.location.href = "/login?submit=Proceed+as+Guest&return=" + encodeURIComponent(url()); + } else { + window.location.href = "/login?return=" + encodeURIComponent(url()); + } + login.appendChild(a); +} + +var gOriginalInput; +function dragHover(event) { + event.stopPropagation(); + event.preventDefault(); + if (event.type == "dragover") { + if (!$("#input").hasClass("drop")) { + $("#input").addClass("drop"); + gOriginalInput = $("#input").val(); + $("#input").val("drop file to upload"); + } + } else { + $("#input").removeClass("drop"); + $("#input").val(gOriginalInput); + } +} + +function fixImage(sourceData, maxWidth, maxHeight, callback) { + var result = sourceData; + var image = new Image(); + image.crossOrigin = "anonymous"; + image.referrerPolicy = "no-referrer"; + image.onload = function() { + if (image.width > maxWidth || image.height > maxHeight) { + var downScale = Math.min(maxWidth / image.width, maxHeight / image.height); + var canvas = document.createElement("canvas"); + canvas.width = image.width * downScale; + canvas.height = image.height * downScale; + var context = canvas.getContext("2d"); + context.clearRect(0, 0, canvas.width, canvas.height); + image.width = canvas.width; + image.height = canvas.height; + context.drawImage(image, 0, 0, image.width, image.height); + result = canvas.toDataURL(); + } + callback(result); + }; + image.src = sourceData; +} + +function sendImage(image) { + fixImage(image, 320, 240, function(result) { + send({image: result}); + }); +} + +function fileDropRead(event) { + sendImage(event.target.result); +} + +function fileDrop(event) { + dragHover(event); + + var done = false; + if (!done) { + var files = event.target.files || event.dataTransfer.files; + for (var i = 0; i < files.length; i++) { + var file = files[i]; + if (file.type.substring(0, "image/".length) == "image/") { + var reader = new FileReader(); + reader.onloadend = fileDropRead; + reader.readAsDataURL(file); + done = true; + } + } + } + + if (!done) { + var html = event.dataTransfer.getData("text/html"); + var match = / process.lastActive) { + // We lost them. 
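+ // Reclaim the apparently abandoned session: kill() fires the task's onExit handler, which closes the process's open connections and removes its entry from gProcesses.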
+ process.task.kill(); + again = false; + } else { + // Idle. Ping them. + process.terminal.ping(); + process.lastPing = now; + } + + if (again) { + setTimeout(ping.bind(process), process.timeout); + } +} + +function postMessageInternal(from, to, message) { + return invoke(to.eventHandlers['onMessage'], [getUser(from, from), message]); +} + +function getService(service) { + var process = this; + var serviceProcess = getServiceProcess(process.packageOwner, process.packageName, service); + return serviceProcess.ready.then(function() { + return { + postMessage: postMessageInternal.bind(process, process, serviceProcess), + } + }); +} + +function getSessionProcess(packageOwner, packageName, session, options) { + var actualOptions = {terminal: true, timeout: kPingInterval}; + if (options) { + for (var i in options) { + actualOptions[i] = options[i]; + } + } + return getProcess(packageOwner, packageName, 'session_' + session, actualOptions); +} + +function getServiceProcess(packageOwner, packageName, service, options) { + return getProcess(packageOwner, packageName, 'service_' + packageOwner + '_' + packageName + '_' + service, options || {}); +} + +function badName(name) { + var bad = false; + if (name) { + for (var i = 0; i < name.length; i++) { + if ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_".indexOf(name.charAt(i)) == -1) { + bad = true; + break; + } + } + } + return bad; +} + +function getManifest(fileName) { + var manifest = []; + var lines = File.readFile(fileName).split("\n").map(x => x.trimRight()); + for (var i = 0; i < lines.length; i++) { + if (lines[i].substring(0, 4) == "//! ") { + manifest.push(lines[i].substring(4)); + } + } + return manifest.length ? JSON.parse(manifest.join("\n")) : null; +} + +function getProcess(packageOwner, packageName, key, options) { + var process = gProcesses[key]; + if (!process + && !(options && "create" in options && !options.create) + && !badName(packageOwner) + && !badName(packageName)) { + try { + print("Creating task for " + packageName + " " + key); + var fileName = "packages/" + packageOwner + "/" + packageName + "/" + packageName + ".js"; + var manifest = getManifest(fileName); + process = {}; + process.key = key; + process.index = gProcessIndex++; + process.userName = options.userName || ('user' + process.index); + process.credentials = options.credentials || {}; + process.task = new Task(); + process.eventHandlers = {}; + process.packageOwner = packageOwner; + process.packageName = packageName; + if (options.terminal) { + process.terminal = new Terminal(); + } + process.database = null; + process.lastActive = Date.now(); + process.lastPing = null; + process.timeout = options.timeout; + process.connections = []; + var resolveReady; + var rejectReady; + process.ready = new Promise(function(resolve, reject) { + resolveReady = resolve; + rejectReady = reject; + }); + gProcesses[key] = process; + process.task.onExit = function(exitCode, terminationSignal) { + broadcastEvent('onSessionEnd', [getUser(process, process)]); + if (process.terminal) { + if (terminationSignal) { + process.terminal.print("Process terminated with signal " + terminationSignal + "."); + } else { + process.terminal.print("Process ended with exit code " + exitCode + "."); + } + } + for (let i = 0; i < process.connections.length; i++) { + process.connections[i].close(); + } + process.connections.length = 0; + delete gProcesses[key]; + }; + if (process.timeout > 0) { + setTimeout(ping.bind(process), process.timeout); + } + var imports = { + 'core': { + 
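+ // Functions exposed to the sandboxed package under 'core'; each is bound to this process so calls from package code are implicitly scoped to the session that owns them.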
'broadcast': broadcast.bind(process), + 'getService': getService.bind(process), + 'getPackages': getPackages.bind(process), + 'getUsers': getUsers.bind(process), + 'register': function(eventName, handler) { + if (!process.eventHandlers[eventName]) { + process.eventHandlers[eventName] = []; + } + process.eventHandlers[eventName].push(handler); + }, + 'getUser': getUser.bind(null, process, process), + 'user': getUser(process, process), + }, + 'database': { + 'get': databaseGet.bind(process), + 'set': databaseSet.bind(process), + 'remove': databaseRemove.bind(process), + 'getAll': databaseGetAll.bind(process), + }, + }; + if (options.terminal) { + imports.terminal = { + 'print': process.terminal.print.bind(process.terminal), + 'clear': process.terminal.clear.bind(process.terminal), + 'readLine': process.terminal.readLine.bind(process.terminal), + 'notify': process.terminal.notify.bind(process.terminal), + 'setEcho': process.terminal.setEcho.bind(process.terminal), + 'setTitle': process.terminal.setTitle.bind(process.terminal), + 'setPrompt': process.terminal.setPrompt.bind(process.terminal), + 'setPassword': process.terminal.setPassword.bind(process.terminal), + 'setHash': process.terminal.setHash.bind(process.terminal), + 'split': process.terminal.split.bind(process.terminal), + 'select': process.terminal.select.bind(process.terminal), + 'postMessageToIframe': process.terminal.postMessageToIframe.bind(process.terminal), + }; + } + if (manifest + && manifest.permissions + && manifest.permissions.indexOf("administration") != -1) { + if (getPermissionsForUser(packageOwner).administration) { + imports.administration = { + 'setGlobalSettings': setGlobalSettings.bind(process), + 'getGlobalSettings': getGlobalSettings.bind(process), + 'getStatistics': function() { return statistics; }, + }; + } else { + throw new Error(packageOwner + " does not have right to permission 'administration'."); + } + } + if (manifest + && manifest.permissions + && manifest.permissions.indexOf("network") != -1) { + if (getPermissionsForUser(packageOwner).network) { + imports.network = { + 'newConnection': newConnection.bind(process), + }; + } else { + throw new Error(packageOwner + " does not have right to permission 'network'."); + } + } + process.task.setImports(imports); + print("Activating task"); + process.task.activate(); + print("Executing task"); + process.task.execute(fileName).then(function() { + print("Task ready"); + broadcastEvent('onSessionBegin', [getUser(process, process)]); + resolveReady(process); + }).catch(function(error) { + printError(process.terminal, error); + rejectReady(); + }); + } catch (error) { + printError(process.terminal, error); + rejectReady(); + } + } + return process; +} + +function updateProcesses(packageOwner, packageName) { + for (var i in gProcesses) { + var process = gProcesses[i]; + if (process.packageOwner == packageOwner + && process.packageName == packageName) { + if (process.terminal) { + process.terminal.notifyUpdate(); + } else { + process.task.kill(); + } + } + } +} + +function makeDirectoryForFile(fileName) { + var parts = fileName.split("/"); + var path = ""; + for (var i = 0; i < parts.length - 1; i++) { + path += parts[i]; + File.makeDirectory(path); + path += "/"; + } +} + +function getGlobalSettings() { + return gGlobalSettings; +} + +function setGlobalSettings(settings) { + makeDirectoryForFile(kGlobalSettingsFile); + if (!File.writeFile(kGlobalSettingsFile, JSON.stringify(settings))) { + gGlobalSettings = settings; + } else { + throw new Error("Unable to save 
settings."); + } +} + +try { + gGlobalSettings = JSON.parse(File.readFile(kGlobalSettingsFile)); +} catch (error) { + print("Error loading settings from " + kGlobalSettingsFile + ": " + error); +} + +var kIgnore = ["/favicon.ico"]; + +var auth = require("auth"); +var httpd = require("httpd"); +httpd.all("/login", auth.handler); +httpd.all("", function(request, response) { + var match; + if (request.uri === "/" || request.uri === "") { + response.writeHead(303, {"Location": gGlobalSettings.index, "Content-Length": "0"}); + return response.end(); + } else if (match = /^\/terminal(\/.*)/.exec(request.uri)) { + return terminal.handler(request, response, null, null, match[1]); + } else if (match = /^\/\~([^\/]+)\/([^\/]+)(.*)/.exec(request.uri)) { + return terminal.handler(request, response, match[1], match[2], match[3]); + } else { + var data = "File not found."; + response.writeHead(404, {"Content-Type": "text/plain; charset=utf-8", "Content-Length": data.length.toString()}); + return response.end(data); + } +}); diff --git a/core/edit.html b/core/edit.html new file mode 100644 index 00000000..8b4879e5 --- /dev/null +++ b/core/edit.html @@ -0,0 +1,28 @@ + + + Web Terminal + + + + + + + + + + +
+
+ + + + + + +
+ + + +
+ + diff --git a/core/editor.js b/core/editor.js new file mode 100644 index 00000000..8bd0dfa5 --- /dev/null +++ b/core/editor.js @@ -0,0 +1,92 @@ +var gBackup; +var gEditor; + +$(document).ready(function() { + gEditor = CodeMirror.fromTextArea(document.getElementById("editor"), { + 'theme': 'base16-dark', + 'lineNumbers': true, + 'tabSize': 4, + 'indentUnit': 4, + 'indentWithTabs': true, + 'showTrailingSpace': true, + }); + gBackup = gEditor.getValue(); +}); + +function explodePath() { + return /^\/~([^\/]+)\/([^\/]+)(.*)/.exec(window.location.pathname); +} + +function packageOwner() { + return explodePath()[1]; +} + +function packageName() { + return explodePath()[2]; +} + +function back(uri) { + if (uri) { + window.location.pathname = uri; + } else { + window.location.pathname = "/~" + packageOwner() + "/" + packageName(); + } +} + +function save(newName) { + document.getElementById("save").disabled = true; + document.getElementById("saveAs").disabled = true; + + var contents = gEditor.getValue(); + var run = document.getElementById("run").checked; + + return $.ajax({ + type: "POST", + url: newName ? "../" + newName + "/save" : "save", + data: contents, + dataType: "text", + }).done(function(uri) { + gBackup = contents; + if (run) { + back(uri); + } + }).fail(function(xhr, status, error) { + alert("Unable to save: " + xhr.responseText); + }).always(function() { + document.getElementById("save").disabled = false; + document.getElementById("saveAs").disabled = false; + }); +} + +function saveAs() { + var newName = prompt("Save as:", packageName()); + if (newName) { + save(newName); + } +} + +function revert() { + gEditor.setValue(gBackup); +} + +function addLicense() { + var contents = "/*\n" + + "\n" + + "Copyright (C) \n".replace("", new Date().getFullYear()) + + "\n" + + "This program is free software: you can redistribute it and/or modify\n" + + "it under the terms of the GNU Affero General Public License as published by\n" + + "the Free Software Foundation, either version 3 of the License, or\n" + + "(at your option) any later version.\n" + + "\n" + + "This program is distributed in the hope that it will be useful,\n" + + "but WITHOUT ANY WARRANTY; without even the implied warranty of\n" + + "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n" + + "GNU Affero General Public License for more details.\n" + + "\n" + + "You should have received a copy of the GNU Affero General Public License\n" + + "along with this program. 
If not, see .\n" + + "*/\n\n" + + gEditor.getValue(); + gEditor.setValue(contents); +} diff --git a/core/favicon.png b/core/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..30cb532b91f813d9f131ddd3bd9855110e06baea GIT binary patch literal 320 zcmeAS@N?(olHy`uVBq!ia0vp^0wB!61|;P_|4#%`Y)RhkE)4%caKYZ?lYt_f1s;*b z3=G`DAk4@xYmNj^kiEpy*OmPNqdX%Y-}K_GPk};`C9V-A&iT2ysd*&~&PAz-C8;S2 z<(VZJ3hti10pX2&;y^_!JY5_^EKV;C+AVrmLEv&=r4ZXEy>~Bf9zDF{i%N+XV@om5 zL!sBUwrAX%tR*XyA21$DnB)BEVZMpvzu!#>aVhO;iVPE8p38c~aNu}zv(O2a6GiI5 z_sZ?$Y_`kVt()_8VTWG9^KHxx8I#klwS=zOAKLSFO~3Z0Oo7-#t|8AheOa8izR`+t zuUgxNo2&QUvAcdfuYS`ktJD9Fv1o4RWY8>rEX?+iSL%m#+>Vl>{4B`^b)cIWJYD@< J);T3K0RT?wcas1B literal 0 HcmV?d00001 diff --git a/core/form.js b/core/form.js new file mode 100644 index 00000000..4cba9bec --- /dev/null +++ b/core/form.js @@ -0,0 +1,33 @@ +function decode(encoded) { + var result = ""; + for (var i = 0; i < encoded.length; i++) { + var c = encoded[i]; + if (c == "+") { + result += " "; + } else if (c == "%") { + result += String.fromCharCode(parseInt(encoded.slice(i + 1, i + 3), 16)); + i += 2; + } else { + result += c; + } + } + return result; +} + +function decodeForm(encoded, initial) { + var result = initial || {}; + if (encoded) { + encoded = encoded.trim(); + var items = encoded.split('&'); + for (var i = 0; i < items.length; i++) { + var item = items[i]; + var equals = item.indexOf('='); + var key = decode(item.slice(0, equals)); + var value = decode(item.slice(equals + 1)); + result[key] = value; + } + } + return result; +} + +exports.decodeForm = decodeForm; diff --git a/core/httpd.js b/core/httpd.js new file mode 100644 index 00000000..39fd8077 --- /dev/null +++ b/core/httpd.js @@ -0,0 +1,286 @@ +var gHandlers = []; + +function logError(error) { + print("ERROR " + error); +} + +function addHandler(handler) { + var added = false; + for (var i in gHandlers) { + if (gHandlers[i].path == handler.path) { + gHandlers[i] = handler; + added = true; + break; + } + } + if (!added) { + gHandlers.push(handler); + added = true; + } +} + +function all(prefix, handler) { + addHandler({ + owner: this, + path: prefix, + invoke: handler, + }); +} + +function Request(method, uri, version, headers, body, client) { + this.method = method; + var index = uri.indexOf("?"); + if (index != -1) { + this.uri = uri.slice(0, index); + this.query = uri.slice(index + 1); + } else { + this.uri = uri; + this.query = undefined; + } + this.version = version; + this.headers = headers; + this.client = {peerName: client.peerName}; + this.body = body; + return this; +} + +function findHandler(request) { + var matchedHandler = null; + for (var name in gHandlers) { + var handler = gHandlers[name]; + if (request.uri == handler.path || request.uri.slice(0, handler.path.length + 1) == handler.path + '/') { + matchedHandler = handler; + break; + } + } + return matchedHandler; +} + +function Response(request, client) { + var kStatusText = { + 200: 'OK', + 303: 'See other', + 404: 'File not found', + 500: 'Internal server error', + }; + var _started = false; + var _finished = false; + var _keepAlive = false; + var _chunked = false; + return { + writeHead: function(status) { + if (_started) { + throw new Error("Response.writeHead called multiple times."); + } + var reason; + var headers; + if (arguments.length == 3) { + reason = arguments[1]; + headers = arguments[2]; + } else { + reason = kStatusText[status]; + headers = arguments[1]; + } + var lowerHeaders = {}; + var requestVersion = 
request.version.split("/")[1].split("."); + var responseVersion = (requestVersion[0] >= 1 && requestVersion[1] >= 1) ? "1.1" : "1.0"; + var headerString = "HTTP/" + responseVersion + " " + status + " " + reason + "\r\n"; + for (var i in headers) { + headerString += i + ": " + headers[i] + "\r\n"; + lowerHeaders[i.toLowerCase()] = headers[i]; + } + if ("connection" in lowerHeaders) { + _keepAlive = lowerHeaders["connection"].toLowerCase() == "keep-alive"; + } else { + _keepAlive = ((request.version == "HTTP/1.0" && ("connection" in lowerHeaders && lowerHeaders["connection"].toLowerCase() == "keep-alive")) || + (request.version == "HTTP/1.1" && (!("connection" in lowerHeaders) || lowerHeaders["connection"].toLowerCase() != "close"))); + headerString += "Connection: " + (_keepAlive ? "keep-alive" : "close") + "\r\n"; + } + _chunked = _keepAlive && !("content-length" in lowerHeaders); + if (_chunked) { + headerString += "Transfer-Encoding: chunked\r\n"; + } + headerString += "\r\n"; + _started = true; + client.write(headerString); + }, + end: function(data) { + if (_finished) { + throw new Error("Response.end called multiple times."); + } + if (data) { + if (_chunked) { + client.write(data.length.toString(16) + "\r\n" + data + "\r\n" + "0\r\n\r\n"); + } else { + client.write(data); + } + } else if (_chunked) { + client.write("0\r\n\r\n"); + } + _finished = true; + if (!_keepAlive) { + client.shutdown(); + } + }, + reportError: function(error) { + if (!_started) { + client.write("HTTP/1.1 500 Internal Server Error\r\nContent-Type: text/plain; charset=utf-8\r\n\r\n"); + } + if (!_finished) { + client.write("500 Internal Server Error\r\n\r\n" + error.stackTrace); + client.shutdown(); + } + logError(client.peerName + " - - [" + new Date() + "] " + error); + }, + isConnected: function() { return client.isConnected; }, + }; +} + +function handleRequest(request, response) { + var handler = findHandler(request); + + print(request.client.peerName + " - - [" + new Date() + "] " + request.method + " " + request.uri + " " + request.version + " \"" + request.headers["user-agent"] + "\""); + + if (handler) { + try { + handler.invoke(request, response); + } catch (error) { + response.reportError(error); + } + } else { + response.writeHead(200, {"Content-Type": "text/plain; charset=utf-8"}); + response.end("No handler found for request: " + request.uri); + } +} + +function handleConnection(client) { + var inputBuffer = ""; + var request; + var headers = {}; + var lineByLine = true; + var bodyToRead = -1; + var body; + + function reset() { + inputBuffer = ""; + request = undefined; + headers = {}; + lineByLine = true; + bodyToRead = -1; + body = undefined; + } + + function finish() { + try { + var requestObject = new Request(request[0], request[1], request[2], headers, body, client); + var response = new Response(requestObject, client); + handleRequest(requestObject, response); + if (client.isConnected) { + reset(); + } + } catch (error) { + response.reportError(error); + } + } + + function handleLine(line, length) { + if (bodyToRead == -1) { + if (!request) { + request = line.split(' '); + return true; + } else if (line) { + var colon = line.indexOf(':'); + var key = line.slice(0, colon).trim(); + var value = line.slice(colon + 1).trim(); + headers[key.toLowerCase()] = value; + return true; + } else { + if (headers["content-length"] != undefined) { + bodyToRead = parseInt(headers["content-length"]); + lineByLine = false; + body = ""; + return true; + } else { + finish(); + return false; + } + } + } else { +
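+ // Headers are complete and Content-Length is known, so accumulate the raw body and count down until finish() can dispatch the completed request.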
body += line; + bodyToRead -= length; + if (bodyToRead <= 0) { + finish(); + } + } + } + + client.onError(function(error) { + logError(client.peerName + " - - [" + new Date() + "] " + error); + }); + + client.read(function(data) { + if (data) { + inputBuffer += data; + var more = true; + while (more) { + if (lineByLine) { + more = false; + var end = inputBuffer.indexOf('\n'); + var realEnd = end; + if (end > 0 && inputBuffer[end - 1] == '\r') { + --end; + } + if (end != -1) { + var line = inputBuffer.slice(0, end); + inputBuffer = inputBuffer.slice(realEnd + 1); + more = handleLine(line, realEnd + 1); + } + } else { + more = handleLine(inputBuffer, inputBuffer.length); + inputBuffer = ""; + } + } + } + }); +} + +var kBacklog = 8; +var kHost = "0.0.0.0" +var kHttpPort = gGlobalSettings.httpPort || 12345; +var kHttpsPort = gGlobalSettings.httpsPort || 12346; + +var socket = new Socket(); +socket.bind(kHost, kHttpPort).then(function() { + var listenResult = socket.listen(kBacklog, function() { + socket.accept().then(handleConnection).catch(function(error) { + logError("[" + new Date() + "] " + error); + }); + }); +}).catch(function(error) { + logError("[" + new Date() + "] " + error); +}); + +var privateKey = File.readFile("data/httpd/privatekey.pem"); +var certificate = File.readFile("data/httpd/certificate.pem"); + +if (privateKey && certificate) { + var tls = new TlsContext(); + tls.setPrivateKey(privateKey); + tls.setCertificate(certificate); + + var secureSocket = new Socket(); + secureSocket.bind(kHost, kHttpsPort).then(function() { + secureSocket.listen(kBacklog, function() { + secureSocket.accept().then(function(client) { + handleConnection(client); + client.startTls(tls).catch(function(error) { + logError("[" + new Date() + "] [" + client.peerName + "] " + error); + }); + }).catch(function(error) { + logError("[" + new Date() + "] " + error); + }); + }); + }); +} + +exports.all = all; diff --git a/core/index.html b/core/index.html new file mode 100644 index 00000000..9c7e7cfa --- /dev/null +++ b/core/index.html @@ -0,0 +1,31 @@ + + + Tilde Friends + + + + + +
+ +
+
+ + > + +
+ + +
+ + diff --git a/core/isaac.js b/core/isaac.js new file mode 100644 index 00000000..a4e07d7b --- /dev/null +++ b/core/isaac.js @@ -0,0 +1,239 @@ +/* ---------------------------------------------------------------------- + * Copyright (c) 2012 Yves-Marie K. Rinquin + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * ---------------------------------------------------------------------- + * + * ISAAC is a cryptographically secure pseudo-random number generator + * (or CSPRNG for short) designed by Robert J. Jenkins Jr. in 1996 and + * based on RC4. It is designed for speed and security. + * + * ISAAC's informations & analysis: + * http://burtleburtle.net/bob/rand/isaac.html + * ISAAC's implementation details: + * http://burtleburtle.net/bob/rand/isaacafa.html + * + * ISAAC succesfully passed TestU01 + * + * ---------------------------------------------------------------------- + * + * Usage: + * + * var random_number = isaac.random(); + * + * Output: [ 0x00000000; 0xffffffff] + * [-2147483648; 2147483647] + * + */ + + +/* js string (ucs-2/utf16) to a 32-bit integer (utf-8 chars, little-endian) array */ +String.prototype.toIntArray = function() { + var w1, w2, u, r4 = [], r = [], i = 0; + var s = this + '\0\0\0'; // pad string to avoid discarding last chars + var l = s.length - 1; + + while(i < l) { + w1 = s.charCodeAt(i++); + w2 = s.charCodeAt(i+1); + if (w1 < 0x0080) { + // 0x0000 - 0x007f code point: basic ascii + r4.push(w1); + } else if(w1 < 0x0800) { + // 0x0080 - 0x07ff code point + r4.push(((w1 >>> 6) & 0x1f) | 0xc0); + r4.push(((w1 >>> 0) & 0x3f) | 0x80); + } else if((w1 & 0xf800) != 0xd800) { + // 0x0800 - 0xd7ff / 0xe000 - 0xffff code point + r4.push(((w1 >>> 12) & 0x0f) | 0xe0); + r4.push(((w1 >>> 6) & 0x3f) | 0x80); + r4.push(((w1 >>> 0) & 0x3f) | 0x80); + } else if(((w1 & 0xfc00) == 0xd800) + && ((w2 & 0xfc00) == 0xdc00)) { + // 0xd800 - 0xdfff surrogate / 0x10ffff - 0x10000 code point + u = ((w2 & 0x3f) | ((w1 & 0x3f) << 10)) + 0x10000; + r4.push(((u >>> 18) & 0x07) | 0xf0); + r4.push(((u >>> 12) & 0x3f) | 0x80); + r4.push(((u >>> 6) & 0x3f) | 0x80); + r4.push(((u >>> 0) & 0x3f) | 0x80); + i++; + } else { + // invalid char + } + /* add integer (four utf-8 value) to array */ + if(r4.length > 3) { + // little endian + r.push((r4.shift() << 0) | (r4.shift() << 8) | + (r4.shift() << 16) | (r4.shift() << 24)); + } + } + + return r; +} + +/* isaac module pattern */ +var isaac = (function(){ + + /* private: internal states */ + var m = Array(256), // 
internal memory + acc = 0, // accumulator + brs = 0, // last result + cnt = 0, // counter + r = Array(256), // result array + gnt = 0; // generation counter + + seed(Math.random() * 0xffffffff); + + /* private: 32-bit integer safe adder */ + function add(x, y) { + var lsb = (x & 0xffff) + (y & 0xffff); + var msb = (x >>> 16) + (y >>> 16) + (lsb >>> 16); + return (msb << 16) | (lsb & 0xffff); + } + + /* public: initialisation */ + function reset() { + acc = brs = cnt = 0; + for(var i = 0; i < 256; ++i) + m[i] = r[i] = 0; + gnt = 0; + } + + /* public: seeding function */ + function seed(s) { + var a, b, c, d, e, f, g, h, i; + + /* seeding the seeds of love */ + a = b = c = d = + e = f = g = h = 0x9e3779b9; /* the golden ratio */ + + if(s && typeof(s) === 'string') + s = s.toIntArray(); + + if(s && typeof(s) === 'number') { + s = [s]; + } + + if(s instanceof Array) { + reset(); + for(i = 0; i < s.length; i++) + r[i & 0xff] += (typeof(s[i]) === 'number') ? s[i] : 0; + } + + /* private: seed mixer */ + function seed_mix() { + a ^= b << 11; d = add(d, a); b = add(b, c); + b ^= c >>> 2; e = add(e, b); c = add(c, d); + c ^= d << 8; f = add(f, c); d = add(d, e); + d ^= e >>> 16; g = add(g, d); e = add(e, f); + e ^= f << 10; h = add(h, e); f = add(f, g); + f ^= g >>> 4; a = add(a, f); g = add(g, h); + g ^= h << 8; b = add(b, g); h = add(h, a); + h ^= a >>> 9; c = add(c, h); a = add(a, b); + } + + for(i = 0; i < 4; i++) /* scramble it */ + seed_mix(); + + for(i = 0; i < 256; i += 8) { + if(s) { /* use all the information in the seed */ + a = add(a, r[i + 0]); b = add(b, r[i + 1]); + c = add(c, r[i + 2]); d = add(d, r[i + 3]); + e = add(e, r[i + 4]); f = add(f, r[i + 5]); + g = add(g, r[i + 6]); h = add(h, r[i + 7]); + } + seed_mix(); + /* fill in m[] with messy stuff */ + m[i + 0] = a; m[i + 1] = b; m[i + 2] = c; m[i + 3] = d; + m[i + 4] = e; m[i + 5] = f; m[i + 6] = g; m[i + 7] = h; + } + if(s) { + /* do a second pass to make all of the seed affect all of m[] */ + for(i = 0; i < 256; i += 8) { + a = add(a, m[i + 0]); b = add(b, m[i + 1]); + c = add(c, m[i + 2]); d = add(d, m[i + 3]); + e = add(e, m[i + 4]); f = add(f, m[i + 5]); + g = add(g, m[i + 6]); h = add(h, m[i + 7]); + seed_mix(); + /* fill in m[] with messy stuff (again) */ + m[i + 0] = a; m[i + 1] = b; m[i + 2] = c; m[i + 3] = d; + m[i + 4] = e; m[i + 5] = f; m[i + 6] = g; m[i + 7] = h; + } + } + + prng(); /* fill in the first set of results */ + gnt = 256; /* prepare to use the first set of results */; + } + + /* public: isaac generator, n = number of run */ + function prng(n){ + var i, x, y; + + n = (n && typeof(n) === 'number') + ? 
Math.abs(Math.floor(n)) : 1; + + while(n--) { + cnt = add(cnt, 1); + brs = add(brs, cnt); + + for(i = 0; i < 256; i++) { + switch(i & 3) { + case 0: acc ^= acc << 13; break; + case 1: acc ^= acc >>> 6; break; + case 2: acc ^= acc << 2; break; + case 3: acc ^= acc >>> 16; break; + } + acc = add(m[(i + 128) & 0xff], acc); x = m[i]; + m[i] = y = add(m[(x >>> 2) & 0xff], add(acc, brs)); + r[i] = brs = add(m[(y >>> 10) & 0xff], x); + } + } + } + + /* public: return a random number between */ + function rand() { + if(!gnt--) { + prng(); gnt = 255; + } + return r[gnt]; + } + + /* public: return internals in an object*/ + function internals(){ + return {a: acc, b: brs, c: cnt, m: m, r: r}; + } + + /* return class object */ + return { + 'reset': reset, + 'seed': seed, + 'prng': prng, + 'rand': rand, + 'internals': internals + }; +})(); /* declare and execute */ + +/* public: output*/ +isaac.random = function() { + return 0.5 + this.rand() * 2.3283064365386963e-10; // 2^-32 +} + +exports.rand = isaac.rand; diff --git a/core/network.js b/core/network.js new file mode 100644 index 00000000..f9089e53 --- /dev/null +++ b/core/network.js @@ -0,0 +1,102 @@ +"use strict"; + +function Connection() { + this.socket = null; + this.buffer = null; + this.onReadCallback = null; + this.onErrorCallback = null; + this.tlsContext = null; + this._exported = null; + return this; +} + +Connection.prototype.connect = function(host, port) { + let connection = this; + connection.close(); + connection.socket = new Socket(); + return connection.socket.connect(host, port).then(function() { + connection.buffer = ""; + return Promise.all([ + connection.socket.onError(function(error) { + if (connection.onErrorCallback) { + connection.onErrorCallback(error); + } + connection.close(); + }), + connection.socket.read(function(data) { + if (connection.onReadCallback) { + connection.onReadCallback(data); + } else { + connection.buffer += data; + } + }), + ]); + }); +}; + +Connection.prototype.isConnected = function() { + return this.socket && this.socket.isConnected; +}; + +Connection.prototype.read = function(callback) { + this.onReadCallback = callback; + if (this.buffer) { + callback(this.buffer); + } + this.buffer = ""; +}; + +Connection.prototype.onError = function(callback) { + this.onErrorCallback = callback; +}; + +Connection.prototype.write = function(data) { + return this.socket.write(data); +}; + +Connection.prototype.close = function() { + let socket = this.socket; + this.socket = null; + if (socket) { + return socket.close(); + } +}; + +Connection.prototype.startTls = function() { + return this.socket.startTls(this.tlsContext); +}; + +Connection.prototype.getPeerCertificate = function() { + return this.socket.peerCertificate; +}; + +Connection.prototype.addTrustedCertificate = function(certificate) { + if (!this.tlsContext) { + this.tlsContext = new TlsContext(); + } + return this.tlsContext.addTrustedCertificate(certificate); +}; + +Connection.prototype.exported = function() { + if (!this._exported) { + this._exported = { + isConnected: this.isConnected.bind(this), + connect: this.connect.bind(this), + startTls: this.startTls.bind(this), + write: this.write.bind(this), + read: this.read.bind(this), + onError: this.onError.bind(this), + close: this.close.bind(this), + getPeerCertificate: this.getPeerCertificate.bind(this), + addTrustedCertificate: this.addTrustedCertificate.bind(this), + }; + } + return this._exported; +}; + +function newConnection() { + let process = this; + let connection = new Connection(); + 
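+ // Remember the connection on the owning process so that task.onExit can close any sockets the package left open when its task ends.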
process.connections.push(connection); + return connection.exported(); +} diff --git a/core/style.css b/core/style.css new file mode 100644 index 00000000..8249b619 --- /dev/null +++ b/core/style.css @@ -0,0 +1,181 @@ +body { + font-family: monospace; + background-color: #002b36; + color: #eee8d5; +} + +#body { + display: flex; + flex-flow: column; + width: 100%; + height: 100%; +} + +.navigation { + height: auto; + width: 100%; +} + +#terminals { + flex: 1; + flex-flow: column; +} + +#logo { + vertical-align: middle; +} + +.terminal { + outline: none; + white-space: pre-wrap; + resize: none; + overflow: auto; + height: auto; + width: auto; +} + +.hbox { + display: flex; + flex-direction: row; +} + +.vbox { + display: flex; + flex-direction: column; +} + +.terminal > img { + vertical-align: middle; +} + +a:link, .command { + color: #268bd2; + cursor: pointer; +} + +a:visited, .command:visited { + color: #6c71c4; + cursor: pointer; +} + +a:hover, .command:hover { + color: #2aa198; + cursor: pointer; +} + +a:active, .command:active { + color: #dc322f; + cursor: pointer; +} + +.input { + display: flex; + flex-flow: row; +} + +#prompt { + padding-right: 0.5em; + color: #eee8d5; +} + +#input { + flex: 1; + border: 0; + outline: none; + font-family: monospace; + background-color: #002b36; + color: #eee8d5; +} + +#input.drop { + border: 2px solid; + color: #cb4b16; +} + +.CodeMirror { + height: 100%; + padding: 0; +} + +.CodeMirror-scroll { + height: 100%; + padding: 0; +} + +.cm-tab { + background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAMCAYAAAAkuj5RAAAAAXNSR0IArs4c6QAAAGFJREFUSMft1LsRQFAQheHPowAKoACx3IgEKtaEHujDjORSgWTH/ZOdnZOcM/sgk/kFFWY0qV8foQwS4MKBCS3qR6ixBJvElOobYAtivseIE120FaowJPN75GMu8j/LfMwNjh4HUpwg4LUAAAAASUVORK5CYII=); + background-position: right; + background-repeat: no-repeat; + -webkit-filter: invert(100%); + filter: invert(100%); +} + +.cm-trailingspace { + background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAQAAAACCAYAAAB/qH1jAAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH3QUXCToH00Y1UgAAACFJREFUCNdjPMDBUc/AwNDAAAFMTAwMDA0OP34wQgX/AQBYgwYEx4f9lQAAAABJRU5ErkJggg==); + background-position: bottom left; + background-repeat: repeat-x; +} + +#status { + display: none; +} + +#update { + display: none; +} + +#login { + float: right; +} + +#auth_greeting { + text-align: center; +} + +#auth { + display: flex; + flex-flow: row; + align-content: center; + align-items: center; + text-align: center; + justify-content: center; +} + +#auth_login { + flex: 0 1 auto; + text-align: right; +} + +#auth_or { + flex: 0 1 auto; + padding: 1em; +} + +#auth_guest { + flex: 0 1 auto; +} + +.notice { + color: #cb4b16; + margin: 1em; + padding: 1em; + border: 1px solid #cb4b16; +} + +/* Solarized Color Scheme Colors */ +.base03 { color: #002b36; } +.base02 { color: #073642; } +.base01 { color: #586e75; } +.base00 { color: #657b83; } +.base0 { color: #839496; } +.base1 { color: #93a1a1; } +.base2 { color: #eee8d5; } +.base3 { color: #fdf6e3; } +.yellow { color: #b58900; } +.orange { color: #cb4b16; } +.red { color: #dc322f; } +.magenta { color: #d33682; } +.violet { color: #6c71c4; } +.blue { color: #268bd2; } +.cyan { color: #2aa198; } +.green { color: #859900; } diff --git a/core/terminal.js b/core/terminal.js new file mode 100644 index 00000000..82e8b34e --- /dev/null +++ b/core/terminal.js @@ -0,0 +1,290 @@ +"use strict"; + +var kStaticFiles = [ + {uri: '', path: 'index.html', type: 'text/html; charset=utf-8'}, + {uri: '/edit', path: 'edit.html', type: 
'text/html; charset=utf-8'}, + {uri: '/style.css', path: 'style.css', type: 'text/css; charset=utf-8'}, + {uri: '/favicon.png', path: 'favicon.png', type: 'image/png'}, + {uri: '/client.js', path: 'client.js', type: 'text/javascript; charset=utf-8'}, + {uri: '/editor.js', path: 'editor.js', type: 'text/javascript; charset=utf-8'}, + {uri: '/agplv3-88x31.png', path: 'agplv3-88x31.png', type: 'image/png'}, +]; + +var auth = require('auth'); +var form = require('form'); + +function Terminal() { + this._waiting = []; + this._index = 0; + this._firstLine = 0; + this._lines = []; + this._lastRead = null; + this._lastWrite = null; + this._echo = true; + this._readLine = null; + this._selected = null; + return this; +} + +Terminal.kBacklog = 64; + +Terminal.prototype.dispatch = function(data) { + for (var i in this._waiting) { + this._waiting[i](data); + } + this._waiting.length = 0; +} + +Terminal.prototype.print = function() { + var data = arguments; + if (this._selected) { + data = { + terminal: this._selected, + value: data + }; + } + this._lines.push(data); + this._index++; + if (this._lines.length >= Terminal.kBacklog * 2) { + this._firstLine = this._index - Terminal.kBacklog; + this._lines = this._lines.slice(this._lines.length - Terminal.kBacklog); + } + this.dispatch({index: this._index - 1, lines: [data]}); + this._lastWrite = new Date(); +} + +Terminal.prototype.notify = function(title, options) { + this.print({action: "notify", title: title, options: options}); +} + +Terminal.prototype.setTitle = function(value) { + this.print({action: "title", value: value}); +} + +Terminal.prototype.setPrompt = function(value) { + this.print({action: "prompt", value: value}); +} + +Terminal.prototype.setPassword = function(value) { + this.print({action: "password", value: value}); +} + +Terminal.prototype.setHash = function(value) { + this.print({action: "hash", value: value}); +} + +Terminal.prototype.notifyUpdate = function() { + this.print({action: "update"}); +} + +Terminal.prototype.split = function(options) { + this.print({action: "split", options: options}); +} + +Terminal.prototype.select = function(name) { + this._selected = name; +} + +Terminal.prototype.postMessageToIframe = function(name, message) { + this.print({action: "postMessageToIframe", name: name, message: message}); +} + +Terminal.prototype.clear = function() { + //this._lines.length = 0; + //this._firstLine = this._index; + this.print({action: "clear"}); +} + +Terminal.prototype.ping = function() { + this.dispatch({index: this._index - 1, lines: [{action: "ping"}]}); +} + +Terminal.prototype.getOutput = function(haveIndex) { + var terminal = this; + terminal._lastRead = new Date(); + return new Promise(function(resolve) { + if (haveIndex < terminal._index - 1) { + resolve({index: terminal._index - 1, lines: terminal._lines.slice(Math.max(0, haveIndex + 1 - terminal._firstLine))}); + } else { + terminal._waiting.push(resolve); + } + }); +} + +Terminal.prototype.setEcho = function(echo) { + this._echo = echo; +} + +Terminal.prototype.readLine = function() { + var self = this; + if (self._readLine) { + self._readLine[1](); + } + return new Promise(function(resolve, reject) { + self._readLine = [resolve, reject]; + }); +} + +function invoke(handlers, argv) { + var promises = []; + if (handlers) { + for (var i = 0; i < handlers.length; ++i) { + promises.push(handlers[i].apply({}, argv)); + } + } + return Promise.all(promises); +} + +function handler(request, response, packageOwner, packageName, uri) { + var found = false; + + if 
(badName(packageOwner) || badName(packageName)) { + var data = "File not found"; + response.writeHead(404, {"Content-Type": "text/plain; charset=utf-8", "Content-Length": data.length}); + response.end(data); + found = true; + } + + if (!found) { + for (var i in kStaticFiles) { + if (uri === kStaticFiles[i].uri) { + found = true; + var data = File.readFile("core/" + kStaticFiles[i].path); + if (kStaticFiles[i].uri == "") { + data = data.replace("$(VIEW_SOURCE)", "/~" + packageOwner + "/" + packageName + "/view"); + data = data.replace("$(EDIT_SOURCE)", "/~" + packageOwner + "/" + packageName + "/edit"); + } else if (kStaticFiles[i].uri == "/edit") { + var source = File.readFile("packages/" + packageOwner + "/" + packageName + "/" + packageName + ".js") || ""; + source = source.replace(/([&<>"])/g, function(x, item) { + return {'&': '&amp;', '"': '&quot;', '<': '&lt;', '>': '&gt;'}[item]; + }); + data = data.replace("$(SOURCE)", source); + } + response.writeHead(200, {"Content-Type": kStaticFiles[i].type, "Content-Length": data.length}); + response.end(data); + break; + } + } + } + + if (!found) { + var process; + if (uri === "/view") { + var data = File.readFile("packages/" + packageOwner + "/" + packageName + "/" + packageName + ".js"); + response.writeHead(200, {"Content-Type": "text/javascript; charset=utf-8", "Content-Length": data.length}); + response.end(data); + } else if (uri == "/save") { + var credentials = auth.query(request.headers); + var userName = credentials && credentials.session && credentials.session.name ? credentials.session.name : "guest"; + if (badName(packageName)) { + response.writeHead(403, {"Content-Type": "text/plain; charset=utf-8"}); + response.end("Invalid package name: " + packageName); + } else if (badName(userName)) { + response.writeHead(403, {"Content-Type": "text/plain; charset=utf-8"}); + response.end("Invalid user name: " + userName); + } else { + File.makeDirectory("packages/" + userName); + File.makeDirectory("packages/" + userName + "/" + packageName); + if (!File.writeFile("packages/" + userName + "/" + packageName + "/" + packageName + ".js", request.body || "")) { + response.writeHead(200, {"Content-Type": "text/plain; charset=utf-8"}); + response.end("/~" + userName + "/" + packageName); + updateProcesses(userName, packageName); + } else { + response.writeHead(500, {"Content-Type": "text/plain; charset=utf-8"}); + response.end("Problem saving: " + packageName); + } + } + } else { + var options = {}; + var credentials = auth.query(request.headers); + if (credentials && credentials.session) { + options.userName = credentials.session.name; + } + options.credentials = credentials; + var sessionId = form.decodeForm(request.query).sessionId; + var isNewSession = false; + if (!getSessionProcess(packageOwner, packageName, sessionId, {create: false})) { + sessionId = makeSessionId(); + isNewSession = true; + } + process = getSessionProcess(packageOwner, packageName, sessionId, options); + process.lastActive = Date.now(); + + if (uri === "/send") { + if (isNewSession) { + response.writeHead(403, {"Content-Type": "text/plain; charset=utf-8"}); + response.end("Too soon."); + } else { + var command = JSON.parse(request.body); + var eventName = 'unknown'; + if (typeof command == "string") { + if (process.terminal._echo) { + process.terminal.print("> " + command); + } + if (process.terminal._readLine) { + let promise = process.terminal._readLine; + process.terminal._readLine = null; + promise[0](command); + } + eventName = 'onInput'; + } else if (command.event) { + eventName
= command.event; + } + return invoke(process.eventHandlers[eventName], [command]).then(function() { + response.writeHead(200, { + "Content-Type": "text/plain; charset=utf-8", + "Content-Length": "0", + "Cache-Control": "no-cache, no-store, must-revalidate", + "Pragma": "no-cache", + "Expires": "0", + }); + response.end(""); + }).catch(function(error) { + process.terminal.print(error); + }); + } + } else if (uri === "/receive") { + if (isNewSession) { + var data = JSON.stringify({ + lines: [ + { + action: "session", + session: { + sessionId: sessionId, + credentials: credentials, + } + }, + ] + }); + response.writeHead(200, { + "Content-Type": "text/plain; charset=utf-8", + "Content-Length": data.length.toString(), + "Cache-Control": "no-cache, no-store, must-revalidate", + "Pragma": "no-cache", + "Expires": "0", + }); + process.ready.then(function() { + process.terminal.print({action: "ready", ready: true}); + }).catch(function(error) { + process.terminal.print({action: "ready", error: error}); + }); + response.end(data); + } else { + return process.terminal.getOutput(parseInt(request.body)).then(function(output) { + var data = JSON.stringify(output); + response.writeHead(200, { + "Content-Type": "text/plain; charset=utf-8", + "Content-Length": data.length.toString(), + "Cache-Control": "no-cache, no-store, must-revalidate", + "Pragma": "no-cache", + "Expires": "0", + }); + response.end(data); + }); + } + } + } + } +} + +exports.handler = handler; diff --git a/data/wiki/development b/data/wiki/development new file mode 100644 index 00000000..9836901f --- /dev/null +++ b/data/wiki/development @@ -0,0 +1,62 @@ += SandboxOS App Development Guide = +This is a brief introduction on developing SandboxOS apps targeted at people who are already familiar with web development. + +== Packages == +A package is a directory of files. '''package.json''' is the only file with special meaning. + +Here is an example package.json: +{{{ +#!json +{ + "name": "chat", + "start": "backend.js", + "imports": ["auth", "httpd", "filesystem"], + "href": "/chat", + "description": "A basic multi-user chat example." +} +}}} + + * '''name''': identifies the package. If it is not unique, any existing installed package of the same name will be replaced when installing the package. + * '''start''': specifies the JavaScript file which is the entry point of the task. When a new process is started for this package, this script is executed within it. + * '''imports''': list of package/task names which this package wants to be able to access. + * '''href''': link to the task's entry page used by [/tasks /tasks]. + * '''description''': human-readable description of the package, displayed by [/tasks /tasks]. + +== Promises == + +JavaScript promises are used heavily. Invoking any method on another task will return a Promise object. Execution will return immediately but go asynchronous. That usually looks like this: +{{{ +#!javascript +imports.email.sendMessage(message).then(function(result) { + // When sendMessage completes, result is the return value. +}).catch(function(error) { + // If sendMessage fails (or calling it somehow fails), error is the reason. +}); +// sendMessage returns immediately and execution continues on. +}}} + +This is a completely inadequate explanation of the nuances involved, but it's a starting point. Promises can be created and chained and combined in interesting ways. + +== Inter-Task Communication == +Tasks have access to the exported functions on any task declared in their package imports. 
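+
+For example (untested, and the exported function name {{{append}}} is made up for illustration), a package whose '''package.json''' lists {{{"imports": ["log"]}}} can call into the log task directly:
+{{{
+#!javascript
+// "append" is a hypothetical exported function on the log task.
+imports.log.append("something happened").then(function() {
+	// The call completed on the other task.
+}).catch(function(error) {
+	// The call failed or the log task is unavailable.
+});
+}}}
+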
+ +In addition, functions passed between tasks can be called by the receiving task or passed along further. + +Here is an untested, made-up example with two hypothetical tasks, '''math''' and '''test''': + +'''math.js''': +{{{ +#!javascript +exports = { + sum: function(a, b) { return a + b; }, + multiply: function(a, b) { return a * b; }, +}; +}}} + +'''test.js''': +{{{ +#!javascript +imports.math.sum(4, 5).then(function(result) { + // result === 9 +}); +}}} \ No newline at end of file diff --git a/data/wiki/index b/data/wiki/index new file mode 100644 index 00000000..59429615 --- /dev/null +++ b/data/wiki/index @@ -0,0 +1,65 @@ += SandboxOS = + +I embedded a JavaScript engine in a C++ application and used it to make some webapps. I made a wiki with it. That is what you are looking at. It's not especially good, but the interesting part is that you, as a visitor to this web site, have the power to rewrite the wiki webapp itself into something better. + +== Goals == +I've tried writing lofty descriptions about why I think this is cool, but I'm bad at that part. + +Goals include: + +=== 1. Make it easy to run servers. === + * SandboxOS can be made to run on lots of platforms. + * SandboxOS can be made trivial to install. I want to put it in app stores. + * SandboxOS is an app store in itself, so think "installing blogging software == installing a mobile game". And then think of a webapp that is 10x better than that. +=== 2. Make a thing that is by default pretty secure. === + * Actual access to most system resources (filesystem + network, currently) is restricted to a few apps that hand it out in very limited ways. + * Apps will only get access to resources when granted by the user. Think iOS or Android. + * The idea is that it can be made as secure as Google Chrome. It currently uses V8 and process isolation, but there's a lot more to do. In the end I want to trust installing random apps as much as or more than I trust visiting shady web sites. + * But security is nothing if you can't do cool stuff. I want to make weird social music players and things to manipulate photos on my phone from my desktop. +=== 3. Free software idealism === + * This is Wikipedia but for apps? + * Make distributed versions of things that Google/Apple/Facebook dominate? + * Something about software architecture and making small tools that fit together to do neat things. +=== 4. Fun === + * I get a kick out of making all of this stuff from scratch. The stack is getting pretty high, and it's just getting started. + * This hopefully lowers the barrier to entry to making little webapps, since you can use my server and make something with ~2 files that you edited like something on Google Docs. + +== How to Get Started == + +{{{ +#!html +
+<ol>
+  <li>Visit the /tasks page.</li>
+  <li>Pick a task (hint: consider helloworld, wiki, or chat). Click edit. You will probably have to log in at this point.</li>
+  <li>Make modifications.</li>
+  <li>Click install.</li>
+  <li>Visit your updated task's URL and see it updated.</li>
+  <li>Ask questions. Either stick them here on the wiki somewhere, or email Cory.</li>
+</ol>
+}}} + +It should be difficult to do anything too disruptive, and there are backups if you do. '''Don't worry about breaking things.''' + +It's easy to make things fail silently. I'm starting to improve that. '''Use the log task.''' + +There are probably ways to do bad things if you try hard. '''Please don't be mean.''' + +== TODO == +In no particular order: + * Set limits for everything. n anonymous users can each make m tasks with p MB of storage and can make q HTTP request per second. + * Make some sort of TODO / bug tracker to replace this task list. + * Establish some conventions for non-webapp tasks. Maybe make a shell that lets you run code from any task that exposes it. + * Establish some conventions for HTML fragments. Include a persistent HTML header in every page or something like that. + * Make an app store. Not too important until we have multiple installs but pretty important. + * Lots of the API exposed from C++ isn't async but should be. + * Pick a name for this thing. + * ~~Set up an [https://www.unprompted.com/projects/build/sandboxos automated build].~~ + * Feedback is really bad or missing. If you break a task, it might just stop responding to web requests. I have the benefit of being able to see console output, but that's really noisy and bad. There might be times where it is necessary to restart the whole process. Ultimately there should be a way to see feedback when you've broken something, and things should be hardened so that you can't take down the whole system without abusing administrative rights. + * Security here is not good at the moment. I'm essentially trusting you to please don't do bad things. I've taken some steps to harden this thing, but it has a long way to go. + * ~~Inter-task messaging is really awkward to type. And the way you need to respond to httpd requests is even weirder. I will work on this. It should be possible to get most things close to being first-class function calls, except asynchronous.~~ + * ~~Make this wiki actually use some sort of wiki syntax. HTML is too 90s.~~ + * ~~The C++ program needs to use separate processes for each task in order to prevent OOM situations in one task from taking down the whole process.~~ + * ~~The C++ program needs to build and run on Windows.~~ It needs to do so as transparently in the background as possible while still being trivial to acquire and start (Windows service / system tray icon). + * ~~Need to implement a real security model. I was thinking once this has an authentication task, everything can use that to enforce that nobody but administrators changed the core tasks and that anonymous and untrusted users don't do anything too malicious.~~ + * ~~Be able to marshal functions across task boundaries so that the httpd task can return a request object that has the same API as node.js. 
I think this will greatly simplify the client code for webapps.~~ + * ~~Need to implement something like proper javascript requires() and package dependencies for code sharing.~~ \ No newline at end of file diff --git a/deps/liblmdb/.gitignore b/deps/liblmdb/.gitignore new file mode 100644 index 00000000..01fe8bc1 --- /dev/null +++ b/deps/liblmdb/.gitignore @@ -0,0 +1,23 @@ +mtest +mtest[23456] +testdb +mdb_copy +mdb_stat +mdb_dump +mdb_load +*.lo +*.[ao] +*.so +*.exe +*[~#] +*.bak +*.orig +*.rej +*.gcov +*.gcda +*.gcno +core +core.* +valgrind.* +man/ +html/ diff --git a/deps/liblmdb/CHANGES b/deps/liblmdb/CHANGES new file mode 100644 index 00000000..bf4486be --- /dev/null +++ b/deps/liblmdb/CHANGES @@ -0,0 +1,182 @@ +LMDB 0.9 Change Log + +LMDB 0.9.17 Release (2015/11/30) + Fix ITS#7377 catch calloc failure + Fix ITS#8237 regression from ITS#7589 + Fix ITS#8238 page_split for DUPFIXED pages + Fix ITS#8221 MDB_PAGE_FULL on delete/rebalance + Fix ITS#8258 rebalance/split assert + Fix ITS#8263 cursor_put cursor tracking + Fix ITS#8264 cursor_del cursor tracking + Fix ITS#8310 cursor_del cursor tracking + Fix ITS#8299 mdb_del cursor tracking + Fix ITS#8300 mdb_del cursor tracking + Fix ITS#8304 mdb_del cursor tracking + Fix ITS#7771 fakepage cursor tracking + Fix ITS#7789 ensure mapsize >= pages in use + Fix ITS#7971 mdb_txn_renew0() new reader slots + Fix ITS#7969 use __sync_synchronize on non-x86 + Fix ITS#8311 page_split from update_key + Fix ITS#8312 loose pages in nested txn + Fix ITS#8313 mdb_rebalance dummy cursor + Fix ITS#8315 dirty_room in nested txn + Fix ITS#8323 dirty_list in nested txn + Fix ITS#8316 page_merge cursor tracking + Fix ITS#8321 cursor tracking + Fix ITS#8319 mdb_load error messages + Fix ITS#8320 mdb_load plaintext input + Added mdb_txn_id() (ITS#7994) + Added robust mutex support + Miscellaneous cleanup/simplification + Build + Create install dirs if needed (ITS#8256) + Fix ThreadProc decl on Win32/MSVC (ITS#8270) + Added ssize_t typedef for MSVC (ITS#8067) + Use ANSI apis on Windows (ITS#8069) + Use O_SYNC if O_DSYNC,MDB_DSYNC are not defined (ITS#7209) + Allow passing AR to make (ITS#8168) + Allow passing mandir to make install (ITS#8169) + +LMDB 0.9.16 Release (2015/08/14) + Fix cursor EOF bug (ITS#8190) + Fix handling of subDB records (ITS#8181) + Fix mdb_midl_shrink() usage (ITS#8200) + +LMDB 0.9.15 Release (2015/06/19) + Fix txn init (ITS#7961,#7987) + Fix MDB_PREV_DUP (ITS#7955,#7671) + Fix compact of empty env (ITS#7956) + Fix mdb_copy file mode + Fix mdb_env_close() after failed mdb_env_open() + Fix mdb_rebalance collapsing root (ITS#8062) + Fix mdb_load with large values (ITS#8066) + Fix to retry writes on EINTR (ITS#8106) + Fix mdb_cursor_del on empty DB (ITS#8109) + Fix MDB_INTEGERDUP key compare (ITS#8117) + Fix error handling (ITS#7959,#8157,etc.) + Fix race conditions (ITS#7969,7970) + Added workaround for fdatasync bug in ext3fs + Build + Don't use -fPIC for static lib + Update .gitignore (ITS#7952,#7953) + Cleanup for "make test" (ITS#7841), "make clean", mtest*.c + Misc. 
Android/Windows cleanup + Documentation + Fix MDB_APPEND doc + Fix MDB_MAXKEYSIZE doc (ITS#8156) + Fix mdb_cursor_put,mdb_cursor_del EACCES description + Fix mdb_env_sync(MDB_RDONLY env) doc (ITS#8021) + Clarify MDB_WRITEMAP doc (ITS#8021) + Clarify mdb_env_open doc + Clarify mdb_dbi_open doc + +LMDB 0.9.14 Release (2014/09/20) + Fix to support 64K page size (ITS#7713) + Fix to persist decreased as well as increased mapsizes (ITS#7789) + Fix cursor bug when deleting last node of a DUPSORT key + Fix mdb_env_info to return FIXEDMAP address + Fix ambiguous error code from writing to closed DBI (ITS#7825) + Fix mdb_copy copying past end of file (ITS#7886) + Fix cursor bugs from page_merge/rebalance + Fix to dirty fewer pages in deletes (mdb_page_loose()) + Fix mdb_dbi_open creating subDBs (ITS#7917) + Fix mdb_cursor_get(_DUP) with single value (ITS#7913) + Fix Windows compat issues in mtests (ITS#7879) + Add compacting variant of mdb_copy + Add BigEndian integer key compare code + Add mdb_dump/mdb_load utilities + +LMDB 0.9.13 Release (2014/06/18) + Fix mdb_page_alloc unlimited overflow page search + Documentation + Re-fix MDB_CURRENT doc (ITS#7793) + Fix MDB_GET_MULTIPLE/MDB_NEXT_MULTIPLE doc + +LMDB 0.9.12 Release (2014/06/13) + Fix MDB_GET_BOTH regression (ITS#7875,#7681) + Fix MDB_MULTIPLE writing multiple keys (ITS#7834) + Fix mdb_rebalance (ITS#7829) + Fix mdb_page_split (ITS#7815) + Fix md_entries count (ITS#7861,#7828,#7793) + Fix MDB_CURRENT (ITS#7793) + Fix possible crash on Windows DLL detach + Misc code cleanup + Documentation + mdb_cursor_put: cursor moves on error (ITS#7771) + + +LMDB 0.9.11 Release (2014/01/15) + Add mdb_env_set_assert() (ITS#7775) + Fix: invalidate txn on page allocation errors (ITS#7377) + Fix xcursor tracking in mdb_cursor_del0() (ITS#7771) + Fix corruption from deletes (ITS#7756) + Fix Windows/MSVC build issues + Raise safe limit of max MDB_MAXKEYSIZE + Misc code cleanup + Documentation + Remove spurious note about non-overlapping flags (ITS#7665) + +LMDB 0.9.10 Release (2013/11/12) + Add MDB_NOMEMINIT option + Fix mdb_page_split() again (ITS#7589) + Fix MDB_NORDAHEAD definition (ITS#7734) + Fix mdb_cursor_del() positioning (ITS#7733) + Partial fix for larger page sizes (ITS#7713) + Fix Windows64/MSVC build issues + +LMDB 0.9.9 Release (2013/10/24) + Add mdb_env_get_fd() + Add MDB_NORDAHEAD option + Add MDB_NOLOCK option + Avoid wasting space in mdb_page_split() (ITS#7589) + Fix mdb_page_merge() cursor fixup (ITS#7722) + Fix mdb_cursor_del() on last delete (ITS#7718) + Fix adding WRITEMAP on existing env (ITS#7715) + Fix nested txns (ITS#7515) + Fix mdb_env_copy() O_DIRECT bug (ITS#7682) + Fix mdb_cursor_set(SET_RANGE) return code (ITS#7681) + Fix mdb_rebalance() cursor fixup (ITS#7701) + Misc code cleanup + Documentation + Note that by default, readers need write access + + +LMDB 0.9.8 Release (2013/09/09) + Allow mdb_env_set_mapsize() on an open environment + Fix mdb_dbi_flags() (ITS#7672) + Fix mdb_page_unspill() in nested txns + Fix mdb_cursor_get(CURRENT|NEXT) after a delete + Fix mdb_cursor_get(DUP) to always return key (ITS#7671) + Fix mdb_cursor_del() to always advance to next item (ITS#7670) + Fix mdb_cursor_set(SET_RANGE) for tree with single page (ITS#7681) + Fix mdb_env_copy() retry open if O_DIRECT fails (ITS#7682) + Tweak mdb_page_spill() to be less aggressive + Documentation + Update caveats since mdb_reader_check() added in 0.9.7 + +LMDB 0.9.7 Release (2013/08/17) + Don't leave stale lockfile on failed RDONLY open (ITS#7664) + Fix 
mdb_page_split() ref beyond cursor depth + Fix read txn data race (ITS#7635) + Fix mdb_rebalance (ITS#7536, #7538) + Fix mdb_drop() (ITS#7561) + Misc DEBUG macro fixes + Add MDB_NOTLS envflag + Add mdb_env_copyfd() + Add mdb_txn_env() (ITS#7660) + Add mdb_dbi_flags() (ITS#7661) + Add mdb_env_get_maxkeysize() + Add mdb_env_reader_list()/mdb_env_reader_check() + Add mdb_page_spill/unspill, remove hard txn size limit + Use shorter names for semaphores (ITS#7615) + Build + Fix install target (ITS#7656) + Documentation + Misc updates for cursors, DB handles, data lifetime + +LMDB 0.9.6 Release (2013/02/25) + Many fixes/enhancements + +LMDB 0.9.5 Release (2012/11/30) + Renamed from libmdb to liblmdb + Many fixes/enhancements diff --git a/deps/liblmdb/COPYRIGHT b/deps/liblmdb/COPYRIGHT new file mode 100644 index 00000000..722d1a51 --- /dev/null +++ b/deps/liblmdb/COPYRIGHT @@ -0,0 +1,20 @@ +Copyright 2011-2015 Howard Chu, Symas Corp. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted only as authorized by the OpenLDAP +Public License. + +A copy of this license is available in the file LICENSE in the +top-level directory of the distribution or, alternatively, at +. + +OpenLDAP is a registered trademark of the OpenLDAP Foundation. + +Individual files and/or contributed packages may be copyright by +other parties and/or subject to additional restrictions. + +This work also contains materials derived from public sources. + +Additional information about OpenLDAP can be obtained at +. diff --git a/deps/liblmdb/Doxyfile b/deps/liblmdb/Doxyfile new file mode 100644 index 00000000..92d17b09 --- /dev/null +++ b/deps/liblmdb/Doxyfile @@ -0,0 +1,1631 @@ +# Doxyfile 1.7.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = LMDB + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. 
+ +OUTPUT_DIRECTORY = + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, +# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, +# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. 
+# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful is your file systems +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. 
+ +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given extension. +# Doxygen has a built-in mapping, but you can override or extend it using this +# tag. The format is ext=language, where ext is a file extension, and language +# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, +# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make +# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C +# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions +# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate getter +# and setter methods for a property. Setting this option to YES (the default) +# will make doxygen to replace the get and set methods by a property in the +# documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +INLINE_GROUPED_CLASSES = YES +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. 
When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = YES + +# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to +# determine which symbols to keep in memory and which to flush to disk. +# When the cache is full, less often used symbols will be written to disk. +# For small to medium size projects (<1000 input files) the default value is +# probably good enough. For larger projects a too small cache size can cause +# doxygen to be busy swapping symbols to and from disk most of the time +# causing a significant performance penality. +# If the system has enough physical memory increasing the cache will improve the +# performance by keeping more symbols in memory. Note that the value works on +# a logarithmic scale so increasing the size by one will rougly double the +# memory usage. The cache size is given by this formula: +# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols + +SYMBOL_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespace are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. 
+# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = NO + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. 
If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# If the sources in your project are distributed over multiple directories +# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy +# in the documentation. The default is NO. + +SHOW_DIRECTORIES = NO + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command , where is the value of +# the FILE_VERSION_FILTER tag, and is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. 
The create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. + +LAYOUT_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be abled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = lmdb.h midl.h mdb.c midl.c + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. 
If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx +# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or +# directories that are symbolic links (a Unix filesystem feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER +# is applied to all files. 
+ +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. +# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = YES + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. 
+ +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the stylesheet and background images +# according to this color. Hue is specified as an angle on a colorwheel, +# see http://en.wikipedia.org/wiki/Hue for more information. +# For instance the value 0 represents red, 60 is yellow, 120 is green, +# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. +# The allowed range is 0 to 359. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of +# the colors in the HTML output. For a value of 0 the output will use +# grayscales only. A value of 255 will produce the most vivid colors. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to +# the luminance component of the colors in the HTML output. Values below +# 100 gradually make the output lighter, whereas values above 100 make +# the output darker. The value divided by 100 is the actual gamma applied, +# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, +# and 100 does not change the gamma. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting +# this to NO can help when comparing the output of multiple runs. + +HTML_TIMESTAMP = YES + +# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, +# files or namespaces will be aligned in HTML using tables. If set to +# NO a bullet list will be used. + +HTML_ALIGN_MEMBERS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. 
For this to work a browser that supports +# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox +# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). + +HTML_DYNAMIC_SECTIONS = NO + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). +# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. 
+ +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to +# add. For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see +# +# Qt Help Project / Custom Filters. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's +# filter section matches. +# +# Qt Help Project / Filter Attributes. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files +# will be generated, which together with the HTML files, form an Eclipse help +# plugin. To install this plugin and make it available under the help contents +# menu in Eclipse, the contents of the directory containing the HTML and XML +# files needs to be copied into the plugins directory of eclipse. The name of +# the directory within the plugins directory should be the same as +# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before +# the help appears. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have +# this name. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. 
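For example, with the ENUM_VALUES_PER_LINE setting above, an enum like the illustrative one below (invented for this example, not taken from lmdb.h) is rendered four enumerators per row in the generated HTML:

    /* Illustrative only. With ENUM_VALUES_PER_LINE = 4, doxygen lays these
     * eight values out as two rows of four in the generated HTML. */
    typedef enum example_flags {
        EX_READ,  EX_WRITE, EX_CREATE, EX_TRUNC,
        EX_SYNC,  EX_ASYNC, EX_LOCK,   EX_NOLOCK
    } example_flags;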
+ +GENERATE_TREEVIEW = NO + +# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, +# and Class Hierarchy pages using a tree view instead of an ordered list. + +USE_INLINE_TREES = NO + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open +# links to external symbols imported via tag files in a separate window. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are +# not supported properly for IE 6.0, but are supported on all modern browsers. +# Note that when changing this option you need to delete any form_*.png files +# in the HTML output before the changes take effect. + +FORMULA_TRANSPARENT = YES + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box +# for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using +# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets +# (GENERATE_DOCSET) there is already a search function so this one should +# typically be disabled. For large projects the javascript based search engine +# can be slow; in that case enabling SERVER_BASED_SEARCH may provide a better solution. + +SEARCHENGINE = YES + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a PHP enabled web server instead of at the web client +# using Javascript. Doxygen will generate the search PHP script and index +# file to put on the web server. The advantage of the server +# based approach is that it scales better to large projects and allows +# full text search. The disadvantage is that it is more difficult to set up +# and does not have live searching capabilities. + +SERVER_BASED_SEARCH = NO + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. +# Note that when enabling USE_PDFLATEX this option is only used for +# generating bitmaps for formulas in the HTML output, but not in the +# Makefile that is written to the output directory. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name.
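Formula bitmaps only appear in the first place when documentation comments embed LaTeX. As an illustration (the comment and declaration below are invented, not taken from the liblmdb headers), a \f$ ... \f$ expression in a doxygen comment is rendered to a PNG using the LaTeX tools configured here, at the size set by FORMULA_FONTSIZE and with transparency controlled by FORMULA_TRANSPARENT:

    /** Rough illustration: number of pages needed for an item of a given size,
     *  \f$ \lceil size / pagesize \rceil \f$.
     */
    static unsigned int example_page_count(size_t size, size_t pagesize);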
+ +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +# If LATEX_SOURCE_CODE is set to YES then doxygen will include +# source code with syntax highlighting in the LaTeX output. +# Note that which sources are shown also depends on other settings +# such as SOURCE_BROWSER. + +LATEX_SOURCE_CODE = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. 
You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = YES + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. 
Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. +# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# in the INCLUDE_PATH (see below) will be search if a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = DEBUG=2 __GNUC__=1 + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. 
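To see what the PREDEFINED line above does, consider the illustrative fragment below (the names are invented for this example and do not come from mdb.c). Because DEBUG is predefined as 2 and __GNUC__ as 1, doxygen's preprocessor keeps both conditional blocks, so the declarations inside them appear in the generated documentation:

    /* Hypothetical fragment - not from the liblmdb sources. */
    #if DEBUG > 1
    void example_dump_page_stats(void);   /* kept: DEBUG is predefined as 2 */
    #endif

    #ifdef __GNUC__
    #define EXAMPLE_NORETURN __attribute__((noreturn))  /* kept: __GNUC__ = 1 */
    #else
    #define EXAMPLE_NORETURN
    #endif

With MACRO_EXPANSION left at NO, only this conditional evaluation happens; the macro bodies themselves are not expanded in the documented code.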
+ +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse +# the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. +# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = tooltag=./man1 + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option is superseded by the HAVE_DOT option below. This is only a +# fallback. It is recommended to install and use dot, since it yields more +# powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. + +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. 
This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default). + +HAVE_DOT = NO + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is +# allowed to run in parallel. When set to 0 (the default) doxygen will +# base this on the number of processors available in the system. You can set it +# explicitly to a value larger than 0 to get control over the balance +# between CPU load and processing speed. + +DOT_NUM_THREADS = 0 + +# By default doxygen will write a font called FreeSans.ttf to the output +# directory and reference it in all dot files that doxygen generates. This +# font does not include all possible unicode characters however, so when you need +# these (or just want a different-looking font) you can specify the font name +# using DOT_FONTNAME. You need to make sure dot is able to find the font, +# which can be done by putting it in a standard location or by setting the +# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory +# containing the font. + +DOT_FONTNAME = FreeSans.ttf + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the output directory to look for the +# FreeSans.ttf font (which doxygen will put there itself). If you specify a +# different font using DOT_FONTNAME you can set the path where dot +# can find it using this tag. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct group dependencies. + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run.
So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will show a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are png, jpg, or gif. +# If left blank png will be used. + +DOT_IMAGE_FORMAT = png + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lie further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default.
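Even with CALL_GRAPH and CALLER_GRAPH left at NO, the comments above note that graphs can still be requested for individual functions with the \callgraph and \callergraph commands (this also requires HAVE_DOT to be switched to YES). An illustrative declaration, not taken from lmdb.h:

    /** Example only: request call and caller graphs for this one function
     *  while the global CALL_GRAPH/CALLER_GRAPH options stay at NO.
     *  \callgraph
     *  \callergraph
     */
    static int example_flush_dirty_pages(void);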
+ +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES diff --git a/deps/liblmdb/LICENSE b/deps/liblmdb/LICENSE new file mode 100644 index 00000000..05ad7571 --- /dev/null +++ b/deps/liblmdb/LICENSE @@ -0,0 +1,47 @@ +The OpenLDAP Public License + Version 2.8, 17 August 2003 + +Redistribution and use of this software and associated documentation +("Software"), with or without modification, are permitted provided +that the following conditions are met: + +1. Redistributions in source form must retain copyright statements + and notices, + +2. Redistributions in binary form must reproduce applicable copyright + statements and notices, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution, and + +3. Redistributions must contain a verbatim copy of this document. + +The OpenLDAP Foundation may revise this license from time to time. +Each revision is distinguished by a version number. You may use +this Software under terms of this license revision or under the +terms of any subsequent revision of the license. + +THIS SOFTWARE IS PROVIDED BY THE OPENLDAP FOUNDATION AND ITS +CONTRIBUTORS ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT +SHALL THE OPENLDAP FOUNDATION, ITS CONTRIBUTORS, OR THE AUTHOR(S) +OR OWNER(S) OF THE SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +The names of the authors and copyright holders must not be used in +advertising or otherwise to promote the sale, use or other dealing +in this Software without specific, written prior permission. Title +to copyright in this Software shall at all times remain with copyright +holders. + +OpenLDAP is a registered trademark of the OpenLDAP Foundation. + +Copyright 1999-2003 The OpenLDAP Foundation, Redwood City, +California, USA. All Rights Reserved. Permission to copy and +distribute verbatim copies of this document is granted. diff --git a/deps/liblmdb/Makefile b/deps/liblmdb/Makefile new file mode 100644 index 00000000..dbb5d698 --- /dev/null +++ b/deps/liblmdb/Makefile @@ -0,0 +1,111 @@ +# Makefile for liblmdb (Lightning memory-mapped database library). + +######################################################################## +# Configuration. The compiler options must enable threaded compilation. +# +# Preprocessor macros (for CPPFLAGS) of interest... +# Note that the defaults should already be correct for most +# platforms; you should not need to change any of these. 
+# Read their descriptions in mdb.c if you do: +# +# - MDB_USE_POSIX_SEM +# - MDB_DSYNC +# - MDB_FDATASYNC +# - MDB_FDATASYNC_WORKS +# - MDB_USE_PWRITEV +# - MDB_USE_ROBUST +# +# There may be other macros in mdb.c of interest. You should +# read mdb.c before changing any of them. +# +CC = gcc +AR = ar +W = -W -Wall -Wno-unused-parameter -Wbad-function-cast -Wuninitialized +THREADS = -pthread +OPT = -O2 -g +CFLAGS = $(THREADS) $(OPT) $(W) $(XCFLAGS) +LDLIBS = +SOLIBS = +prefix = /usr/local +mandir = $(prefix)/man + +######################################################################## + +IHDRS = lmdb.h +ILIBS = liblmdb.a liblmdb.so +IPROGS = mdb_stat mdb_copy mdb_dump mdb_load +IDOCS = mdb_stat.1 mdb_copy.1 mdb_dump.1 mdb_load.1 +PROGS = $(IPROGS) mtest mtest2 mtest3 mtest4 mtest5 +all: $(ILIBS) $(PROGS) + +install: $(ILIBS) $(IPROGS) $(IHDRS) + mkdir -p $(DESTDIR)$(prefix)/bin + mkdir -p $(DESTDIR)$(prefix)/lib + mkdir -p $(DESTDIR)$(prefix)/include + mkdir -p $(DESTDIR)$(prefix)/man/man1 + for f in $(IPROGS); do cp $$f $(DESTDIR)$(prefix)/bin; done + for f in $(ILIBS); do cp $$f $(DESTDIR)$(prefix)/lib; done + for f in $(IHDRS); do cp $$f $(DESTDIR)$(prefix)/include; done + for f in $(IDOCS); do cp $$f $(DESTDIR)$(mandir)/man1; done + +clean: + rm -rf $(PROGS) *.[ao] *.[ls]o *~ testdb + +test: all + rm -rf testdb && mkdir testdb + ./mtest && ./mdb_stat testdb + +liblmdb.a: mdb.o midl.o + $(AR) rs $@ mdb.o midl.o + +liblmdb.so: mdb.lo midl.lo +# $(CC) $(LDFLAGS) -pthread -shared -Wl,-Bsymbolic -o $@ mdb.o midl.o $(SOLIBS) + $(CC) $(LDFLAGS) -pthread -shared -o $@ mdb.lo midl.lo $(SOLIBS) + +mdb_stat: mdb_stat.o liblmdb.a +mdb_copy: mdb_copy.o liblmdb.a +mdb_dump: mdb_dump.o liblmdb.a +mdb_load: mdb_load.o liblmdb.a +mtest: mtest.o liblmdb.a +mtest2: mtest2.o liblmdb.a +mtest3: mtest3.o liblmdb.a +mtest4: mtest4.o liblmdb.a +mtest5: mtest5.o liblmdb.a +mtest6: mtest6.o liblmdb.a + +mdb.o: mdb.c lmdb.h midl.h + $(CC) $(CFLAGS) $(CPPFLAGS) -c mdb.c + +midl.o: midl.c midl.h + $(CC) $(CFLAGS) $(CPPFLAGS) -c midl.c + +mdb.lo: mdb.c lmdb.h midl.h + $(CC) $(CFLAGS) -fPIC $(CPPFLAGS) -c mdb.c -o $@ + +midl.lo: midl.c midl.h + $(CC) $(CFLAGS) -fPIC $(CPPFLAGS) -c midl.c -o $@ + +%: %.o + $(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@ + +%.o: %.c lmdb.h + $(CC) $(CFLAGS) $(CPPFLAGS) -c $< + +COV_FLAGS=-fprofile-arcs -ftest-coverage +COV_OBJS=xmdb.o xmidl.o + +coverage: xmtest + for i in mtest*.c [0-9]*.c; do j=`basename \$$i .c`; $(MAKE) $$j.o; \ + gcc -o x$$j $$j.o $(COV_OBJS) -pthread $(COV_FLAGS); \ + rm -rf testdb; mkdir testdb; ./x$$j; done + gcov xmdb.c + gcov xmidl.c + +xmtest: mtest.o xmdb.o xmidl.o + gcc -o xmtest mtest.o xmdb.o xmidl.o -pthread $(COV_FLAGS) + +xmdb.o: mdb.c lmdb.h midl.h + $(CC) $(CFLAGS) -fPIC $(CPPFLAGS) -O0 $(COV_FLAGS) -c mdb.c -o $@ + +xmidl.o: midl.c midl.h + $(CC) $(CFLAGS) -fPIC $(CPPFLAGS) -O0 $(COV_FLAGS) -c midl.c -o $@ diff --git a/deps/liblmdb/lmdb.h b/deps/liblmdb/lmdb.h new file mode 100644 index 00000000..fa7d62c5 --- /dev/null +++ b/deps/liblmdb/lmdb.h @@ -0,0 +1,1584 @@ +/** @file lmdb.h + * @brief Lightning memory-mapped database library + * + * @mainpage Lightning Memory-Mapped Database Manager (LMDB) + * + * @section intro_sec Introduction + * LMDB is a Btree-based database management library modeled loosely on the + * BerkeleyDB API, but much simplified. The entire database is exposed + * in a memory map, and all data fetches return data directly + * from the mapped memory, so no malloc's or memcpy's occur during + * data fetches. 
As such, the library is extremely simple because it + * requires no page caching layer of its own, and it is extremely high + * performance and memory-efficient. It is also fully transactional with + * full ACID semantics, and when the memory map is read-only, the + * database integrity cannot be corrupted by stray pointer writes from + * application code. + * + * The library is fully thread-aware and supports concurrent read/write + * access from multiple processes and threads. Data pages use a copy-on- + * write strategy so no active data pages are ever overwritten, which + * also provides resistance to corruption and eliminates the need of any + * special recovery procedures after a system crash. Writes are fully + * serialized; only one write transaction may be active at a time, which + * guarantees that writers can never deadlock. The database structure is + * multi-versioned so readers run with no locks; writers cannot block + * readers, and readers don't block writers. + * + * Unlike other well-known database mechanisms which use either write-ahead + * transaction logs or append-only data writes, LMDB requires no maintenance + * during operation. Both write-ahead loggers and append-only databases + * require periodic checkpointing and/or compaction of their log or database + * files otherwise they grow without bound. LMDB tracks free pages within + * the database and re-uses them for new write operations, so the database + * size does not grow without bound in normal use. + * + * The memory map can be used as a read-only or read-write map. It is + * read-only by default as this provides total immunity to corruption. + * Using read-write mode offers much higher write performance, but adds + * the possibility for stray application writes thru pointers to silently + * corrupt the database. Of course if your application code is known to + * be bug-free (...) then this is not an issue. + * + * @section caveats_sec Caveats + * Troubleshooting the lock file, plus semaphores on BSD systems: + * + * - A broken lockfile can cause sync issues. + * Stale reader transactions left behind by an aborted program + * cause further writes to grow the database quickly, and + * stale locks can block further operation. + * + * Fix: Check for stale readers periodically, using the + * #mdb_reader_check function or the \ref mdb_stat_1 "mdb_stat" tool. + * Stale writers will be cleared automatically on some systems: + * - Windows - automatic + * - Linux, systems using POSIX mutexes with Robust option - automatic + * - not on BSD, systems using POSIX semaphores. + * Otherwise just make all programs using the database close it; + * the lockfile is always reset on first open of the environment. + * + * - On BSD systems or others configured with MDB_USE_POSIX_SEM, + * startup can fail due to semaphores owned by another userid. + * + * Fix: Open and close the database as the user which owns the + * semaphores (likely last user) or as root, while no other + * process is using the database. + * + * Restrictions/caveats (in addition to those listed for some functions): + * + * - Only the database owner should normally use the database on + * BSD systems or when otherwise configured with MDB_USE_POSIX_SEM. + * Multiple users can cause startup to fail later, as noted above. + * + * - There is normally no pure read-only mode, since readers need write + * access to locks and lock file. Exceptions: On read-only filesystems + * or with the #MDB_NOLOCK flag described under #mdb_env_open(). 
+ * + * - By default, in versions before 0.9.10, unused portions of the data + * file might receive garbage data from memory freed by other code. + * (This does not happen when using the #MDB_WRITEMAP flag.) As of + * 0.9.10 the default behavior is to initialize such memory before + * writing to the data file. Since there may be a slight performance + * cost due to this initialization, applications may disable it using + * the #MDB_NOMEMINIT flag. Applications handling sensitive data + * which must not be written should not use this flag. This flag is + * irrelevant when using #MDB_WRITEMAP. + * + * - A thread can only use one transaction at a time, plus any child + * transactions. Each transaction belongs to one thread. See below. + * The #MDB_NOTLS flag changes this for read-only transactions. + * + * - Use an MDB_env* in the process which opened it, without fork()ing. + * + * - Do not have open an LMDB database twice in the same process at + * the same time. Not even from a plain open() call - close()ing it + * breaks flock() advisory locking. + * + * - Avoid long-lived transactions. Read transactions prevent + * reuse of pages freed by newer write transactions, thus the + * database can grow quickly. Write transactions prevent + * other write transactions, since writes are serialized. + * + * - Avoid suspending a process with active transactions. These + * would then be "long-lived" as above. Also read transactions + * suspended when writers commit could sometimes see wrong data. + * + * ...when several processes can use a database concurrently: + * + * - Avoid aborting a process with an active transaction. + * The transaction becomes "long-lived" as above until a check + * for stale readers is performed or the lockfile is reset, + * since the process may not remove it from the lockfile. + * + * This does not apply to write transactions if the system clears + * stale writers, see above. + * + * - If you do that anyway, do a periodic check for stale readers. Or + * close the environment once in a while, so the lockfile can get reset. + * + * - Do not use LMDB databases on remote filesystems, even between + * processes on the same host. This breaks flock() on some OSes, + * possibly memory map sync, and certainly sync between programs + * on different hosts. + * + * - Opening a database can fail if another process is opening or + * closing it at exactly the same time. + * + * @author Howard Chu, Symas Corporation. + * + * @copyright Copyright 2011-2015 Howard Chu, Symas Corp. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + * + * @par Derived From: + * This code is derived from btree.c written by Martin Hedenfalk. + * + * Copyright (c) 2009, 2010 Martin Hedenfalk + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef _LMDB_H_ +#define _LMDB_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** Unix permissions for creating files, or dummy definition for Windows */ +#ifdef _MSC_VER +typedef int mdb_mode_t; +#else +typedef mode_t mdb_mode_t; +#endif + +/** An abstraction for a file handle. + * On POSIX systems file handles are small integers. On Windows + * they're opaque pointers. + */ +#ifdef _WIN32 +typedef void *mdb_filehandle_t; +#else +typedef int mdb_filehandle_t; +#endif + +/** @defgroup mdb LMDB API + * @{ + * @brief OpenLDAP Lightning Memory-Mapped Database Manager + */ +/** @defgroup Version Version Macros + * @{ + */ +/** Library major version */ +#define MDB_VERSION_MAJOR 0 +/** Library minor version */ +#define MDB_VERSION_MINOR 9 +/** Library patch version */ +#define MDB_VERSION_PATCH 17 + +/** Combine args a,b,c into a single integer for easy version comparisons */ +#define MDB_VERINT(a,b,c) (((a) << 24) | ((b) << 16) | (c)) + +/** The full library version as a single integer */ +#define MDB_VERSION_FULL \ + MDB_VERINT(MDB_VERSION_MAJOR,MDB_VERSION_MINOR,MDB_VERSION_PATCH) + +/** The release date of this library version */ +#define MDB_VERSION_DATE "November 30, 2015" + +/** A stringifier for the version info */ +#define MDB_VERSTR(a,b,c,d) "LMDB " #a "." #b "." #c ": (" d ")" + +/** A helper for the stringifier macro */ +#define MDB_VERFOO(a,b,c,d) MDB_VERSTR(a,b,c,d) + +/** The full library version as a C string */ +#define MDB_VERSION_STRING \ + MDB_VERFOO(MDB_VERSION_MAJOR,MDB_VERSION_MINOR,MDB_VERSION_PATCH,MDB_VERSION_DATE) +/** @} */ + +/** @brief Opaque structure for a database environment. + * + * A DB environment supports multiple databases, all residing in the same + * shared-memory map. + */ +typedef struct MDB_env MDB_env; + +/** @brief Opaque structure for a transaction handle. + * + * All database operations require a transaction handle. Transactions may be + * read-only or read-write. + */ +typedef struct MDB_txn MDB_txn; + +/** @brief A handle for an individual database in the DB environment. */ +typedef unsigned int MDB_dbi; + +/** @brief Opaque structure for navigating through a database */ +typedef struct MDB_cursor MDB_cursor; + +/** @brief Generic structure used for passing keys and data in and out + * of the database. + * + * Values returned from the database are valid only until a subsequent + * update operation, or the end of the transaction. Do not modify or + * free them, they commonly point into the database itself. + * + * Key sizes must be between 1 and #mdb_env_get_maxkeysize() inclusive. + * The same applies to data sizes in databases with the #MDB_DUPSORT flag. + * Other data items can in theory be from 0 to 0xffffffff bytes long. + */ +typedef struct MDB_val { + size_t mv_size; /**< size of the data item */ + void *mv_data; /**< address of the data item */ +} MDB_val; + +/** @brief A callback function used to compare two keys in a database */ +typedef int (MDB_cmp_func)(const MDB_val *a, const MDB_val *b); + +/** @brief A callback function used to relocate a position-dependent data item + * in a fixed-address database. 
+ * + * The \b newptr gives the item's desired address in + * the memory map, and \b oldptr gives its previous address. The item's actual + * data resides at the address in \b item. This callback is expected to walk + * through the fields of the record in \b item and modify any + * values based at the \b oldptr address to be relative to the \b newptr address. + * @param[in,out] item The item that is to be relocated. + * @param[in] oldptr The previous address. + * @param[in] newptr The new address to relocate to. + * @param[in] relctx An application-provided context, set by #mdb_set_relctx(). + * @todo This feature is currently unimplemented. + */ +typedef void (MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr, void *relctx); + +/** @defgroup mdb_env Environment Flags + * @{ + */ + /** mmap at a fixed address (experimental) */ +#define MDB_FIXEDMAP 0x01 + /** no environment directory */ +#define MDB_NOSUBDIR 0x4000 + /** don't fsync after commit */ +#define MDB_NOSYNC 0x10000 + /** read only */ +#define MDB_RDONLY 0x20000 + /** don't fsync metapage after commit */ +#define MDB_NOMETASYNC 0x40000 + /** use writable mmap */ +#define MDB_WRITEMAP 0x80000 + /** use asynchronous msync when #MDB_WRITEMAP is used */ +#define MDB_MAPASYNC 0x100000 + /** tie reader locktable slots to #MDB_txn objects instead of to threads */ +#define MDB_NOTLS 0x200000 + /** don't do any locking, caller must manage their own locks */ +#define MDB_NOLOCK 0x400000 + /** don't do readahead (no effect on Windows) */ +#define MDB_NORDAHEAD 0x800000 + /** don't initialize malloc'd memory before writing to datafile */ +#define MDB_NOMEMINIT 0x1000000 +/** @} */ + +/** @defgroup mdb_dbi_open Database Flags + * @{ + */ + /** use reverse string keys */ +#define MDB_REVERSEKEY 0x02 + /** use sorted duplicates */ +#define MDB_DUPSORT 0x04 + /** numeric keys in native byte order: either unsigned int or size_t. + * The keys must all be of the same size. */ +#define MDB_INTEGERKEY 0x08 + /** with #MDB_DUPSORT, sorted dup items have fixed size */ +#define MDB_DUPFIXED 0x10 + /** with #MDB_DUPSORT, dups are #MDB_INTEGERKEY-style integers */ +#define MDB_INTEGERDUP 0x20 + /** with #MDB_DUPSORT, use reverse string dups */ +#define MDB_REVERSEDUP 0x40 + /** create DB if not already existing */ +#define MDB_CREATE 0x40000 +/** @} */ + +/** @defgroup mdb_put Write Flags + * @{ + */ +/** For put: Don't write if the key already exists. */ +#define MDB_NOOVERWRITE 0x10 +/** Only for #MDB_DUPSORT
+ * For put: don't write if the key and data pair already exist.
+ * For mdb_cursor_del: remove all duplicate data items. + */ +#define MDB_NODUPDATA 0x20 +/** For mdb_cursor_put: overwrite the current key/data pair */ +#define MDB_CURRENT 0x40 +/** For put: Just reserve space for data, don't copy it. Return a + * pointer to the reserved space. + */ +#define MDB_RESERVE 0x10000 +/** Data is being appended, don't split full pages. */ +#define MDB_APPEND 0x20000 +/** Duplicate data is being appended, don't split full pages. */ +#define MDB_APPENDDUP 0x40000 +/** Store multiple data items in one call. Only for #MDB_DUPFIXED. */ +#define MDB_MULTIPLE 0x80000 +/* @} */ + +/** @defgroup mdb_copy Copy Flags + * @{ + */ +/** Compacting copy: Omit free space from copy, and renumber all + * pages sequentially. + */ +#define MDB_CP_COMPACT 0x01 +/* @} */ + +/** @brief Cursor Get operations. + * + * This is the set of all operations for retrieving data + * using a cursor. + */ +typedef enum MDB_cursor_op { + MDB_FIRST, /**< Position at first key/data item */ + MDB_FIRST_DUP, /**< Position at first data item of current key. + Only for #MDB_DUPSORT */ + MDB_GET_BOTH, /**< Position at key/data pair. Only for #MDB_DUPSORT */ + MDB_GET_BOTH_RANGE, /**< position at key, nearest data. Only for #MDB_DUPSORT */ + MDB_GET_CURRENT, /**< Return key/data at current cursor position */ + MDB_GET_MULTIPLE, /**< Return key and up to a page of duplicate data items + from current cursor position. Move cursor to prepare + for #MDB_NEXT_MULTIPLE. Only for #MDB_DUPFIXED */ + MDB_LAST, /**< Position at last key/data item */ + MDB_LAST_DUP, /**< Position at last data item of current key. + Only for #MDB_DUPSORT */ + MDB_NEXT, /**< Position at next data item */ + MDB_NEXT_DUP, /**< Position at next data item of current key. + Only for #MDB_DUPSORT */ + MDB_NEXT_MULTIPLE, /**< Return key and up to a page of duplicate data items + from next cursor position. Move cursor to prepare + for #MDB_NEXT_MULTIPLE. Only for #MDB_DUPFIXED */ + MDB_NEXT_NODUP, /**< Position at first data item of next key */ + MDB_PREV, /**< Position at previous data item */ + MDB_PREV_DUP, /**< Position at previous data item of current key. + Only for #MDB_DUPSORT */ + MDB_PREV_NODUP, /**< Position at last data item of previous key */ + MDB_SET, /**< Position at specified key */ + MDB_SET_KEY, /**< Position at specified key, return key + data */ + MDB_SET_RANGE /**< Position at first key greater than or equal to specified key. 
*/ +} MDB_cursor_op; + +/** @defgroup errors Return Codes + * + * BerkeleyDB uses -30800 to -30999, we'll go under them + * @{ + */ + /** Successful result */ +#define MDB_SUCCESS 0 + /** key/data pair already exists */ +#define MDB_KEYEXIST (-30799) + /** key/data pair not found (EOF) */ +#define MDB_NOTFOUND (-30798) + /** Requested page not found - this usually indicates corruption */ +#define MDB_PAGE_NOTFOUND (-30797) + /** Located page was wrong type */ +#define MDB_CORRUPTED (-30796) + /** Update of meta page failed or environment had fatal error */ +#define MDB_PANIC (-30795) + /** Environment version mismatch */ +#define MDB_VERSION_MISMATCH (-30794) + /** File is not a valid LMDB file */ +#define MDB_INVALID (-30793) + /** Environment mapsize reached */ +#define MDB_MAP_FULL (-30792) + /** Environment maxdbs reached */ +#define MDB_DBS_FULL (-30791) + /** Environment maxreaders reached */ +#define MDB_READERS_FULL (-30790) + /** Too many TLS keys in use - Windows only */ +#define MDB_TLS_FULL (-30789) + /** Txn has too many dirty pages */ +#define MDB_TXN_FULL (-30788) + /** Cursor stack too deep - internal error */ +#define MDB_CURSOR_FULL (-30787) + /** Page has not enough space - internal error */ +#define MDB_PAGE_FULL (-30786) + /** Database contents grew beyond environment mapsize */ +#define MDB_MAP_RESIZED (-30785) + /** Operation and DB incompatible, or DB type changed. This can mean: + *
    + *
  • The operation expects an #MDB_DUPSORT / #MDB_DUPFIXED database. + *
  • Opening a named DB when the unnamed DB has #MDB_DUPSORT / #MDB_INTEGERKEY. + *
  • Accessing a data record as a database, or vice versa. + *
  • The database was dropped and recreated with different flags. + *
+ */ +#define MDB_INCOMPATIBLE (-30784) + /** Invalid reuse of reader locktable slot */ +#define MDB_BAD_RSLOT (-30783) + /** Transaction must abort, has a child, or is invalid */ +#define MDB_BAD_TXN (-30782) + /** Unsupported size of key/DB name/data, or wrong DUPFIXED size */ +#define MDB_BAD_VALSIZE (-30781) + /** The specified DBI was changed unexpectedly */ +#define MDB_BAD_DBI (-30780) + /** The last defined error code */ +#define MDB_LAST_ERRCODE MDB_BAD_DBI +/** @} */ + +/** @brief Statistics for a database in the environment */ +typedef struct MDB_stat { + unsigned int ms_psize; /**< Size of a database page. + This is currently the same for all databases. */ + unsigned int ms_depth; /**< Depth (height) of the B-tree */ + size_t ms_branch_pages; /**< Number of internal (non-leaf) pages */ + size_t ms_leaf_pages; /**< Number of leaf pages */ + size_t ms_overflow_pages; /**< Number of overflow pages */ + size_t ms_entries; /**< Number of data items */ +} MDB_stat; + +/** @brief Information about the environment */ +typedef struct MDB_envinfo { + void *me_mapaddr; /**< Address of map, if fixed */ + size_t me_mapsize; /**< Size of the data memory map */ + size_t me_last_pgno; /**< ID of the last used page */ + size_t me_last_txnid; /**< ID of the last committed transaction */ + unsigned int me_maxreaders; /**< max reader slots in the environment */ + unsigned int me_numreaders; /**< max reader slots used in the environment */ +} MDB_envinfo; + + /** @brief Return the LMDB library version information. + * + * @param[out] major if non-NULL, the library major version number is copied here + * @param[out] minor if non-NULL, the library minor version number is copied here + * @param[out] patch if non-NULL, the library patch version number is copied here + * @retval "version string" The library version as a string + */ +char *mdb_version(int *major, int *minor, int *patch); + + /** @brief Return a string describing a given error code. + * + * This function is a superset of the ANSI C X3.159-1989 (ANSI C) strerror(3) + * function. If the error code is greater than or equal to 0, then the string + * returned by the system function strerror(3) is returned. If the error code + * is less than 0, an error string corresponding to the LMDB library error is + * returned. See @ref errors for a list of LMDB-specific error codes. + * @param[in] err The error code + * @retval "error message" The description of the error + */ +char *mdb_strerror(int err); + + /** @brief Create an LMDB environment handle. + * + * This function allocates memory for a #MDB_env structure. To release + * the allocated memory and discard the handle, call #mdb_env_close(). + * Before the handle may be used, it must be opened using #mdb_env_open(). + * Various other options may also need to be set before opening the handle, + * e.g. #mdb_env_set_mapsize(), #mdb_env_set_maxreaders(), #mdb_env_set_maxdbs(), + * depending on usage requirements. + * @param[out] env The address where the new handle will be stored + * @return A non-zero error value on failure and 0 on success. + */ +int mdb_env_create(MDB_env **env); + + /** @brief Open an environment handle. + * + * If this function fails, #mdb_env_close() must be called to discard the #MDB_env handle. + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] path The directory in which the database files reside. This + * directory must already exist and be writable. + * @param[in] flags Special options for this environment. 
This parameter + * must be set to 0 or by bitwise OR'ing together one or more of the + * values described here. + * Flags set by mdb_env_set_flags() are also used. + *
    + *
  • #MDB_FIXEDMAP + * use a fixed address for the mmap region. This flag must be specified + * when creating the environment, and is stored persistently in the environment. + * If successful, the memory map will always reside at the same virtual address + * and pointers used to reference data items in the database will be constant + * across multiple invocations. This option may not always work, depending on + * how the operating system has allocated memory to shared libraries and other uses. + * The feature is highly experimental. + *
  • #MDB_NOSUBDIR + * By default, LMDB creates its environment in a directory whose + * pathname is given in \b path, and creates its data and lock files + * under that directory. With this option, \b path is used as-is for + * the database main data file. The database lock file is the \b path + * with "-lock" appended. + *
  • #MDB_RDONLY + * Open the environment in read-only mode. No write operations will be + * allowed. LMDB will still modify the lock file - except on read-only + * filesystems, where LMDB does not use locks. + *
  • #MDB_WRITEMAP + * Use a writeable memory map unless MDB_RDONLY is set. This is faster + * and uses fewer mallocs, but loses protection from application bugs + * like wild pointer writes and other bad updates into the database. + * Incompatible with nested transactions. + * Do not mix processes with and without MDB_WRITEMAP on the same + * environment. This can defeat durability (#mdb_env_sync etc). + *
  • #MDB_NOMETASYNC + * Flush system buffers to disk only once per transaction, omit the + * metadata flush. Defer that until the system flushes files to disk, + * or next non-MDB_RDONLY commit or #mdb_env_sync(). This optimization + * maintains database integrity, but a system crash may undo the last + * committed transaction. I.e. it preserves the ACI (atomicity, + * consistency, isolation) but not D (durability) database property. + * This flag may be changed at any time using #mdb_env_set_flags(). + *
  • #MDB_NOSYNC + * Don't flush system buffers to disk when committing a transaction. + * This optimization means a system crash can corrupt the database or + * lose the last transactions if buffers are not yet flushed to disk. + * The risk is governed by how often the system flushes dirty buffers + * to disk and how often #mdb_env_sync() is called. However, if the + * filesystem preserves write order and the #MDB_WRITEMAP flag is not + * used, transactions exhibit ACI (atomicity, consistency, isolation) + * properties and only lose D (durability). I.e. database integrity + * is maintained, but a system crash may undo the final transactions. + * Note that (#MDB_NOSYNC | #MDB_WRITEMAP) leaves the system with no + * hint for when to write transactions to disk, unless #mdb_env_sync() + * is called. (#MDB_MAPASYNC | #MDB_WRITEMAP) may be preferable. + * This flag may be changed at any time using #mdb_env_set_flags(). + *
  • #MDB_MAPASYNC + * When using #MDB_WRITEMAP, use asynchronous flushes to disk. + * As with #MDB_NOSYNC, a system crash can then corrupt the + * database or lose the last transactions. Calling #mdb_env_sync() + * ensures on-disk database integrity until next commit. + * This flag may be changed at any time using #mdb_env_set_flags(). + *
  • #MDB_NOTLS + * Don't use Thread-Local Storage. Tie reader locktable slots to + * #MDB_txn objects instead of to threads. I.e. #mdb_txn_reset() keeps + * the slot reserved for the #MDB_txn object. A thread may use parallel + * read-only transactions. A read-only transaction may span threads if + * the user synchronizes its use. Applications that multiplex many + * user threads over individual OS threads need this option. Such an + * application must also serialize the write transactions in an OS + * thread, since LMDB's write locking is unaware of the user threads. + *
  • #MDB_NOLOCK + * Don't do any locking. If concurrent access is anticipated, the + * caller must manage all concurrency itself. For proper operation + * the caller must enforce single-writer semantics, and must ensure + * that no readers are using old transactions while a writer is + * active. The simplest approach is to use an exclusive lock so that + * no readers may be active at all when a writer begins. + *
  • #MDB_NORDAHEAD + * Turn off readahead. Most operating systems perform readahead on + * read requests by default. This option turns it off if the OS + * supports it. Turning it off may help random read performance + * when the DB is larger than RAM and system RAM is full. + * The option is not implemented on Windows. + *
  • #MDB_NOMEMINIT + * Don't initialize malloc'd memory before writing to unused spaces + * in the data file. By default, memory for pages written to the data + * file is obtained using malloc. While these pages may be reused in + * subsequent transactions, freshly malloc'd pages will be initialized + * to zeroes before use. This avoids persisting leftover data from other + * code (that used the heap and subsequently freed the memory) into the + * data file. Note that many other system libraries may allocate + * and free memory from the heap for arbitrary uses. E.g., stdio may + * use the heap for file I/O buffers. This initialization step has a + * modest performance cost so some applications may want to disable + * it using this flag. This option can be a problem for applications + * which handle sensitive data like passwords, and it makes memory + * checkers like Valgrind noisy. This flag is not needed with #MDB_WRITEMAP, + * which writes directly to the mmap instead of using malloc for pages. The + * initialization is also skipped if #MDB_RESERVE is used; the + * caller is expected to overwrite all of the memory that was + * reserved in that case. + * This flag may be changed at any time using #mdb_env_set_flags(). + *
+ * @param[in] mode The UNIX permissions to set on created files and semaphores. + * This parameter is ignored on Windows. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_VERSION_MISMATCH - the version of the LMDB library doesn't match the + * version that created the database environment. + *
  • #MDB_INVALID - the environment file headers are corrupted. + *
  • ENOENT - the directory specified by the path parameter doesn't exist. + *
  • EACCES - the user didn't have permission to access the environment files. + *
  • EAGAIN - the environment was locked by another process. + *
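+ *
+ *	A minimal usage sketch (illustrative only; the "./testdb" directory
+ *	name is hypothetical and must already exist):
+ * @code
+ *	MDB_env *env;
+ *	int rc = mdb_env_create(&env);
+ *	if (rc == MDB_SUCCESS) {
+ *		rc = mdb_env_open(env, "./testdb", 0, 0664);
+ *		if (rc != MDB_SUCCESS)
+ *			mdb_env_close(env);	// open failed: discard the handle
+ *	}
+ * @endcode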
+ */ +int mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mdb_mode_t mode); + + /** @brief Copy an LMDB environment to the specified path. + * + * This function may be used to make a backup of an existing environment. + * No lockfile is created, since it gets recreated at need. + * @note This call can trigger significant file size growth if run in + * parallel with write transactions, because it employs a read-only + * transaction. See long-lived transactions under @ref caveats_sec. + * @param[in] env An environment handle returned by #mdb_env_create(). It + * must have already been opened successfully. + * @param[in] path The directory in which the copy will reside. This + * directory must already exist and be writable but must otherwise be + * empty. + * @return A non-zero error value on failure and 0 on success. + */ +int mdb_env_copy(MDB_env *env, const char *path); + + /** @brief Copy an LMDB environment to the specified file descriptor. + * + * This function may be used to make a backup of an existing environment. + * No lockfile is created, since it gets recreated at need. + * @note This call can trigger significant file size growth if run in + * parallel with write transactions, because it employs a read-only + * transaction. See long-lived transactions under @ref caveats_sec. + * @param[in] env An environment handle returned by #mdb_env_create(). It + * must have already been opened successfully. + * @param[in] fd The filedescriptor to write the copy to. It must + * have already been opened for Write access. + * @return A non-zero error value on failure and 0 on success. + */ +int mdb_env_copyfd(MDB_env *env, mdb_filehandle_t fd); + + /** @brief Copy an LMDB environment to the specified path, with options. + * + * This function may be used to make a backup of an existing environment. + * No lockfile is created, since it gets recreated at need. + * @note This call can trigger significant file size growth if run in + * parallel with write transactions, because it employs a read-only + * transaction. See long-lived transactions under @ref caveats_sec. + * @param[in] env An environment handle returned by #mdb_env_create(). It + * must have already been opened successfully. + * @param[in] path The directory in which the copy will reside. This + * directory must already exist and be writable but must otherwise be + * empty. + * @param[in] flags Special options for this operation. This parameter + * must be set to 0 or by bitwise OR'ing together one or more of the + * values described here. + *
    + *
  • #MDB_CP_COMPACT - Perform compaction while copying: omit free + * pages and sequentially renumber all pages in output. This option + * consumes more CPU and runs more slowly than the default. + *
+ * @return A non-zero error value on failure and 0 on success. + */ +int mdb_env_copy2(MDB_env *env, const char *path, unsigned int flags); + + /** @brief Copy an LMDB environment to the specified file descriptor, + * with options. + * + * This function may be used to make a backup of an existing environment. + * No lockfile is created, since it gets recreated at need. See + * #mdb_env_copy2() for further details. + * @note This call can trigger significant file size growth if run in + * parallel with write transactions, because it employs a read-only + * transaction. See long-lived transactions under @ref caveats_sec. + * @param[in] env An environment handle returned by #mdb_env_create(). It + * must have already been opened successfully. + * @param[in] fd The filedescriptor to write the copy to. It must + * have already been opened for Write access. + * @param[in] flags Special options for this operation. + * See #mdb_env_copy2() for options. + * @return A non-zero error value on failure and 0 on success. + */ +int mdb_env_copyfd2(MDB_env *env, mdb_filehandle_t fd, unsigned int flags); + + /** @brief Return statistics about the LMDB environment. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] stat The address of an #MDB_stat structure + * where the statistics will be copied + */ +int mdb_env_stat(MDB_env *env, MDB_stat *stat); + + /** @brief Return information about the LMDB environment. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] stat The address of an #MDB_envinfo structure + * where the information will be copied + */ +int mdb_env_info(MDB_env *env, MDB_envinfo *stat); + + /** @brief Flush the data buffers to disk. + * + * Data is always written to disk when #mdb_txn_commit() is called, + * but the operating system may keep it buffered. LMDB always flushes + * the OS buffers upon commit as well, unless the environment was + * opened with #MDB_NOSYNC or in part #MDB_NOMETASYNC. This call is + * not valid if the environment was opened with #MDB_RDONLY. + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] force If non-zero, force a synchronous flush. Otherwise + * if the environment has the #MDB_NOSYNC flag set the flushes + * will be omitted, and with #MDB_MAPASYNC they will be asynchronous. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EACCES - the environment is read-only. + *
  • EINVAL - an invalid parameter was specified. + *
  • EIO - an error occurred during synchronization. + *
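+ *
+ *	A brief sketch (assumes an environment opened with #MDB_NOSYNC or
+ *	#MDB_MAPASYNC; illustrative only):
+ * @code
+ *	// Force a synchronous flush at a point where durability matters.
+ *	int rc = mdb_env_sync(env, 1);
+ * @endcode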
+ */ +int mdb_env_sync(MDB_env *env, int force); + + /** @brief Close the environment and release the memory map. + * + * Only a single thread may call this function. All transactions, databases, + * and cursors must already be closed before calling this function. Attempts to + * use any such handles after calling this function will cause a SIGSEGV. + * The environment handle will be freed and must not be used again after this call. + * @param[in] env An environment handle returned by #mdb_env_create() + */ +void mdb_env_close(MDB_env *env); + + /** @brief Set environment flags. + * + * This may be used to set some flags in addition to those from + * #mdb_env_open(), or to unset these flags. If several threads + * change the flags at the same time, the result is undefined. + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] flags The flags to change, bitwise OR'ed together + * @param[in] onoff A non-zero value sets the flags, zero clears them. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff); + + /** @brief Get environment flags. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] flags The address of an integer to store the flags + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_env_get_flags(MDB_env *env, unsigned int *flags); + + /** @brief Return the path that was used in #mdb_env_open(). + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] path Address of a string pointer to contain the path. This + * is the actual string in the environment, not a copy. It should not be + * altered in any way. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_env_get_path(MDB_env *env, const char **path); + + /** @brief Return the filedescriptor for the given environment. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] fd Address of a mdb_filehandle_t to contain the descriptor. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_env_get_fd(MDB_env *env, mdb_filehandle_t *fd); + + /** @brief Set the size of the memory map to use for this environment. + * + * The size should be a multiple of the OS page size. The default is + * 10485760 bytes. The size of the memory map is also the maximum size + * of the database. The value should be chosen as large as possible, + * to accommodate future growth of the database. + * This function should be called after #mdb_env_create() and before #mdb_env_open(). + * It may be called at later times if no transactions are active in + * this process. Note that the library does not check for this condition, + * the caller must ensure it explicitly. + * + * The new size takes effect immediately for the current process but + * will not be persisted to any others until a write transaction has been + * committed by the current process. Also, only mapsize increases are + * persisted into the environment. + * + * If the mapsize is increased by another process, and data has grown + * beyond the range of the current mapsize, #mdb_txn_begin() will + * return #MDB_MAP_RESIZED. This function may be called with a size + * of zero to adopt the new size. + * + * Any attempt to set a size smaller than the space already consumed + * by the environment will be silently changed to the current size of the used space. + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] size The size in bytes + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified, or the environment has + * an active write transaction. + *
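+ *
+ *	A minimal sketch (the 1 GiB figure and "./testdb" path are arbitrary
+ *	examples):
+ * @code
+ *	MDB_env *env;
+ *	mdb_env_create(&env);
+ *	mdb_env_set_mapsize(env, (size_t)1 << 30);	// 1 GiB map, before open
+ *	mdb_env_open(env, "./testdb", 0, 0664);
+ * @endcode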
+ */ +int mdb_env_set_mapsize(MDB_env *env, size_t size); + + /** @brief Set the maximum number of threads/reader slots for the environment. + * + * This defines the number of slots in the lock table that is used to track readers in the + * environment. The default is 126. + * Starting a read-only transaction normally ties a lock table slot to the + * current thread until the environment closes or the thread exits. If + * MDB_NOTLS is in use, #mdb_txn_begin() instead ties the slot to the + * MDB_txn object until it or the #MDB_env object is destroyed. + * This function may only be called after #mdb_env_create() and before #mdb_env_open(). + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] readers The maximum number of reader lock table slots + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified, or the environment is already open. + *
+ */ +int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers); + + /** @brief Get the maximum number of threads/reader slots for the environment. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] readers Address of an integer to store the number of readers + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers); + + /** @brief Set the maximum number of named databases for the environment. + * + * This function is only needed if multiple databases will be used in the + * environment. Simpler applications that use the environment as a single + * unnamed database can ignore this option. + * This function may only be called after #mdb_env_create() and before #mdb_env_open(). + * + * Currently a moderate number of slots are cheap but a huge number gets + * expensive: 7-120 words per transaction, and every #mdb_dbi_open() + * does a linear search of the opened slots. + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] dbs The maximum number of databases + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified, or the environment is already open. + *
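+ *
+ *	A minimal sketch (the count of 4 and "./testdb" path are arbitrary
+ *	examples):
+ * @code
+ *	MDB_env *env;
+ *	mdb_env_create(&env);
+ *	mdb_env_set_maxdbs(env, 4);	// allow up to four named databases
+ *	mdb_env_open(env, "./testdb", 0, 0664);
+ * @endcode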
+ */ +int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs); + + /** @brief Get the maximum size of keys and #MDB_DUPSORT data we can write. + * + * Depends on the compile-time constant #MDB_MAXKEYSIZE. Default 511. + * See @ref MDB_val. + * @param[in] env An environment handle returned by #mdb_env_create() + * @return The maximum size of a key we can write + */ +int mdb_env_get_maxkeysize(MDB_env *env); + + /** @brief Set application information associated with the #MDB_env. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] ctx An arbitrary pointer for whatever the application needs. + * @return A non-zero error value on failure and 0 on success. + */ +int mdb_env_set_userctx(MDB_env *env, void *ctx); + + /** @brief Get the application information associated with the #MDB_env. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @return The pointer set by #mdb_env_set_userctx(). + */ +void *mdb_env_get_userctx(MDB_env *env); + + /** @brief A callback function for most LMDB assert() failures, + * called before printing the message and aborting. + * + * @param[in] env An environment handle returned by #mdb_env_create(). + * @param[in] msg The assertion message, not including newline. + */ +typedef void MDB_assert_func(MDB_env *env, const char *msg); + + /** Set or reset the assert() callback of the environment. + * Disabled if liblmdb is built with NDEBUG. + * @note This hack should become obsolete as lmdb's error handling matures. + * @param[in] env An environment handle returned by #mdb_env_create(). + * @param[in] func An #MDB_assert_func function, or 0. + * @return A non-zero error value on failure and 0 on success. + */ +int mdb_env_set_assert(MDB_env *env, MDB_assert_func *func); + + /** @brief Create a transaction for use with the environment. + * + * The transaction handle may be discarded using #mdb_txn_abort() or #mdb_txn_commit(). + * @note A transaction and its cursors must only be used by a single + * thread, and a thread may only have a single transaction at a time. + * If #MDB_NOTLS is in use, this does not apply to read-only transactions. + * @note Cursors may not span transactions. + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] parent If this parameter is non-NULL, the new transaction + * will be a nested transaction, with the transaction indicated by \b parent + * as its parent. Transactions may be nested to any level. A parent + * transaction and its cursors may not issue any other operations than + * mdb_txn_commit and mdb_txn_abort while it has active child transactions. + * @param[in] flags Special options for this transaction. This parameter + * must be set to 0 or by bitwise OR'ing together one or more of the + * values described here. + *
    + *
  • #MDB_RDONLY + * This transaction will not perform any write operations. + *
+ * @param[out] txn Address where the new #MDB_txn handle will be stored + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_PANIC - a fatal error occurred earlier and the environment + * must be shut down. + *
  • #MDB_MAP_RESIZED - another process wrote data beyond this MDB_env's + * mapsize and this environment's map must be resized as well. + * See #mdb_env_set_mapsize(). + *
  • #MDB_READERS_FULL - a read-only transaction was requested and + * the reader lock table is full. See #mdb_env_set_maxreaders(). + *
  • ENOMEM - out of memory. + *
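+ *
+ *	A minimal read-only transaction sketch (illustrative; assumes an
+ *	open environment):
+ * @code
+ *	MDB_txn *txn;
+ *	int rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
+ *	if (rc == MDB_SUCCESS) {
+ *		// ... perform reads with mdb_get() or a cursor ...
+ *		mdb_txn_abort(txn);	// read-only: nothing to commit
+ *	}
+ * @endcode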
+ */ +int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **txn); + + /** @brief Returns the transaction's #MDB_env + * + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + */ +MDB_env *mdb_txn_env(MDB_txn *txn); + + /** @brief Return the transaction's ID. + * + * This returns the identifier associated with this transaction. For a + * read-only transaction, this corresponds to the snapshot being read; + * concurrent readers will frequently have the same transaction ID. + * + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @return A transaction ID, valid if input is an active transaction. + */ +size_t mdb_txn_id(MDB_txn *txn); + + /** @brief Commit all the operations of a transaction into the database. + * + * The transaction handle is freed. It and its cursors must not be used + * again after this call, except with #mdb_cursor_renew(). + * @note Earlier documentation incorrectly said all cursors would be freed. + * Only write-transactions free cursors. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
  • ENOSPC - no more disk space. + *
  • EIO - a low-level I/O error occurred while writing. + *
  • ENOMEM - out of memory. + *
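+ *
+ *	A typical write pattern (sketch only; dbi, key and data setup omitted):
+ * @code
+ *	MDB_txn *txn;
+ *	int rc = mdb_txn_begin(env, NULL, 0, &txn);
+ *	if (rc == MDB_SUCCESS) {
+ *		rc = mdb_put(txn, dbi, &key, &data, 0);
+ *		if (rc == MDB_SUCCESS)
+ *			rc = mdb_txn_commit(txn);
+ *		else
+ *			mdb_txn_abort(txn);
+ *	}
+ * @endcode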
+ */ +int mdb_txn_commit(MDB_txn *txn); + + /** @brief Abandon all the operations of the transaction instead of saving them. + * + * The transaction handle is freed. It and its cursors must not be used + * again after this call, except with #mdb_cursor_renew(). + * @note Earlier documentation incorrectly said all cursors would be freed. + * Only write-transactions free cursors. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + */ +void mdb_txn_abort(MDB_txn *txn); + + /** @brief Reset a read-only transaction. + * + * Abort the transaction like #mdb_txn_abort(), but keep the transaction + * handle. #mdb_txn_renew() may reuse the handle. This saves allocation + * overhead if the process will start a new read-only transaction soon, + * and also locking overhead if #MDB_NOTLS is in use. The reader table + * lock is released, but the table slot stays tied to its thread or + * #MDB_txn. Use mdb_txn_abort() to discard a reset handle, and to free + * its lock table slot if MDB_NOTLS is in use. + * Cursors opened within the transaction must not be used + * again after this call, except with #mdb_cursor_renew(). + * Reader locks generally don't interfere with writers, but they keep old + * versions of database pages allocated. Thus they prevent the old pages + * from being reused when writers commit new data, and so under heavy load + * the database size may grow much more rapidly than otherwise. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + */ +void mdb_txn_reset(MDB_txn *txn); + + /** @brief Renew a read-only transaction. + * + * This acquires a new reader lock for a transaction handle that had been + * released by #mdb_txn_reset(). It must be called before a reset transaction + * may be used again. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_PANIC - a fatal error occurred earlier and the environment + * must be shut down. + *
  • EINVAL - an invalid parameter was specified. + *
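+ *
+ *	A reader-reuse sketch (illustrative; error checks omitted):
+ * @code
+ *	MDB_txn *rtxn;
+ *	mdb_txn_begin(env, NULL, MDB_RDONLY, &rtxn);
+ *	// ... read ...
+ *	mdb_txn_reset(rtxn);	// release the snapshot, keep the handle
+ *	// ... later ...
+ *	mdb_txn_renew(rtxn);	// take a fresh snapshot on the same handle
+ *	// ... read again, then finally:
+ *	mdb_txn_abort(rtxn);
+ * @endcode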
+ */ +int mdb_txn_renew(MDB_txn *txn); + +/** Compat with version <= 0.9.4, avoid clash with libmdb from MDB Tools project */ +#define mdb_open(txn,name,flags,dbi) mdb_dbi_open(txn,name,flags,dbi) +/** Compat with version <= 0.9.4, avoid clash with libmdb from MDB Tools project */ +#define mdb_close(env,dbi) mdb_dbi_close(env,dbi) + + /** @brief Open a database in the environment. + * + * A database handle denotes the name and parameters of a database, + * independently of whether such a database exists. + * The database handle may be discarded by calling #mdb_dbi_close(). + * The old database handle is returned if the database was already open. + * The handle may only be closed once. + * + * The database handle will be private to the current transaction until + * the transaction is successfully committed. If the transaction is + * aborted the handle will be closed automatically. + * After a successful commit the handle will reside in the shared + * environment, and may be used by other transactions. + * + * This function must not be called from multiple concurrent + * transactions in the same process. A transaction that uses + * this function must finish (either commit or abort) before + * any other transaction in the process may use this function. + * + * To use named databases (with name != NULL), #mdb_env_set_maxdbs() + * must be called before opening the environment. Database names are + * keys in the unnamed database, and may be read but not written. + * + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] name The name of the database to open. If only a single + * database is needed in the environment, this value may be NULL. + * @param[in] flags Special options for this database. This parameter + * must be set to 0 or by bitwise OR'ing together one or more of the + * values described here. + *
    + *
  • #MDB_REVERSEKEY + * Keys are strings to be compared in reverse order, from the end + * of the strings to the beginning. By default, keys are treated as strings and + * compared from beginning to end. + *
  • #MDB_DUPSORT + * Duplicate keys may be used in the database. (Or, from another perspective, + * keys may have multiple data items, stored in sorted order.) By default + * keys must be unique and may have only a single data item. + *
  • #MDB_INTEGERKEY + * Keys are binary integers in native byte order, either unsigned int + * or size_t, and will be sorted as such. + * The keys must all be of the same size. + *
  • #MDB_DUPFIXED + * This flag may only be used in combination with #MDB_DUPSORT. This option + * tells the library that the data items for this database are all the same + * size, which allows further optimizations in storage and retrieval. When + * all data items are the same size, the #MDB_GET_MULTIPLE and #MDB_NEXT_MULTIPLE + * cursor operations may be used to retrieve multiple items at once. + *
  • #MDB_INTEGERDUP + * This option specifies that duplicate data items are binary integers, + * similar to #MDB_INTEGERKEY keys. + *
  • #MDB_REVERSEDUP + * This option specifies that duplicate data items should be compared as + * strings in reverse order. + *
  • #MDB_CREATE + * Create the named database if it doesn't exist. This option is not + * allowed in a read-only transaction or a read-only environment. + *
+ * @param[out] dbi Address where the new #MDB_dbi handle will be stored + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_NOTFOUND - the specified database doesn't exist in the environment + * and #MDB_CREATE was not specified. + *
  • #MDB_DBS_FULL - too many databases have been opened. See #mdb_env_set_maxdbs(). + *
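+ *
+ *	A minimal sketch (the name "mydb" is hypothetical; named databases
+ *	require a prior #mdb_env_set_maxdbs() call):
+ * @code
+ *	MDB_dbi dbi;
+ *	int rc = mdb_dbi_open(txn, "mydb", MDB_CREATE, &dbi);
+ * @endcode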
+ */ +int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi); + + /** @brief Retrieve statistics for a database. + * + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[out] stat The address of an #MDB_stat structure + * where the statistics will be copied + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat); + + /** @brief Retrieve the DB flags for a database handle. + * + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[out] flags Address where the flags will be returned. + * @return A non-zero error value on failure and 0 on success. + */ +int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags); + + /** @brief Close a database handle. Normally unnecessary. Use with care: + * + * This call is not mutex protected. Handles should only be closed by + * a single thread, and only if no other threads are going to reference + * the database handle or one of its cursors any further. Do not close + * a handle if an existing transaction has modified its database. + * Doing so can cause misbehavior from database corruption to errors + * like MDB_BAD_VALSIZE (since the DB name is gone). + * + * Closing a database handle is not necessary, but lets #mdb_dbi_open() + * reuse the handle value. Usually it's better to set a bigger + * #mdb_env_set_maxdbs(), unless that value would be large. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + */ +void mdb_dbi_close(MDB_env *env, MDB_dbi dbi); + + /** @brief Empty or delete+close a database. + * + * See #mdb_dbi_close() for restrictions about closing the DB handle. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] del 0 to empty the DB, 1 to delete it from the + * environment and close the DB handle. + * @return A non-zero error value on failure and 0 on success. + */ +int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del); + + /** @brief Set a custom key comparison function for a database. + * + * The comparison function is called whenever it is necessary to compare a + * key specified by the application with a key currently stored in the database. + * If no comparison function is specified, and no special key flags were specified + * with #mdb_dbi_open(), the keys are compared lexically, with shorter keys collating + * before longer keys. + * @warning This function must be called before any data access functions are used, + * otherwise data corruption may occur. The same comparison function must be used by every + * program accessing the database, every time the database is used. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] cmp A #MDB_cmp_func function + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_set_compare(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp); + + /** @brief Set a custom data comparison function for a #MDB_DUPSORT database. + * + * This comparison function is called whenever it is necessary to compare a data + * item specified by the application with a data item currently stored in the database. + * This function only takes effect if the database was opened with the #MDB_DUPSORT + * flag. + * If no comparison function is specified, and no special key flags were specified + * with #mdb_dbi_open(), the data items are compared lexically, with shorter items collating + * before longer items. + * @warning This function must be called before any data access functions are used, + * otherwise data corruption may occur. The same comparison function must be used by every + * program accessing the database, every time the database is used. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] cmp A #MDB_cmp_func function + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_set_dupsort(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp); + + /** @brief Set a relocation function for a #MDB_FIXEDMAP database. + * + * @todo The relocation function is called whenever it is necessary to move the data + * of an item to a different position in the database (e.g. through tree + * balancing operations, shifts as a result of adds or deletes, etc.). It is + * intended to allow address/position-dependent data items to be stored in + * a database in an environment opened with the #MDB_FIXEDMAP option. + * Currently the relocation feature is unimplemented and setting + * this function has no effect. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] rel A #MDB_rel_func function + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_set_relfunc(MDB_txn *txn, MDB_dbi dbi, MDB_rel_func *rel); + + /** @brief Set a context pointer for a #MDB_FIXEDMAP database's relocation function. + * + * See #mdb_set_relfunc and #MDB_rel_func for more details. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] ctx An arbitrary pointer for whatever the application needs. + * It will be passed to the callback function set by #mdb_set_relfunc + * as its \b relctx parameter whenever the callback is invoked. + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx); + + /** @brief Get items from a database. + * + * This function retrieves key/data pairs from the database. The address + * and length of the data associated with the specified \b key are returned + * in the structure to which \b data refers. + * If the database supports duplicate keys (#MDB_DUPSORT) then the + * first data item for the key will be returned. Retrieval of other + * items requires the use of #mdb_cursor_get(). + * + * @note The memory pointed to by the returned values is owned by the + * database. The caller need not dispose of the memory, and may not + * modify it in any way. For values returned in a read-only transaction + * any modification attempts will cause a SIGSEGV. + * @note Values returned from the database are valid only until a + * subsequent update operation, or the end of the transaction. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] key The key to search for in the database + * @param[out] data The data corresponding to the key + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_NOTFOUND - the key was not in the database. + *
  • EINVAL - an invalid parameter was specified. + *
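+ *
+ *	A minimal lookup sketch (the key contents are illustrative):
+ * @code
+ *	MDB_val key, data;
+ *	key.mv_size = sizeof("greeting") - 1;
+ *	key.mv_data = (void *)"greeting";
+ *	int rc = mdb_get(txn, dbi, &key, &data);
+ *	if (rc == MDB_SUCCESS) {
+ *		// data.mv_data points into the map; read-only, do not modify
+ *	}
+ * @endcode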
+ */ +int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data); + + /** @brief Store items into a database. + * + * This function stores key/data pairs in the database. The default behavior + * is to enter the new key/data pair, replacing any previously existing key + * if duplicates are disallowed, or adding a duplicate data item if + * duplicates are allowed (#MDB_DUPSORT). + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] key The key to store in the database + * @param[in,out] data The data to store + * @param[in] flags Special options for this operation. This parameter + * must be set to 0 or by bitwise OR'ing together one or more of the + * values described here. + *
    + *
  • #MDB_NODUPDATA - enter the new key/data pair only if it does not + * already appear in the database. This flag may only be specified + * if the database was opened with #MDB_DUPSORT. The function will + * return #MDB_KEYEXIST if the key/data pair already appears in the + * database. + *
  • #MDB_NOOVERWRITE - enter the new key/data pair only if the key + * does not already appear in the database. The function will return + * #MDB_KEYEXIST if the key already appears in the database, even if + * the database supports duplicates (#MDB_DUPSORT). The \b data + * parameter will be set to point to the existing item. + *
  • #MDB_RESERVE - reserve space for data of the given size, but + * don't copy the given data. Instead, return a pointer to the + * reserved space, which the caller can fill in later - before + * the next update operation or the transaction ends. This saves + * an extra memcpy if the data is being generated later. + * LMDB does nothing else with this memory, the caller is expected + * to modify all of the space requested. This flag must not be + * specified if the database was opened with #MDB_DUPSORT. + *
  • #MDB_APPEND - append the given key/data pair to the end of the + * database. This option allows fast bulk loading when keys are + * already known to be in the correct order. Loading unsorted keys + * with this flag will cause a #MDB_KEYEXIST error. + *
  • #MDB_APPENDDUP - as above, but for sorted dup data. + *
+ * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize(). + *
  • #MDB_TXN_FULL - the transaction has too many dirty pages. + *
  • EACCES - an attempt was made to write in a read-only transaction. + *
  • EINVAL - an invalid parameter was specified. + *
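+ *
+ *	A minimal store sketch (key and value contents are illustrative):
+ * @code
+ *	MDB_val key, data;
+ *	key.mv_size  = sizeof("greeting") - 1;
+ *	key.mv_data  = (void *)"greeting";
+ *	data.mv_size = sizeof("hello") - 1;
+ *	data.mv_data = (void *)"hello";
+ *	int rc = mdb_put(txn, dbi, &key, &data, MDB_NOOVERWRITE);
+ *	if (rc == MDB_KEYEXIST) {
+ *		// key already present; data now points at the existing item
+ *	}
+ * @endcode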
+ */ +int mdb_put(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data, + unsigned int flags); + + /** @brief Delete items from a database. + * + * This function removes key/data pairs from the database. + * If the database does not support sorted duplicate data items + * (#MDB_DUPSORT) the data parameter is ignored. + * If the database supports sorted duplicates and the data parameter + * is NULL, all of the duplicate data items for the key will be + * deleted. Otherwise, if the data parameter is non-NULL + * only the matching data item will be deleted. + * This function will return #MDB_NOTFOUND if the specified key/data + * pair is not in the database. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] key The key to delete from the database + * @param[in] data The data to delete + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EACCES - an attempt was made to write in a read-only transaction. + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_del(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data); + + /** @brief Create a cursor handle. + * + * A cursor is associated with a specific transaction and database. + * A cursor cannot be used when its database handle is closed. Nor + * when its transaction has ended, except with #mdb_cursor_renew(). + * It can be discarded with #mdb_cursor_close(). + * A cursor in a write-transaction can be closed before its transaction + * ends, and will otherwise be closed when its transaction ends. + * A cursor in a read-only transaction must be closed explicitly, before + * or after its transaction ends. It can be reused with + * #mdb_cursor_renew() before finally closing it. + * @note Earlier documentation said that cursors in every transaction + * were closed when the transaction committed or aborted. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[out] cursor Address where the new #MDB_cursor handle will be stored + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor); + + /** @brief Close a cursor handle. + * + * The cursor handle will be freed and must not be used again after this call. + * Its transaction must still be live if it is a write-transaction. + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + */ +void mdb_cursor_close(MDB_cursor *cursor); + + /** @brief Renew a cursor handle. + * + * A cursor is associated with a specific transaction and database. + * Cursors that are only used in read-only + * transactions may be re-used, to avoid unnecessary malloc/free overhead. + * The cursor may be associated with a new read-only transaction, and + * referencing the same database handle as it was created with. + * This may be done whether the previous transaction is live or dead. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_cursor_renew(MDB_txn *txn, MDB_cursor *cursor); + + /** @brief Return the cursor's transaction handle. + * + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + */ +MDB_txn *mdb_cursor_txn(MDB_cursor *cursor); + + /** @brief Return the cursor's database handle. + * + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + */ +MDB_dbi mdb_cursor_dbi(MDB_cursor *cursor); + + /** @brief Retrieve by cursor. + * + * This function retrieves key/data pairs from the database. The address and length + * of the key are returned in the object to which \b key refers (except for the + * case of the #MDB_SET option, in which the \b key object is unchanged), and + * the address and length of the data are returned in the object to which \b data + * refers. + * See #mdb_get() for restrictions on using the output values. + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + * @param[in,out] key The key for a retrieved item + * @param[in,out] data The data of a retrieved item + * @param[in] op A cursor operation #MDB_cursor_op + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_NOTFOUND - no matching key found. + *
  • EINVAL - an invalid parameter was specified. + *
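+ *
+ *	A full-scan sketch (illustrative; assumes an open txn and dbi):
+ * @code
+ *	MDB_cursor *cur;
+ *	MDB_val key, data;
+ *	if (mdb_cursor_open(txn, dbi, &cur) == MDB_SUCCESS) {
+ *		// MDB_NEXT positions at the first item on the first call
+ *		while (mdb_cursor_get(cur, &key, &data, MDB_NEXT) == MDB_SUCCESS) {
+ *			// ... process key and data ...
+ *		}
+ *		mdb_cursor_close(cur);
+ *	}
+ * @endcode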
+ */ +int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val *data, + MDB_cursor_op op); + + /** @brief Store by cursor. + * + * This function stores key/data pairs into the database. + * The cursor is positioned at the new item, or on failure usually near it. + * @note Earlier documentation incorrectly said errors would leave the + * state of the cursor unchanged. + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + * @param[in] key The key operated on. + * @param[in] data The data operated on. + * @param[in] flags Options for this operation. This parameter + * must be set to 0 or one of the values described here. + *
    + *
  • #MDB_CURRENT - replace the item at the current cursor position. + * The \b key parameter must still be provided, and must match it. + * If using sorted duplicates (#MDB_DUPSORT) the data item must still + * sort into the same place. This is intended to be used when the + * new data is the same size as the old. Otherwise it will simply + * perform a delete of the old record followed by an insert. + *
  • #MDB_NODUPDATA - enter the new key/data pair only if it does not + * already appear in the database. This flag may only be specified + * if the database was opened with #MDB_DUPSORT. The function will + * return #MDB_KEYEXIST if the key/data pair already appears in the + * database. + *
  • #MDB_NOOVERWRITE - enter the new key/data pair only if the key + * does not already appear in the database. The function will return + * #MDB_KEYEXIST if the key already appears in the database, even if + * the database supports duplicates (#MDB_DUPSORT). + *
  • #MDB_RESERVE - reserve space for data of the given size, but + * don't copy the given data. Instead, return a pointer to the + * reserved space, which the caller can fill in later. This saves + * an extra memcpy if the data is being generated later. This flag + * must not be specified if the database was opened with #MDB_DUPSORT. + *
  • #MDB_APPEND - append the given key/data pair to the end of the + * database. No key comparisons are performed. This option allows + * fast bulk loading when keys are already known to be in the + * correct order. Loading unsorted keys with this flag will cause + * a #MDB_KEYEXIST error. + *
  • #MDB_APPENDDUP - as above, but for sorted dup data. + *
  • #MDB_MULTIPLE - store multiple contiguous data elements in a + * single request. This flag may only be specified if the database + * was opened with #MDB_DUPFIXED. The \b data argument must be an + * array of two MDB_vals. The mv_size of the first MDB_val must be + * the size of a single data element. The mv_data of the first MDB_val + * must point to the beginning of the array of contiguous data elements. + * The mv_size of the second MDB_val must be the count of the number + * of data elements to store. On return this field will be set to + * the count of the number of elements actually written. The mv_data + * of the second MDB_val is unused. + *
+ * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • #MDB_MAP_FULL - the database is full, see #mdb_env_set_mapsize(). + *
  • #MDB_TXN_FULL - the transaction has too many dirty pages. + *
  • EACCES - an attempt was made to write in a read-only transaction. + *
  • EINVAL - an invalid parameter was specified. + *
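+ *
+ *	A minimal sketch (values illustrative; assumes an open write cursor):
+ * @code
+ *	MDB_val key, data;
+ *	key.mv_size  = sizeof("color") - 1;
+ *	key.mv_data  = (void *)"color";
+ *	data.mv_size = sizeof("blue") - 1;
+ *	data.mv_data = (void *)"blue";
+ *	int rc = mdb_cursor_put(cur, &key, &data, 0);
+ * @endcode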
+ */ +int mdb_cursor_put(MDB_cursor *cursor, MDB_val *key, MDB_val *data, + unsigned int flags); + + /** @brief Delete current key/data pair + * + * This function deletes the key/data pair to which the cursor refers. + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + * @param[in] flags Options for this operation. This parameter + * must be set to 0 or one of the values described here. + *
    + *
  • #MDB_NODUPDATA - delete all of the data items for the current key. + * This flag may only be specified if the database was opened with #MDB_DUPSORT. + *
+ * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EACCES - an attempt was made to write in a read-only transaction. + *
  • EINVAL - an invalid parameter was specified. + *
+ */ +int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags); + + /** @brief Return count of duplicates for current key. + * + * This call is only valid on databases that support sorted duplicate + * data items #MDB_DUPSORT. + * @param[in] cursor A cursor handle returned by #mdb_cursor_open() + * @param[out] countp Address where the count will be stored + * @return A non-zero error value on failure and 0 on success. Some possible + * errors are: + *
    + *
  • EINVAL - cursor is not initialized, or an invalid parameter was specified. + *
+ */ +int mdb_cursor_count(MDB_cursor *cursor, size_t *countp); + + /** @brief Compare two data items according to a particular database. + * + * This returns a comparison as if the two data items were keys in the + * specified database. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] a The first item to compare + * @param[in] b The second item to compare + * @return < 0 if a < b, 0 if a == b, > 0 if a > b + */ +int mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b); + + /** @brief Compare two data items according to a particular database. + * + * This returns a comparison as if the two items were data items of + * the specified database. The database must have the #MDB_DUPSORT flag. + * @param[in] txn A transaction handle returned by #mdb_txn_begin() + * @param[in] dbi A database handle returned by #mdb_dbi_open() + * @param[in] a The first item to compare + * @param[in] b The second item to compare + * @return < 0 if a < b, 0 if a == b, > 0 if a > b + */ +int mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b); + + /** @brief A callback function used to print a message from the library. + * + * @param[in] msg The string to be printed. + * @param[in] ctx An arbitrary context pointer for the callback. + * @return < 0 on failure, >= 0 on success. + */ +typedef int (MDB_msg_func)(const char *msg, void *ctx); + + /** @brief Dump the entries in the reader lock table. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[in] func A #MDB_msg_func function + * @param[in] ctx Anything the message function needs + * @return < 0 on failure, >= 0 on success. + */ +int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx); + + /** @brief Check for stale entries in the reader lock table. + * + * @param[in] env An environment handle returned by #mdb_env_create() + * @param[out] dead Number of stale slots that were cleared + * @return 0 on success, non-zero on failure. + */ +int mdb_reader_check(MDB_env *env, int *dead); +/** @} */ + +#ifdef __cplusplus +} +#endif +/** @page tools LMDB Command Line Tools + The following describes the command line tools that are available for LMDB. + \li \ref mdb_copy_1 + \li \ref mdb_dump_1 + \li \ref mdb_load_1 + \li \ref mdb_stat_1 +*/ + +#endif /* _LMDB_H_ */ diff --git a/deps/liblmdb/mdb.c b/deps/liblmdb/mdb.c new file mode 100644 index 00000000..96c859ef --- /dev/null +++ b/deps/liblmdb/mdb.c @@ -0,0 +1,10011 @@ +/** @file mdb.c + * @brief Lightning memory-mapped database library + * + * A Btree-based database management library modeled loosely on the + * BerkeleyDB API, but much simplified. + */ +/* + * Copyright 2011-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + * + * This code is derived from btree.c written by Martin Hedenfalk. + * + * Copyright (c) 2009, 2010 Martin Hedenfalk + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE 1 +#endif +#ifdef _WIN32 +#include +#include +/** getpid() returns int; MinGW defines pid_t but MinGW64 typedefs it + * as int64 which is wrong. MSVC doesn't define it at all, so just + * don't use it. + */ +#define MDB_PID_T int +#define MDB_THR_T DWORD +#include +#include +#ifdef __GNUC__ +# include +#else +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# define BYTE_ORDER LITTLE_ENDIAN +# ifndef SSIZE_MAX +# define SSIZE_MAX INT_MAX +# endif +#endif +#else +#include +#include +#define MDB_PID_T pid_t +#define MDB_THR_T pthread_t +#include +#include +#include +#ifdef HAVE_SYS_FILE_H +#include +#endif +#include +#endif + +#if defined(__mips) && defined(__linux) +/* MIPS has cache coherency issues, requires explicit cache control */ +#include +extern int cacheflush(char *addr, int nbytes, int cache); +#define CACHEFLUSH(addr, bytes, cache) cacheflush(addr, bytes, cache) +#else +#define CACHEFLUSH(addr, bytes, cache) +#endif + +#if defined(__linux) && !defined(MDB_FDATASYNC_WORKS) +/** fdatasync is broken on ext3/ext4fs on older kernels, see + * description in #mdb_env_open2 comments. You can safely + * define MDB_FDATASYNC_WORKS if this code will only be run + * on kernels 3.6 and newer. 
+ */ +#define BROKEN_FDATASYNC +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER +#include +typedef SSIZE_T ssize_t; +#else +#include +#endif + +#if defined(__sun) || defined(ANDROID) +/* Most platforms have posix_memalign, older may only have memalign */ +#define HAVE_MEMALIGN 1 +#include +#endif + +#if !(defined(BYTE_ORDER) || defined(__BYTE_ORDER)) +#include +#include /* defines BYTE_ORDER on HPUX and Solaris */ +#endif + +#if defined(__APPLE__) || defined (BSD) +# define MDB_USE_POSIX_SEM 1 +# define MDB_FDATASYNC fsync +#elif defined(ANDROID) +# define MDB_FDATASYNC fsync +#endif + +#ifndef _WIN32 +#include +#ifdef MDB_USE_POSIX_SEM +# define MDB_USE_HASH 1 +#include +#else +#define MDB_USE_POSIX_MUTEX 1 +#endif +#endif + +#if defined(_WIN32) + defined(MDB_USE_POSIX_SEM) \ + + defined(MDB_USE_POSIX_MUTEX) != 1 +# error "Ambiguous shared-lock implementation" +#endif + +#ifdef USE_VALGRIND +#include +#define VGMEMP_CREATE(h,r,z) VALGRIND_CREATE_MEMPOOL(h,r,z) +#define VGMEMP_ALLOC(h,a,s) VALGRIND_MEMPOOL_ALLOC(h,a,s) +#define VGMEMP_FREE(h,a) VALGRIND_MEMPOOL_FREE(h,a) +#define VGMEMP_DESTROY(h) VALGRIND_DESTROY_MEMPOOL(h) +#define VGMEMP_DEFINED(a,s) VALGRIND_MAKE_MEM_DEFINED(a,s) +#else +#define VGMEMP_CREATE(h,r,z) +#define VGMEMP_ALLOC(h,a,s) +#define VGMEMP_FREE(h,a) +#define VGMEMP_DESTROY(h) +#define VGMEMP_DEFINED(a,s) +#endif + +#ifndef BYTE_ORDER +# if (defined(_LITTLE_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)) +/* Solaris just defines one or the other */ +# define LITTLE_ENDIAN 1234 +# define BIG_ENDIAN 4321 +# ifdef _LITTLE_ENDIAN +# define BYTE_ORDER LITTLE_ENDIAN +# else +# define BYTE_ORDER BIG_ENDIAN +# endif +# else +# define BYTE_ORDER __BYTE_ORDER +# endif +#endif + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN __LITTLE_ENDIAN +#endif +#ifndef BIG_ENDIAN +#define BIG_ENDIAN __BIG_ENDIAN +#endif + +#if defined(__i386) || defined(__x86_64) || defined(_M_IX86) +#define MISALIGNED_OK 1 +#endif + +#include "lmdb.h" +#include "midl.h" + +#if (BYTE_ORDER == LITTLE_ENDIAN) == (BYTE_ORDER == BIG_ENDIAN) +# error "Unknown or unsupported endianness (BYTE_ORDER)" +#elif (-6 & 5) || CHAR_BIT != 8 || UINT_MAX < 0xffffffff || ULONG_MAX % 0xFFFF +# error "Two's complement, reasonably sized integer types, please" +#endif + +#ifdef __GNUC__ +/** Put infrequently used env functions in separate section */ +# ifdef __APPLE__ +# define ESECT __attribute__ ((section("__TEXT,text_env"))) +# else +# define ESECT __attribute__ ((section("text_env"))) +# endif +#else +#define ESECT +#endif + +#ifdef _MSC_VER +#define CALL_CONV WINAPI +#else +#define CALL_CONV +#endif + +/** @defgroup internal LMDB Internals + * @{ + */ +/** @defgroup compat Compatibility Macros + * A bunch of macros to minimize the amount of platform-specific ifdefs + * needed throughout the rest of the code. When the features this library + * needs are similar enough to POSIX to be hidden in a one-or-two line + * replacement, this macro approach is used. 
+ * @{ + */ + + /** Features under development */ +#ifndef MDB_DEVEL +#define MDB_DEVEL 0 +#endif + + /** Wrapper around __func__, which is a C99 feature */ +#if __STDC_VERSION__ >= 199901L +# define mdb_func_ __func__ +#elif __GNUC__ >= 2 || _MSC_VER >= 1300 +# define mdb_func_ __FUNCTION__ +#else +/* If a debug message says (), update the #if statements above */ +# define mdb_func_ "" +#endif + +/* Internal error codes, not exposed outside liblmdb */ +#define MDB_NO_ROOT (MDB_LAST_ERRCODE + 10) +#ifdef _WIN32 +#define MDB_OWNERDEAD ((int) WAIT_ABANDONED) +#elif defined(MDB_USE_POSIX_MUTEX) && defined(EOWNERDEAD) +#define MDB_OWNERDEAD EOWNERDEAD /**< #LOCK_MUTEX0() result if dead owner */ +#endif + +#ifdef __GLIBC__ +#define GLIBC_VER ((__GLIBC__ << 16 )| __GLIBC_MINOR__) +#endif +/** Some platforms define the EOWNERDEAD error code + * even though they don't support Robust Mutexes. + * Compile with -DMDB_USE_ROBUST=0, or use some other + * mechanism like -DMDB_USE_SYSV_SEM instead of + * -DMDB_USE_POSIX_MUTEX. (SysV semaphores are + * also Robust, but some systems don't support them + * either.) + */ +#ifndef MDB_USE_ROBUST +/* Android currently lacks Robust Mutex support. So does glibc < 2.4. */ +# if defined(MDB_USE_POSIX_MUTEX) && (defined(ANDROID) || \ + (defined(__GLIBC__) && GLIBC_VER < 0x020004)) +# define MDB_USE_ROBUST 0 +# else +# define MDB_USE_ROBUST 1 +/* glibc < 2.10 only provided _np API */ +# if defined(__GLIBC__) && GLIBC_VER < 0x02000a +# define PTHREAD_MUTEX_ROBUST PTHREAD_MUTEX_ROBUST_NP +# define pthread_mutexattr_setrobust(attr, flag) pthread_mutexattr_setrobust_np(attr, flag) +# define pthread_mutex_consistent(mutex) pthread_mutex_consistent_np(mutex) +# endif +# endif +#endif /* MDB_USE_ROBUST */ + +#if defined(MDB_OWNERDEAD) && MDB_USE_ROBUST +#define MDB_ROBUST_SUPPORTED 1 +#endif + +#ifdef _WIN32 +#define MDB_USE_HASH 1 +#define MDB_PIDLOCK 0 +#define THREAD_RET DWORD +#define pthread_t HANDLE +#define pthread_mutex_t HANDLE +#define pthread_cond_t HANDLE +typedef HANDLE mdb_mutex_t, mdb_mutexref_t; +#define pthread_key_t DWORD +#define pthread_self() GetCurrentThreadId() +#define pthread_key_create(x,y) \ + ((*(x) = TlsAlloc()) == TLS_OUT_OF_INDEXES ? ErrCode() : 0) +#define pthread_key_delete(x) TlsFree(x) +#define pthread_getspecific(x) TlsGetValue(x) +#define pthread_setspecific(x,y) (TlsSetValue(x,y) ? 0 : ErrCode()) +#define pthread_mutex_unlock(x) ReleaseMutex(*x) +#define pthread_mutex_lock(x) WaitForSingleObject(*x, INFINITE) +#define pthread_cond_signal(x) SetEvent(*x) +#define pthread_cond_wait(cond,mutex) do{SignalObjectAndWait(*mutex, *cond, INFINITE, FALSE); WaitForSingleObject(*mutex, INFINITE);}while(0) +#define THREAD_CREATE(thr,start,arg) thr=CreateThread(NULL,0,start,arg,0,NULL) +#define THREAD_FINISH(thr) WaitForSingleObject(thr, INFINITE) +#define LOCK_MUTEX0(mutex) WaitForSingleObject(mutex, INFINITE) +#define UNLOCK_MUTEX(mutex) ReleaseMutex(mutex) +#define mdb_mutex_consistent(mutex) 0 +#define getpid() GetCurrentProcessId() +#define MDB_FDATASYNC(fd) (!FlushFileBuffers(fd)) +#define MDB_MSYNC(addr,len,flags) (!FlushViewOfFile(addr,len)) +#define ErrCode() GetLastError() +#define GET_PAGESIZE(x) {SYSTEM_INFO si; GetSystemInfo(&si); (x) = si.dwPageSize;} +#define close(fd) (CloseHandle(fd) ? 
0 : -1) +#define munmap(ptr,len) UnmapViewOfFile(ptr) +#ifdef PROCESS_QUERY_LIMITED_INFORMATION +#define MDB_PROCESS_QUERY_LIMITED_INFORMATION PROCESS_QUERY_LIMITED_INFORMATION +#else +#define MDB_PROCESS_QUERY_LIMITED_INFORMATION 0x1000 +#endif +#define Z "I" +#else +#define THREAD_RET void * +#define THREAD_CREATE(thr,start,arg) pthread_create(&thr,NULL,start,arg) +#define THREAD_FINISH(thr) pthread_join(thr,NULL) +#define Z "z" /**< printf format modifier for size_t */ + + /** For MDB_LOCK_FORMAT: True if readers take a pid lock in the lockfile */ +#define MDB_PIDLOCK 1 + +#ifdef MDB_USE_POSIX_SEM + +typedef sem_t *mdb_mutex_t, *mdb_mutexref_t; +#define LOCK_MUTEX0(mutex) mdb_sem_wait(mutex) +#define UNLOCK_MUTEX(mutex) sem_post(mutex) + +static int +mdb_sem_wait(sem_t *sem) +{ + int rc; + while ((rc = sem_wait(sem)) && (rc = errno) == EINTR) ; + return rc; +} + +#else /* MDB_USE_POSIX_MUTEX: */ + /** Shared mutex/semaphore as it is stored (mdb_mutex_t), and as + * local variables keep it (mdb_mutexref_t). + * + * When #mdb_mutexref_t is a pointer declaration and #mdb_mutex_t is + * not, then it is array[size 1] so it can be assigned to a pointer. + * @{ + */ +typedef pthread_mutex_t mdb_mutex_t[1], *mdb_mutexref_t; + /* @} */ + /** Lock the reader or writer mutex. + * Returns 0 or a code to give #mdb_mutex_failed(), as in #LOCK_MUTEX(). + */ +#define LOCK_MUTEX0(mutex) pthread_mutex_lock(mutex) + /** Unlock the reader or writer mutex. + */ +#define UNLOCK_MUTEX(mutex) pthread_mutex_unlock(mutex) + /** Mark mutex-protected data as repaired, after death of previous owner. + */ +#define mdb_mutex_consistent(mutex) pthread_mutex_consistent(mutex) +#endif /* MDB_USE_POSIX_SEM */ + + /** Get the error code for the last failed system function. + */ +#define ErrCode() errno + + /** An abstraction for a file handle. + * On POSIX systems file handles are small integers. On Windows + * they're opaque pointers. + */ +#define HANDLE int + + /** A value for an invalid file handle. + * Mainly used to initialize file variables and signify that they are + * unused. + */ +#define INVALID_HANDLE_VALUE (-1) + + /** Get the size of a memory page for the system. + * This is the basic size that the platform's memory manager uses, and is + * fundamental to the use of memory-mapped files. + */ +#define GET_PAGESIZE(x) ((x) = sysconf(_SC_PAGE_SIZE)) +#endif + +#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM) +#define MNAME_LEN 32 +#else +#define MNAME_LEN (sizeof(pthread_mutex_t)) +#endif + +/** @} */ + +#ifdef MDB_ROBUST_SUPPORTED + /** Lock mutex, handle any error, set rc = result. + * Return 0 on success, nonzero (not rc) on error. + */ +#define LOCK_MUTEX(rc, env, mutex) \ + (((rc) = LOCK_MUTEX0(mutex)) && \ + ((rc) = mdb_mutex_failed(env, mutex, rc))) +static int mdb_mutex_failed(MDB_env *env, mdb_mutexref_t mutex, int rc); +#else +#define LOCK_MUTEX(rc, env, mutex) ((rc) = LOCK_MUTEX0(mutex)) +#define mdb_mutex_failed(env, mutex, rc) (rc) +#endif + +#ifndef _WIN32 +/** A flag for opening a file and requesting synchronous data writes. + * This is only used when writing a meta page. It's not strictly needed; + * we could just do a normal write and then immediately perform a flush. + * But if this flag is available it saves us an extra system call. + * + * @note If O_DSYNC is undefined but exists in /usr/include, + * preferably set some compiler flag to get the definition. 
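The mdb_sem_wait() helper above is the standard retry-on-EINTR idiom. A standalone sketch of the same pattern applied to read(), purely for illustration (not part of LMDB):

#include <errno.h>
#include <unistd.h>

/* Retry a read() that was interrupted by a signal before any data arrived. */
static ssize_t read_retry(int fd, void *buf, size_t len)
{
    ssize_t n;
    do {
        n = read(fd, buf, len);
    } while (n < 0 && errno == EINTR);
    return n;
}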
+ */ +#ifndef MDB_DSYNC +# ifdef O_DSYNC +# define MDB_DSYNC O_DSYNC +# else +# define MDB_DSYNC O_SYNC +# endif +#endif +#endif + +/** Function for flushing the data of a file. Define this to fsync + * if fdatasync() is not supported. + */ +#ifndef MDB_FDATASYNC +# define MDB_FDATASYNC fdatasync +#endif + +#ifndef MDB_MSYNC +# define MDB_MSYNC(addr,len,flags) msync(addr,len,flags) +#endif + +#ifndef MS_SYNC +#define MS_SYNC 1 +#endif + +#ifndef MS_ASYNC +#define MS_ASYNC 0 +#endif + + /** A page number in the database. + * Note that 64 bit page numbers are overkill, since pages themselves + * already represent 12-13 bits of addressable memory, and the OS will + * always limit applications to a maximum of 63 bits of address space. + * + * @note In the #MDB_node structure, we only store 48 bits of this value, + * which thus limits us to only 60 bits of addressable data. + */ +typedef MDB_ID pgno_t; + + /** A transaction ID. + * See struct MDB_txn.mt_txnid for details. + */ +typedef MDB_ID txnid_t; + +/** @defgroup debug Debug Macros + * @{ + */ +#ifndef MDB_DEBUG + /** Enable debug output. Needs variable argument macros (a C99 feature). + * Set this to 1 for copious tracing. Set to 2 to add dumps of all IDLs + * read from and written to the database (used for free space management). + */ +#define MDB_DEBUG 0 +#endif + +#if MDB_DEBUG +static int mdb_debug; +static txnid_t mdb_debug_start; + + /** Print a debug message with printf formatting. + * Requires double parenthesis around 2 or more args. + */ +# define DPRINTF(args) ((void) ((mdb_debug) && DPRINTF0 args)) +# define DPRINTF0(fmt, ...) \ + fprintf(stderr, "%s:%d " fmt "\n", mdb_func_, __LINE__, __VA_ARGS__) +#else +# define DPRINTF(args) ((void) 0) +#endif + /** Print a debug string. + * The string is printed literally, with no format processing. + */ +#define DPUTS(arg) DPRINTF(("%s", arg)) + /** Debuging output value of a cursor DBI: Negative in a sub-cursor. */ +#define DDBI(mc) \ + (((mc)->mc_flags & C_SUB) ? -(int)(mc)->mc_dbi : (int)(mc)->mc_dbi) +/** @} */ + + /** @brief The maximum size of a database page. + * + * It is 32k or 64k, since value-PAGEBASE must fit in + * #MDB_page.%mp_upper. + * + * LMDB will use database pages < OS pages if needed. + * That causes more I/O in write transactions: The OS must + * know (read) the whole page before writing a partial page. + * + * Note that we don't currently support Huge pages. On Linux, + * regular data files cannot use Huge pages, and in general + * Huge pages aren't actually pageable. We rely on the OS + * demand-pager to read our data and page it out when memory + * pressure from other processes is high. So until OSs have + * actual paging support for Huge pages, they're not viable. + */ +#define MAX_PAGESIZE (PAGEBASE ? 0x10000 : 0x8000) + + /** The minimum number of keys required in a database page. + * Setting this to a larger value will place a smaller bound on the + * maximum size of a data item. Data items larger than this size will + * be pushed into overflow pages instead of being stored directly in + * the B-tree node. This value used to default to 4. With a page size + * of 4096 bytes that meant that any item larger than 1024 bytes would + * go into an overflow page. That also meant that on average 2-3KB of + * each overflow page was wasted space. The value cannot be lower than + * 2 because then there would no longer be a tree structure. With this + * value, items larger than 2KB will go into overflow pages, and on + * average only 1KB will be wasted. 
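A back-of-envelope sketch of the minimum-keys threshold described above; the 16-byte header and the simplified formula are assumptions (the library's real computation also subtracts per-node overhead):

#include <stdio.h>

int main(void)
{
    unsigned psize = 4096;    /* assumed OS page size */
    unsigned pagehdr = 16;    /* approximate page header size */
    unsigned minkeys = 2;     /* the minimum keys per page */
    /* With at least two nodes per page, one node gets at most roughly half
     * of the usable space; anything larger spills to overflow pages. */
    unsigned approx_threshold = (psize - pagehdr) / minkeys;

    printf("items larger than ~%u bytes spill to overflow pages\n",
           approx_threshold);
    return 0;
}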
+ */ +#define MDB_MINKEYS 2 + + /** A stamp that identifies a file as an LMDB file. + * There's nothing special about this value other than that it is easily + * recognizable, and it will reflect any byte order mismatches. + */ +#define MDB_MAGIC 0xBEEFC0DE + + /** The version number for a database's datafile format. */ +#define MDB_DATA_VERSION ((MDB_DEVEL) ? 999 : 1) + /** The version number for a database's lockfile format. */ +#define MDB_LOCK_VERSION 1 + + /** @brief The max size of a key we can write, or 0 for computed max. + * + * This macro should normally be left alone or set to 0. + * Note that a database with big keys or dupsort data cannot be + * reliably modified by a liblmdb which uses a smaller max. + * The default is 511 for backwards compat, or 0 when #MDB_DEVEL. + * + * Other values are allowed, for backwards compat. However: + * A value bigger than the computed max can break if you do not + * know what you are doing, and liblmdb <= 0.9.10 can break when + * modifying a DB with keys/dupsort data bigger than its max. + * + * Data items in an #MDB_DUPSORT database are also limited to + * this size, since they're actually keys of a sub-DB. Keys and + * #MDB_DUPSORT data items must fit on a node in a regular page. + */ +#ifndef MDB_MAXKEYSIZE +#define MDB_MAXKEYSIZE ((MDB_DEVEL) ? 0 : 511) +#endif + + /** The maximum size of a key we can write to the environment. */ +#if MDB_MAXKEYSIZE +#define ENV_MAXKEY(env) (MDB_MAXKEYSIZE) +#else +#define ENV_MAXKEY(env) ((env)->me_maxkey) +#endif + + /** @brief The maximum size of a data item. + * + * We only store a 32 bit value for node sizes. + */ +#define MAXDATASIZE 0xffffffffUL + +#if MDB_DEBUG + /** Key size which fits in a #DKBUF. + * @ingroup debug + */ +#define DKBUF_MAXKEYSIZE ((MDB_MAXKEYSIZE) > 0 ? (MDB_MAXKEYSIZE) : 511) + /** A key buffer. + * @ingroup debug + * This is used for printing a hex dump of a key's contents. + */ +#define DKBUF char kbuf[DKBUF_MAXKEYSIZE*2+1] + /** Display a key in hex. + * @ingroup debug + * Invoke a function to display a key in hex. + */ +#define DKEY(x) mdb_dkey(x, kbuf) +#else +#define DKBUF +#define DKEY(x) 0 +#endif + + /** An invalid page number. + * Mainly used to denote an empty tree. + */ +#define P_INVALID (~(pgno_t)0) + + /** Test if the flags \b f are set in a flag word \b w. */ +#define F_ISSET(w, f) (((w) & (f)) == (f)) + + /** Round \b n up to an even number. */ +#define EVEN(n) (((n) + 1U) & -2) /* sign-extending -2 to match n+1U */ + + /** Used for offsets within a single page. + * Since memory pages are typically 4 or 8KB in size, 12-13 bits, + * this is plenty. + */ +typedef uint16_t indx_t; + + /** Default size of memory map. + * This is certainly too small for any actual applications. Apps should always set + * the size explicitly using #mdb_env_set_mapsize(). + */ +#define DEFAULT_MAPSIZE 1048576 + +/** @defgroup readers Reader Lock Table + * Readers don't acquire any locks for their data access. Instead, they + * simply record their transaction ID in the reader table. The reader + * mutex is needed just to find an empty slot in the reader table. The + * slot's address is saved in thread-specific data so that subsequent read + * transactions started by the same thread need no further locking to proceed. + * + * If #MDB_NOTLS is set, the slot address is not saved in thread-specific data. + * + * No reader table is used if the database is on a read-only filesystem, or + * if #MDB_NOLOCK is set. 
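Because DEFAULT_MAPSIZE above is explicitly called too small for real applications, here is a hedged setup sketch that calls the public sizing functions before mdb_env_open(); the 1 GB figure, reader count, and file mode are assumptions:

#include <stddef.h>
#include "lmdb.h"

static int open_env(MDB_env **envp, const char *path)
{
    int rc = mdb_env_create(envp);
    if (rc != MDB_SUCCESS)
        return rc;
    /* Both sizing calls must happen before the environment is opened. */
    mdb_env_set_mapsize(*envp, (size_t)1 << 30);   /* 1 GB map, app-specific */
    mdb_env_set_maxreaders(*envp, 126);            /* the library's default */
    return mdb_env_open(*envp, path, 0, 0664);
}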
+ * + * Since the database uses multi-version concurrency control, readers don't + * actually need any locking. This table is used to keep track of which + * readers are using data from which old transactions, so that we'll know + * when a particular old transaction is no longer in use. Old transactions + * that have discarded any data pages can then have those pages reclaimed + * for use by a later write transaction. + * + * The lock table is constructed such that reader slots are aligned with the + * processor's cache line size. Any slot is only ever used by one thread. + * This alignment guarantees that there will be no contention or cache + * thrashing as threads update their own slot info, and also eliminates + * any need for locking when accessing a slot. + * + * A writer thread will scan every slot in the table to determine the oldest + * outstanding reader transaction. Any freed pages older than this will be + * reclaimed by the writer. The writer doesn't use any locks when scanning + * this table. This means that there's no guarantee that the writer will + * see the most up-to-date reader info, but that's not required for correct + * operation - all we need is to know the upper bound on the oldest reader, + * we don't care at all about the newest reader. So the only consequence of + * reading stale information here is that old pages might hang around a + * while longer before being reclaimed. That's actually good anyway, because + * the longer we delay reclaiming old pages, the more likely it is that a + * string of contiguous pages can be found after coalescing old pages from + * many old transactions together. + * @{ + */ + /** Number of slots in the reader table. + * This value was chosen somewhat arbitrarily. 126 readers plus a + * couple mutexes fit exactly into 8KB on my development machine. + * Applications should set the table size using #mdb_env_set_maxreaders(). + */ +#define DEFAULT_READERS 126 + + /** The size of a CPU cache line in bytes. We want our lock structures + * aligned to this size to avoid false cache line sharing in the + * lock table. + * This value works for most CPUs. For Itanium this should be 128. + */ +#ifndef CACHELINE +#define CACHELINE 64 +#endif + + /** The information we store in a single slot of the reader table. + * In addition to a transaction ID, we also record the process and + * thread ID that owns a slot, so that we can detect stale information, + * e.g. threads or processes that went away without cleaning up. + * @note We currently don't check for stale records. We simply re-init + * the table when we know that we're the only process opening the + * lock file. + */ +typedef struct MDB_rxbody { + /** Current Transaction ID when this transaction began, or (txnid_t)-1. + * Multiple readers that start at the same time will probably have the + * same ID here. Again, it's not important to exclude them from + * anything; all we need to know is which version of the DB they + * started from so we can avoid overwriting any data used in that + * particular version. + */ + volatile txnid_t mrb_txnid; + /** The process ID of the process owning this reader txn. */ + volatile MDB_PID_T mrb_pid; + /** The thread ID of the thread owning this txn. */ + volatile MDB_THR_T mrb_tid; +} MDB_rxbody; + + /** The actual reader record, with cacheline padding. 
*/ +typedef struct MDB_reader { + union { + MDB_rxbody mrx; + /** shorthand for mrb_txnid */ +#define mr_txnid mru.mrx.mrb_txnid +#define mr_pid mru.mrx.mrb_pid +#define mr_tid mru.mrx.mrb_tid + /** cache line alignment */ + char pad[(sizeof(MDB_rxbody)+CACHELINE-1) & ~(CACHELINE-1)]; + } mru; +} MDB_reader; + + /** The header for the reader table. + * The table resides in a memory-mapped file. (This is a different file + * than is used for the main database.) + * + * For POSIX the actual mutexes reside in the shared memory of this + * mapped file. On Windows, mutexes are named objects allocated by the + * kernel; we store the mutex names in this mapped file so that other + * processes can grab them. This same approach is also used on + * MacOSX/Darwin (using named semaphores) since MacOSX doesn't support + * process-shared POSIX mutexes. For these cases where a named object + * is used, the object name is derived from a 64 bit FNV hash of the + * environment pathname. As such, naming collisions are extremely + * unlikely. If a collision occurs, the results are unpredictable. + */ +typedef struct MDB_txbody { + /** Stamp identifying this as an LMDB file. It must be set + * to #MDB_MAGIC. */ + uint32_t mtb_magic; + /** Format of this lock file. Must be set to #MDB_LOCK_FORMAT. */ + uint32_t mtb_format; +#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM) + char mtb_rmname[MNAME_LEN]; +#else + /** Mutex protecting access to this table. + * This is the reader table lock used with LOCK_MUTEX(). + */ + mdb_mutex_t mtb_rmutex; +#endif + /** The ID of the last transaction committed to the database. + * This is recorded here only for convenience; the value can always + * be determined by reading the main database meta pages. + */ + volatile txnid_t mtb_txnid; + /** The number of slots that have been used in the reader table. + * This always records the maximum count, it is not decremented + * when readers release their slots. + */ + volatile unsigned mtb_numreaders; +} MDB_txbody; + + /** The actual reader table definition. */ +typedef struct MDB_txninfo { + union { + MDB_txbody mtb; +#define mti_magic mt1.mtb.mtb_magic +#define mti_format mt1.mtb.mtb_format +#define mti_rmutex mt1.mtb.mtb_rmutex +#define mti_rmname mt1.mtb.mtb_rmname +#define mti_txnid mt1.mtb.mtb_txnid +#define mti_numreaders mt1.mtb.mtb_numreaders + char pad[(sizeof(MDB_txbody)+CACHELINE-1) & ~(CACHELINE-1)]; + } mt1; + union { +#if defined(_WIN32) || defined(MDB_USE_POSIX_SEM) + char mt2_wmname[MNAME_LEN]; +#define mti_wmname mt2.mt2_wmname +#else + mdb_mutex_t mt2_wmutex; +#define mti_wmutex mt2.mt2_wmutex +#endif + char pad[(MNAME_LEN+CACHELINE-1) & ~(CACHELINE-1)]; + } mt2; + MDB_reader mti_readers[1]; +} MDB_txninfo; + + /** Lockfile format signature: version, features and field layout */ +#define MDB_LOCK_FORMAT \ + ((uint32_t) \ + ((MDB_LOCK_VERSION) \ + /* Flags which describe functionality */ \ + + (((MDB_PIDLOCK) != 0) << 16))) +/** @} */ + +/** Common header for all page types. + * Overflow records occupy a number of contiguous pages with no + * headers on any page after the first. + */ +typedef struct MDB_page { +#define mp_pgno mp_p.p_pgno +#define mp_next mp_p.p_next + union { + pgno_t p_pgno; /**< page number */ + struct MDB_page *p_next; /**< for in-memory list of freed pages */ + } mp_p; + uint16_t mp_pad; +/** @defgroup mdb_page Page Flags + * @ingroup internal + * Flags for the page headers. 
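A generic, standalone illustration of the pad-to-cache-line union idiom used by the reader and lock-table slots above; every name and the 64-byte figure are illustrative, not LMDB's:

#include <stdint.h>

#define EX_CACHELINE 64   /* assumed cache line size */

struct ex_body {
    volatile uint64_t txnid;
    volatile int      pid;
};

typedef struct ex_slot {
    union {
        struct ex_body b;
        /* Round each slot up to a whole cache line so that two threads
         * updating adjacent slots never share a line (no false sharing). */
        char pad[(sizeof(struct ex_body) + EX_CACHELINE - 1)
                 & ~(EX_CACHELINE - 1)];
    } u;
} ex_slot;   /* sizeof(ex_slot) is a multiple of EX_CACHELINE */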
+ * @{ + */ +#define P_BRANCH 0x01 /**< branch page */ +#define P_LEAF 0x02 /**< leaf page */ +#define P_OVERFLOW 0x04 /**< overflow page */ +#define P_META 0x08 /**< meta page */ +#define P_DIRTY 0x10 /**< dirty page, also set for #P_SUBP pages */ +#define P_LEAF2 0x20 /**< for #MDB_DUPFIXED records */ +#define P_SUBP 0x40 /**< for #MDB_DUPSORT sub-pages */ +#define P_LOOSE 0x4000 /**< page was dirtied then freed, can be reused */ +#define P_KEEP 0x8000 /**< leave this page alone during spill */ +/** @} */ + uint16_t mp_flags; /**< @ref mdb_page */ +#define mp_lower mp_pb.pb.pb_lower +#define mp_upper mp_pb.pb.pb_upper +#define mp_pages mp_pb.pb_pages + union { + struct { + indx_t pb_lower; /**< lower bound of free space */ + indx_t pb_upper; /**< upper bound of free space */ + } pb; + uint32_t pb_pages; /**< number of overflow pages */ + } mp_pb; + indx_t mp_ptrs[1]; /**< dynamic size */ +} MDB_page; + + /** Size of the page header, excluding dynamic data at the end */ +#define PAGEHDRSZ ((unsigned) offsetof(MDB_page, mp_ptrs)) + + /** Address of first usable data byte in a page, after the header */ +#define METADATA(p) ((void *)((char *)(p) + PAGEHDRSZ)) + + /** ITS#7713, change PAGEBASE to handle 65536 byte pages */ +#define PAGEBASE ((MDB_DEVEL) ? PAGEHDRSZ : 0) + + /** Number of nodes on a page */ +#define NUMKEYS(p) (((p)->mp_lower - (PAGEHDRSZ-PAGEBASE)) >> 1) + + /** The amount of space remaining in the page */ +#define SIZELEFT(p) (indx_t)((p)->mp_upper - (p)->mp_lower) + + /** The percentage of space used in the page, in tenths of a percent. */ +#define PAGEFILL(env, p) (1000L * ((env)->me_psize - PAGEHDRSZ - SIZELEFT(p)) / \ + ((env)->me_psize - PAGEHDRSZ)) + /** The minimum page fill factor, in tenths of a percent. + * Pages emptier than this are candidates for merging. + */ +#define FILL_THRESHOLD 250 + + /** Test if a page is a leaf page */ +#define IS_LEAF(p) F_ISSET((p)->mp_flags, P_LEAF) + /** Test if a page is a LEAF2 page */ +#define IS_LEAF2(p) F_ISSET((p)->mp_flags, P_LEAF2) + /** Test if a page is a branch page */ +#define IS_BRANCH(p) F_ISSET((p)->mp_flags, P_BRANCH) + /** Test if a page is an overflow page */ +#define IS_OVERFLOW(p) F_ISSET((p)->mp_flags, P_OVERFLOW) + /** Test if a page is a sub page */ +#define IS_SUBP(p) F_ISSET((p)->mp_flags, P_SUBP) + + /** The number of overflow pages needed to store the given size. */ +#define OVPAGES(size, psize) ((PAGEHDRSZ-1 + (size)) / (psize) + 1) + + /** Link in #MDB_txn.%mt_loose_pgs list */ +#define NEXT_LOOSE_PAGE(p) (*(MDB_page **)((p) + 2)) + + /** Header for a single key/data pair within a page. + * Used in pages of type #P_BRANCH and #P_LEAF without #P_LEAF2. + * We guarantee 2-byte alignment for 'MDB_node's. + */ +typedef struct MDB_node { + /** lo and hi are used for data size on leaf nodes and for + * child pgno on branch nodes. On 64 bit platforms, flags + * is also used for pgno. (Branch nodes have no flags). + * They are in host byte order in case that lets some + * accesses be optimized into a 32-bit word access. + */ +#if BYTE_ORDER == LITTLE_ENDIAN + unsigned short mn_lo, mn_hi; /**< part of data size or pgno */ +#else + unsigned short mn_hi, mn_lo; +#endif +/** @defgroup mdb_node Node Flags + * @ingroup internal + * Flags for node headers. 
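A worked example of the OVPAGES() arithmetic above; the 4096-byte page and 16-byte header are assumed values chosen only to make the numbers concrete:

#include <stdio.h>

int main(void)
{
    unsigned pagehdr = 16, psize = 4096, size = 10000;
    /* Same form as OVPAGES(size, psize): header on the first page,
     * then raw data spanning the remaining contiguous pages. */
    unsigned pages = (pagehdr - 1 + size) / psize + 1;

    printf("a %u-byte item occupies %u overflow pages\n", size, pages);
    return 0;
}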
+ * @{ + */ +#define F_BIGDATA 0x01 /**< data put on overflow page */ +#define F_SUBDATA 0x02 /**< data is a sub-database */ +#define F_DUPDATA 0x04 /**< data has duplicates */ + +/** valid flags for #mdb_node_add() */ +#define NODE_ADD_FLAGS (F_DUPDATA|F_SUBDATA|MDB_RESERVE|MDB_APPEND) + +/** @} */ + unsigned short mn_flags; /**< @ref mdb_node */ + unsigned short mn_ksize; /**< key size */ + char mn_data[1]; /**< key and data are appended here */ +} MDB_node; + + /** Size of the node header, excluding dynamic data at the end */ +#define NODESIZE offsetof(MDB_node, mn_data) + + /** Bit position of top word in page number, for shifting mn_flags */ +#define PGNO_TOPWORD ((pgno_t)-1 > 0xffffffffu ? 32 : 0) + + /** Size of a node in a branch page with a given key. + * This is just the node header plus the key, there is no data. + */ +#define INDXSIZE(k) (NODESIZE + ((k) == NULL ? 0 : (k)->mv_size)) + + /** Size of a node in a leaf page with a given key and data. + * This is node header plus key plus data size. + */ +#define LEAFSIZE(k, d) (NODESIZE + (k)->mv_size + (d)->mv_size) + + /** Address of node \b i in page \b p */ +#define NODEPTR(p, i) ((MDB_node *)((char *)(p) + (p)->mp_ptrs[i] + PAGEBASE)) + + /** Address of the key for the node */ +#define NODEKEY(node) (void *)((node)->mn_data) + + /** Address of the data for a node */ +#define NODEDATA(node) (void *)((char *)(node)->mn_data + (node)->mn_ksize) + + /** Get the page number pointed to by a branch node */ +#define NODEPGNO(node) \ + ((node)->mn_lo | ((pgno_t) (node)->mn_hi << 16) | \ + (PGNO_TOPWORD ? ((pgno_t) (node)->mn_flags << PGNO_TOPWORD) : 0)) + /** Set the page number in a branch node */ +#define SETPGNO(node,pgno) do { \ + (node)->mn_lo = (pgno) & 0xffff; (node)->mn_hi = (pgno) >> 16; \ + if (PGNO_TOPWORD) (node)->mn_flags = (pgno) >> PGNO_TOPWORD; } while(0) + + /** Get the size of the data in a leaf node */ +#define NODEDSZ(node) ((node)->mn_lo | ((unsigned)(node)->mn_hi << 16)) + /** Set the size of the data for a leaf node */ +#define SETDSZ(node,size) do { \ + (node)->mn_lo = (size) & 0xffff; (node)->mn_hi = (size) >> 16;} while(0) + /** The size of a key in a node */ +#define NODEKSZ(node) ((node)->mn_ksize) + + /** Copy a page number from src to dst */ +#ifdef MISALIGNED_OK +#define COPY_PGNO(dst,src) dst = src +#else +#if SIZE_MAX > 4294967295UL +#define COPY_PGNO(dst,src) do { \ + unsigned short *s, *d; \ + s = (unsigned short *)&(src); \ + d = (unsigned short *)&(dst); \ + *d++ = *s++; \ + *d++ = *s++; \ + *d++ = *s++; \ + *d = *s; \ +} while (0) +#else +#define COPY_PGNO(dst,src) do { \ + unsigned short *s, *d; \ + s = (unsigned short *)&(src); \ + d = (unsigned short *)&(dst); \ + *d++ = *s++; \ + *d = *s; \ +} while (0) +#endif +#endif + /** The address of a key in a LEAF2 page. + * LEAF2 pages are used for #MDB_DUPFIXED sorted-duplicate sub-DBs. + * There are no node headers, keys are stored contiguously. + */ +#define LEAF2KEY(p, i, ks) ((char *)(p) + PAGEHDRSZ + ((i)*(ks))) + + /** Set the \b node's key into \b keyptr, if requested. */ +#define MDB_GET_KEY(node, keyptr) { if ((keyptr) != NULL) { \ + (keyptr)->mv_size = NODEKSZ(node); (keyptr)->mv_data = NODEKEY(node); } } + + /** Set the \b node's key into \b key. */ +#define MDB_GET_KEY2(node, key) { key.mv_size = NODEKSZ(node); key.mv_data = NODEKEY(node); } + + /** Information about a single database in the environment. 
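A standalone sketch of the page-number packing performed by SETPGNO()/NODEPGNO() above — low word, high word, and, on 64-bit builds, a third word kept in the branch node's otherwise unused flags field. The struct and function names are illustrative:

#include <assert.h>
#include <stdint.h>

struct ex_node { uint16_t lo, hi, flags; };

static void ex_setpgno(struct ex_node *n, uint64_t pgno)
{
    n->lo    = (uint16_t)(pgno & 0xffff);
    n->hi    = (uint16_t)((pgno >> 16) & 0xffff);
    n->flags = (uint16_t)(pgno >> 32);   /* top word; only used on 64-bit */
}

static uint64_t ex_getpgno(const struct ex_node *n)
{
    return (uint64_t)n->lo | ((uint64_t)n->hi << 16) | ((uint64_t)n->flags << 32);
}

int main(void)
{
    struct ex_node n;
    ex_setpgno(&n, 0x123456789aULL);
    assert(ex_getpgno(&n) == 0x123456789aULL);   /* round-trips a 48-bit pgno */
    return 0;
}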
*/ +typedef struct MDB_db { + uint32_t md_pad; /**< also ksize for LEAF2 pages */ + uint16_t md_flags; /**< @ref mdb_dbi_open */ + uint16_t md_depth; /**< depth of this tree */ + pgno_t md_branch_pages; /**< number of internal pages */ + pgno_t md_leaf_pages; /**< number of leaf pages */ + pgno_t md_overflow_pages; /**< number of overflow pages */ + size_t md_entries; /**< number of data items */ + pgno_t md_root; /**< the root page of this tree */ +} MDB_db; + + /** mdb_dbi_open flags */ +#define MDB_VALID 0x8000 /**< DB handle is valid, for me_dbflags */ +#define PERSISTENT_FLAGS (0xffff & ~(MDB_VALID)) +#define VALID_FLAGS (MDB_REVERSEKEY|MDB_DUPSORT|MDB_INTEGERKEY|MDB_DUPFIXED|\ + MDB_INTEGERDUP|MDB_REVERSEDUP|MDB_CREATE) + + /** Handle for the DB used to track free pages. */ +#define FREE_DBI 0 + /** Handle for the default DB. */ +#define MAIN_DBI 1 + /** Number of DBs in metapage (free and main) - also hardcoded elsewhere */ +#define CORE_DBS 2 + + /** Number of meta pages - also hardcoded elsewhere */ +#define NUM_METAS 2 + + /** Meta page content. + * A meta page is the start point for accessing a database snapshot. + * Pages 0-1 are meta pages. Transaction N writes meta page #(N % 2). + */ +typedef struct MDB_meta { + /** Stamp identifying this as an LMDB file. It must be set + * to #MDB_MAGIC. */ + uint32_t mm_magic; + /** Version number of this file. Must be set to #MDB_DATA_VERSION. */ + uint32_t mm_version; + void *mm_address; /**< address for fixed mapping */ + size_t mm_mapsize; /**< size of mmap region */ + MDB_db mm_dbs[CORE_DBS]; /**< first is free space, 2nd is main db */ + /** The size of pages used in this DB */ +#define mm_psize mm_dbs[FREE_DBI].md_pad + /** Any persistent environment flags. @ref mdb_env */ +#define mm_flags mm_dbs[FREE_DBI].md_flags + pgno_t mm_last_pg; /**< last used page in file */ + volatile txnid_t mm_txnid; /**< txnid that committed this page */ +} MDB_meta; + + /** Buffer for a stack-allocated meta page. + * The members define size and alignment, and silence type + * aliasing warnings. They are not used directly; that could + * mean incorrectly using several union members in parallel. + */ +typedef union MDB_metabuf { + MDB_page mb_page; + struct { + char mm_pad[PAGEHDRSZ]; + MDB_meta mm_meta; + } mb_metabuf; +} MDB_metabuf; + + /** Auxiliary DB info. + * The information here is mostly static/read-only. There is + * only a single copy of this record in the environment. + */ +typedef struct MDB_dbx { + MDB_val md_name; /**< name of the database */ + MDB_cmp_func *md_cmp; /**< function for comparing keys */ + MDB_cmp_func *md_dcmp; /**< function for comparing data items */ + MDB_rel_func *md_rel; /**< user relocate function */ + void *md_relctx; /**< user-provided context for md_rel */ +} MDB_dbx; + + /** A database transaction. + * Every operation requires a transaction handle. + */ +struct MDB_txn { + MDB_txn *mt_parent; /**< parent of a nested txn */ + /** Nested txn under this txn, set together with flag #MDB_TXN_HAS_CHILD */ + MDB_txn *mt_child; + pgno_t mt_next_pgno; /**< next unallocated page */ + /** The ID of this transaction. IDs are integers incrementing from 1. + * Only committed write transactions increment the ID. If a transaction + * aborts, the ID may be re-used by the next writer. + */ + txnid_t mt_txnid; + MDB_env *mt_env; /**< the DB environment */ + /** The list of pages that became unused during this transaction. 
+ */ + MDB_IDL mt_free_pgs; + /** The list of loose pages that became unused and may be reused + * in this transaction, linked through #NEXT_LOOSE_PAGE(page). + */ + MDB_page *mt_loose_pgs; + /* #Number of loose pages (#mt_loose_pgs) */ + int mt_loose_count; + /** The sorted list of dirty pages we temporarily wrote to disk + * because the dirty list was full. page numbers in here are + * shifted left by 1, deleted slots have the LSB set. + */ + MDB_IDL mt_spill_pgs; + union { + /** For write txns: Modified pages. Sorted when not MDB_WRITEMAP. */ + MDB_ID2L dirty_list; + /** For read txns: This thread/txn's reader table slot, or NULL. */ + MDB_reader *reader; + } mt_u; + /** Array of records for each DB known in the environment. */ + MDB_dbx *mt_dbxs; + /** Array of MDB_db records for each known DB */ + MDB_db *mt_dbs; + /** Array of sequence numbers for each DB handle */ + unsigned int *mt_dbiseqs; +/** @defgroup mt_dbflag Transaction DB Flags + * @ingroup internal + * @{ + */ +#define DB_DIRTY 0x01 /**< DB was modified or is DUPSORT data */ +#define DB_STALE 0x02 /**< Named-DB record is older than txnID */ +#define DB_NEW 0x04 /**< Named-DB handle opened in this txn */ +#define DB_VALID 0x08 /**< DB handle is valid, see also #MDB_VALID */ +#define DB_USRVALID 0x10 /**< As #DB_VALID, but not set for #FREE_DBI */ +/** @} */ + /** In write txns, array of cursors for each DB */ + MDB_cursor **mt_cursors; + /** Array of flags for each DB */ + unsigned char *mt_dbflags; + /** Number of DB records in use, or 0 when the txn is finished. + * This number only ever increments until the txn finishes; we + * don't decrement it when individual DB handles are closed. + */ + MDB_dbi mt_numdbs; + +/** @defgroup mdb_txn Transaction Flags + * @ingroup internal + * @{ + */ + /** #mdb_txn_begin() flags */ +#define MDB_TXN_BEGIN_FLAGS MDB_RDONLY +#define MDB_TXN_RDONLY MDB_RDONLY /**< read-only transaction */ + /* internal txn flags */ +#define MDB_TXN_WRITEMAP MDB_WRITEMAP /**< copy of #MDB_env flag in writers */ +#define MDB_TXN_FINISHED 0x01 /**< txn is finished or never began */ +#define MDB_TXN_ERROR 0x02 /**< txn is unusable after an error */ +#define MDB_TXN_DIRTY 0x04 /**< must write, even if dirty list is empty */ +#define MDB_TXN_SPILLS 0x08 /**< txn or a parent has spilled pages */ +#define MDB_TXN_HAS_CHILD 0x10 /**< txn has an #MDB_txn.%mt_child */ + /** most operations on the txn are currently illegal */ +#define MDB_TXN_BLOCKED (MDB_TXN_FINISHED|MDB_TXN_ERROR|MDB_TXN_HAS_CHILD) +/** @} */ + unsigned int mt_flags; /**< @ref mdb_txn */ + /** #dirty_list room: Array size - \#dirty pages visible to this txn. + * Includes ancestor txns' dirty pages not hidden by other txns' + * dirty/spilled pages. Thus commit(nested txn) has room to merge + * dirty_list into mt_parent after freeing hidden mt_parent pages. + */ + unsigned int mt_dirty_room; +}; + +/** Enough space for 2^32 nodes with minimum of 2 keys per node. I.e., plenty. + * At 4 keys per node, enough for 2^64 nodes, so there's probably no need to + * raise this on a 64 bit machine. + */ +#define CURSOR_STACK 32 + +struct MDB_xcursor; + + /** Cursors are used for all DB operations. + * A cursor holds a path of (page pointer, key index) from the DB + * root to a position in the DB, plus other state. #MDB_DUPSORT + * cursors include an xcursor to the current data item. Write txns + * track their cursors and keep them up to date when data moves. + * Exception: An xcursor's pointer to a #P_SUBP page can be stale. 
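A hedged sketch of the public cursor workflow that the struct below supports: iterate every key in the main (unnamed) DB of an already-open environment. Error handling is abbreviated, and treating keys as printable text is an assumption about the data:

#include <stdio.h>
#include "lmdb.h"

static int dump_all(MDB_env *env)
{
    MDB_txn *txn;
    MDB_dbi dbi;
    MDB_cursor *cur;
    MDB_val key, data;
    int rc;

    if ((rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)) != MDB_SUCCESS)
        return rc;
    if ((rc = mdb_dbi_open(txn, NULL, 0, &dbi)) == MDB_SUCCESS &&
        (rc = mdb_cursor_open(txn, dbi, &cur)) == MDB_SUCCESS) {
        while ((rc = mdb_cursor_get(cur, &key, &data, MDB_NEXT)) == MDB_SUCCESS)
            printf("%.*s\n", (int)key.mv_size, (char *)key.mv_data);
        if (rc == MDB_NOTFOUND)
            rc = MDB_SUCCESS;       /* normal end of iteration */
        mdb_cursor_close(cur);
    }
    mdb_txn_abort(txn);             /* read-only txn: abort releases it */
    return rc;
}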
+ * (A node with #F_DUPDATA but no #F_SUBDATA contains a subpage). + */ +struct MDB_cursor { + /** Next cursor on this DB in this txn */ + MDB_cursor *mc_next; + /** Backup of the original cursor if this cursor is a shadow */ + MDB_cursor *mc_backup; + /** Context used for databases with #MDB_DUPSORT, otherwise NULL */ + struct MDB_xcursor *mc_xcursor; + /** The transaction that owns this cursor */ + MDB_txn *mc_txn; + /** The database handle this cursor operates on */ + MDB_dbi mc_dbi; + /** The database record for this cursor */ + MDB_db *mc_db; + /** The database auxiliary record for this cursor */ + MDB_dbx *mc_dbx; + /** The @ref mt_dbflag for this database */ + unsigned char *mc_dbflag; + unsigned short mc_snum; /**< number of pushed pages */ + unsigned short mc_top; /**< index of top page, normally mc_snum-1 */ +/** @defgroup mdb_cursor Cursor Flags + * @ingroup internal + * Cursor state flags. + * @{ + */ +#define C_INITIALIZED 0x01 /**< cursor has been initialized and is valid */ +#define C_EOF 0x02 /**< No more data */ +#define C_SUB 0x04 /**< Cursor is a sub-cursor */ +#define C_DEL 0x08 /**< last op was a cursor_del */ +#define C_UNTRACK 0x40 /**< Un-track cursor when closing */ +/** @} */ + unsigned int mc_flags; /**< @ref mdb_cursor */ + MDB_page *mc_pg[CURSOR_STACK]; /**< stack of pushed pages */ + indx_t mc_ki[CURSOR_STACK]; /**< stack of page indices */ +}; + + /** Context for sorted-dup records. + * We could have gone to a fully recursive design, with arbitrarily + * deep nesting of sub-databases. But for now we only handle these + * levels - main DB, optional sub-DB, sorted-duplicate DB. + */ +typedef struct MDB_xcursor { + /** A sub-cursor for traversing the Dup DB */ + MDB_cursor mx_cursor; + /** The database record for this Dup DB */ + MDB_db mx_db; + /** The auxiliary DB record for this Dup DB */ + MDB_dbx mx_dbx; + /** The @ref mt_dbflag for this Dup DB */ + unsigned char mx_dbflag; +} MDB_xcursor; + + /** State of FreeDB old pages, stored in the MDB_env */ +typedef struct MDB_pgstate { + pgno_t *mf_pghead; /**< Reclaimed freeDB pages, or NULL before use */ + txnid_t mf_pglast; /**< ID of last used record, or 0 if !mf_pghead */ +} MDB_pgstate; + + /** The database environment. */ +struct MDB_env { + HANDLE me_fd; /**< The main data file */ + HANDLE me_lfd; /**< The lock file */ + HANDLE me_mfd; /**< just for writing the meta pages */ + /** Failed to update the meta page. Probably an I/O error. */ +#define MDB_FATAL_ERROR 0x80000000U + /** Some fields are initialized. 
*/ +#define MDB_ENV_ACTIVE 0x20000000U + /** me_txkey is set */ +#define MDB_ENV_TXKEY 0x10000000U + /** fdatasync is unreliable */ +#define MDB_FSYNCONLY 0x08000000U + uint32_t me_flags; /**< @ref mdb_env */ + unsigned int me_psize; /**< DB page size, inited from me_os_psize */ + unsigned int me_os_psize; /**< OS page size, from #GET_PAGESIZE */ + unsigned int me_maxreaders; /**< size of the reader table */ + /** Max #MDB_txninfo.%mti_numreaders of interest to #mdb_env_close() */ + volatile int me_close_readers; + MDB_dbi me_numdbs; /**< number of DBs opened */ + MDB_dbi me_maxdbs; /**< size of the DB table */ + MDB_PID_T me_pid; /**< process ID of this env */ + char *me_path; /**< path to the DB files */ + char *me_map; /**< the memory map of the data file */ + MDB_txninfo *me_txns; /**< the memory map of the lock file or NULL */ + MDB_meta *me_metas[NUM_METAS]; /**< pointers to the two meta pages */ + void *me_pbuf; /**< scratch area for DUPSORT put() */ + MDB_txn *me_txn; /**< current write transaction */ + MDB_txn *me_txn0; /**< prealloc'd write transaction */ + size_t me_mapsize; /**< size of the data memory map */ + off_t me_size; /**< current file size */ + pgno_t me_maxpg; /**< me_mapsize / me_psize */ + MDB_dbx *me_dbxs; /**< array of static DB info */ + uint16_t *me_dbflags; /**< array of flags from MDB_db.md_flags */ + unsigned int *me_dbiseqs; /**< array of dbi sequence numbers */ + pthread_key_t me_txkey; /**< thread-key for readers */ + txnid_t me_pgoldest; /**< ID of oldest reader last time we looked */ + MDB_pgstate me_pgstate; /**< state of old pages from freeDB */ +# define me_pglast me_pgstate.mf_pglast +# define me_pghead me_pgstate.mf_pghead + MDB_page *me_dpages; /**< list of malloc'd blocks for re-use */ + /** IDL of pages that became unused in a write txn */ + MDB_IDL me_free_pgs; + /** ID2L of pages written during a write txn. Length MDB_IDL_UM_SIZE. 
*/ + MDB_ID2L me_dirty_list; + /** Max number of freelist items that can fit in a single overflow page */ + int me_maxfree_1pg; + /** Max size of a node on a page */ + unsigned int me_nodemax; +#if !(MDB_MAXKEYSIZE) + unsigned int me_maxkey; /**< max size of a key */ +#endif + int me_live_reader; /**< have liveness lock in reader table */ +#ifdef _WIN32 + int me_pidquery; /**< Used in OpenProcess */ +#endif +#ifdef MDB_USE_POSIX_MUTEX /* Posix mutexes reside in shared mem */ +# define me_rmutex me_txns->mti_rmutex /**< Shared reader lock */ +# define me_wmutex me_txns->mti_wmutex /**< Shared writer lock */ +#else + mdb_mutex_t me_rmutex; + mdb_mutex_t me_wmutex; +#endif + void *me_userctx; /**< User-settable context */ + MDB_assert_func *me_assert_func; /**< Callback for assertion failures */ +}; + + /** Nested transaction */ +typedef struct MDB_ntxn { + MDB_txn mnt_txn; /**< the transaction */ + MDB_pgstate mnt_pgstate; /**< parent transaction's saved freestate */ +} MDB_ntxn; + + /** max number of pages to commit in one writev() call */ +#define MDB_COMMIT_PAGES 64 +#if defined(IOV_MAX) && IOV_MAX < MDB_COMMIT_PAGES +#undef MDB_COMMIT_PAGES +#define MDB_COMMIT_PAGES IOV_MAX +#endif + + /** max bytes to write in one call */ +#define MAX_WRITE (0x80000000U >> (sizeof(ssize_t) == 4)) + + /** Check \b txn and \b dbi arguments to a function */ +#define TXN_DBI_EXIST(txn, dbi, validity) \ + ((txn) && (dbi)<(txn)->mt_numdbs && ((txn)->mt_dbflags[dbi] & (validity))) + + /** Check for misused \b dbi handles */ +#define TXN_DBI_CHANGED(txn, dbi) \ + ((txn)->mt_dbiseqs[dbi] != (txn)->mt_env->me_dbiseqs[dbi]) + +static int mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp); +static int mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp); +static int mdb_page_touch(MDB_cursor *mc); + +#define MDB_END_NAMES {"committed", "empty-commit", "abort", "reset", \ + "reset-tmp", "fail-begin", "fail-beginchild"} +enum { + /* mdb_txn_end operation number, for logging */ + MDB_END_COMMITTED, MDB_END_EMPTY_COMMIT, MDB_END_ABORT, MDB_END_RESET, + MDB_END_RESET_TMP, MDB_END_FAIL_BEGIN, MDB_END_FAIL_BEGINCHILD +}; +#define MDB_END_OPMASK 0x0F /**< mask for #mdb_txn_end() operation number */ +#define MDB_END_UPDATE 0x10 /**< update env state (DBIs) */ +#define MDB_END_FREE 0x20 /**< free txn unless it is #MDB_env.%me_txn0 */ +#define MDB_END_SLOT MDB_NOTLS /**< release any reader slot if #MDB_NOTLS */ +static void mdb_txn_end(MDB_txn *txn, unsigned mode); + +static int mdb_page_get(MDB_txn *txn, pgno_t pgno, MDB_page **mp, int *lvl); +static int mdb_page_search_root(MDB_cursor *mc, + MDB_val *key, int modify); +#define MDB_PS_MODIFY 1 +#define MDB_PS_ROOTONLY 2 +#define MDB_PS_FIRST 4 +#define MDB_PS_LAST 8 +static int mdb_page_search(MDB_cursor *mc, + MDB_val *key, int flags); +static int mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst); + +#define MDB_SPLIT_REPLACE MDB_APPENDDUP /**< newkey is not new */ +static int mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata, + pgno_t newpgno, unsigned int nflags); + +static int mdb_env_read_header(MDB_env *env, MDB_meta *meta); +static MDB_meta *mdb_env_pick_meta(const MDB_env *env); +static int mdb_env_write_meta(MDB_txn *txn); +#ifdef MDB_USE_POSIX_MUTEX /* Drop unused excl arg */ +# define mdb_env_close0(env, excl) mdb_env_close1(env) +#endif +static void mdb_env_close0(MDB_env *env, int excl); + +static MDB_node *mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp); +static int mdb_node_add(MDB_cursor *mc, indx_t indx, + 
MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags); +static void mdb_node_del(MDB_cursor *mc, int ksize); +static void mdb_node_shrink(MDB_page *mp, indx_t indx); +static int mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst, int fromleft); +static int mdb_node_read(MDB_txn *txn, MDB_node *leaf, MDB_val *data); +static size_t mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data); +static size_t mdb_branch_size(MDB_env *env, MDB_val *key); + +static int mdb_rebalance(MDB_cursor *mc); +static int mdb_update_key(MDB_cursor *mc, MDB_val *key); + +static void mdb_cursor_pop(MDB_cursor *mc); +static int mdb_cursor_push(MDB_cursor *mc, MDB_page *mp); + +static int mdb_cursor_del0(MDB_cursor *mc); +static int mdb_del0(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data, unsigned flags); +static int mdb_cursor_sibling(MDB_cursor *mc, int move_right); +static int mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op); +static int mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op); +static int mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op, + int *exactp); +static int mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data); +static int mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data); + +static void mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx); +static void mdb_xcursor_init0(MDB_cursor *mc); +static void mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node); +static void mdb_xcursor_init2(MDB_cursor *mc, MDB_xcursor *src_mx, int force); + +static int mdb_drop0(MDB_cursor *mc, int subs); +static void mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi); +static int mdb_reader_check0(MDB_env *env, int rlocked, int *dead); + +/** @cond */ +static MDB_cmp_func mdb_cmp_memn, mdb_cmp_memnr, mdb_cmp_int, mdb_cmp_cint, mdb_cmp_long; +/** @endcond */ + +/** Compare two items pointing at size_t's of unknown alignment. */ +#ifdef MISALIGNED_OK +# define mdb_cmp_clong mdb_cmp_long +#else +# define mdb_cmp_clong mdb_cmp_cint +#endif + +#ifdef _WIN32 +static SECURITY_DESCRIPTOR mdb_null_sd; +static SECURITY_ATTRIBUTES mdb_all_sa; +static int mdb_sec_inited; + +static int utf8_to_utf16(const char *src, int srcsize, wchar_t **dst, int *dstsize); +#endif + +/** Return the library version info. 
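A trivial usage sketch for mdb_version(), whose definition follows:

#include <stdio.h>
#include "lmdb.h"

int main(void)
{
    int major, minor, patch;
    printf("linked against %s\n", mdb_version(&major, &minor, &patch));
    printf("numeric: %d.%d.%d\n", major, minor, patch);
    return 0;
}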
*/ +char * ESECT +mdb_version(int *major, int *minor, int *patch) +{ + if (major) *major = MDB_VERSION_MAJOR; + if (minor) *minor = MDB_VERSION_MINOR; + if (patch) *patch = MDB_VERSION_PATCH; + return MDB_VERSION_STRING; +} + +/** Table of descriptions for LMDB @ref errors */ +static char *const mdb_errstr[] = { + "MDB_KEYEXIST: Key/data pair already exists", + "MDB_NOTFOUND: No matching key/data pair found", + "MDB_PAGE_NOTFOUND: Requested page not found", + "MDB_CORRUPTED: Located page was wrong type", + "MDB_PANIC: Update of meta page failed or environment had fatal error", + "MDB_VERSION_MISMATCH: Database environment version mismatch", + "MDB_INVALID: File is not an LMDB file", + "MDB_MAP_FULL: Environment mapsize limit reached", + "MDB_DBS_FULL: Environment maxdbs limit reached", + "MDB_READERS_FULL: Environment maxreaders limit reached", + "MDB_TLS_FULL: Thread-local storage keys full - too many environments open", + "MDB_TXN_FULL: Transaction has too many dirty pages - transaction too big", + "MDB_CURSOR_FULL: Internal error - cursor stack limit reached", + "MDB_PAGE_FULL: Internal error - page has no more space", + "MDB_MAP_RESIZED: Database contents grew beyond environment mapsize", + "MDB_INCOMPATIBLE: Operation and DB incompatible, or DB flags changed", + "MDB_BAD_RSLOT: Invalid reuse of reader locktable slot", + "MDB_BAD_TXN: Transaction must abort, has a child, or is invalid", + "MDB_BAD_VALSIZE: Unsupported size of key/DB name/data, or wrong DUPFIXED size", + "MDB_BAD_DBI: The specified DBI handle was closed/changed unexpectedly", +}; + +char * +mdb_strerror(int err) +{ +#ifdef _WIN32 + /** HACK: pad 4KB on stack over the buf. Return system msgs in buf. + * This works as long as no function between the call to mdb_strerror + * and the actual use of the message uses more than 4K of stack. + */ + char pad[4096]; + char buf[1024], *ptr = buf; +#endif + int i; + if (!err) + return ("Successful return: 0"); + + if (err >= MDB_KEYEXIST && err <= MDB_LAST_ERRCODE) { + i = err - MDB_KEYEXIST; + return mdb_errstr[i]; + } + +#ifdef _WIN32 + /* These are the C-runtime error codes we use. The comment indicates + * their numeric value, and the Win32 error they would correspond to + * if the error actually came from a Win32 API. A major mess, we should + * have used LMDB-specific error codes for everything. + */ + switch(err) { + case ENOENT: /* 2, FILE_NOT_FOUND */ + case EIO: /* 5, ACCESS_DENIED */ + case ENOMEM: /* 12, INVALID_ACCESS */ + case EACCES: /* 13, INVALID_DATA */ + case EBUSY: /* 16, CURRENT_DIRECTORY */ + case EINVAL: /* 22, BAD_COMMAND */ + case ENOSPC: /* 28, OUT_OF_PAPER */ + return strerror(err); + default: + ; + } + buf[0] = 0; + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, err, 0, ptr, sizeof(buf), (va_list *)pad); + return ptr; +#else + return strerror(err); +#endif +} + +/** assert(3) variant in cursor context */ +#define mdb_cassert(mc, expr) mdb_assert0((mc)->mc_txn->mt_env, expr, #expr) +/** assert(3) variant in transaction context */ +#define mdb_tassert(txn, expr) mdb_assert0((txn)->mt_env, expr, #expr) +/** assert(3) variant in environment context */ +#define mdb_eassert(env, expr) mdb_assert0(env, expr, #expr) + +#ifndef NDEBUG +# define mdb_assert0(env, expr, expr_txt) ((expr) ? 
(void)0 : \ + mdb_assert_fail(env, expr_txt, mdb_func_, __FILE__, __LINE__)) + +static void ESECT +mdb_assert_fail(MDB_env *env, const char *expr_txt, + const char *func, const char *file, int line) +{ + char buf[400]; + sprintf(buf, "%.100s:%d: Assertion '%.200s' failed in %.40s()", + file, line, expr_txt, func); + if (env->me_assert_func) + env->me_assert_func(env, buf); + fprintf(stderr, "%s\n", buf); + abort(); +} +#else +# define mdb_assert0(env, expr, expr_txt) ((void) 0) +#endif /* NDEBUG */ + +#if MDB_DEBUG +/** Return the page number of \b mp which may be sub-page, for debug output */ +static pgno_t +mdb_dbg_pgno(MDB_page *mp) +{ + pgno_t ret; + COPY_PGNO(ret, mp->mp_pgno); + return ret; +} + +/** Display a key in hexadecimal and return the address of the result. + * @param[in] key the key to display + * @param[in] buf the buffer to write into. Should always be #DKBUF. + * @return The key in hexadecimal form. + */ +char * +mdb_dkey(MDB_val *key, char *buf) +{ + char *ptr = buf; + unsigned char *c = key->mv_data; + unsigned int i; + + if (!key) + return ""; + + if (key->mv_size > DKBUF_MAXKEYSIZE) + return "MDB_MAXKEYSIZE"; + /* may want to make this a dynamic check: if the key is mostly + * printable characters, print it as-is instead of converting to hex. + */ +#if 1 + buf[0] = '\0'; + for (i=0; imv_size; i++) + ptr += sprintf(ptr, "%02x", *c++); +#else + sprintf(buf, "%.*s", key->mv_size, key->mv_data); +#endif + return buf; +} + +static const char * +mdb_leafnode_type(MDB_node *n) +{ + static char *const tp[2][2] = {{"", ": DB"}, {": sub-page", ": sub-DB"}}; + return F_ISSET(n->mn_flags, F_BIGDATA) ? ": overflow page" : + tp[F_ISSET(n->mn_flags, F_DUPDATA)][F_ISSET(n->mn_flags, F_SUBDATA)]; +} + +/** Display all the keys in the page. */ +void +mdb_page_list(MDB_page *mp) +{ + pgno_t pgno = mdb_dbg_pgno(mp); + const char *type, *state = (mp->mp_flags & P_DIRTY) ? ", dirty" : ""; + MDB_node *node; + unsigned int i, nkeys, nsize, total = 0; + MDB_val key; + DKBUF; + + switch (mp->mp_flags & (P_BRANCH|P_LEAF|P_LEAF2|P_META|P_OVERFLOW|P_SUBP)) { + case P_BRANCH: type = "Branch page"; break; + case P_LEAF: type = "Leaf page"; break; + case P_LEAF|P_SUBP: type = "Sub-page"; break; + case P_LEAF|P_LEAF2: type = "LEAF2 page"; break; + case P_LEAF|P_LEAF2|P_SUBP: type = "LEAF2 sub-page"; break; + case P_OVERFLOW: + fprintf(stderr, "Overflow page %"Z"u pages %u%s\n", + pgno, mp->mp_pages, state); + return; + case P_META: + fprintf(stderr, "Meta-page %"Z"u txnid %"Z"u\n", + pgno, ((MDB_meta *)METADATA(mp))->mm_txnid); + return; + default: + fprintf(stderr, "Bad page %"Z"u flags 0x%u\n", pgno, mp->mp_flags); + return; + } + + nkeys = NUMKEYS(mp); + fprintf(stderr, "%s %"Z"u numkeys %d%s\n", type, pgno, nkeys, state); + + for (i=0; imp_pad; + key.mv_data = LEAF2KEY(mp, i, nsize); + total += nsize; + fprintf(stderr, "key %d: nsize %d, %s\n", i, nsize, DKEY(&key)); + continue; + } + node = NODEPTR(mp, i); + key.mv_size = node->mn_ksize; + key.mv_data = node->mn_data; + nsize = NODESIZE + key.mv_size; + if (IS_BRANCH(mp)) { + fprintf(stderr, "key %d: page %"Z"u, %s\n", i, NODEPGNO(node), + DKEY(&key)); + total += nsize; + } else { + if (F_ISSET(node->mn_flags, F_BIGDATA)) + nsize += sizeof(pgno_t); + else + nsize += NODEDSZ(node); + total += nsize; + nsize += sizeof(indx_t); + fprintf(stderr, "key %d: nsize %d, %s%s\n", + i, nsize, DKEY(&key), mdb_leafnode_type(node)); + } + total = EVEN(total); + } + fprintf(stderr, "Total: header %d + contents %d + unused %d\n", + IS_LEAF2(mp) ? 
PAGEHDRSZ : PAGEBASE + mp->mp_lower, total, SIZELEFT(mp)); +} + +void +mdb_cursor_chk(MDB_cursor *mc) +{ + unsigned int i; + MDB_node *node; + MDB_page *mp; + + if (!mc->mc_snum || !(mc->mc_flags & C_INITIALIZED)) return; + for (i=0; imc_top; i++) { + mp = mc->mc_pg[i]; + node = NODEPTR(mp, mc->mc_ki[i]); + if (NODEPGNO(node) != mc->mc_pg[i+1]->mp_pgno) + printf("oops!\n"); + } + if (mc->mc_ki[i] >= NUMKEYS(mc->mc_pg[i])) + printf("ack!\n"); + if (mc->mc_xcursor && (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) { + node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + if (((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) && + mc->mc_xcursor->mx_cursor.mc_pg[0] != NODEDATA(node)) { + printf("blah!\n"); + } + } +} +#endif + +#if (MDB_DEBUG) > 2 +/** Count all the pages in each DB and in the freelist + * and make sure it matches the actual number of pages + * being used. + * All named DBs must be open for a correct count. + */ +static void mdb_audit(MDB_txn *txn) +{ + MDB_cursor mc; + MDB_val key, data; + MDB_ID freecount, count; + MDB_dbi i; + int rc; + + freecount = 0; + mdb_cursor_init(&mc, txn, FREE_DBI, NULL); + while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0) + freecount += *(MDB_ID *)data.mv_data; + mdb_tassert(txn, rc == MDB_NOTFOUND); + + count = 0; + for (i = 0; imt_numdbs; i++) { + MDB_xcursor mx; + if (!(txn->mt_dbflags[i] & DB_VALID)) + continue; + mdb_cursor_init(&mc, txn, i, &mx); + if (txn->mt_dbs[i].md_root == P_INVALID) + continue; + count += txn->mt_dbs[i].md_branch_pages + + txn->mt_dbs[i].md_leaf_pages + + txn->mt_dbs[i].md_overflow_pages; + if (txn->mt_dbs[i].md_flags & MDB_DUPSORT) { + rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST); + for (; rc == MDB_SUCCESS; rc = mdb_cursor_sibling(&mc, 1)) { + unsigned j; + MDB_page *mp; + mp = mc.mc_pg[mc.mc_top]; + for (j=0; jmn_flags & F_SUBDATA) { + MDB_db db; + memcpy(&db, NODEDATA(leaf), sizeof(db)); + count += db.md_branch_pages + db.md_leaf_pages + + db.md_overflow_pages; + } + } + } + mdb_tassert(txn, rc == MDB_NOTFOUND); + } + } + if (freecount + count + NUM_METAS != txn->mt_next_pgno) { + fprintf(stderr, "audit: %lu freecount: %lu count: %lu total: %lu next_pgno: %lu\n", + txn->mt_txnid, freecount, count+NUM_METAS, + freecount+count+NUM_METAS, txn->mt_next_pgno); + } +} +#endif + +int +mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b) +{ + return txn->mt_dbxs[dbi].md_cmp(a, b); +} + +int +mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b) +{ + MDB_cmp_func *dcmp = txn->mt_dbxs[dbi].md_dcmp; +#if UINT_MAX < SIZE_MAX + if (dcmp == mdb_cmp_int && a->mv_size == sizeof(size_t)) + dcmp = mdb_cmp_clong; +#endif + return dcmp(a, b); +} + +/** Allocate memory for a page. + * Re-use old malloc'd pages first for singletons, otherwise just malloc. + */ +static MDB_page * +mdb_page_malloc(MDB_txn *txn, unsigned num) +{ + MDB_env *env = txn->mt_env; + MDB_page *ret = env->me_dpages; + size_t psize = env->me_psize, sz = psize, off; + /* For ! #MDB_NOMEMINIT, psize counts how much to init. + * For a single page alloc, we init everything after the page header. + * For multi-page, we init the final page; if the caller needed that + * many pages they will be filling in at least up to the last page. 
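The mdb_cmp()/mdb_dcmp() implementations above dispatch through the per-database md_cmp/md_dcmp slots. A hedged sketch of installing a custom key comparator from application code through the public mdb_set_compare(); the fixed uint32_t key layout and the DB name are assumptions:

#include <stdint.h>
#include <string.h>
#include "lmdb.h"

/* Compare keys as native-endian uint32_t values, tolerating unaligned data. */
static int cmp_u32(const MDB_val *a, const MDB_val *b)
{
    uint32_t ai, bi;
    memcpy(&ai, a->mv_data, sizeof(ai));
    memcpy(&bi, b->mv_data, sizeof(bi));
    return (ai > bi) - (ai < bi);
}

/* Inside a write txn, after mdb_dbi_open(txn, "numbers", MDB_CREATE, &dbi):
 *     mdb_set_compare(txn, dbi, cmp_u32);
 * The comparator must be set again each time the DB handle is opened. */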
+ */ + if (num == 1) { + if (ret) { + VGMEMP_ALLOC(env, ret, sz); + VGMEMP_DEFINED(ret, sizeof(ret->mp_next)); + env->me_dpages = ret->mp_next; + return ret; + } + psize -= off = PAGEHDRSZ; + } else { + sz *= num; + off = sz - psize; + } + if ((ret = malloc(sz)) != NULL) { + VGMEMP_ALLOC(env, ret, sz); + if (!(env->me_flags & MDB_NOMEMINIT)) { + memset((char *)ret + off, 0, psize); + ret->mp_pad = 0; + } + } else { + txn->mt_flags |= MDB_TXN_ERROR; + } + return ret; +} +/** Free a single page. + * Saves single pages to a list, for future reuse. + * (This is not used for multi-page overflow pages.) + */ +static void +mdb_page_free(MDB_env *env, MDB_page *mp) +{ + mp->mp_next = env->me_dpages; + VGMEMP_FREE(env, mp); + env->me_dpages = mp; +} + +/** Free a dirty page */ +static void +mdb_dpage_free(MDB_env *env, MDB_page *dp) +{ + if (!IS_OVERFLOW(dp) || dp->mp_pages == 1) { + mdb_page_free(env, dp); + } else { + /* large pages just get freed directly */ + VGMEMP_FREE(env, dp); + free(dp); + } +} + +/** Return all dirty pages to dpage list */ +static void +mdb_dlist_free(MDB_txn *txn) +{ + MDB_env *env = txn->mt_env; + MDB_ID2L dl = txn->mt_u.dirty_list; + unsigned i, n = dl[0].mid; + + for (i = 1; i <= n; i++) { + mdb_dpage_free(env, dl[i].mptr); + } + dl[0].mid = 0; +} + +/** Loosen or free a single page. + * Saves single pages to a list for future reuse + * in this same txn. It has been pulled from the freeDB + * and already resides on the dirty list, but has been + * deleted. Use these pages first before pulling again + * from the freeDB. + * + * If the page wasn't dirtied in this txn, just add it + * to this txn's free list. + */ +static int +mdb_page_loose(MDB_cursor *mc, MDB_page *mp) +{ + int loose = 0; + pgno_t pgno = mp->mp_pgno; + MDB_txn *txn = mc->mc_txn; + + if ((mp->mp_flags & P_DIRTY) && mc->mc_dbi != FREE_DBI) { + if (txn->mt_parent) { + MDB_ID2 *dl = txn->mt_u.dirty_list; + /* If txn has a parent, make sure the page is in our + * dirty list. + */ + if (dl[0].mid) { + unsigned x = mdb_mid2l_search(dl, pgno); + if (x <= dl[0].mid && dl[x].mid == pgno) { + if (mp != dl[x].mptr) { /* bad cursor? */ + mc->mc_flags &= ~(C_INITIALIZED|C_EOF); + txn->mt_flags |= MDB_TXN_ERROR; + return MDB_CORRUPTED; + } + /* ok, it's ours */ + loose = 1; + } + } + } else { + /* no parent txn, so it's just ours */ + loose = 1; + } + } + if (loose) { + DPRINTF(("loosen db %d page %"Z"u", DDBI(mc), + mp->mp_pgno)); + NEXT_LOOSE_PAGE(mp) = txn->mt_loose_pgs; + txn->mt_loose_pgs = mp; + txn->mt_loose_count++; + mp->mp_flags |= P_LOOSE; + } else { + int rc = mdb_midl_append(&txn->mt_free_pgs, pgno); + if (rc) + return rc; + } + + return MDB_SUCCESS; +} + +/** Set or clear P_KEEP in dirty, non-overflow, non-sub pages watched by txn. + * @param[in] mc A cursor handle for the current operation. + * @param[in] pflags Flags of the pages to update: + * P_DIRTY to set P_KEEP, P_DIRTY|P_KEEP to clear it. + * @param[in] all No shortcuts. Needed except after a full #mdb_page_flush(). + * @return 0 on success, non-zero on failure. 
+ */
+static int
+mdb_pages_xkeep(MDB_cursor *mc, unsigned pflags, int all)
+{
+	enum { Mask = P_SUBP|P_DIRTY|P_LOOSE|P_KEEP };
+	MDB_txn *txn = mc->mc_txn;
+	MDB_cursor *m3;
+	MDB_xcursor *mx;
+	MDB_page *dp, *mp;
+	MDB_node *leaf;
+	unsigned i, j;
+	int rc = MDB_SUCCESS, level;
+
+	/* Mark pages seen by cursors */
+	if (mc->mc_flags & C_UNTRACK)
+		mc = NULL;	/* will find mc in mt_cursors */
+	for (i = txn->mt_numdbs;; mc = txn->mt_cursors[--i]) {
+		for (; mc; mc=mc->mc_next) {
+			if (!(mc->mc_flags & C_INITIALIZED))
+				continue;
+			for (m3 = mc;; m3 = &mx->mx_cursor) {
+				mp = NULL;
+				for (j=0; j<m3->mc_snum; j++) {
+					mp = m3->mc_pg[j];
+					if ((mp->mp_flags & Mask) == pflags)
+						mp->mp_flags ^= P_KEEP;
+				}
+				mx = m3->mc_xcursor;
+				/* Proceed to mx if it is at a sub-database */
+				if (! (mx && (mx->mx_cursor.mc_flags & C_INITIALIZED)))
+					break;
+				if (! (mp && (mp->mp_flags & P_LEAF)))
+					break;
+				leaf = NODEPTR(mp, m3->mc_ki[j-1]);
+				if (!(leaf->mn_flags & F_SUBDATA))
+					break;
+			}
+		}
+		if (i == 0)
+			break;
+	}
+
+	if (all) {
+		/* Mark dirty root pages */
+		for (i=0; i<txn->mt_numdbs; i++) {
+			if (txn->mt_dbflags[i] & DB_DIRTY) {
+				pgno_t pgno = txn->mt_dbs[i].md_root;
+				if (pgno == P_INVALID)
+					continue;
+				if ((rc = mdb_page_get(txn, pgno, &dp, &level)) != MDB_SUCCESS)
+					break;
+				if ((dp->mp_flags & Mask) == pflags && level <= 1)
+					dp->mp_flags ^= P_KEEP;
+			}
+		}
+	}
+
+	return rc;
+}
+
+static int mdb_page_flush(MDB_txn *txn, int keep);
+
+/** Spill pages from the dirty list back to disk.
+ * This is intended to prevent running into #MDB_TXN_FULL situations,
+ * but note that they may still occur in a few cases:
+ *	1) our estimate of the txn size could be too small. Currently this
+ *	 seems unlikely, except with a large number of #MDB_MULTIPLE items.
+ *	2) child txns may run out of space if their parents dirtied a
+ *	 lot of pages and never spilled them. TODO: we probably should do
+ *	 a preemptive spill during #mdb_txn_begin() of a child txn, if
+ *	 the parent's dirty_room is below a given threshold.
+ *
+ * Otherwise, if not using nested txns, it is expected that apps will
+ * not run into #MDB_TXN_FULL any more. The pages are flushed to disk
+ * the same way as for a txn commit, e.g. their P_DIRTY flag is cleared.
+ * If the txn never references them again, they can be left alone.
+ * If the txn only reads them, they can be used without any fuss.
+ * If the txn writes them again, they can be dirtied immediately without
+ * going thru all of the work of #mdb_page_touch(). Such references are
+ * handled by #mdb_page_unspill().
+ *
+ * Also note, we never spill DB root pages, nor pages of active cursors,
+ * because we'll need these back again soon anyway. And in nested txns,
+ * we can't spill a page in a child txn if it was already spilled in a
+ * parent txn. That would alter the parent txns' data even though
+ * the child hasn't committed yet, and we'd have no way to undo it if
+ * the child aborted.
+ *
+ * @param[in] m0 cursor A cursor handle identifying the transaction and
+ *	database for which we are checking space.
+ * @param[in] key For a put operation, the key being stored.
+ * @param[in] data For a put operation, the data being stored.
+ * @return 0 on success, non-zero on failure.
+ */ +static int +mdb_page_spill(MDB_cursor *m0, MDB_val *key, MDB_val *data) +{ + MDB_txn *txn = m0->mc_txn; + MDB_page *dp; + MDB_ID2L dl = txn->mt_u.dirty_list; + unsigned int i, j, need; + int rc; + + if (m0->mc_flags & C_SUB) + return MDB_SUCCESS; + + /* Estimate how much space this op will take */ + i = m0->mc_db->md_depth; + /* Named DBs also dirty the main DB */ + if (m0->mc_dbi >= CORE_DBS) + i += txn->mt_dbs[MAIN_DBI].md_depth; + /* For puts, roughly factor in the key+data size */ + if (key) + i += (LEAFSIZE(key, data) + txn->mt_env->me_psize) / txn->mt_env->me_psize; + i += i; /* double it for good measure */ + need = i; + + if (txn->mt_dirty_room > i) + return MDB_SUCCESS; + + if (!txn->mt_spill_pgs) { + txn->mt_spill_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX); + if (!txn->mt_spill_pgs) + return ENOMEM; + } else { + /* purge deleted slots */ + MDB_IDL sl = txn->mt_spill_pgs; + unsigned int num = sl[0]; + j=0; + for (i=1; i<=num; i++) { + if (!(sl[i] & 1)) + sl[++j] = sl[i]; + } + sl[0] = j; + } + + /* Preserve pages which may soon be dirtied again */ + if ((rc = mdb_pages_xkeep(m0, P_DIRTY, 1)) != MDB_SUCCESS) + goto done; + + /* Less aggressive spill - we originally spilled the entire dirty list, + * with a few exceptions for cursor pages and DB root pages. But this + * turns out to be a lot of wasted effort because in a large txn many + * of those pages will need to be used again. So now we spill only 1/8th + * of the dirty pages. Testing revealed this to be a good tradeoff, + * better than 1/2, 1/4, or 1/10. + */ + if (need < MDB_IDL_UM_MAX / 8) + need = MDB_IDL_UM_MAX / 8; + + /* Save the page IDs of all the pages we're flushing */ + /* flush from the tail forward, this saves a lot of shifting later on. */ + for (i=dl[0].mid; i && need; i--) { + MDB_ID pn = dl[i].mid << 1; + dp = dl[i].mptr; + if (dp->mp_flags & (P_LOOSE|P_KEEP)) + continue; + /* Can't spill twice, make sure it's not already in a parent's + * spill list. + */ + if (txn->mt_parent) { + MDB_txn *tx2; + for (tx2 = txn->mt_parent; tx2; tx2 = tx2->mt_parent) { + if (tx2->mt_spill_pgs) { + j = mdb_midl_search(tx2->mt_spill_pgs, pn); + if (j <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[j] == pn) { + dp->mp_flags |= P_KEEP; + break; + } + } + } + if (tx2) + continue; + } + if ((rc = mdb_midl_append(&txn->mt_spill_pgs, pn))) + goto done; + need--; + } + mdb_midl_sort(txn->mt_spill_pgs); + + /* Flush the spilled part of dirty list */ + if ((rc = mdb_page_flush(txn, i)) != MDB_SUCCESS) + goto done; + + /* Reset any dirty pages we kept that page_flush didn't see */ + rc = mdb_pages_xkeep(m0, P_DIRTY|P_KEEP, i); + +done: + txn->mt_flags |= rc ? MDB_TXN_ERROR : MDB_TXN_SPILLS; + return rc; +} + +/** Find oldest txnid still referenced. Expects txn->mt_txnid > 0. 
*/ +static txnid_t +mdb_find_oldest(MDB_txn *txn) +{ + int i; + txnid_t mr, oldest = txn->mt_txnid - 1; + if (txn->mt_env->me_txns) { + MDB_reader *r = txn->mt_env->me_txns->mti_readers; + for (i = txn->mt_env->me_txns->mti_numreaders; --i >= 0; ) { + if (r[i].mr_pid) { + mr = r[i].mr_txnid; + if (oldest > mr) + oldest = mr; + } + } + } + return oldest; +} + +/** Add a page to the txn's dirty list */ +static void +mdb_page_dirty(MDB_txn *txn, MDB_page *mp) +{ + MDB_ID2 mid; + int rc, (*insert)(MDB_ID2L, MDB_ID2 *); + + if (txn->mt_flags & MDB_TXN_WRITEMAP) { + insert = mdb_mid2l_append; + } else { + insert = mdb_mid2l_insert; + } + mid.mid = mp->mp_pgno; + mid.mptr = mp; + rc = insert(txn->mt_u.dirty_list, &mid); + mdb_tassert(txn, rc == 0); + txn->mt_dirty_room--; +} + +/** Allocate page numbers and memory for writing. Maintain me_pglast, + * me_pghead and mt_next_pgno. + * + * If there are free pages available from older transactions, they + * are re-used first. Otherwise allocate a new page at mt_next_pgno. + * Do not modify the freedB, just merge freeDB records into me_pghead[] + * and move me_pglast to say which records were consumed. Only this + * function can create me_pghead and move me_pglast/mt_next_pgno. + * @param[in] mc cursor A cursor handle identifying the transaction and + * database for which we are allocating. + * @param[in] num the number of pages to allocate. + * @param[out] mp Address of the allocated page(s). Requests for multiple pages + * will always be satisfied by a single contiguous chunk of memory. + * @return 0 on success, non-zero on failure. + */ +static int +mdb_page_alloc(MDB_cursor *mc, int num, MDB_page **mp) +{ +#ifdef MDB_PARANOID /* Seems like we can ignore this now */ + /* Get at most more freeDB records once me_pghead + * has enough pages. If not enough, use new pages from the map. + * If and mc is updating the freeDB, only get new + * records if me_pghead is empty. Then the freelist cannot play + * catch-up with itself by growing while trying to save it. + */ + enum { Paranoid = 1, Max_retries = 500 }; +#else + enum { Paranoid = 0, Max_retries = INT_MAX /*infinite*/ }; +#endif + int rc, retry = num * 60; + MDB_txn *txn = mc->mc_txn; + MDB_env *env = txn->mt_env; + pgno_t pgno, *mop = env->me_pghead; + unsigned i, j, mop_len = mop ? mop[0] : 0, n2 = num-1; + MDB_page *np; + txnid_t oldest = 0, last; + MDB_cursor_op op; + MDB_cursor m2; + int found_old = 0; + + /* If there are any loose pages, just use them */ + if (num == 1 && txn->mt_loose_pgs) { + np = txn->mt_loose_pgs; + txn->mt_loose_pgs = NEXT_LOOSE_PAGE(np); + txn->mt_loose_count--; + DPRINTF(("db %d use loose page %"Z"u", DDBI(mc), + np->mp_pgno)); + *mp = np; + return MDB_SUCCESS; + } + + *mp = NULL; + + /* If our dirty list is already full, we can't do anything */ + if (txn->mt_dirty_room == 0) { + rc = MDB_TXN_FULL; + goto fail; + } + + for (op = MDB_FIRST;; op = MDB_NEXT) { + MDB_val key, data; + MDB_node *leaf; + pgno_t *idl; + + /* Seek a big enough contiguous page range. Prefer + * pages at the tail, just truncating the list. 
+ */ + if (mop_len > n2) { + i = mop_len; + do { + pgno = mop[i]; + if (mop[i-n2] == pgno+n2) + goto search_done; + } while (--i > n2); + if (--retry < 0) + break; + } + + if (op == MDB_FIRST) { /* 1st iteration */ + /* Prepare to fetch more and coalesce */ + last = env->me_pglast; + oldest = env->me_pgoldest; + mdb_cursor_init(&m2, txn, FREE_DBI, NULL); + if (last) { + op = MDB_SET_RANGE; + key.mv_data = &last; /* will look up last+1 */ + key.mv_size = sizeof(last); + } + if (Paranoid && mc->mc_dbi == FREE_DBI) + retry = -1; + } + if (Paranoid && retry < 0 && mop_len) + break; + + last++; + /* Do not fetch more if the record will be too recent */ + if (oldest <= last) { + if (!found_old) { + oldest = mdb_find_oldest(txn); + env->me_pgoldest = oldest; + found_old = 1; + } + if (oldest <= last) + break; + } + rc = mdb_cursor_get(&m2, &key, NULL, op); + if (rc) { + if (rc == MDB_NOTFOUND) + break; + goto fail; + } + last = *(txnid_t*)key.mv_data; + if (oldest <= last) { + if (!found_old) { + oldest = mdb_find_oldest(txn); + env->me_pgoldest = oldest; + found_old = 1; + } + if (oldest <= last) + break; + } + np = m2.mc_pg[m2.mc_top]; + leaf = NODEPTR(np, m2.mc_ki[m2.mc_top]); + if ((rc = mdb_node_read(txn, leaf, &data)) != MDB_SUCCESS) + return rc; + + idl = (MDB_ID *) data.mv_data; + i = idl[0]; + if (!mop) { + if (!(env->me_pghead = mop = mdb_midl_alloc(i))) { + rc = ENOMEM; + goto fail; + } + } else { + if ((rc = mdb_midl_need(&env->me_pghead, i)) != 0) + goto fail; + mop = env->me_pghead; + } + env->me_pglast = last; +#if (MDB_DEBUG) > 1 + DPRINTF(("IDL read txn %"Z"u root %"Z"u num %u", + last, txn->mt_dbs[FREE_DBI].md_root, i)); + for (j = i; j; j--) + DPRINTF(("IDL %"Z"u", idl[j])); +#endif + /* Merge in descending sorted order */ + mdb_midl_xmerge(mop, idl); + mop_len = mop[0]; + } + + /* Use new pages from the map when nothing suitable in the freeDB */ + i = 0; + pgno = txn->mt_next_pgno; + if (pgno + num >= env->me_maxpg) { + DPUTS("DB size maxed out"); + rc = MDB_MAP_FULL; + goto fail; + } + +search_done: + if (env->me_flags & MDB_WRITEMAP) { + np = (MDB_page *)(env->me_map + env->me_psize * pgno); + } else { + if (!(np = mdb_page_malloc(txn, num))) { + rc = ENOMEM; + goto fail; + } + } + if (i) { + mop[0] = mop_len -= num; + /* Move any stragglers down */ + for (j = i-num; j < mop_len; ) + mop[++j] = mop[++i]; + } else { + txn->mt_next_pgno = pgno + num; + } + np->mp_pgno = pgno; + mdb_page_dirty(txn, np); + *mp = np; + + return MDB_SUCCESS; + +fail: + txn->mt_flags |= MDB_TXN_ERROR; + return rc; +} + +/** Copy the used portions of a non-overflow page. + * @param[in] dst page to copy into + * @param[in] src page to copy from + * @param[in] psize size of a page + */ +static void +mdb_page_copy(MDB_page *dst, MDB_page *src, unsigned int psize) +{ + enum { Align = sizeof(pgno_t) }; + indx_t upper = src->mp_upper, lower = src->mp_lower, unused = upper-lower; + + /* If page isn't full, just copy the used portion. Adjust + * alignment so memcpy may copy words instead of bytes. + */ + if ((unused &= -Align) && !IS_LEAF2(src)) { + upper = (upper + PAGEBASE) & -Align; + memcpy(dst, src, (lower + PAGEBASE + (Align-1)) & -Align); + memcpy((pgno_t *)((char *)dst+upper), (pgno_t *)((char *)src+upper), + psize - upper); + } else { + memcpy(dst, src, psize - unused); + } +} + +/** Pull a page off the txn's spill list, if present. + * If a page being referenced was spilled to disk in this txn, bring + * it back and make it dirty/writable again. + * @param[in] txn the transaction handle. 
+ * @param[in] mp the page being referenced. It must not be dirty. + * @param[out] ret the writable page, if any. ret is unchanged if + * mp wasn't spilled. + */ +static int +mdb_page_unspill(MDB_txn *txn, MDB_page *mp, MDB_page **ret) +{ + MDB_env *env = txn->mt_env; + const MDB_txn *tx2; + unsigned x; + pgno_t pgno = mp->mp_pgno, pn = pgno << 1; + + for (tx2 = txn; tx2; tx2=tx2->mt_parent) { + if (!tx2->mt_spill_pgs) + continue; + x = mdb_midl_search(tx2->mt_spill_pgs, pn); + if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) { + MDB_page *np; + int num; + if (txn->mt_dirty_room == 0) + return MDB_TXN_FULL; + if (IS_OVERFLOW(mp)) + num = mp->mp_pages; + else + num = 1; + if (env->me_flags & MDB_WRITEMAP) { + np = mp; + } else { + np = mdb_page_malloc(txn, num); + if (!np) + return ENOMEM; + if (num > 1) + memcpy(np, mp, num * env->me_psize); + else + mdb_page_copy(np, mp, env->me_psize); + } + if (tx2 == txn) { + /* If in current txn, this page is no longer spilled. + * If it happens to be the last page, truncate the spill list. + * Otherwise mark it as deleted by setting the LSB. + */ + if (x == txn->mt_spill_pgs[0]) + txn->mt_spill_pgs[0]--; + else + txn->mt_spill_pgs[x] |= 1; + } /* otherwise, if belonging to a parent txn, the + * page remains spilled until child commits + */ + + mdb_page_dirty(txn, np); + np->mp_flags |= P_DIRTY; + *ret = np; + break; + } + } + return MDB_SUCCESS; +} + +/** Touch a page: make it dirty and re-insert into tree with updated pgno. + * @param[in] mc cursor pointing to the page to be touched + * @return 0 on success, non-zero on failure. + */ +static int +mdb_page_touch(MDB_cursor *mc) +{ + MDB_page *mp = mc->mc_pg[mc->mc_top], *np; + MDB_txn *txn = mc->mc_txn; + MDB_cursor *m2, *m3; + pgno_t pgno; + int rc; + + if (!F_ISSET(mp->mp_flags, P_DIRTY)) { + if (txn->mt_flags & MDB_TXN_SPILLS) { + np = NULL; + rc = mdb_page_unspill(txn, mp, &np); + if (rc) + goto fail; + if (np) + goto done; + } + if ((rc = mdb_midl_need(&txn->mt_free_pgs, 1)) || + (rc = mdb_page_alloc(mc, 1, &np))) + goto fail; + pgno = np->mp_pgno; + DPRINTF(("touched db %d page %"Z"u -> %"Z"u", DDBI(mc), + mp->mp_pgno, pgno)); + mdb_cassert(mc, mp->mp_pgno != pgno); + mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno); + /* Update the parent page, if any, to point to the new page */ + if (mc->mc_top) { + MDB_page *parent = mc->mc_pg[mc->mc_top-1]; + MDB_node *node = NODEPTR(parent, mc->mc_ki[mc->mc_top-1]); + SETPGNO(node, pgno); + } else { + mc->mc_db->md_root = pgno; + } + } else if (txn->mt_parent && !IS_SUBP(mp)) { + MDB_ID2 mid, *dl = txn->mt_u.dirty_list; + pgno = mp->mp_pgno; + /* If txn has a parent, make sure the page is in our + * dirty list. + */ + if (dl[0].mid) { + unsigned x = mdb_mid2l_search(dl, pgno); + if (x <= dl[0].mid && dl[x].mid == pgno) { + if (mp != dl[x].mptr) { /* bad cursor? 
*/ + mc->mc_flags &= ~(C_INITIALIZED|C_EOF); + txn->mt_flags |= MDB_TXN_ERROR; + return MDB_CORRUPTED; + } + return 0; + } + } + mdb_cassert(mc, dl[0].mid < MDB_IDL_UM_MAX); + /* No - copy it */ + np = mdb_page_malloc(txn, 1); + if (!np) + return ENOMEM; + mid.mid = pgno; + mid.mptr = np; + rc = mdb_mid2l_insert(dl, &mid); + mdb_cassert(mc, rc == 0); + } else { + return 0; + } + + mdb_page_copy(np, mp, txn->mt_env->me_psize); + np->mp_pgno = pgno; + np->mp_flags |= P_DIRTY; + +done: + /* Adjust cursors pointing to mp */ + mc->mc_pg[mc->mc_top] = np; + m2 = txn->mt_cursors[mc->mc_dbi]; + if (mc->mc_flags & C_SUB) { + for (; m2; m2=m2->mc_next) { + m3 = &m2->mc_xcursor->mx_cursor; + if (m3->mc_snum < mc->mc_snum) continue; + if (m3->mc_pg[mc->mc_top] == mp) + m3->mc_pg[mc->mc_top] = np; + } + } else { + for (; m2; m2=m2->mc_next) { + if (m2->mc_snum < mc->mc_snum) continue; + if (m2 == mc) continue; + if (m2->mc_pg[mc->mc_top] == mp) { + m2->mc_pg[mc->mc_top] = np; + if ((mc->mc_db->md_flags & MDB_DUPSORT) && + IS_LEAF(np) && + (m2->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) + { + MDB_node *leaf = NODEPTR(np, m2->mc_ki[mc->mc_top]); + if ((leaf->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) + m2->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); + } + } + } + } + return 0; + +fail: + txn->mt_flags |= MDB_TXN_ERROR; + return rc; +} + +int +mdb_env_sync(MDB_env *env, int force) +{ + int rc = 0; + if (env->me_flags & MDB_RDONLY) + return EACCES; + if (force || !F_ISSET(env->me_flags, MDB_NOSYNC)) { + if (env->me_flags & MDB_WRITEMAP) { + int flags = ((env->me_flags & MDB_MAPASYNC) && !force) + ? MS_ASYNC : MS_SYNC; + if (MDB_MSYNC(env->me_map, env->me_mapsize, flags)) + rc = ErrCode(); +#ifdef _WIN32 + else if (flags == MS_SYNC && MDB_FDATASYNC(env->me_fd)) + rc = ErrCode(); +#endif + } else { +#ifdef BROKEN_FDATASYNC + if (env->me_flags & MDB_FSYNCONLY) { + if (fsync(env->me_fd)) + rc = ErrCode(); + } else +#endif + if (MDB_FDATASYNC(env->me_fd)) + rc = ErrCode(); + } + } + return rc; +} + +/** Back up parent txn's cursors, then grab the originals for tracking */ +static int +mdb_cursor_shadow(MDB_txn *src, MDB_txn *dst) +{ + MDB_cursor *mc, *bk; + MDB_xcursor *mx; + size_t size; + int i; + + for (i = src->mt_numdbs; --i >= 0; ) { + if ((mc = src->mt_cursors[i]) != NULL) { + size = sizeof(MDB_cursor); + if (mc->mc_xcursor) + size += sizeof(MDB_xcursor); + for (; mc; mc = bk->mc_next) { + bk = malloc(size); + if (!bk) + return ENOMEM; + *bk = *mc; + mc->mc_backup = bk; + mc->mc_db = &dst->mt_dbs[i]; + /* Kill pointers into src to reduce abuse: The + * user may not use mc until dst ends. But we need a valid + * txn pointer here for cursor fixups to keep working. + */ + mc->mc_txn = dst; + mc->mc_dbflag = &dst->mt_dbflags[i]; + if ((mx = mc->mc_xcursor) != NULL) { + *(MDB_xcursor *)(bk+1) = *mx; + mx->mx_cursor.mc_txn = dst; + } + mc->mc_next = dst->mt_cursors[i]; + dst->mt_cursors[i] = mc; + } + } + } + return MDB_SUCCESS; +} + +/** Close this write txn's cursors, give parent txn's cursors back to parent. + * @param[in] txn the transaction handle. + * @param[in] merge true to keep changes to parent cursors, false to revert. + * @return 0 on success, non-zero on failure. 
+ */ +static void +mdb_cursors_close(MDB_txn *txn, unsigned merge) +{ + MDB_cursor **cursors = txn->mt_cursors, *mc, *next, *bk; + MDB_xcursor *mx; + int i; + + for (i = txn->mt_numdbs; --i >= 0; ) { + for (mc = cursors[i]; mc; mc = next) { + next = mc->mc_next; + if ((bk = mc->mc_backup) != NULL) { + if (merge) { + /* Commit changes to parent txn */ + mc->mc_next = bk->mc_next; + mc->mc_backup = bk->mc_backup; + mc->mc_txn = bk->mc_txn; + mc->mc_db = bk->mc_db; + mc->mc_dbflag = bk->mc_dbflag; + if ((mx = mc->mc_xcursor) != NULL) + mx->mx_cursor.mc_txn = bk->mc_txn; + } else { + /* Abort nested txn */ + *mc = *bk; + if ((mx = mc->mc_xcursor) != NULL) + *mx = *(MDB_xcursor *)(bk+1); + } + mc = bk; + } + /* Only malloced cursors are permanently tracked. */ + free(mc); + } + cursors[i] = NULL; + } +} + +#if !(MDB_PIDLOCK) /* Currently the same as defined(_WIN32) */ +enum Pidlock_op { + Pidset, Pidcheck +}; +#else +enum Pidlock_op { + Pidset = F_SETLK, Pidcheck = F_GETLK +}; +#endif + +/** Set or check a pid lock. Set returns 0 on success. + * Check returns 0 if the process is certainly dead, nonzero if it may + * be alive (the lock exists or an error happened so we do not know). + * + * On Windows Pidset is a no-op, we merely check for the existence + * of the process with the given pid. On POSIX we use a single byte + * lock on the lockfile, set at an offset equal to the pid. + */ +static int +mdb_reader_pid(MDB_env *env, enum Pidlock_op op, MDB_PID_T pid) +{ +#if !(MDB_PIDLOCK) /* Currently the same as defined(_WIN32) */ + int ret = 0; + HANDLE h; + if (op == Pidcheck) { + h = OpenProcess(env->me_pidquery, FALSE, pid); + /* No documented "no such process" code, but other program use this: */ + if (!h) + return ErrCode() != ERROR_INVALID_PARAMETER; + /* A process exists until all handles to it close. Has it exited? */ + ret = WaitForSingleObject(h, 0) != 0; + CloseHandle(h); + } + return ret; +#else + for (;;) { + int rc; + struct flock lock_info; + memset(&lock_info, 0, sizeof(lock_info)); + lock_info.l_type = F_WRLCK; + lock_info.l_whence = SEEK_SET; + lock_info.l_start = pid; + lock_info.l_len = 1; + if ((rc = fcntl(env->me_lfd, op, &lock_info)) == 0) { + if (op == F_GETLK && lock_info.l_type != F_UNLCK) + rc = -1; + } else if ((rc = ErrCode()) == EINTR) { + continue; + } + return rc; + } +#endif +} + +/** Common code for #mdb_txn_begin() and #mdb_txn_renew(). + * @param[in] txn the transaction handle to initialize + * @return 0 on success, non-zero on failure. + */ +static int +mdb_txn_renew0(MDB_txn *txn) +{ + MDB_env *env = txn->mt_env; + MDB_txninfo *ti = env->me_txns; + MDB_meta *meta; + unsigned int i, nr, flags = txn->mt_flags; + uint16_t x; + int rc, new_notls = 0; + + if ((flags &= MDB_TXN_RDONLY) != 0) { + if (!ti) { + meta = mdb_env_pick_meta(env); + txn->mt_txnid = meta->mm_txnid; + txn->mt_u.reader = NULL; + } else { + MDB_reader *r = (env->me_flags & MDB_NOTLS) ? 
txn->mt_u.reader :
+				pthread_getspecific(env->me_txkey);
+			if (r) {
+				if (r->mr_pid != env->me_pid || r->mr_txnid != (txnid_t)-1)
+					return MDB_BAD_RSLOT;
+			} else {
+				MDB_PID_T pid = env->me_pid;
+				MDB_THR_T tid = pthread_self();
+				mdb_mutexref_t rmutex = env->me_rmutex;
+
+				if (!env->me_live_reader) {
+					rc = mdb_reader_pid(env, Pidset, pid);
+					if (rc)
+						return rc;
+					env->me_live_reader = 1;
+				}
+
+				if (LOCK_MUTEX(rc, env, rmutex))
+					return rc;
+				nr = ti->mti_numreaders;
+				for (i=0; i<nr; i++)
+					if (ti->mti_readers[i].mr_pid == 0)
+						break;
+				if (i == env->me_maxreaders) {
+					UNLOCK_MUTEX(rmutex);
+					return MDB_READERS_FULL;
+				}
+				r = &ti->mti_readers[i];
+				/* Claim the reader slot, carefully since other code
+				 * uses the reader table un-mutexed: First reset the
+				 * slot, next publish it in mti_numreaders. After
+				 * that, it is safe for mdb_env_close() to touch it.
+				 * When it will be closed, we can finally claim it.
+				 */
+				r->mr_pid = 0;
+				r->mr_txnid = (txnid_t)-1;
+				r->mr_tid = tid;
+				if (i == nr)
+					ti->mti_numreaders = ++nr;
+				env->me_close_readers = nr;
+				r->mr_pid = pid;
+				UNLOCK_MUTEX(rmutex);
+
+				new_notls = (env->me_flags & MDB_NOTLS);
+				if (!new_notls && (rc=pthread_setspecific(env->me_txkey, r))) {
+					r->mr_pid = 0;
+					return rc;
+				}
+			}
+			do /* LY: Retry on a race, ITS#7970. */
+				r->mr_txnid = ti->mti_txnid;
+			while(r->mr_txnid != ti->mti_txnid);
+			txn->mt_txnid = r->mr_txnid;
+			txn->mt_u.reader = r;
+			meta = env->me_metas[txn->mt_txnid & 1];
+		}
+
+	} else {
+		/* Not yet touching txn == env->me_txn0, it may be active */
+		if (ti) {
+			if (LOCK_MUTEX(rc, env, env->me_wmutex))
+				return rc;
+			txn->mt_txnid = ti->mti_txnid;
+			meta = env->me_metas[txn->mt_txnid & 1];
+		} else {
+			meta = mdb_env_pick_meta(env);
+			txn->mt_txnid = meta->mm_txnid;
+		}
+		txn->mt_txnid++;
+#if MDB_DEBUG
+		if (txn->mt_txnid == mdb_debug_start)
+			mdb_debug = 1;
+#endif
+		txn->mt_child = NULL;
+		txn->mt_loose_pgs = NULL;
+		txn->mt_loose_count = 0;
+		txn->mt_dirty_room = MDB_IDL_UM_MAX;
+		txn->mt_u.dirty_list = env->me_dirty_list;
+		txn->mt_u.dirty_list[0].mid = 0;
+		txn->mt_free_pgs = env->me_free_pgs;
+		txn->mt_free_pgs[0] = 0;
+		txn->mt_spill_pgs = NULL;
+		env->me_txn = txn;
+		memcpy(txn->mt_dbiseqs, env->me_dbiseqs, env->me_maxdbs * sizeof(unsigned int));
+	}
+
+	/* Copy the DB info and flags */
+	memcpy(txn->mt_dbs, meta->mm_dbs, CORE_DBS * sizeof(MDB_db));
+
+	/* Moved to here to avoid a data race in read TXNs */
+	txn->mt_next_pgno = meta->mm_last_pg+1;
+
+	txn->mt_flags = flags;
+
+	/* Setup db info */
+	txn->mt_numdbs = env->me_numdbs;
+	for (i=CORE_DBS; i<txn->mt_numdbs; i++) {
+		x = env->me_dbflags[i];
+		txn->mt_dbs[i].md_flags = x & PERSISTENT_FLAGS;
+		txn->mt_dbflags[i] = (x & MDB_VALID) ? DB_VALID|DB_USRVALID|DB_STALE : 0;
+	}
+	txn->mt_dbflags[MAIN_DBI] = DB_VALID|DB_USRVALID;
+	txn->mt_dbflags[FREE_DBI] = DB_VALID;
+
+	if (env->me_flags & MDB_FATAL_ERROR) {
+		DPUTS("environment had fatal error, must shutdown!");
+		rc = MDB_PANIC;
+	} else if (env->me_maxpg < txn->mt_next_pgno) {
+		rc = MDB_MAP_RESIZED;
+	} else {
+		return MDB_SUCCESS;
+	}
+	mdb_txn_end(txn, new_notls /*0 or MDB_END_SLOT*/ | MDB_END_FAIL_BEGIN);
+	return rc;
+}
+
+int
+mdb_txn_renew(MDB_txn *txn)
+{
+	int rc;
+
+	if (!txn || !F_ISSET(txn->mt_flags, MDB_TXN_RDONLY|MDB_TXN_FINISHED))
+		return EINVAL;
+
+	rc = mdb_txn_renew0(txn);
+	if (rc == MDB_SUCCESS) {
+		DPRINTF(("renew txn %"Z"u%c %p on mdbenv %p, root page %"Z"u",
+			txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ?
'r' : 'w', + (void *)txn, (void *)txn->mt_env, txn->mt_dbs[MAIN_DBI].md_root)); + } + return rc; +} + +int +mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **ret) +{ + MDB_txn *txn; + MDB_ntxn *ntxn; + int rc, size, tsize; + + flags &= MDB_TXN_BEGIN_FLAGS; + flags |= env->me_flags & MDB_WRITEMAP; + + if (env->me_flags & MDB_RDONLY & ~flags) /* write txn in RDONLY env */ + return EACCES; + + if (parent) { + /* Nested transactions: Max 1 child, write txns only, no writemap */ + flags |= parent->mt_flags; + if (flags & (MDB_RDONLY|MDB_WRITEMAP|MDB_TXN_BLOCKED)) { + return (parent->mt_flags & MDB_TXN_RDONLY) ? EINVAL : MDB_BAD_TXN; + } + /* Child txns save MDB_pgstate and use own copy of cursors */ + size = env->me_maxdbs * (sizeof(MDB_db)+sizeof(MDB_cursor *)+1); + size += tsize = sizeof(MDB_ntxn); + } else if (flags & MDB_RDONLY) { + size = env->me_maxdbs * (sizeof(MDB_db)+1); + size += tsize = sizeof(MDB_txn); + } else { + /* Reuse preallocated write txn. However, do not touch it until + * mdb_txn_renew0() succeeds, since it currently may be active. + */ + txn = env->me_txn0; + goto renew; + } + if ((txn = calloc(1, size)) == NULL) { + DPRINTF(("calloc: %s", strerror(errno))); + return ENOMEM; + } + txn->mt_dbxs = env->me_dbxs; /* static */ + txn->mt_dbs = (MDB_db *) ((char *)txn + tsize); + txn->mt_dbflags = (unsigned char *)txn + size - env->me_maxdbs; + txn->mt_flags = flags; + txn->mt_env = env; + + if (parent) { + unsigned int i; + txn->mt_cursors = (MDB_cursor **)(txn->mt_dbs + env->me_maxdbs); + txn->mt_dbiseqs = parent->mt_dbiseqs; + txn->mt_u.dirty_list = malloc(sizeof(MDB_ID2)*MDB_IDL_UM_SIZE); + if (!txn->mt_u.dirty_list || + !(txn->mt_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX))) + { + free(txn->mt_u.dirty_list); + free(txn); + return ENOMEM; + } + txn->mt_txnid = parent->mt_txnid; + txn->mt_dirty_room = parent->mt_dirty_room; + txn->mt_u.dirty_list[0].mid = 0; + txn->mt_spill_pgs = NULL; + txn->mt_next_pgno = parent->mt_next_pgno; + parent->mt_flags |= MDB_TXN_HAS_CHILD; + parent->mt_child = txn; + txn->mt_parent = parent; + txn->mt_numdbs = parent->mt_numdbs; + memcpy(txn->mt_dbs, parent->mt_dbs, txn->mt_numdbs * sizeof(MDB_db)); + /* Copy parent's mt_dbflags, but clear DB_NEW */ + for (i=0; imt_numdbs; i++) + txn->mt_dbflags[i] = parent->mt_dbflags[i] & ~DB_NEW; + rc = 0; + ntxn = (MDB_ntxn *)txn; + ntxn->mnt_pgstate = env->me_pgstate; /* save parent me_pghead & co */ + if (env->me_pghead) { + size = MDB_IDL_SIZEOF(env->me_pghead); + env->me_pghead = mdb_midl_alloc(env->me_pghead[0]); + if (env->me_pghead) + memcpy(env->me_pghead, ntxn->mnt_pgstate.mf_pghead, size); + else + rc = ENOMEM; + } + if (!rc) + rc = mdb_cursor_shadow(parent, txn); + if (rc) + mdb_txn_end(txn, MDB_END_FAIL_BEGINCHILD); + } else { /* MDB_RDONLY */ + txn->mt_dbiseqs = env->me_dbiseqs; +renew: + rc = mdb_txn_renew0(txn); + } + if (rc) { + if (txn != env->me_txn0) + free(txn); + } else { + txn->mt_flags |= flags; /* could not change txn=me_txn0 earlier */ + *ret = txn; + DPRINTF(("begin txn %"Z"u%c %p on mdbenv %p, root page %"Z"u", + txn->mt_txnid, (flags & MDB_RDONLY) ? 'r' : 'w', + (void *) txn, (void *) env, txn->mt_dbs[MAIN_DBI].md_root)); + } + + return rc; +} + +MDB_env * +mdb_txn_env(MDB_txn *txn) +{ + if(!txn) return NULL; + return txn->mt_env; +} + +size_t +mdb_txn_id(MDB_txn *txn) +{ + if(!txn) return 0; + return txn->mt_txnid; +} + +/** Export or close DBI handles opened in this txn. 
*/ +static void +mdb_dbis_update(MDB_txn *txn, int keep) +{ + int i; + MDB_dbi n = txn->mt_numdbs; + MDB_env *env = txn->mt_env; + unsigned char *tdbflags = txn->mt_dbflags; + + for (i = n; --i >= CORE_DBS;) { + if (tdbflags[i] & DB_NEW) { + if (keep) { + env->me_dbflags[i] = txn->mt_dbs[i].md_flags | MDB_VALID; + } else { + char *ptr = env->me_dbxs[i].md_name.mv_data; + if (ptr) { + env->me_dbxs[i].md_name.mv_data = NULL; + env->me_dbxs[i].md_name.mv_size = 0; + env->me_dbflags[i] = 0; + env->me_dbiseqs[i]++; + free(ptr); + } + } + } + } + if (keep && env->me_numdbs < n) + env->me_numdbs = n; +} + +/** End a transaction, except successful commit of a nested transaction. + * May be called twice for readonly txns: First reset it, then abort. + * @param[in] txn the transaction handle to end + * @param[in] mode why and how to end the transaction + */ +static void +mdb_txn_end(MDB_txn *txn, unsigned mode) +{ + MDB_env *env = txn->mt_env; +#if MDB_DEBUG + static const char *const names[] = MDB_END_NAMES; +#endif + + /* Export or close DBI handles opened in this txn */ + mdb_dbis_update(txn, mode & MDB_END_UPDATE); + + DPRINTF(("%s txn %"Z"u%c %p on mdbenv %p, root page %"Z"u", + names[mode & MDB_END_OPMASK], + txn->mt_txnid, (txn->mt_flags & MDB_TXN_RDONLY) ? 'r' : 'w', + (void *) txn, (void *)env, txn->mt_dbs[MAIN_DBI].md_root)); + + if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) { + if (txn->mt_u.reader) { + txn->mt_u.reader->mr_txnid = (txnid_t)-1; + if (!(env->me_flags & MDB_NOTLS)) { + txn->mt_u.reader = NULL; /* txn does not own reader */ + } else if (mode & MDB_END_SLOT) { + txn->mt_u.reader->mr_pid = 0; + txn->mt_u.reader = NULL; + } /* else txn owns the slot until it does MDB_END_SLOT */ + } + txn->mt_numdbs = 0; /* prevent further DBI activity */ + txn->mt_flags |= MDB_TXN_FINISHED; + + } else if (!F_ISSET(txn->mt_flags, MDB_TXN_FINISHED)) { + pgno_t *pghead = env->me_pghead; + + if (!(mode & MDB_END_UPDATE)) /* !(already closed cursors) */ + mdb_cursors_close(txn, 0); + if (!(env->me_flags & MDB_WRITEMAP)) { + mdb_dlist_free(txn); + } + + txn->mt_numdbs = 0; + txn->mt_flags = MDB_TXN_FINISHED; + + if (!txn->mt_parent) { + mdb_midl_shrink(&txn->mt_free_pgs); + env->me_free_pgs = txn->mt_free_pgs; + /* me_pgstate: */ + env->me_pghead = NULL; + env->me_pglast = 0; + + env->me_txn = NULL; + mode = 0; /* txn == env->me_txn0, do not free() it */ + + /* The writer mutex was locked in mdb_txn_begin. */ + if (env->me_txns) + UNLOCK_MUTEX(env->me_wmutex); + } else { + txn->mt_parent->mt_child = NULL; + txn->mt_parent->mt_flags &= ~MDB_TXN_HAS_CHILD; + env->me_pgstate = ((MDB_ntxn *)txn)->mnt_pgstate; + mdb_midl_free(txn->mt_free_pgs); + mdb_midl_free(txn->mt_spill_pgs); + free(txn->mt_u.dirty_list); + } + + mdb_midl_free(pghead); + } + + if (mode & MDB_END_FREE) + free(txn); +} + +void +mdb_txn_reset(MDB_txn *txn) +{ + if (txn == NULL) + return; + + /* This call is only valid for read-only txns */ + if (!(txn->mt_flags & MDB_TXN_RDONLY)) + return; + + mdb_txn_end(txn, MDB_END_RESET); +} + +void +mdb_txn_abort(MDB_txn *txn) +{ + if (txn == NULL) + return; + + if (txn->mt_child) + mdb_txn_abort(txn->mt_child); + + mdb_txn_end(txn, MDB_END_ABORT|MDB_END_SLOT|MDB_END_FREE); +} + +/** Save the freelist as of this transaction to the freeDB. + * This changes the freelist. Keep trying until it stabilizes. + */ +static int +mdb_freelist_save(MDB_txn *txn) +{ + /* env->me_pghead[] can grow and shrink during this call. + * env->me_pglast and txn->mt_free_pgs[] can only grow. 
+ * Page numbers cannot disappear from txn->mt_free_pgs[]. + */ + MDB_cursor mc; + MDB_env *env = txn->mt_env; + int rc, maxfree_1pg = env->me_maxfree_1pg, more = 1; + txnid_t pglast = 0, head_id = 0; + pgno_t freecnt = 0, *free_pgs, *mop; + ssize_t head_room = 0, total_room = 0, mop_len, clean_limit; + + mdb_cursor_init(&mc, txn, FREE_DBI, NULL); + + if (env->me_pghead) { + /* Make sure first page of freeDB is touched and on freelist */ + rc = mdb_page_search(&mc, NULL, MDB_PS_FIRST|MDB_PS_MODIFY); + if (rc && rc != MDB_NOTFOUND) + return rc; + } + + if (!env->me_pghead && txn->mt_loose_pgs) { + /* Put loose page numbers in mt_free_pgs, since + * we may be unable to return them to me_pghead. + */ + MDB_page *mp = txn->mt_loose_pgs; + if ((rc = mdb_midl_need(&txn->mt_free_pgs, txn->mt_loose_count)) != 0) + return rc; + for (; mp; mp = NEXT_LOOSE_PAGE(mp)) + mdb_midl_xappend(txn->mt_free_pgs, mp->mp_pgno); + txn->mt_loose_pgs = NULL; + txn->mt_loose_count = 0; + } + + /* MDB_RESERVE cancels meminit in ovpage malloc (when no WRITEMAP) */ + clean_limit = (env->me_flags & (MDB_NOMEMINIT|MDB_WRITEMAP)) + ? SSIZE_MAX : maxfree_1pg; + + for (;;) { + /* Come back here after each Put() in case freelist changed */ + MDB_val key, data; + pgno_t *pgs; + ssize_t j; + + /* If using records from freeDB which we have not yet + * deleted, delete them and any we reserved for me_pghead. + */ + while (pglast < env->me_pglast) { + rc = mdb_cursor_first(&mc, &key, NULL); + if (rc) + return rc; + pglast = head_id = *(txnid_t *)key.mv_data; + total_room = head_room = 0; + mdb_tassert(txn, pglast <= env->me_pglast); + rc = mdb_cursor_del(&mc, 0); + if (rc) + return rc; + } + + /* Save the IDL of pages freed by this txn, to a single record */ + if (freecnt < txn->mt_free_pgs[0]) { + if (!freecnt) { + /* Make sure last page of freeDB is touched and on freelist */ + rc = mdb_page_search(&mc, NULL, MDB_PS_LAST|MDB_PS_MODIFY); + if (rc && rc != MDB_NOTFOUND) + return rc; + } + free_pgs = txn->mt_free_pgs; + /* Write to last page of freeDB */ + key.mv_size = sizeof(txn->mt_txnid); + key.mv_data = &txn->mt_txnid; + do { + freecnt = free_pgs[0]; + data.mv_size = MDB_IDL_SIZEOF(free_pgs); + rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE); + if (rc) + return rc; + /* Retry if mt_free_pgs[] grew during the Put() */ + free_pgs = txn->mt_free_pgs; + } while (freecnt < free_pgs[0]); + mdb_midl_sort(free_pgs); + memcpy(data.mv_data, free_pgs, data.mv_size); +#if (MDB_DEBUG) > 1 + { + unsigned int i = free_pgs[0]; + DPRINTF(("IDL write txn %"Z"u root %"Z"u num %u", + txn->mt_txnid, txn->mt_dbs[FREE_DBI].md_root, i)); + for (; i; i--) + DPRINTF(("IDL %"Z"u", free_pgs[i])); + } +#endif + continue; + } + + mop = env->me_pghead; + mop_len = (mop ? mop[0] : 0) + txn->mt_loose_count; + + /* Reserve records for me_pghead[]. Split it if multi-page, + * to avoid searching freeDB for a page range. Use keys in + * range [1,me_pglast]: Smaller than txnid of oldest reader. 
+ */ + if (total_room >= mop_len) { + if (total_room == mop_len || --more < 0) + break; + } else if (head_room >= maxfree_1pg && head_id > 1) { + /* Keep current record (overflow page), add a new one */ + head_id--; + head_room = 0; + } + /* (Re)write {key = head_id, IDL length = head_room} */ + total_room -= head_room; + head_room = mop_len - total_room; + if (head_room > maxfree_1pg && head_id > 1) { + /* Overflow multi-page for part of me_pghead */ + head_room /= head_id; /* amortize page sizes */ + head_room += maxfree_1pg - head_room % (maxfree_1pg + 1); + } else if (head_room < 0) { + /* Rare case, not bothering to delete this record */ + head_room = 0; + } + key.mv_size = sizeof(head_id); + key.mv_data = &head_id; + data.mv_size = (head_room + 1) * sizeof(pgno_t); + rc = mdb_cursor_put(&mc, &key, &data, MDB_RESERVE); + if (rc) + return rc; + /* IDL is initially empty, zero out at least the length */ + pgs = (pgno_t *)data.mv_data; + j = head_room > clean_limit ? head_room : 0; + do { + pgs[j] = 0; + } while (--j >= 0); + total_room += head_room; + } + + /* Return loose page numbers to me_pghead, though usually none are + * left at this point. The pages themselves remain in dirty_list. + */ + if (txn->mt_loose_pgs) { + MDB_page *mp = txn->mt_loose_pgs; + unsigned count = txn->mt_loose_count; + MDB_IDL loose; + /* Room for loose pages + temp IDL with same */ + if ((rc = mdb_midl_need(&env->me_pghead, 2*count+1)) != 0) + return rc; + mop = env->me_pghead; + loose = mop + MDB_IDL_ALLOCLEN(mop) - count; + for (count = 0; mp; mp = NEXT_LOOSE_PAGE(mp)) + loose[ ++count ] = mp->mp_pgno; + loose[0] = count; + mdb_midl_sort(loose); + mdb_midl_xmerge(mop, loose); + txn->mt_loose_pgs = NULL; + txn->mt_loose_count = 0; + mop_len = mop[0]; + } + + /* Fill in the reserved me_pghead records */ + rc = MDB_SUCCESS; + if (mop_len) { + MDB_val key, data; + + mop += mop_len; + rc = mdb_cursor_first(&mc, &key, &data); + for (; !rc; rc = mdb_cursor_next(&mc, &key, &data, MDB_NEXT)) { + txnid_t id = *(txnid_t *)key.mv_data; + ssize_t len = (ssize_t)(data.mv_size / sizeof(MDB_ID)) - 1; + MDB_ID save; + + mdb_tassert(txn, len >= 0 && id <= env->me_pglast); + key.mv_data = &id; + if (len > mop_len) { + len = mop_len; + data.mv_size = (len + 1) * sizeof(MDB_ID); + } + data.mv_data = mop -= len; + save = mop[0]; + mop[0] = len; + rc = mdb_cursor_put(&mc, &key, &data, MDB_CURRENT); + mop[0] = save; + if (rc || !(mop_len -= len)) + break; + } + } + return rc; +} + +/** Flush (some) dirty pages to the map, after clearing their dirty flag. + * @param[in] txn the transaction that's being committed + * @param[in] keep number of initial pages in dirty_list to keep dirty. + * @return 0 on success, non-zero on failure. 
+ */ +static int +mdb_page_flush(MDB_txn *txn, int keep) +{ + MDB_env *env = txn->mt_env; + MDB_ID2L dl = txn->mt_u.dirty_list; + unsigned psize = env->me_psize, j; + int i, pagecount = dl[0].mid, rc; + size_t size = 0, pos = 0; + pgno_t pgno = 0; + MDB_page *dp = NULL; +#ifdef _WIN32 + OVERLAPPED ov; +#else + struct iovec iov[MDB_COMMIT_PAGES]; + ssize_t wpos = 0, wsize = 0, wres; + size_t next_pos = 1; /* impossible pos, so pos != next_pos */ + int n = 0; +#endif + + j = i = keep; + + if (env->me_flags & MDB_WRITEMAP) { + /* Clear dirty flags */ + while (++i <= pagecount) { + dp = dl[i].mptr; + /* Don't flush this page yet */ + if (dp->mp_flags & (P_LOOSE|P_KEEP)) { + dp->mp_flags &= ~P_KEEP; + dl[++j] = dl[i]; + continue; + } + dp->mp_flags &= ~P_DIRTY; + } + goto done; + } + + /* Write the pages */ + for (;;) { + if (++i <= pagecount) { + dp = dl[i].mptr; + /* Don't flush this page yet */ + if (dp->mp_flags & (P_LOOSE|P_KEEP)) { + dp->mp_flags &= ~P_KEEP; + dl[i].mid = 0; + continue; + } + pgno = dl[i].mid; + /* clear dirty flag */ + dp->mp_flags &= ~P_DIRTY; + pos = pgno * psize; + size = psize; + if (IS_OVERFLOW(dp)) size *= dp->mp_pages; + } +#ifdef _WIN32 + else break; + + /* Windows actually supports scatter/gather I/O, but only on + * unbuffered file handles. Since we're relying on the OS page + * cache for all our data, that's self-defeating. So we just + * write pages one at a time. We use the ov structure to set + * the write offset, to at least save the overhead of a Seek + * system call. + */ + DPRINTF(("committing page %"Z"u", pgno)); + memset(&ov, 0, sizeof(ov)); + ov.Offset = pos & 0xffffffff; + ov.OffsetHigh = pos >> 16 >> 16; + if (!WriteFile(env->me_fd, dp, size, NULL, &ov)) { + rc = ErrCode(); + DPRINTF(("WriteFile: %d", rc)); + return rc; + } +#else + /* Write up to MDB_COMMIT_PAGES dirty pages at a time. */ + if (pos!=next_pos || n==MDB_COMMIT_PAGES || wsize+size>MAX_WRITE) { + if (n) { +retry_write: + /* Write previous page(s) */ +#ifdef MDB_USE_PWRITEV + wres = pwritev(env->me_fd, iov, n, wpos); +#else + if (n == 1) { + wres = pwrite(env->me_fd, iov[0].iov_base, wsize, wpos); + } else { +retry_seek: + if (lseek(env->me_fd, wpos, SEEK_SET) == -1) { + rc = ErrCode(); + if (rc == EINTR) + goto retry_seek; + DPRINTF(("lseek: %s", strerror(rc))); + return rc; + } + wres = writev(env->me_fd, iov, n); + } +#endif + if (wres != wsize) { + if (wres < 0) { + rc = ErrCode(); + if (rc == EINTR) + goto retry_write; + DPRINTF(("Write error: %s", strerror(rc))); + } else { + rc = EIO; /* TODO: Use which error code? */ + DPUTS("short write, filesystem full?"); + } + return rc; + } + n = 0; + } + if (i > pagecount) + break; + wpos = pos; + wsize = 0; + } + DPRINTF(("committing page %"Z"u", pgno)); + next_pos = pos + size; + iov[n].iov_len = size; + iov[n].iov_base = (char *)dp; + wsize += size; + n++; +#endif /* _WIN32 */ + } + + /* MIPS has cache coherency issues, this is a no-op everywhere else + * Note: for any size >= on-chip cache size, entire on-chip cache is + * flushed. 
+ */ + CACHEFLUSH(env->me_map, txn->mt_next_pgno * env->me_psize, DCACHE); + + for (i = keep; ++i <= pagecount; ) { + dp = dl[i].mptr; + /* This is a page we skipped above */ + if (!dl[i].mid) { + dl[++j] = dl[i]; + dl[j].mid = dp->mp_pgno; + continue; + } + mdb_dpage_free(env, dp); + } + +done: + i--; + txn->mt_dirty_room += i - j; + dl[0].mid = j; + return MDB_SUCCESS; +} + +int +mdb_txn_commit(MDB_txn *txn) +{ + int rc; + unsigned int i, end_mode; + MDB_env *env; + + if (txn == NULL) + return EINVAL; + + /* mdb_txn_end() mode for a commit which writes nothing */ + end_mode = MDB_END_EMPTY_COMMIT|MDB_END_UPDATE|MDB_END_SLOT|MDB_END_FREE; + + if (txn->mt_child) { + rc = mdb_txn_commit(txn->mt_child); + if (rc) + goto fail; + } + + env = txn->mt_env; + + if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) { + goto done; + } + + if (txn->mt_flags & (MDB_TXN_FINISHED|MDB_TXN_ERROR)) { + DPUTS("txn has failed/finished, can't commit"); + if (txn->mt_parent) + txn->mt_parent->mt_flags |= MDB_TXN_ERROR; + rc = MDB_BAD_TXN; + goto fail; + } + + if (txn->mt_parent) { + MDB_txn *parent = txn->mt_parent; + MDB_page **lp; + MDB_ID2L dst, src; + MDB_IDL pspill; + unsigned x, y, len, ps_len; + + /* Append our free list to parent's */ + rc = mdb_midl_append_list(&parent->mt_free_pgs, txn->mt_free_pgs); + if (rc) + goto fail; + mdb_midl_free(txn->mt_free_pgs); + /* Failures after this must either undo the changes + * to the parent or set MDB_TXN_ERROR in the parent. + */ + + parent->mt_next_pgno = txn->mt_next_pgno; + parent->mt_flags = txn->mt_flags; + + /* Merge our cursors into parent's and close them */ + mdb_cursors_close(txn, 1); + + /* Update parent's DB table. */ + memcpy(parent->mt_dbs, txn->mt_dbs, txn->mt_numdbs * sizeof(MDB_db)); + parent->mt_numdbs = txn->mt_numdbs; + parent->mt_dbflags[FREE_DBI] = txn->mt_dbflags[FREE_DBI]; + parent->mt_dbflags[MAIN_DBI] = txn->mt_dbflags[MAIN_DBI]; + for (i=CORE_DBS; imt_numdbs; i++) { + /* preserve parent's DB_NEW status */ + x = parent->mt_dbflags[i] & DB_NEW; + parent->mt_dbflags[i] = txn->mt_dbflags[i] | x; + } + + dst = parent->mt_u.dirty_list; + src = txn->mt_u.dirty_list; + /* Remove anything in our dirty list from parent's spill list */ + if ((pspill = parent->mt_spill_pgs) && (ps_len = pspill[0])) { + x = y = ps_len; + pspill[0] = (pgno_t)-1; + /* Mark our dirty pages as deleted in parent spill list */ + for (i=0, len=src[0].mid; ++i <= len; ) { + MDB_ID pn = src[i].mid << 1; + while (pn > pspill[x]) + x--; + if (pn == pspill[x]) { + pspill[x] = 1; + y = --x; + } + } + /* Squash deleted pagenums if we deleted any */ + for (x=y; ++x <= ps_len; ) + if (!(pspill[x] & 1)) + pspill[++y] = pspill[x]; + pspill[0] = y; + } + + /* Remove anything in our spill list from parent's dirty list */ + if (txn->mt_spill_pgs && txn->mt_spill_pgs[0]) { + for (i=1; i<=txn->mt_spill_pgs[0]; i++) { + MDB_ID pn = txn->mt_spill_pgs[i]; + if (pn & 1) + continue; /* deleted spillpg */ + pn >>= 1; + y = mdb_mid2l_search(dst, pn); + if (y <= dst[0].mid && dst[y].mid == pn) { + free(dst[y].mptr); + while (y < dst[0].mid) { + dst[y] = dst[y+1]; + y++; + } + dst[0].mid--; + } + } + } + + /* Find len = length of merging our dirty list with parent's */ + x = dst[0].mid; + dst[0].mid = 0; /* simplify loops */ + if (parent->mt_parent) { + len = x + src[0].mid; + y = mdb_mid2l_search(src, dst[x].mid + 1) - 1; + for (i = x; y && i; y--) { + pgno_t yp = src[y].mid; + while (yp < dst[i].mid) + i--; + if (yp == dst[i].mid) { + i--; + len--; + } + } + } else { /* Simplify the above for 
single-ancestor case */
+			len = MDB_IDL_UM_MAX - txn->mt_dirty_room;
+		}
+		/* Merge our dirty list with parent's */
+		y = src[0].mid;
+		for (i = len; y; dst[i--] = src[y--]) {
+			pgno_t yp = src[y].mid;
+			while (yp < dst[x].mid)
+				dst[i--] = dst[x--];
+			if (yp == dst[x].mid)
+				free(dst[x--].mptr);
+		}
+		mdb_tassert(txn, i == x);
+		dst[0].mid = len;
+		free(txn->mt_u.dirty_list);
+		parent->mt_dirty_room = txn->mt_dirty_room;
+		if (txn->mt_spill_pgs) {
+			if (parent->mt_spill_pgs) {
+				/* TODO: Prevent failure here, so parent does not fail */
+				rc = mdb_midl_append_list(&parent->mt_spill_pgs, txn->mt_spill_pgs);
+				if (rc)
+					parent->mt_flags |= MDB_TXN_ERROR;
+				mdb_midl_free(txn->mt_spill_pgs);
+				mdb_midl_sort(parent->mt_spill_pgs);
+			} else {
+				parent->mt_spill_pgs = txn->mt_spill_pgs;
+			}
+		}
+
+		/* Append our loose page list to parent's */
+		for (lp = &parent->mt_loose_pgs; *lp; lp = &NEXT_LOOSE_PAGE(*lp))
+			;
+		*lp = txn->mt_loose_pgs;
+		parent->mt_loose_count += txn->mt_loose_count;
+
+		parent->mt_child = NULL;
+		mdb_midl_free(((MDB_ntxn *)txn)->mnt_pgstate.mf_pghead);
+		free(txn);
+		return rc;
+	}
+
+	if (txn != env->me_txn) {
+		DPUTS("attempt to commit unknown transaction");
+		rc = EINVAL;
+		goto fail;
+	}
+
+	mdb_cursors_close(txn, 0);
+
+	if (!txn->mt_u.dirty_list[0].mid &&
+		!(txn->mt_flags & (MDB_TXN_DIRTY|MDB_TXN_SPILLS)))
+		goto done;
+
+	DPRINTF(("committing txn %"Z"u %p on mdbenv %p, root page %"Z"u",
+		txn->mt_txnid, (void*)txn, (void*)env, txn->mt_dbs[MAIN_DBI].md_root));
+
+	/* Update DB root pointers */
+	if (txn->mt_numdbs > CORE_DBS) {
+		MDB_cursor mc;
+		MDB_dbi i;
+		MDB_val data;
+		data.mv_size = sizeof(MDB_db);
+
+		mdb_cursor_init(&mc, txn, MAIN_DBI, NULL);
+		for (i = CORE_DBS; i < txn->mt_numdbs; i++) {
+			if (txn->mt_dbflags[i] & DB_DIRTY) {
+				if (TXN_DBI_CHANGED(txn, i)) {
+					rc = MDB_BAD_DBI;
+					goto fail;
+				}
+				data.mv_data = &txn->mt_dbs[i];
+				rc = mdb_cursor_put(&mc, &txn->mt_dbxs[i].md_name, &data,
+					F_SUBDATA);
+				if (rc)
+					goto fail;
+			}
+		}
+	}
+
+	rc = mdb_freelist_save(txn);
+	if (rc)
+		goto fail;
+
+	mdb_midl_free(env->me_pghead);
+	env->me_pghead = NULL;
+	mdb_midl_shrink(&txn->mt_free_pgs);
+
+#if (MDB_DEBUG) > 2
+	mdb_audit(txn);
+#endif
+
+	if ((rc = mdb_page_flush(txn, 0)) ||
+		(rc = mdb_env_sync(env, 0)) ||
+		(rc = mdb_env_write_meta(txn)))
+		goto fail;
+	end_mode = MDB_END_COMMITTED|MDB_END_UPDATE;
+
+done:
+	mdb_txn_end(txn, end_mode);
+	return MDB_SUCCESS;
+
+fail:
+	mdb_txn_abort(txn);
+	return rc;
+}
+
+/** Read the environment parameters of a DB environment before
+ * mapping it into memory.
+ * @param[in] env the environment handle
+ * @param[out] meta address of where to store the meta information
+ * @return 0 on success, non-zero on failure.
+ */
+static int ESECT
+mdb_env_read_header(MDB_env *env, MDB_meta *meta)
+{
+	MDB_metabuf pbuf;
+	MDB_page *p;
+	MDB_meta *m;
+	int i, rc, off;
+	enum { Size = sizeof(pbuf) };
+
+	/* We don't know the page size yet, so use a minimum value.
+	 * Read both meta pages so we can use the latest one.
+	 */
+
+	for (i=off=0; i<NUM_METAS; i++, off += meta->mm_psize) {
+#ifdef _WIN32
+		DWORD len;
+		OVERLAPPED ov;
+		memset(&ov, 0, sizeof(ov));
+		ov.Offset = off;
+		rc = ReadFile(env->me_fd, &pbuf, Size, &len, &ov) ? (int)len : -1;
+		if (rc == -1 && ErrCode() == ERROR_HANDLE_EOF)
+			rc = 0;
+#else
+		rc = pread(env->me_fd, &pbuf, Size, off);
+#endif
+		if (rc != Size) {
+			if (rc == 0 && off == 0)
+				return ENOENT;
+			rc = rc < 0 ?
(int) ErrCode() : MDB_INVALID; + DPRINTF(("read: %s", mdb_strerror(rc))); + return rc; + } + + p = (MDB_page *)&pbuf; + + if (!F_ISSET(p->mp_flags, P_META)) { + DPRINTF(("page %"Z"u not a meta page", p->mp_pgno)); + return MDB_INVALID; + } + + m = METADATA(p); + if (m->mm_magic != MDB_MAGIC) { + DPUTS("meta has invalid magic"); + return MDB_INVALID; + } + + if (m->mm_version != MDB_DATA_VERSION) { + DPRINTF(("database is version %u, expected version %u", + m->mm_version, MDB_DATA_VERSION)); + return MDB_VERSION_MISMATCH; + } + + if (off == 0 || m->mm_txnid > meta->mm_txnid) + *meta = *m; + } + return 0; +} + +/** Fill in most of the zeroed #MDB_meta for an empty database environment */ +static void ESECT +mdb_env_init_meta0(MDB_env *env, MDB_meta *meta) +{ + meta->mm_magic = MDB_MAGIC; + meta->mm_version = MDB_DATA_VERSION; + meta->mm_mapsize = env->me_mapsize; + meta->mm_psize = env->me_psize; + meta->mm_last_pg = NUM_METAS-1; + meta->mm_flags = env->me_flags & 0xffff; + meta->mm_flags |= MDB_INTEGERKEY; /* this is mm_dbs[FREE_DBI].md_flags */ + meta->mm_dbs[FREE_DBI].md_root = P_INVALID; + meta->mm_dbs[MAIN_DBI].md_root = P_INVALID; +} + +/** Write the environment parameters of a freshly created DB environment. + * @param[in] env the environment handle + * @param[in] meta the #MDB_meta to write + * @return 0 on success, non-zero on failure. + */ +static int ESECT +mdb_env_init_meta(MDB_env *env, MDB_meta *meta) +{ + MDB_page *p, *q; + int rc; + unsigned int psize; +#ifdef _WIN32 + DWORD len; + OVERLAPPED ov; + memset(&ov, 0, sizeof(ov)); +#define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \ + ov.Offset = pos; \ + rc = WriteFile(fd, ptr, size, &len, &ov); } while(0) +#else + int len; +#define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \ + len = pwrite(fd, ptr, size, pos); \ + if (len == -1 && ErrCode() == EINTR) continue; \ + rc = (len >= 0); break; } while(1) +#endif + + DPUTS("writing new meta page"); + + psize = env->me_psize; + + p = calloc(NUM_METAS, psize); + if (!p) + return ENOMEM; + + p->mp_pgno = 0; + p->mp_flags = P_META; + *(MDB_meta *)METADATA(p) = *meta; + + q = (MDB_page *)((char *)p + psize); + q->mp_pgno = 1; + q->mp_flags = P_META; + *(MDB_meta *)METADATA(q) = *meta; + + DO_PWRITE(rc, env->me_fd, p, psize * NUM_METAS, len, 0); + if (!rc) + rc = ErrCode(); + else if ((unsigned) len == psize * NUM_METAS) + rc = MDB_SUCCESS; + else + rc = ENOSPC; + free(p); + return rc; +} + +/** Update the environment info to commit a transaction. + * @param[in] txn the transaction that's being committed + * @return 0 on success, non-zero on failure. 
+ */ +static int +mdb_env_write_meta(MDB_txn *txn) +{ + MDB_env *env; + MDB_meta meta, metab, *mp; + unsigned flags; + size_t mapsize; + off_t off; + int rc, len, toggle; + char *ptr; + HANDLE mfd; +#ifdef _WIN32 + OVERLAPPED ov; +#else + int r2; +#endif + + toggle = txn->mt_txnid & 1; + DPRINTF(("writing meta page %d for root page %"Z"u", + toggle, txn->mt_dbs[MAIN_DBI].md_root)); + + env = txn->mt_env; + flags = env->me_flags; + mp = env->me_metas[toggle]; + mapsize = env->me_metas[toggle ^ 1]->mm_mapsize; + /* Persist any increases of mapsize config */ + if (mapsize < env->me_mapsize) + mapsize = env->me_mapsize; + + if (flags & MDB_WRITEMAP) { + mp->mm_mapsize = mapsize; + mp->mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI]; + mp->mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; + mp->mm_last_pg = txn->mt_next_pgno - 1; +#if (__GNUC__ * 100 + __GNUC_MINOR__ >= 404) && /* TODO: portability */ \ + !(defined(__i386__) || defined(__x86_64__)) + /* LY: issue a memory barrier, if not x86. ITS#7969 */ + __sync_synchronize(); +#endif + mp->mm_txnid = txn->mt_txnid; + if (!(flags & (MDB_NOMETASYNC|MDB_NOSYNC))) { + unsigned meta_size = env->me_psize; + rc = (env->me_flags & MDB_MAPASYNC) ? MS_ASYNC : MS_SYNC; + ptr = (char *)mp - PAGEHDRSZ; +#ifndef _WIN32 /* POSIX msync() requires ptr = start of OS page */ + r2 = (ptr - env->me_map) & (env->me_os_psize - 1); + ptr -= r2; + meta_size += r2; +#endif + if (MDB_MSYNC(ptr, meta_size, rc)) { + rc = ErrCode(); + goto fail; + } + } + goto done; + } + metab.mm_txnid = mp->mm_txnid; + metab.mm_last_pg = mp->mm_last_pg; + + meta.mm_mapsize = mapsize; + meta.mm_dbs[FREE_DBI] = txn->mt_dbs[FREE_DBI]; + meta.mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; + meta.mm_last_pg = txn->mt_next_pgno - 1; + meta.mm_txnid = txn->mt_txnid; + + off = offsetof(MDB_meta, mm_mapsize); + ptr = (char *)&meta + off; + len = sizeof(MDB_meta) - off; + off += (char *)mp - env->me_map; + + /* Write to the SYNC fd */ + mfd = (flags & (MDB_NOSYNC|MDB_NOMETASYNC)) ? env->me_fd : env->me_mfd; +#ifdef _WIN32 + { + memset(&ov, 0, sizeof(ov)); + ov.Offset = off; + if (!WriteFile(mfd, ptr, len, (DWORD *)&rc, &ov)) + rc = -1; + } +#else +retry_write: + rc = pwrite(mfd, ptr, len, off); +#endif + if (rc != len) { + rc = rc < 0 ? ErrCode() : EIO; +#ifndef _WIN32 + if (rc == EINTR) + goto retry_write; +#endif + DPUTS("write failed, disk error?"); + /* On a failure, the pagecache still contains the new data. + * Write some old data back, to prevent it from being used. + * Use the non-SYNC fd; we know it will fail anyway. + */ + meta.mm_last_pg = metab.mm_last_pg; + meta.mm_txnid = metab.mm_txnid; +#ifdef _WIN32 + memset(&ov, 0, sizeof(ov)); + ov.Offset = off; + WriteFile(env->me_fd, ptr, len, NULL, &ov); +#else + r2 = pwrite(env->me_fd, ptr, len, off); + (void)r2; /* Silence warnings. We don't care about pwrite's return value */ +#endif +fail: + env->me_flags |= MDB_FATAL_ERROR; + return rc; + } + /* MIPS has cache coherency issues, this is a no-op everywhere else */ + CACHEFLUSH(env->me_map + off, len, DCACHE); +done: + /* Memory ordering issues are irrelevant; since the entire writer + * is wrapped by wmutex, all of these changes will become visible + * after the wmutex is unlocked. Since the DB is multi-version, + * readers will get consistent data regardless of how fresh or + * how stale their view of these values is. + */ + if (env->me_txns) + env->me_txns->mti_txnid = txn->mt_txnid; + + return MDB_SUCCESS; +} + +/** Check both meta pages to see which one is newer. 
+ * @param[in] env the environment handle + * @return newest #MDB_meta. + */ +static MDB_meta * +mdb_env_pick_meta(const MDB_env *env) +{ + MDB_meta *const *metas = env->me_metas; + return metas[ metas[0]->mm_txnid < metas[1]->mm_txnid ]; +} + +int ESECT +mdb_env_create(MDB_env **env) +{ + MDB_env *e; + + e = calloc(1, sizeof(MDB_env)); + if (!e) + return ENOMEM; + + e->me_maxreaders = DEFAULT_READERS; + e->me_maxdbs = e->me_numdbs = CORE_DBS; + e->me_fd = INVALID_HANDLE_VALUE; + e->me_lfd = INVALID_HANDLE_VALUE; + e->me_mfd = INVALID_HANDLE_VALUE; +#ifdef MDB_USE_POSIX_SEM + e->me_rmutex = SEM_FAILED; + e->me_wmutex = SEM_FAILED; +#endif + e->me_pid = getpid(); + GET_PAGESIZE(e->me_os_psize); + VGMEMP_CREATE(e,0,0); + *env = e; + return MDB_SUCCESS; +} + +static int ESECT +mdb_env_map(MDB_env *env, void *addr) +{ + MDB_page *p; + unsigned int flags = env->me_flags; +#ifdef _WIN32 + int rc; + HANDLE mh; + LONG sizelo, sizehi; + size_t msize; + + if (flags & MDB_RDONLY) { + /* Don't set explicit map size, use whatever exists */ + msize = 0; + sizelo = 0; + sizehi = 0; + } else { + msize = env->me_mapsize; + sizelo = msize & 0xffffffff; + sizehi = msize >> 16 >> 16; /* only needed on Win64 */ + + /* Windows won't create mappings for zero length files. + * and won't map more than the file size. + * Just set the maxsize right now. + */ + if (SetFilePointer(env->me_fd, sizelo, &sizehi, 0) != (DWORD)sizelo + || !SetEndOfFile(env->me_fd) + || SetFilePointer(env->me_fd, 0, NULL, 0) != 0) + return ErrCode(); + } + + mh = CreateFileMapping(env->me_fd, NULL, flags & MDB_WRITEMAP ? + PAGE_READWRITE : PAGE_READONLY, + sizehi, sizelo, NULL); + if (!mh) + return ErrCode(); + env->me_map = MapViewOfFileEx(mh, flags & MDB_WRITEMAP ? + FILE_MAP_WRITE : FILE_MAP_READ, + 0, 0, msize, addr); + rc = env->me_map ? 0 : ErrCode(); + CloseHandle(mh); + if (rc) + return rc; +#else + int prot = PROT_READ; + if (flags & MDB_WRITEMAP) { + prot |= PROT_WRITE; + if (ftruncate(env->me_fd, env->me_mapsize) < 0) + return ErrCode(); + } + env->me_map = mmap(addr, env->me_mapsize, prot, MAP_SHARED, + env->me_fd, 0); + if (env->me_map == MAP_FAILED) { + env->me_map = NULL; + return ErrCode(); + } + + if (flags & MDB_NORDAHEAD) { + /* Turn off readahead. It's harmful when the DB is larger than RAM. */ +#ifdef MADV_RANDOM + madvise(env->me_map, env->me_mapsize, MADV_RANDOM); +#else +#ifdef POSIX_MADV_RANDOM + posix_madvise(env->me_map, env->me_mapsize, POSIX_MADV_RANDOM); +#endif /* POSIX_MADV_RANDOM */ +#endif /* MADV_RANDOM */ + } +#endif /* _WIN32 */ + + /* Can happen because the address argument to mmap() is just a + * hint. mmap() can pick another, e.g. if the range is in use. + * The MAP_FIXED flag would prevent that, but then mmap could + * instead unmap existing pages to make room for the new map. + */ + if (addr && env->me_map != addr) + return EBUSY; /* TODO: Make a new MDB_* error code? */ + + p = (MDB_page *)env->me_map; + env->me_metas[0] = METADATA(p); + env->me_metas[1] = (MDB_meta *)((char *)env->me_metas[0] + env->me_psize); + + return MDB_SUCCESS; +} + +int ESECT +mdb_env_set_mapsize(MDB_env *env, size_t size) +{ + /* If env is already open, caller is responsible for making + * sure there are no active txns. 
+	 */
+	if (env->me_map) {
+		int rc;
+		MDB_meta *meta;
+		void *old;
+		if (env->me_txn)
+			return EINVAL;
+		meta = mdb_env_pick_meta(env);
+		if (!size)
+			size = meta->mm_mapsize;
+		{
+			/* Silently round up to minimum if the size is too small */
+			size_t minsize = (meta->mm_last_pg + 1) * env->me_psize;
+			if (size < minsize)
+				size = minsize;
+		}
+		munmap(env->me_map, env->me_mapsize);
+		env->me_mapsize = size;
+		old = (env->me_flags & MDB_FIXEDMAP) ? env->me_map : NULL;
+		rc = mdb_env_map(env, old);
+		if (rc)
+			return rc;
+	}
+	env->me_mapsize = size;
+	if (env->me_psize)
+		env->me_maxpg = env->me_mapsize / env->me_psize;
+	return MDB_SUCCESS;
+}
+
+int ESECT
+mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs)
+{
+	if (env->me_map)
+		return EINVAL;
+	env->me_maxdbs = dbs + CORE_DBS;
+	return MDB_SUCCESS;
+}
+
+int ESECT
+mdb_env_set_maxreaders(MDB_env *env, unsigned int readers)
+{
+	if (env->me_map || readers < 1)
+		return EINVAL;
+	env->me_maxreaders = readers;
+	return MDB_SUCCESS;
+}
+
+int ESECT
+mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers)
+{
+	if (!env || !readers)
+		return EINVAL;
+	*readers = env->me_maxreaders;
+	return MDB_SUCCESS;
+}
+
+static int ESECT
+mdb_fsize(HANDLE fd, size_t *size)
+{
+#ifdef _WIN32
+	LARGE_INTEGER fsize;
+
+	if (!GetFileSizeEx(fd, &fsize))
+		return ErrCode();
+
+	*size = fsize.QuadPart;
+#else
+	struct stat st;
+
+	if (fstat(fd, &st))
+		return ErrCode();
+
+	*size = st.st_size;
+#endif
+	return MDB_SUCCESS;
+}
+
+#ifdef BROKEN_FDATASYNC
+#include <sys/utsname.h>
+#include <sys/vfs.h>
+#endif
+
+/** Further setup required for opening an LMDB environment
+ */
+static int ESECT
+mdb_env_open2(MDB_env *env)
+{
+	unsigned int flags = env->me_flags;
+	int i, newenv = 0, rc;
+	MDB_meta meta;
+
+#ifdef _WIN32
+	/* See if we should use QueryLimited */
+	rc = GetVersion();
+	if ((rc & 0xff) > 5)
+		env->me_pidquery = MDB_PROCESS_QUERY_LIMITED_INFORMATION;
+	else
+		env->me_pidquery = PROCESS_QUERY_INFORMATION;
+#endif /* _WIN32 */
+
+#ifdef BROKEN_FDATASYNC
+	/* ext3/ext4 fdatasync is broken on some older Linux kernels.
+	 * https://lkml.org/lkml/2012/9/3/83
+	 * Kernels after 3.6-rc6 are known good.
+	 * https://lkml.org/lkml/2012/9/10/556
+	 * See if the DB is on ext3/ext4, then check for new enough kernel
+	 * Kernels 2.6.32.60, 2.6.34.15, 3.2.30, and 3.5.4 are also known
+	 * to be patched.
+ */ + { + struct statfs st; + fstatfs(env->me_fd, &st); + while (st.f_type == 0xEF53) { + struct utsname uts; + int i; + uname(&uts); + if (uts.release[0] < '3') { + if (!strncmp(uts.release, "2.6.32.", 7)) { + i = atoi(uts.release+7); + if (i >= 60) + break; /* 2.6.32.60 and newer is OK */ + } else if (!strncmp(uts.release, "2.6.34.", 7)) { + i = atoi(uts.release+7); + if (i >= 15) + break; /* 2.6.34.15 and newer is OK */ + } + } else if (uts.release[0] == '3') { + i = atoi(uts.release+2); + if (i > 5) + break; /* 3.6 and newer is OK */ + if (i == 5) { + i = atoi(uts.release+4); + if (i >= 4) + break; /* 3.5.4 and newer is OK */ + } else if (i == 2) { + i = atoi(uts.release+4); + if (i >= 30) + break; /* 3.2.30 and newer is OK */ + } + } else { /* 4.x and newer is OK */ + break; + } + env->me_flags |= MDB_FSYNCONLY; + break; + } + } +#endif + + if ((i = mdb_env_read_header(env, &meta)) != 0) { + if (i != ENOENT) + return i; + DPUTS("new mdbenv"); + newenv = 1; + env->me_psize = env->me_os_psize; + if (env->me_psize > MAX_PAGESIZE) + env->me_psize = MAX_PAGESIZE; + memset(&meta, 0, sizeof(meta)); + mdb_env_init_meta0(env, &meta); + meta.mm_mapsize = DEFAULT_MAPSIZE; + } else { + env->me_psize = meta.mm_psize; + } + + /* Was a mapsize configured? */ + if (!env->me_mapsize) { + env->me_mapsize = meta.mm_mapsize; + } + { + /* Make sure mapsize >= committed data size. Even when using + * mm_mapsize, which could be broken in old files (ITS#7789). + */ + size_t minsize = (meta.mm_last_pg + 1) * meta.mm_psize; + if (env->me_mapsize < minsize) + env->me_mapsize = minsize; + } + meta.mm_mapsize = env->me_mapsize; + + if (newenv && !(flags & MDB_FIXEDMAP)) { + /* mdb_env_map() may grow the datafile. Write the metapages + * first, so the file will be valid if initialization fails. + * Except with FIXEDMAP, since we do not yet know mm_address. + * We could fill in mm_address later, but then a different + * program might end up doing that - one with a memory layout + * and map address which does not suit the main program. + */ + rc = mdb_env_init_meta(env, &meta); + if (rc) + return rc; + newenv = 0; + } + + rc = mdb_env_map(env, (flags & MDB_FIXEDMAP) ? meta.mm_address : NULL); + if (rc) + return rc; + + if (newenv) { + if (flags & MDB_FIXEDMAP) + meta.mm_address = env->me_map; + i = mdb_env_init_meta(env, &meta); + if (i != MDB_SUCCESS) { + return i; + } + } + + env->me_maxfree_1pg = (env->me_psize - PAGEHDRSZ) / sizeof(pgno_t) - 1; + env->me_nodemax = (((env->me_psize - PAGEHDRSZ) / MDB_MINKEYS) & -2) + - sizeof(indx_t); +#if !(MDB_MAXKEYSIZE) + env->me_maxkey = env->me_nodemax - (NODESIZE + sizeof(MDB_db)); +#endif + env->me_maxpg = env->me_mapsize / env->me_psize; + +#if MDB_DEBUG + { + MDB_meta *meta = mdb_env_pick_meta(env); + MDB_db *db = &meta->mm_dbs[MAIN_DBI]; + + DPRINTF(("opened database version %u, pagesize %u", + meta->mm_version, env->me_psize)); + DPRINTF(("using meta page %d", (int) (meta->mm_txnid & 1))); + DPRINTF(("depth: %u", db->md_depth)); + DPRINTF(("entries: %"Z"u", db->md_entries)); + DPRINTF(("branch pages: %"Z"u", db->md_branch_pages)); + DPRINTF(("leaf pages: %"Z"u", db->md_leaf_pages)); + DPRINTF(("overflow pages: %"Z"u", db->md_overflow_pages)); + DPRINTF(("root: %"Z"u", db->md_root)); + } +#endif + + return MDB_SUCCESS; +} + + +/** Release a reader thread's slot in the reader lock table. + * This function is called automatically when a thread exits. + * @param[in] ptr This points to the slot in the reader lock table. 
+ */ +static void +mdb_env_reader_dest(void *ptr) +{ + MDB_reader *reader = ptr; + + reader->mr_pid = 0; +} + +#ifdef _WIN32 +/** Junk for arranging thread-specific callbacks on Windows. This is + * necessarily platform and compiler-specific. Windows supports up + * to 1088 keys. Let's assume nobody opens more than 64 environments + * in a single process, for now. They can override this if needed. + */ +#ifndef MAX_TLS_KEYS +#define MAX_TLS_KEYS 64 +#endif +static pthread_key_t mdb_tls_keys[MAX_TLS_KEYS]; +static int mdb_tls_nkeys; + +static void NTAPI mdb_tls_callback(PVOID module, DWORD reason, PVOID ptr) +{ + int i; + switch(reason) { + case DLL_PROCESS_ATTACH: break; + case DLL_THREAD_ATTACH: break; + case DLL_THREAD_DETACH: + for (i=0; ime_txns->mti_txnid = meta->mm_txnid; + +#ifdef _WIN32 + { + OVERLAPPED ov; + /* First acquire a shared lock. The Unlock will + * then release the existing exclusive lock. + */ + memset(&ov, 0, sizeof(ov)); + if (!LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) { + rc = ErrCode(); + } else { + UnlockFile(env->me_lfd, 0, 0, 1, 0); + *excl = 0; + } + } +#else + { + struct flock lock_info; + /* The shared lock replaces the existing lock */ + memset((void *)&lock_info, 0, sizeof(lock_info)); + lock_info.l_type = F_RDLCK; + lock_info.l_whence = SEEK_SET; + lock_info.l_start = 0; + lock_info.l_len = 1; + while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) && + (rc = ErrCode()) == EINTR) ; + *excl = rc ? -1 : 0; /* error may mean we lost the lock */ + } +#endif + + return rc; +} + +/** Try to get exclusive lock, otherwise shared. + * Maintain *excl = -1: no/unknown lock, 0: shared, 1: exclusive. + */ +static int ESECT +mdb_env_excl_lock(MDB_env *env, int *excl) +{ + int rc = 0; +#ifdef _WIN32 + if (LockFile(env->me_lfd, 0, 0, 1, 0)) { + *excl = 1; + } else { + OVERLAPPED ov; + memset(&ov, 0, sizeof(ov)); + if (LockFileEx(env->me_lfd, 0, 0, 1, 0, &ov)) { + *excl = 0; + } else { + rc = ErrCode(); + } + } +#else + struct flock lock_info; + memset((void *)&lock_info, 0, sizeof(lock_info)); + lock_info.l_type = F_WRLCK; + lock_info.l_whence = SEEK_SET; + lock_info.l_start = 0; + lock_info.l_len = 1; + while ((rc = fcntl(env->me_lfd, F_SETLK, &lock_info)) && + (rc = ErrCode()) == EINTR) ; + if (!rc) { + *excl = 1; + } else +# ifndef MDB_USE_POSIX_MUTEX + if (*excl < 0) /* always true when MDB_USE_POSIX_MUTEX */ +# endif + { + lock_info.l_type = F_RDLCK; + while ((rc = fcntl(env->me_lfd, F_SETLKW, &lock_info)) && + (rc = ErrCode()) == EINTR) ; + if (rc == 0) + *excl = 0; + } +#endif + return rc; +} + +#ifdef MDB_USE_HASH +/* + * hash_64 - 64 bit Fowler/Noll/Vo-0 FNV-1a hash code + * + * @(#) $Revision: 5.1 $ + * @(#) $Id: hash_64a.c,v 5.1 2009/06/30 09:01:38 chongo Exp $ + * @(#) $Source: /usr/local/src/cmd/fnv/RCS/hash_64a.c,v $ + * + * http://www.isthe.com/chongo/tech/comp/fnv/index.html + * + *** + * + * Please do not copyright this code. This code is in the public domain. + * + * LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO + * EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF + * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR + * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + * + * By: + * chongo /\oo/\ + * http://www.isthe.com/chongo/ + * + * Share and Enjoy! 
:-) + */ + +typedef unsigned long long mdb_hash_t; +#define MDB_HASH_INIT ((mdb_hash_t)0xcbf29ce484222325ULL) + +/** perform a 64 bit Fowler/Noll/Vo FNV-1a hash on a buffer + * @param[in] val value to hash + * @param[in] hval initial value for hash + * @return 64 bit hash + * + * NOTE: To use the recommended 64 bit FNV-1a hash, use MDB_HASH_INIT as the + * hval arg on the first call. + */ +static mdb_hash_t +mdb_hash_val(MDB_val *val, mdb_hash_t hval) +{ + unsigned char *s = (unsigned char *)val->mv_data; /* unsigned string */ + unsigned char *end = s + val->mv_size; + /* + * FNV-1a hash each octet of the string + */ + while (s < end) { + /* xor the bottom with the current octet */ + hval ^= (mdb_hash_t)*s++; + + /* multiply by the 64 bit FNV magic prime mod 2^64 */ + hval += (hval << 1) + (hval << 4) + (hval << 5) + + (hval << 7) + (hval << 8) + (hval << 40); + } + /* return our new hash value */ + return hval; +} + +/** Hash the string and output the encoded hash. + * This uses modified RFC1924 Ascii85 encoding to accommodate systems with + * very short name limits. We don't care about the encoding being reversible, + * we just want to preserve as many bits of the input as possible in a + * small printable string. + * @param[in] str string to hash + * @param[out] encbuf an array of 11 chars to hold the hash + */ +static const char mdb_a85[]= "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"; + +static void ESECT +mdb_pack85(unsigned long l, char *out) +{ + int i; + + for (i=0; i<5; i++) { + *out++ = mdb_a85[l % 85]; + l /= 85; + } +} + +static void ESECT +mdb_hash_enc(MDB_val *val, char *encbuf) +{ + mdb_hash_t h = mdb_hash_val(val, MDB_HASH_INIT); + + mdb_pack85(h, encbuf); + mdb_pack85(h>>32, encbuf+5); + encbuf[10] = '\0'; +} +#endif + +/** Open and/or initialize the lock region for the environment. + * @param[in] env The LMDB environment. + * @param[in] lpath The pathname of the file used for the lock region. + * @param[in] mode The Unix permissions for the file, if we create it. + * @param[in,out] excl In -1, out lock type: -1 none, 0 shared, 1 exclusive + * @return 0 on success, non-zero on failure. + */ +static int ESECT +mdb_env_setup_locks(MDB_env *env, char *lpath, int mode, int *excl) +{ +#ifdef _WIN32 +# define MDB_ERRCODE_ROFS ERROR_WRITE_PROTECT +#else +# define MDB_ERRCODE_ROFS EROFS +#ifdef O_CLOEXEC /* Linux: Open file and set FD_CLOEXEC atomically */ +# define MDB_CLOEXEC O_CLOEXEC +#else + int fdflags; +# define MDB_CLOEXEC 0 +#endif +#endif + int rc; + off_t size, rsize; + +#ifdef _WIN32 + wchar_t *wlpath; + utf8_to_utf16(lpath, -1, &wlpath, NULL); + env->me_lfd = CreateFileW(wlpath, GENERIC_READ|GENERIC_WRITE, + FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, + FILE_ATTRIBUTE_NORMAL, NULL); + free(wlpath); +#else + env->me_lfd = open(lpath, O_RDWR|O_CREAT|MDB_CLOEXEC, mode); +#endif + if (env->me_lfd == INVALID_HANDLE_VALUE) { + rc = ErrCode(); + if (rc == MDB_ERRCODE_ROFS && (env->me_flags & MDB_RDONLY)) { + return MDB_SUCCESS; + } + goto fail_errno; + } +#if ! ((MDB_CLOEXEC) || defined(_WIN32)) + /* Lose record locks when exec*() */ + if ((fdflags = fcntl(env->me_lfd, F_GETFD) | FD_CLOEXEC) >= 0) + fcntl(env->me_lfd, F_SETFD, fdflags); +#endif + + if (!(env->me_flags & MDB_NOTLS)) { + rc = pthread_key_create(&env->me_txkey, mdb_env_reader_dest); + if (rc) + goto fail; + env->me_flags |= MDB_ENV_TXKEY; +#ifdef _WIN32 + /* Windows TLS callbacks need help finding their TLS info. 
*/ + if (mdb_tls_nkeys >= MAX_TLS_KEYS) { + rc = MDB_TLS_FULL; + goto fail; + } + mdb_tls_keys[mdb_tls_nkeys++] = env->me_txkey; +#endif + } + + /* Try to get exclusive lock. If we succeed, then + * nobody is using the lock region and we should initialize it. + */ + if ((rc = mdb_env_excl_lock(env, excl))) goto fail; + +#ifdef _WIN32 + size = GetFileSize(env->me_lfd, NULL); +#else + size = lseek(env->me_lfd, 0, SEEK_END); + if (size == -1) goto fail_errno; +#endif + rsize = (env->me_maxreaders-1) * sizeof(MDB_reader) + sizeof(MDB_txninfo); + if (size < rsize && *excl > 0) { +#ifdef _WIN32 + if (SetFilePointer(env->me_lfd, rsize, NULL, FILE_BEGIN) != (DWORD)rsize + || !SetEndOfFile(env->me_lfd)) + goto fail_errno; +#else + if (ftruncate(env->me_lfd, rsize) != 0) goto fail_errno; +#endif + } else { + rsize = size; + size = rsize - sizeof(MDB_txninfo); + env->me_maxreaders = size/sizeof(MDB_reader) + 1; + } + { +#ifdef _WIN32 + HANDLE mh; + mh = CreateFileMapping(env->me_lfd, NULL, PAGE_READWRITE, + 0, 0, NULL); + if (!mh) goto fail_errno; + env->me_txns = MapViewOfFileEx(mh, FILE_MAP_WRITE, 0, 0, rsize, NULL); + CloseHandle(mh); + if (!env->me_txns) goto fail_errno; +#else + void *m = mmap(NULL, rsize, PROT_READ|PROT_WRITE, MAP_SHARED, + env->me_lfd, 0); + if (m == MAP_FAILED) goto fail_errno; + env->me_txns = m; +#endif + } + if (*excl > 0) { +#ifdef _WIN32 + BY_HANDLE_FILE_INFORMATION stbuf; + struct { + DWORD volume; + DWORD nhigh; + DWORD nlow; + } idbuf; + MDB_val val; + char encbuf[11]; + + if (!mdb_sec_inited) { + InitializeSecurityDescriptor(&mdb_null_sd, + SECURITY_DESCRIPTOR_REVISION); + SetSecurityDescriptorDacl(&mdb_null_sd, TRUE, 0, FALSE); + mdb_all_sa.nLength = sizeof(SECURITY_ATTRIBUTES); + mdb_all_sa.bInheritHandle = FALSE; + mdb_all_sa.lpSecurityDescriptor = &mdb_null_sd; + mdb_sec_inited = 1; + } + if (!GetFileInformationByHandle(env->me_lfd, &stbuf)) goto fail_errno; + idbuf.volume = stbuf.dwVolumeSerialNumber; + idbuf.nhigh = stbuf.nFileIndexHigh; + idbuf.nlow = stbuf.nFileIndexLow; + val.mv_data = &idbuf; + val.mv_size = sizeof(idbuf); + mdb_hash_enc(&val, encbuf); + sprintf(env->me_txns->mti_rmname, "Global\\MDBr%s", encbuf); + sprintf(env->me_txns->mti_wmname, "Global\\MDBw%s", encbuf); + env->me_rmutex = CreateMutexA(&mdb_all_sa, FALSE, env->me_txns->mti_rmname); + if (!env->me_rmutex) goto fail_errno; + env->me_wmutex = CreateMutexA(&mdb_all_sa, FALSE, env->me_txns->mti_wmname); + if (!env->me_wmutex) goto fail_errno; +#elif defined(MDB_USE_POSIX_SEM) + struct stat stbuf; + struct { + dev_t dev; + ino_t ino; + } idbuf; + MDB_val val; + char encbuf[11]; + +#if defined(__NetBSD__) +#define MDB_SHORT_SEMNAMES 1 /* limited to 14 chars */ +#endif + if (fstat(env->me_lfd, &stbuf)) goto fail_errno; + idbuf.dev = stbuf.st_dev; + idbuf.ino = stbuf.st_ino; + val.mv_data = &idbuf; + val.mv_size = sizeof(idbuf); + mdb_hash_enc(&val, encbuf); +#ifdef MDB_SHORT_SEMNAMES + encbuf[9] = '\0'; /* drop name from 15 chars to 14 chars */ +#endif + sprintf(env->me_txns->mti_rmname, "/MDBr%s", encbuf); + sprintf(env->me_txns->mti_wmname, "/MDBw%s", encbuf); + /* Clean up after a previous run, if needed: Try to + * remove both semaphores before doing anything else. 
+ */ + sem_unlink(env->me_txns->mti_rmname); + sem_unlink(env->me_txns->mti_wmname); + env->me_rmutex = sem_open(env->me_txns->mti_rmname, + O_CREAT|O_EXCL, mode, 1); + if (env->me_rmutex == SEM_FAILED) goto fail_errno; + env->me_wmutex = sem_open(env->me_txns->mti_wmname, + O_CREAT|O_EXCL, mode, 1); + if (env->me_wmutex == SEM_FAILED) goto fail_errno; +#else /* MDB_USE_POSIX_MUTEX: */ + pthread_mutexattr_t mattr; + + if ((rc = pthread_mutexattr_init(&mattr)) + || (rc = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED)) +#ifdef MDB_ROBUST_SUPPORTED + || (rc = pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST)) +#endif + || (rc = pthread_mutex_init(env->me_txns->mti_rmutex, &mattr)) + || (rc = pthread_mutex_init(env->me_txns->mti_wmutex, &mattr))) + goto fail; + pthread_mutexattr_destroy(&mattr); +#endif /* _WIN32 || MDB_USE_POSIX_SEM */ + + env->me_txns->mti_magic = MDB_MAGIC; + env->me_txns->mti_format = MDB_LOCK_FORMAT; + env->me_txns->mti_txnid = 0; + env->me_txns->mti_numreaders = 0; + + } else { + if (env->me_txns->mti_magic != MDB_MAGIC) { + DPUTS("lock region has invalid magic"); + rc = MDB_INVALID; + goto fail; + } + if (env->me_txns->mti_format != MDB_LOCK_FORMAT) { + DPRINTF(("lock region has format+version 0x%x, expected 0x%x", + env->me_txns->mti_format, MDB_LOCK_FORMAT)); + rc = MDB_VERSION_MISMATCH; + goto fail; + } + rc = ErrCode(); + if (rc && rc != EACCES && rc != EAGAIN) { + goto fail; + } +#ifdef _WIN32 + env->me_rmutex = OpenMutexA(SYNCHRONIZE, FALSE, env->me_txns->mti_rmname); + if (!env->me_rmutex) goto fail_errno; + env->me_wmutex = OpenMutexA(SYNCHRONIZE, FALSE, env->me_txns->mti_wmname); + if (!env->me_wmutex) goto fail_errno; +#elif defined(MDB_USE_POSIX_SEM) + env->me_rmutex = sem_open(env->me_txns->mti_rmname, 0); + if (env->me_rmutex == SEM_FAILED) goto fail_errno; + env->me_wmutex = sem_open(env->me_txns->mti_wmname, 0); + if (env->me_wmutex == SEM_FAILED) goto fail_errno; +#endif + } + return MDB_SUCCESS; + +fail_errno: + rc = ErrCode(); +fail: + return rc; +} + + /** The name of the lock file in the DB environment */ +#define LOCKNAME "/lock.mdb" + /** The name of the data file in the DB environment */ +#define DATANAME "/data.mdb" + /** The suffix of the lock file when no subdir is used */ +#define LOCKSUFF "-lock" + /** Only a subset of the @ref mdb_env flags can be changed + * at runtime. Changing other flags requires closing the + * environment and re-opening it with the new flags. 
+ */ +#define CHANGEABLE (MDB_NOSYNC|MDB_NOMETASYNC|MDB_MAPASYNC|MDB_NOMEMINIT) +#define CHANGELESS (MDB_FIXEDMAP|MDB_NOSUBDIR|MDB_RDONLY| \ + MDB_WRITEMAP|MDB_NOTLS|MDB_NOLOCK|MDB_NORDAHEAD) + +#if VALID_FLAGS & PERSISTENT_FLAGS & (CHANGEABLE|CHANGELESS) +# error "Persistent DB flags & env flags overlap, but both go in mm_flags" +#endif + +int ESECT +mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mdb_mode_t mode) +{ + int oflags, rc, len, excl = -1; + char *lpath, *dpath; +#ifdef _WIN32 + wchar_t *wpath; +#endif + + if (env->me_fd!=INVALID_HANDLE_VALUE || (flags & ~(CHANGEABLE|CHANGELESS))) + return EINVAL; + + len = strlen(path); + if (flags & MDB_NOSUBDIR) { + rc = len + sizeof(LOCKSUFF) + len + 1; + } else { + rc = len + sizeof(LOCKNAME) + len + sizeof(DATANAME); + } + lpath = malloc(rc); + if (!lpath) + return ENOMEM; + if (flags & MDB_NOSUBDIR) { + dpath = lpath + len + sizeof(LOCKSUFF); + sprintf(lpath, "%s" LOCKSUFF, path); + strcpy(dpath, path); + } else { + dpath = lpath + len + sizeof(LOCKNAME); + sprintf(lpath, "%s" LOCKNAME, path); + sprintf(dpath, "%s" DATANAME, path); + } + + rc = MDB_SUCCESS; + flags |= env->me_flags; + if (flags & MDB_RDONLY) { + /* silently ignore WRITEMAP when we're only getting read access */ + flags &= ~MDB_WRITEMAP; + } else { + if (!((env->me_free_pgs = mdb_midl_alloc(MDB_IDL_UM_MAX)) && + (env->me_dirty_list = calloc(MDB_IDL_UM_SIZE, sizeof(MDB_ID2))))) + rc = ENOMEM; + } + env->me_flags = flags |= MDB_ENV_ACTIVE; + if (rc) + goto leave; + + env->me_path = strdup(path); + env->me_dbxs = calloc(env->me_maxdbs, sizeof(MDB_dbx)); + env->me_dbflags = calloc(env->me_maxdbs, sizeof(uint16_t)); + env->me_dbiseqs = calloc(env->me_maxdbs, sizeof(unsigned int)); + if (!(env->me_dbxs && env->me_path && env->me_dbflags && env->me_dbiseqs)) { + rc = ENOMEM; + goto leave; + } + env->me_dbxs[FREE_DBI].md_cmp = mdb_cmp_long; /* aligned MDB_INTEGERKEY */ + + /* For RDONLY, get lockfile after we know datafile exists */ + if (!(flags & (MDB_RDONLY|MDB_NOLOCK))) { + rc = mdb_env_setup_locks(env, lpath, mode, &excl); + if (rc) + goto leave; + } + +#ifdef _WIN32 + if (F_ISSET(flags, MDB_RDONLY)) { + oflags = GENERIC_READ; + len = OPEN_EXISTING; + } else { + oflags = GENERIC_READ|GENERIC_WRITE; + len = OPEN_ALWAYS; + } + mode = FILE_ATTRIBUTE_NORMAL; + utf8_to_utf16(dpath, -1, &wpath, NULL); + env->me_fd = CreateFileW(wpath, oflags, FILE_SHARE_READ|FILE_SHARE_WRITE, + NULL, len, mode, NULL); + free(wpath); +#else + if (F_ISSET(flags, MDB_RDONLY)) + oflags = O_RDONLY; + else + oflags = O_RDWR | O_CREAT; + + env->me_fd = open(dpath, oflags, mode); +#endif + if (env->me_fd == INVALID_HANDLE_VALUE) { + rc = ErrCode(); + goto leave; + } + + if ((flags & (MDB_RDONLY|MDB_NOLOCK)) == MDB_RDONLY) { + rc = mdb_env_setup_locks(env, lpath, mode, &excl); + if (rc) + goto leave; + } + + if ((rc = mdb_env_open2(env)) == MDB_SUCCESS) { + if (flags & (MDB_RDONLY|MDB_WRITEMAP)) { + env->me_mfd = env->me_fd; + } else { + /* Synchronous fd for meta writes. Needed even with + * MDB_NOSYNC/MDB_NOMETASYNC, in case these get reset. 
+			 */
+#ifdef _WIN32
+			len = OPEN_EXISTING;
+			utf8_to_utf16(dpath, -1, &wpath, NULL);
+			env->me_mfd = CreateFileW(wpath, oflags,
+				FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, len,
+				mode | FILE_FLAG_WRITE_THROUGH, NULL);
+			free(wpath);
+#else
+			oflags &= ~O_CREAT;
+			env->me_mfd = open(dpath, oflags | MDB_DSYNC, mode);
+#endif
+			if (env->me_mfd == INVALID_HANDLE_VALUE) {
+				rc = ErrCode();
+				goto leave;
+			}
+		}
+		DPRINTF(("opened dbenv %p", (void *) env));
+		if (excl > 0) {
+			rc = mdb_env_share_locks(env, &excl);
+			if (rc)
+				goto leave;
+		}
+		if (!(flags & MDB_RDONLY)) {
+			MDB_txn *txn;
+			int tsize = sizeof(MDB_txn), size = tsize + env->me_maxdbs *
+				(sizeof(MDB_db)+sizeof(MDB_cursor *)+sizeof(unsigned int)+1);
+			if ((env->me_pbuf = calloc(1, env->me_psize)) &&
+				(txn = calloc(1, size)))
+			{
+				txn->mt_dbs = (MDB_db *)((char *)txn + tsize);
+				txn->mt_cursors = (MDB_cursor **)(txn->mt_dbs + env->me_maxdbs);
+				txn->mt_dbiseqs = (unsigned int *)(txn->mt_cursors + env->me_maxdbs);
+				txn->mt_dbflags = (unsigned char *)(txn->mt_dbiseqs + env->me_maxdbs);
+				txn->mt_env = env;
+				txn->mt_dbxs = env->me_dbxs;
+				txn->mt_flags = MDB_TXN_FINISHED;
+				env->me_txn0 = txn;
+			} else {
+				rc = ENOMEM;
+			}
+		}
+	}
+
+leave:
+	if (rc) {
+		mdb_env_close0(env, excl);
+	}
+	free(lpath);
+	return rc;
+}
+
+/** Destroy resources from mdb_env_open(), clear our readers & DBIs */
+static void ESECT
+mdb_env_close0(MDB_env *env, int excl)
+{
+	int i;
+
+	if (!(env->me_flags & MDB_ENV_ACTIVE))
+		return;
+
+	/* Doing this here since me_dbxs may not exist during mdb_env_close */
+	if (env->me_dbxs) {
+		for (i = env->me_maxdbs; --i >= CORE_DBS; )
+			free(env->me_dbxs[i].md_name.mv_data);
+		free(env->me_dbxs);
+	}
+
+	free(env->me_pbuf);
+	free(env->me_dbiseqs);
+	free(env->me_dbflags);
+	free(env->me_path);
+	free(env->me_dirty_list);
+	free(env->me_txn0);
+	mdb_midl_free(env->me_free_pgs);
+
+	if (env->me_flags & MDB_ENV_TXKEY) {
+		pthread_key_delete(env->me_txkey);
+#ifdef _WIN32
+		/* Delete our key from the global list */
+		for (i=0; i<mdb_tls_nkeys; i++)
+			if (mdb_tls_keys[i] == env->me_txkey) {
+				mdb_tls_keys[i] = mdb_tls_keys[mdb_tls_nkeys-1];
+				mdb_tls_nkeys--;
+				break;
+			}
+#endif
+	}
+
+	if (env->me_map) {
+		munmap(env->me_map, env->me_mapsize);
+	}
+	if (env->me_mfd != env->me_fd && env->me_mfd != INVALID_HANDLE_VALUE)
+		(void) close(env->me_mfd);
+	if (env->me_fd != INVALID_HANDLE_VALUE)
+		(void) close(env->me_fd);
+	if (env->me_txns) {
+		MDB_PID_T pid = env->me_pid;
+		/* Clearing readers is done in this function because
+		 * me_txkey with its destructor must be disabled first.
+		 *
+		 * We skip the reader mutex, so we touch only
+		 * data owned by this process (me_close_readers and
+		 * our readers), and clear each reader atomically.
+		 */
+		for (i = env->me_close_readers; --i >= 0; )
+			if (env->me_txns->mti_readers[i].mr_pid == pid)
+				env->me_txns->mti_readers[i].mr_pid = 0;
+#ifdef _WIN32
+		if (env->me_rmutex) {
+			CloseHandle(env->me_rmutex);
+			if (env->me_wmutex) CloseHandle(env->me_wmutex);
+		}
+		/* Windows automatically destroys the mutexes when
+		 * the last handle closes.
+		 */
+#elif defined(MDB_USE_POSIX_SEM)
+		if (env->me_rmutex != SEM_FAILED) {
+			sem_close(env->me_rmutex);
+			if (env->me_wmutex != SEM_FAILED)
+				sem_close(env->me_wmutex);
+			/* If we have the filelock: If we are the
+			 * only remaining user, clean up semaphores.
+ */ + if (excl == 0) + mdb_env_excl_lock(env, &excl); + if (excl > 0) { + sem_unlink(env->me_txns->mti_rmname); + sem_unlink(env->me_txns->mti_wmname); + } + } +#endif + munmap((void *)env->me_txns, (env->me_maxreaders-1)*sizeof(MDB_reader)+sizeof(MDB_txninfo)); + } + if (env->me_lfd != INVALID_HANDLE_VALUE) { +#ifdef _WIN32 + if (excl >= 0) { + /* Unlock the lockfile. Windows would have unlocked it + * after closing anyway, but not necessarily at once. + */ + UnlockFile(env->me_lfd, 0, 0, 1, 0); + } +#endif + (void) close(env->me_lfd); + } + + env->me_flags &= ~(MDB_ENV_ACTIVE|MDB_ENV_TXKEY); +} + +void ESECT +mdb_env_close(MDB_env *env) +{ + MDB_page *dp; + + if (env == NULL) + return; + + VGMEMP_DESTROY(env); + while ((dp = env->me_dpages) != NULL) { + VGMEMP_DEFINED(&dp->mp_next, sizeof(dp->mp_next)); + env->me_dpages = dp->mp_next; + free(dp); + } + + mdb_env_close0(env, 0); + free(env); +} + +/** Compare two items pointing at aligned size_t's */ +static int +mdb_cmp_long(const MDB_val *a, const MDB_val *b) +{ + return (*(size_t *)a->mv_data < *(size_t *)b->mv_data) ? -1 : + *(size_t *)a->mv_data > *(size_t *)b->mv_data; +} + +/** Compare two items pointing at aligned unsigned int's. + * + * This is also set as #MDB_INTEGERDUP|#MDB_DUPFIXED's #MDB_dbx.%md_dcmp, + * but #mdb_cmp_clong() is called instead if the data type is size_t. + */ +static int +mdb_cmp_int(const MDB_val *a, const MDB_val *b) +{ + return (*(unsigned int *)a->mv_data < *(unsigned int *)b->mv_data) ? -1 : + *(unsigned int *)a->mv_data > *(unsigned int *)b->mv_data; +} + +/** Compare two items pointing at unsigned ints of unknown alignment. + * Nodes and keys are guaranteed to be 2-byte aligned. + */ +static int +mdb_cmp_cint(const MDB_val *a, const MDB_val *b) +{ +#if BYTE_ORDER == LITTLE_ENDIAN + unsigned short *u, *c; + int x; + + u = (unsigned short *) ((char *) a->mv_data + a->mv_size); + c = (unsigned short *) ((char *) b->mv_data + a->mv_size); + do { + x = *--u - *--c; + } while(!x && u > (unsigned short *)a->mv_data); + return x; +#else + unsigned short *u, *c, *end; + int x; + + end = (unsigned short *) ((char *) a->mv_data + a->mv_size); + u = (unsigned short *)a->mv_data; + c = (unsigned short *)b->mv_data; + do { + x = *u++ - *c++; + } while(!x && u < end); + return x; +#endif +} + +/** Compare two items lexically */ +static int +mdb_cmp_memn(const MDB_val *a, const MDB_val *b) +{ + int diff; + ssize_t len_diff; + unsigned int len; + + len = a->mv_size; + len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size; + if (len_diff > 0) { + len = b->mv_size; + len_diff = 1; + } + + diff = memcmp(a->mv_data, b->mv_data, len); + return diff ? diff : len_diff<0 ? -1 : len_diff; +} + +/** Compare two items in reverse byte order */ +static int +mdb_cmp_memnr(const MDB_val *a, const MDB_val *b) +{ + const unsigned char *p1, *p2, *p1_lim; + ssize_t len_diff; + int diff; + + p1_lim = (const unsigned char *)a->mv_data; + p1 = (const unsigned char *)a->mv_data + a->mv_size; + p2 = (const unsigned char *)b->mv_data + b->mv_size; + + len_diff = (ssize_t) a->mv_size - (ssize_t) b->mv_size; + if (len_diff > 0) { + p1_lim += len_diff; + len_diff = 1; + } + + while (p1 > p1_lim) { + diff = *--p1 - *--p2; + if (diff) + return diff; + } + return len_diff<0 ? -1 : len_diff; +} + +/** Search for key within a page, using binary search. + * Returns the smallest entry larger or equal to the key. + * If exactp is non-null, stores whether the found entry was an exact match + * in *exactp (1 or 0). 
+ * Updates the cursor index with the index of the found entry. + * If no entry larger or equal to the key is found, returns NULL. + */ +static MDB_node * +mdb_node_search(MDB_cursor *mc, MDB_val *key, int *exactp) +{ + unsigned int i = 0, nkeys; + int low, high; + int rc = 0; + MDB_page *mp = mc->mc_pg[mc->mc_top]; + MDB_node *node = NULL; + MDB_val nodekey; + MDB_cmp_func *cmp; + DKBUF; + + nkeys = NUMKEYS(mp); + + DPRINTF(("searching %u keys in %s %spage %"Z"u", + nkeys, IS_LEAF(mp) ? "leaf" : "branch", IS_SUBP(mp) ? "sub-" : "", + mdb_dbg_pgno(mp))); + + low = IS_LEAF(mp) ? 0 : 1; + high = nkeys - 1; + cmp = mc->mc_dbx->md_cmp; + + /* Branch pages have no data, so if using integer keys, + * alignment is guaranteed. Use faster mdb_cmp_int. + */ + if (cmp == mdb_cmp_cint && IS_BRANCH(mp)) { + if (NODEPTR(mp, 1)->mn_ksize == sizeof(size_t)) + cmp = mdb_cmp_long; + else + cmp = mdb_cmp_int; + } + + if (IS_LEAF2(mp)) { + nodekey.mv_size = mc->mc_db->md_pad; + node = NODEPTR(mp, 0); /* fake */ + while (low <= high) { + i = (low + high) >> 1; + nodekey.mv_data = LEAF2KEY(mp, i, nodekey.mv_size); + rc = cmp(key, &nodekey); + DPRINTF(("found leaf index %u [%s], rc = %i", + i, DKEY(&nodekey), rc)); + if (rc == 0) + break; + if (rc > 0) + low = i + 1; + else + high = i - 1; + } + } else { + while (low <= high) { + i = (low + high) >> 1; + + node = NODEPTR(mp, i); + nodekey.mv_size = NODEKSZ(node); + nodekey.mv_data = NODEKEY(node); + + rc = cmp(key, &nodekey); +#if MDB_DEBUG + if (IS_LEAF(mp)) + DPRINTF(("found leaf index %u [%s], rc = %i", + i, DKEY(&nodekey), rc)); + else + DPRINTF(("found branch index %u [%s -> %"Z"u], rc = %i", + i, DKEY(&nodekey), NODEPGNO(node), rc)); +#endif + if (rc == 0) + break; + if (rc > 0) + low = i + 1; + else + high = i - 1; + } + } + + if (rc > 0) { /* Found entry is less than the key. */ + i++; /* Skip to get the smallest entry larger than key. */ + if (!IS_LEAF2(mp)) + node = NODEPTR(mp, i); + } + if (exactp) + *exactp = (rc == 0 && nkeys > 0); + /* store the key index */ + mc->mc_ki[mc->mc_top] = i; + if (i >= nkeys) + /* There is no entry larger or equal to the key. */ + return NULL; + + /* nodeptr is fake for LEAF2 */ + return node; +} + +#if 0 +static void +mdb_cursor_adjust(MDB_cursor *mc, func) +{ + MDB_cursor *m2; + + for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { + if (m2->mc_pg[m2->mc_top] == mc->mc_pg[mc->mc_top]) { + func(mc, m2); + } + } +} +#endif + +/** Pop a page off the top of the cursor's stack. */ +static void +mdb_cursor_pop(MDB_cursor *mc) +{ + if (mc->mc_snum) { + DPRINTF(("popping page %"Z"u off db %d cursor %p", + mc->mc_pg[mc->mc_top]->mp_pgno, DDBI(mc), (void *) mc)); + + mc->mc_snum--; + if (mc->mc_snum) { + mc->mc_top--; + } else { + mc->mc_flags &= ~C_INITIALIZED; + } + } +} + +/** Push a page onto the top of the cursor's stack. */ +static int +mdb_cursor_push(MDB_cursor *mc, MDB_page *mp) +{ + DPRINTF(("pushing page %"Z"u on db %d cursor %p", mp->mp_pgno, + DDBI(mc), (void *) mc)); + + if (mc->mc_snum >= CURSOR_STACK) { + mc->mc_txn->mt_flags |= MDB_TXN_ERROR; + return MDB_CURSOR_FULL; + } + + mc->mc_top = mc->mc_snum++; + mc->mc_pg[mc->mc_top] = mp; + mc->mc_ki[mc->mc_top] = 0; + + return MDB_SUCCESS; +} + +/** Find the address of the page corresponding to a given page number. + * @param[in] txn the transaction for this access. + * @param[in] pgno the page number for the page to retrieve. + * @param[out] ret address of a pointer where the page's address will be stored. 
+ * @param[out] lvl dirty_list inheritance level of found page. 1=current txn, 0=mapped page. + * @return 0 on success, non-zero on failure. + */ +static int +mdb_page_get(MDB_txn *txn, pgno_t pgno, MDB_page **ret, int *lvl) +{ + MDB_env *env = txn->mt_env; + MDB_page *p = NULL; + int level; + + if (! (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_WRITEMAP))) { + MDB_txn *tx2 = txn; + level = 1; + do { + MDB_ID2L dl = tx2->mt_u.dirty_list; + unsigned x; + /* Spilled pages were dirtied in this txn and flushed + * because the dirty list got full. Bring this page + * back in from the map (but don't unspill it here, + * leave that unless page_touch happens again). + */ + if (tx2->mt_spill_pgs) { + MDB_ID pn = pgno << 1; + x = mdb_midl_search(tx2->mt_spill_pgs, pn); + if (x <= tx2->mt_spill_pgs[0] && tx2->mt_spill_pgs[x] == pn) { + p = (MDB_page *)(env->me_map + env->me_psize * pgno); + goto done; + } + } + if (dl[0].mid) { + unsigned x = mdb_mid2l_search(dl, pgno); + if (x <= dl[0].mid && dl[x].mid == pgno) { + p = dl[x].mptr; + goto done; + } + } + level++; + } while ((tx2 = tx2->mt_parent) != NULL); + } + + if (pgno < txn->mt_next_pgno) { + level = 0; + p = (MDB_page *)(env->me_map + env->me_psize * pgno); + } else { + DPRINTF(("page %"Z"u not found", pgno)); + txn->mt_flags |= MDB_TXN_ERROR; + return MDB_PAGE_NOTFOUND; + } + +done: + *ret = p; + if (lvl) + *lvl = level; + return MDB_SUCCESS; +} + +/** Finish #mdb_page_search() / #mdb_page_search_lowest(). + * The cursor is at the root page, set up the rest of it. + */ +static int +mdb_page_search_root(MDB_cursor *mc, MDB_val *key, int flags) +{ + MDB_page *mp = mc->mc_pg[mc->mc_top]; + int rc; + DKBUF; + + while (IS_BRANCH(mp)) { + MDB_node *node; + indx_t i; + + DPRINTF(("branch page %"Z"u has %u keys", mp->mp_pgno, NUMKEYS(mp))); + mdb_cassert(mc, NUMKEYS(mp) > 1); + DPRINTF(("found index 0 to page %"Z"u", NODEPGNO(NODEPTR(mp, 0)))); + + if (flags & (MDB_PS_FIRST|MDB_PS_LAST)) { + i = 0; + if (flags & MDB_PS_LAST) + i = NUMKEYS(mp) - 1; + } else { + int exact; + node = mdb_node_search(mc, key, &exact); + if (node == NULL) + i = NUMKEYS(mp) - 1; + else { + i = mc->mc_ki[mc->mc_top]; + if (!exact) { + mdb_cassert(mc, i > 0); + i--; + } + } + DPRINTF(("following index %u for key [%s]", i, DKEY(key))); + } + + mdb_cassert(mc, i < NUMKEYS(mp)); + node = NODEPTR(mp, i); + + if ((rc = mdb_page_get(mc->mc_txn, NODEPGNO(node), &mp, NULL)) != 0) + return rc; + + mc->mc_ki[mc->mc_top] = i; + if ((rc = mdb_cursor_push(mc, mp))) + return rc; + + if (flags & MDB_PS_MODIFY) { + if ((rc = mdb_page_touch(mc)) != 0) + return rc; + mp = mc->mc_pg[mc->mc_top]; + } + } + + if (!IS_LEAF(mp)) { + DPRINTF(("internal error, index points to a %02X page!?", + mp->mp_flags)); + mc->mc_txn->mt_flags |= MDB_TXN_ERROR; + return MDB_CORRUPTED; + } + + DPRINTF(("found leaf page %"Z"u for key [%s]", mp->mp_pgno, + key ? DKEY(key) : "null")); + mc->mc_flags |= C_INITIALIZED; + mc->mc_flags &= ~C_EOF; + + return MDB_SUCCESS; +} + +/** Search for the lowest key under the current branch page. + * This just bypasses a NUMKEYS check in the current page + * before calling mdb_page_search_root(), because the callers + * are all in situations where the current page is known to + * be underfilled. 
+ */ +static int +mdb_page_search_lowest(MDB_cursor *mc) +{ + MDB_page *mp = mc->mc_pg[mc->mc_top]; + MDB_node *node = NODEPTR(mp, 0); + int rc; + + if ((rc = mdb_page_get(mc->mc_txn, NODEPGNO(node), &mp, NULL)) != 0) + return rc; + + mc->mc_ki[mc->mc_top] = 0; + if ((rc = mdb_cursor_push(mc, mp))) + return rc; + return mdb_page_search_root(mc, NULL, MDB_PS_FIRST); +} + +/** Search for the page a given key should be in. + * Push it and its parent pages on the cursor stack. + * @param[in,out] mc the cursor for this operation. + * @param[in] key the key to search for, or NULL for first/last page. + * @param[in] flags If MDB_PS_MODIFY is set, visited pages in the DB + * are touched (updated with new page numbers). + * If MDB_PS_FIRST or MDB_PS_LAST is set, find first or last leaf. + * This is used by #mdb_cursor_first() and #mdb_cursor_last(). + * If MDB_PS_ROOTONLY set, just fetch root node, no further lookups. + * @return 0 on success, non-zero on failure. + */ +static int +mdb_page_search(MDB_cursor *mc, MDB_val *key, int flags) +{ + int rc; + pgno_t root; + + /* Make sure the txn is still viable, then find the root from + * the txn's db table and set it as the root of the cursor's stack. + */ + if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED) { + DPUTS("transaction may not be used now"); + return MDB_BAD_TXN; + } else { + /* Make sure we're using an up-to-date root */ + if (*mc->mc_dbflag & DB_STALE) { + MDB_cursor mc2; + if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi)) + return MDB_BAD_DBI; + mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, NULL); + rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, 0); + if (rc) + return rc; + { + MDB_val data; + int exact = 0; + uint16_t flags; + MDB_node *leaf = mdb_node_search(&mc2, + &mc->mc_dbx->md_name, &exact); + if (!exact) + return MDB_NOTFOUND; + if ((leaf->mn_flags & (F_DUPDATA|F_SUBDATA)) != F_SUBDATA) + return MDB_INCOMPATIBLE; /* not a named DB */ + rc = mdb_node_read(mc->mc_txn, leaf, &data); + if (rc) + return rc; + memcpy(&flags, ((char *) data.mv_data + offsetof(MDB_db, md_flags)), + sizeof(uint16_t)); + /* The txn may not know this DBI, or another process may + * have dropped and recreated the DB with other flags. + */ + if ((mc->mc_db->md_flags & PERSISTENT_FLAGS) != flags) + return MDB_INCOMPATIBLE; + memcpy(mc->mc_db, data.mv_data, sizeof(MDB_db)); + } + *mc->mc_dbflag &= ~DB_STALE; + } + root = mc->mc_db->md_root; + + if (root == P_INVALID) { /* Tree is empty. */ + DPUTS("tree is empty"); + return MDB_NOTFOUND; + } + } + + mdb_cassert(mc, root > 1); + if (!mc->mc_pg[0] || mc->mc_pg[0]->mp_pgno != root) + if ((rc = mdb_page_get(mc->mc_txn, root, &mc->mc_pg[0], NULL)) != 0) + return rc; + + mc->mc_snum = 1; + mc->mc_top = 0; + + DPRINTF(("db %d root page %"Z"u has flags 0x%X", + DDBI(mc), root, mc->mc_pg[0]->mp_flags)); + + if (flags & MDB_PS_MODIFY) { + if ((rc = mdb_page_touch(mc))) + return rc; + } + + if (flags & MDB_PS_ROOTONLY) + return MDB_SUCCESS; + + return mdb_page_search_root(mc, key, flags); +} + +static int +mdb_ovpage_free(MDB_cursor *mc, MDB_page *mp) +{ + MDB_txn *txn = mc->mc_txn; + pgno_t pg = mp->mp_pgno; + unsigned x = 0, ovpages = mp->mp_pages; + MDB_env *env = txn->mt_env; + MDB_IDL sl = txn->mt_spill_pgs; + MDB_ID pn = pg << 1; + int rc; + + DPRINTF(("free ov page %"Z"u (%d)", pg, ovpages)); + /* If the page is dirty or on the spill list we just acquired it, + * so we should give it back to our current free list, if any. + * Otherwise put it onto the list of pages we freed in this txn. 
+ * + * Won't create me_pghead: me_pglast must be inited along with it. + * Unsupported in nested txns: They would need to hide the page + * range in ancestor txns' dirty and spilled lists. + */ + if (env->me_pghead && + !txn->mt_parent && + ((mp->mp_flags & P_DIRTY) || + (sl && (x = mdb_midl_search(sl, pn)) <= sl[0] && sl[x] == pn))) + { + unsigned i, j; + pgno_t *mop; + MDB_ID2 *dl, ix, iy; + rc = mdb_midl_need(&env->me_pghead, ovpages); + if (rc) + return rc; + if (!(mp->mp_flags & P_DIRTY)) { + /* This page is no longer spilled */ + if (x == sl[0]) + sl[0]--; + else + sl[x] |= 1; + goto release; + } + /* Remove from dirty list */ + dl = txn->mt_u.dirty_list; + x = dl[0].mid--; + for (ix = dl[x]; ix.mptr != mp; ix = iy) { + if (x > 1) { + x--; + iy = dl[x]; + dl[x] = ix; + } else { + mdb_cassert(mc, x > 1); + j = ++(dl[0].mid); + dl[j] = ix; /* Unsorted. OK when MDB_TXN_ERROR. */ + txn->mt_flags |= MDB_TXN_ERROR; + return MDB_CORRUPTED; + } + } + txn->mt_dirty_room++; + if (!(env->me_flags & MDB_WRITEMAP)) + mdb_dpage_free(env, mp); +release: + /* Insert in me_pghead */ + mop = env->me_pghead; + j = mop[0] + ovpages; + for (i = mop[0]; i && mop[i] < pg; i--) + mop[j--] = mop[i]; + while (j>i) + mop[j--] = pg++; + mop[0] += ovpages; + } else { + rc = mdb_midl_append_range(&txn->mt_free_pgs, pg, ovpages); + if (rc) + return rc; + } + mc->mc_db->md_overflow_pages -= ovpages; + return 0; +} + +/** Return the data associated with a given node. + * @param[in] txn The transaction for this operation. + * @param[in] leaf The node being read. + * @param[out] data Updated to point to the node's data. + * @return 0 on success, non-zero on failure. + */ +static int +mdb_node_read(MDB_txn *txn, MDB_node *leaf, MDB_val *data) +{ + MDB_page *omp; /* overflow page */ + pgno_t pgno; + int rc; + + if (!F_ISSET(leaf->mn_flags, F_BIGDATA)) { + data->mv_size = NODEDSZ(leaf); + data->mv_data = NODEDATA(leaf); + return MDB_SUCCESS; + } + + /* Read overflow data. + */ + data->mv_size = NODEDSZ(leaf); + memcpy(&pgno, NODEDATA(leaf), sizeof(pgno)); + if ((rc = mdb_page_get(txn, pgno, &omp, NULL)) != 0) { + DPRINTF(("read overflow page %"Z"u failed", pgno)); + return rc; + } + data->mv_data = METADATA(omp); + + return MDB_SUCCESS; +} + +int +mdb_get(MDB_txn *txn, MDB_dbi dbi, + MDB_val *key, MDB_val *data) +{ + MDB_cursor mc; + MDB_xcursor mx; + int exact = 0; + DKBUF; + + DPRINTF(("===> get db %u key [%s]", dbi, DKEY(key))); + + if (!key || !data || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) + return EINVAL; + + if (txn->mt_flags & MDB_TXN_BLOCKED) + return MDB_BAD_TXN; + + mdb_cursor_init(&mc, txn, dbi, &mx); + return mdb_cursor_set(&mc, key, data, MDB_SET, &exact); +} + +/** Find a sibling for a page. + * Replaces the page at the top of the cursor's stack with the + * specified sibling, if one exists. + * @param[in] mc The cursor for this operation. + * @param[in] move_right Non-zero if the right sibling is requested, + * otherwise the left sibling. + * @return 0 on success, non-zero on failure. + */ +static int +mdb_cursor_sibling(MDB_cursor *mc, int move_right) +{ + int rc; + MDB_node *indx; + MDB_page *mp; + + if (mc->mc_snum < 2) { + return MDB_NOTFOUND; /* root has no siblings */ + } + + mdb_cursor_pop(mc); + DPRINTF(("parent page is page %"Z"u, index %u", + mc->mc_pg[mc->mc_top]->mp_pgno, mc->mc_ki[mc->mc_top])); + + if (move_right ? (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mc->mc_pg[mc->mc_top])) + : (mc->mc_ki[mc->mc_top] == 0)) { + DPRINTF(("no more keys left, moving to %s sibling", + move_right ? 
"right" : "left")); + if ((rc = mdb_cursor_sibling(mc, move_right)) != MDB_SUCCESS) { + /* undo cursor_pop before returning */ + mc->mc_top++; + mc->mc_snum++; + return rc; + } + } else { + if (move_right) + mc->mc_ki[mc->mc_top]++; + else + mc->mc_ki[mc->mc_top]--; + DPRINTF(("just moving to %s index key %u", + move_right ? "right" : "left", mc->mc_ki[mc->mc_top])); + } + mdb_cassert(mc, IS_BRANCH(mc->mc_pg[mc->mc_top])); + + indx = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + if ((rc = mdb_page_get(mc->mc_txn, NODEPGNO(indx), &mp, NULL)) != 0) { + /* mc will be inconsistent if caller does mc_snum++ as above */ + mc->mc_flags &= ~(C_INITIALIZED|C_EOF); + return rc; + } + + mdb_cursor_push(mc, mp); + if (!move_right) + mc->mc_ki[mc->mc_top] = NUMKEYS(mp)-1; + + return MDB_SUCCESS; +} + +/** Move the cursor to the next data item. */ +static int +mdb_cursor_next(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op) +{ + MDB_page *mp; + MDB_node *leaf; + int rc; + + if (mc->mc_flags & C_EOF) { + return MDB_NOTFOUND; + } + + mdb_cassert(mc, mc->mc_flags & C_INITIALIZED); + + mp = mc->mc_pg[mc->mc_top]; + + if (mc->mc_db->md_flags & MDB_DUPSORT) { + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + if (op == MDB_NEXT || op == MDB_NEXT_DUP) { + rc = mdb_cursor_next(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_NEXT); + if (op != MDB_NEXT || rc != MDB_NOTFOUND) { + if (rc == MDB_SUCCESS) + MDB_GET_KEY(leaf, key); + return rc; + } + } + } else { + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); + if (op == MDB_NEXT_DUP) + return MDB_NOTFOUND; + } + } + + DPRINTF(("cursor_next: top page is %"Z"u in cursor %p", + mdb_dbg_pgno(mp), (void *) mc)); + if (mc->mc_flags & C_DEL) + goto skip; + + if (mc->mc_ki[mc->mc_top] + 1u >= NUMKEYS(mp)) { + DPUTS("=====> move to next sibling page"); + if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) { + mc->mc_flags |= C_EOF; + return rc; + } + mp = mc->mc_pg[mc->mc_top]; + DPRINTF(("next page is %"Z"u, key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top])); + } else + mc->mc_ki[mc->mc_top]++; + +skip: + DPRINTF(("==> cursor points to page %"Z"u with %u keys, key index %u", + mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top])); + + if (IS_LEAF2(mp)) { + key->mv_size = mc->mc_db->md_pad; + key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); + return MDB_SUCCESS; + } + + mdb_cassert(mc, IS_LEAF(mp)); + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdb_xcursor_init1(mc, leaf); + } + if (data) { + if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) + return rc; + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); + if (rc != MDB_SUCCESS) + return rc; + } + } + + MDB_GET_KEY(leaf, key); + return MDB_SUCCESS; +} + +/** Move the cursor to the previous data item. 
*/ +static int +mdb_cursor_prev(MDB_cursor *mc, MDB_val *key, MDB_val *data, MDB_cursor_op op) +{ + MDB_page *mp; + MDB_node *leaf; + int rc; + + mdb_cassert(mc, mc->mc_flags & C_INITIALIZED); + + mp = mc->mc_pg[mc->mc_top]; + + if (mc->mc_db->md_flags & MDB_DUPSORT) { + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + if (op == MDB_PREV || op == MDB_PREV_DUP) { + rc = mdb_cursor_prev(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_PREV); + if (op != MDB_PREV || rc != MDB_NOTFOUND) { + if (rc == MDB_SUCCESS) { + MDB_GET_KEY(leaf, key); + mc->mc_flags &= ~C_EOF; + } + return rc; + } + } + } else { + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); + if (op == MDB_PREV_DUP) + return MDB_NOTFOUND; + } + } + + DPRINTF(("cursor_prev: top page is %"Z"u in cursor %p", + mdb_dbg_pgno(mp), (void *) mc)); + + if (mc->mc_ki[mc->mc_top] == 0) { + DPUTS("=====> move to prev sibling page"); + if ((rc = mdb_cursor_sibling(mc, 0)) != MDB_SUCCESS) { + return rc; + } + mp = mc->mc_pg[mc->mc_top]; + mc->mc_ki[mc->mc_top] = NUMKEYS(mp) - 1; + DPRINTF(("prev page is %"Z"u, key index %u", mp->mp_pgno, mc->mc_ki[mc->mc_top])); + } else + mc->mc_ki[mc->mc_top]--; + + mc->mc_flags &= ~C_EOF; + + DPRINTF(("==> cursor points to page %"Z"u with %u keys, key index %u", + mdb_dbg_pgno(mp), NUMKEYS(mp), mc->mc_ki[mc->mc_top])); + + if (IS_LEAF2(mp)) { + key->mv_size = mc->mc_db->md_pad; + key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); + return MDB_SUCCESS; + } + + mdb_cassert(mc, IS_LEAF(mp)); + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdb_xcursor_init1(mc, leaf); + } + if (data) { + if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) + return rc; + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL); + if (rc != MDB_SUCCESS) + return rc; + } + } + + MDB_GET_KEY(leaf, key); + return MDB_SUCCESS; +} + +/** Set the cursor on a specific data item. */ +static int +mdb_cursor_set(MDB_cursor *mc, MDB_val *key, MDB_val *data, + MDB_cursor_op op, int *exactp) +{ + int rc; + MDB_page *mp; + MDB_node *leaf = NULL; + DKBUF; + + if (key->mv_size == 0) + return MDB_BAD_VALSIZE; + + if (mc->mc_xcursor) + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); + + /* See if we're already on the right page */ + if (mc->mc_flags & C_INITIALIZED) { + MDB_val nodekey; + + mp = mc->mc_pg[mc->mc_top]; + if (!NUMKEYS(mp)) { + mc->mc_ki[mc->mc_top] = 0; + return MDB_NOTFOUND; + } + if (mp->mp_flags & P_LEAF2) { + nodekey.mv_size = mc->mc_db->md_pad; + nodekey.mv_data = LEAF2KEY(mp, 0, nodekey.mv_size); + } else { + leaf = NODEPTR(mp, 0); + MDB_GET_KEY2(leaf, nodekey); + } + rc = mc->mc_dbx->md_cmp(key, &nodekey); + if (rc == 0) { + /* Probably happens rarely, but first node on the page + * was the one we wanted. 
+			 */
+			mc->mc_ki[mc->mc_top] = 0;
+			if (exactp)
+				*exactp = 1;
+			goto set1;
+		}
+		if (rc > 0) {
+			unsigned int i;
+			unsigned int nkeys = NUMKEYS(mp);
+			if (nkeys > 1) {
+				if (mp->mp_flags & P_LEAF2) {
+					nodekey.mv_data = LEAF2KEY(mp,
+						 nkeys-1, nodekey.mv_size);
+				} else {
+					leaf = NODEPTR(mp, nkeys-1);
+					MDB_GET_KEY2(leaf, nodekey);
+				}
+				rc = mc->mc_dbx->md_cmp(key, &nodekey);
+				if (rc == 0) {
+					/* last node was the one we wanted */
+					mc->mc_ki[mc->mc_top] = nkeys-1;
+					if (exactp)
+						*exactp = 1;
+					goto set1;
+				}
+				if (rc < 0) {
+					if (mc->mc_ki[mc->mc_top] < NUMKEYS(mp)) {
+						/* This is definitely the right page, skip search_page */
+						if (mp->mp_flags & P_LEAF2) {
+							nodekey.mv_data = LEAF2KEY(mp,
+								 mc->mc_ki[mc->mc_top], nodekey.mv_size);
+						} else {
+							leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]);
+							MDB_GET_KEY2(leaf, nodekey);
+						}
+						rc = mc->mc_dbx->md_cmp(key, &nodekey);
+						if (rc == 0) {
+							/* current node was the one we wanted */
+							if (exactp)
+								*exactp = 1;
+							goto set1;
+						}
+					}
+					rc = 0;
+					goto set2;
+				}
+			}
+			/* If any parents have right-sibs, search.
+			 * Otherwise, there's nothing further.
+			 */
+			for (i=0; i<mc->mc_top; i++)
+				if (mc->mc_ki[i] <
+					NUMKEYS(mc->mc_pg[i])-1)
+					break;
+			if (i == mc->mc_top) {
+				/* There are no other pages */
+				mc->mc_ki[mc->mc_top] = nkeys;
+				return MDB_NOTFOUND;
+			}
+		}
+		if (!mc->mc_top) {
+			/* There are no other pages */
+			mc->mc_ki[mc->mc_top] = 0;
+			if (op == MDB_SET_RANGE && !exactp) {
+				rc = 0;
+				goto set1;
+			} else
+				return MDB_NOTFOUND;
+		}
+	} else {
+		mc->mc_pg[0] = 0;
+	}
+
+	rc = mdb_page_search(mc, key, 0);
+	if (rc != MDB_SUCCESS)
+		return rc;
+
+	mp = mc->mc_pg[mc->mc_top];
+	mdb_cassert(mc, IS_LEAF(mp));
+
+set2:
+	leaf = mdb_node_search(mc, key, exactp);
+	if (exactp != NULL && !*exactp) {
+		/* MDB_SET specified and not an exact match.
*/ + return MDB_NOTFOUND; + } + + if (leaf == NULL) { + DPUTS("===> inexact leaf not found, goto sibling"); + if ((rc = mdb_cursor_sibling(mc, 1)) != MDB_SUCCESS) { + mc->mc_flags |= C_EOF; + return rc; /* no entries matched */ + } + mp = mc->mc_pg[mc->mc_top]; + mdb_cassert(mc, IS_LEAF(mp)); + leaf = NODEPTR(mp, 0); + } + +set1: + mc->mc_flags |= C_INITIALIZED; + mc->mc_flags &= ~C_EOF; + + if (IS_LEAF2(mp)) { + if (op == MDB_SET_RANGE || op == MDB_SET_KEY) { + key->mv_size = mc->mc_db->md_pad; + key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); + } + return MDB_SUCCESS; + } + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdb_xcursor_init1(mc, leaf); + } + if (data) { + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + if (op == MDB_SET || op == MDB_SET_KEY || op == MDB_SET_RANGE) { + rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); + } else { + int ex2, *ex2p; + if (op == MDB_GET_BOTH) { + ex2p = &ex2; + ex2 = 0; + } else { + ex2p = NULL; + } + rc = mdb_cursor_set(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_SET_RANGE, ex2p); + if (rc != MDB_SUCCESS) + return rc; + } + } else if (op == MDB_GET_BOTH || op == MDB_GET_BOTH_RANGE) { + MDB_val olddata; + MDB_cmp_func *dcmp; + if ((rc = mdb_node_read(mc->mc_txn, leaf, &olddata)) != MDB_SUCCESS) + return rc; + dcmp = mc->mc_dbx->md_dcmp; +#if UINT_MAX < SIZE_MAX + if (dcmp == mdb_cmp_int && olddata.mv_size == sizeof(size_t)) + dcmp = mdb_cmp_clong; +#endif + rc = dcmp(data, &olddata); + if (rc) { + if (op == MDB_GET_BOTH || rc > 0) + return MDB_NOTFOUND; + rc = 0; + *data = olddata; + } + + } else { + if (mc->mc_xcursor) + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); + if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) + return rc; + } + } + + /* The key already matches in all other cases */ + if (op == MDB_SET_RANGE || op == MDB_SET_KEY) + MDB_GET_KEY(leaf, key); + DPRINTF(("==> cursor placed on key [%s]", DKEY(key))); + + return rc; +} + +/** Move the cursor to the first item in the database. */ +static int +mdb_cursor_first(MDB_cursor *mc, MDB_val *key, MDB_val *data) +{ + int rc; + MDB_node *leaf; + + if (mc->mc_xcursor) + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); + + if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { + rc = mdb_page_search(mc, NULL, MDB_PS_FIRST); + if (rc != MDB_SUCCESS) + return rc; + } + mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top])); + + leaf = NODEPTR(mc->mc_pg[mc->mc_top], 0); + mc->mc_flags |= C_INITIALIZED; + mc->mc_flags &= ~C_EOF; + + mc->mc_ki[mc->mc_top] = 0; + + if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { + key->mv_size = mc->mc_db->md_pad; + key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], 0, key->mv_size); + return MDB_SUCCESS; + } + + if (data) { + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdb_xcursor_init1(mc, leaf); + rc = mdb_cursor_first(&mc->mc_xcursor->mx_cursor, data, NULL); + if (rc) + return rc; + } else { + if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) + return rc; + } + } + MDB_GET_KEY(leaf, key); + return MDB_SUCCESS; +} + +/** Move the cursor to the last item in the database. 
*/ +static int +mdb_cursor_last(MDB_cursor *mc, MDB_val *key, MDB_val *data) +{ + int rc; + MDB_node *leaf; + + if (mc->mc_xcursor) + mc->mc_xcursor->mx_cursor.mc_flags &= ~(C_INITIALIZED|C_EOF); + + if (!(mc->mc_flags & C_EOF)) { + + if (!(mc->mc_flags & C_INITIALIZED) || mc->mc_top) { + rc = mdb_page_search(mc, NULL, MDB_PS_LAST); + if (rc != MDB_SUCCESS) + return rc; + } + mdb_cassert(mc, IS_LEAF(mc->mc_pg[mc->mc_top])); + + } + mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]) - 1; + mc->mc_flags |= C_INITIALIZED|C_EOF; + leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + + if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { + key->mv_size = mc->mc_db->md_pad; + key->mv_data = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], key->mv_size); + return MDB_SUCCESS; + } + + if (data) { + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + mdb_xcursor_init1(mc, leaf); + rc = mdb_cursor_last(&mc->mc_xcursor->mx_cursor, data, NULL); + if (rc) + return rc; + } else { + if ((rc = mdb_node_read(mc->mc_txn, leaf, data)) != MDB_SUCCESS) + return rc; + } + } + + MDB_GET_KEY(leaf, key); + return MDB_SUCCESS; +} + +int +mdb_cursor_get(MDB_cursor *mc, MDB_val *key, MDB_val *data, + MDB_cursor_op op) +{ + int rc; + int exact = 0; + int (*mfunc)(MDB_cursor *mc, MDB_val *key, MDB_val *data); + + if (mc == NULL) + return EINVAL; + + if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED) + return MDB_BAD_TXN; + + switch (op) { + case MDB_GET_CURRENT: + if (!(mc->mc_flags & C_INITIALIZED)) { + rc = EINVAL; + } else { + MDB_page *mp = mc->mc_pg[mc->mc_top]; + int nkeys = NUMKEYS(mp); + if (!nkeys || mc->mc_ki[mc->mc_top] >= nkeys) { + mc->mc_ki[mc->mc_top] = nkeys; + rc = MDB_NOTFOUND; + break; + } + rc = MDB_SUCCESS; + if (IS_LEAF2(mp)) { + key->mv_size = mc->mc_db->md_pad; + key->mv_data = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], key->mv_size); + } else { + MDB_node *leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + MDB_GET_KEY(leaf, key); + if (data) { + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + rc = mdb_cursor_get(&mc->mc_xcursor->mx_cursor, data, NULL, MDB_GET_CURRENT); + } else { + rc = mdb_node_read(mc->mc_txn, leaf, data); + } + } + } + } + break; + case MDB_GET_BOTH: + case MDB_GET_BOTH_RANGE: + if (data == NULL) { + rc = EINVAL; + break; + } + if (mc->mc_xcursor == NULL) { + rc = MDB_INCOMPATIBLE; + break; + } + /* FALLTHRU */ + case MDB_SET: + case MDB_SET_KEY: + case MDB_SET_RANGE: + if (key == NULL) { + rc = EINVAL; + } else { + rc = mdb_cursor_set(mc, key, data, op, + op == MDB_SET_RANGE ? 
NULL : &exact); + } + break; + case MDB_GET_MULTIPLE: + if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) { + rc = EINVAL; + break; + } + if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { + rc = MDB_INCOMPATIBLE; + break; + } + rc = MDB_SUCCESS; + if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) || + (mc->mc_xcursor->mx_cursor.mc_flags & C_EOF)) + break; + goto fetchm; + case MDB_NEXT_MULTIPLE: + if (data == NULL) { + rc = EINVAL; + break; + } + if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { + rc = MDB_INCOMPATIBLE; + break; + } + if (!(mc->mc_flags & C_INITIALIZED)) + rc = mdb_cursor_first(mc, key, data); + else + rc = mdb_cursor_next(mc, key, data, MDB_NEXT_DUP); + if (rc == MDB_SUCCESS) { + if (mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) { + MDB_cursor *mx; +fetchm: + mx = &mc->mc_xcursor->mx_cursor; + data->mv_size = NUMKEYS(mx->mc_pg[mx->mc_top]) * + mx->mc_db->md_pad; + data->mv_data = METADATA(mx->mc_pg[mx->mc_top]); + mx->mc_ki[mx->mc_top] = NUMKEYS(mx->mc_pg[mx->mc_top])-1; + } else { + rc = MDB_NOTFOUND; + } + } + break; + case MDB_NEXT: + case MDB_NEXT_DUP: + case MDB_NEXT_NODUP: + if (!(mc->mc_flags & C_INITIALIZED)) + rc = mdb_cursor_first(mc, key, data); + else + rc = mdb_cursor_next(mc, key, data, op); + break; + case MDB_PREV: + case MDB_PREV_DUP: + case MDB_PREV_NODUP: + if (!(mc->mc_flags & C_INITIALIZED)) { + rc = mdb_cursor_last(mc, key, data); + if (rc) + break; + mc->mc_flags |= C_INITIALIZED; + mc->mc_ki[mc->mc_top]++; + } + rc = mdb_cursor_prev(mc, key, data, op); + break; + case MDB_FIRST: + rc = mdb_cursor_first(mc, key, data); + break; + case MDB_FIRST_DUP: + mfunc = mdb_cursor_first; + mmove: + if (data == NULL || !(mc->mc_flags & C_INITIALIZED)) { + rc = EINVAL; + break; + } + if (mc->mc_xcursor == NULL) { + rc = MDB_INCOMPATIBLE; + break; + } + { + MDB_node *leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { + MDB_GET_KEY(leaf, key); + rc = mdb_node_read(mc->mc_txn, leaf, data); + break; + } + } + if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) { + rc = EINVAL; + break; + } + rc = mfunc(&mc->mc_xcursor->mx_cursor, data, NULL); + break; + case MDB_LAST: + rc = mdb_cursor_last(mc, key, data); + break; + case MDB_LAST_DUP: + mfunc = mdb_cursor_last; + goto mmove; + default: + DPRINTF(("unhandled/unimplemented cursor operation %u", op)); + rc = EINVAL; + break; + } + + if (mc->mc_flags & C_DEL) + mc->mc_flags ^= C_DEL; + + return rc; +} + +/** Touch all the pages in the cursor stack. Set mc_top. + * Makes sure all the pages are writable, before attempting a write operation. + * @param[in] mc The cursor to operate on. 
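/* Illustrative read-path sketch for mdb_cursor_get() above: iterate every
 * key/value pair with MDB_NEXT inside a read-only transaction. The names env,
 * dbi and dump_all are placeholders assumed to be set up by the caller.
 */
#include <stdio.h>
#include "lmdb.h"

static int dump_all(MDB_env *env, MDB_dbi dbi)
{
    MDB_txn *txn;
    MDB_cursor *cur;
    MDB_val key, data;
    int rc;

    if ((rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)) != MDB_SUCCESS)
        return rc;
    if ((rc = mdb_cursor_open(txn, dbi, &cur)) != MDB_SUCCESS) {
        mdb_txn_abort(txn);
        return rc;
    }
    /* On an uninitialized cursor MDB_NEXT behaves like MDB_FIRST, exactly as
     * the switch in mdb_cursor_get() implements it. */
    while ((rc = mdb_cursor_get(cur, &key, &data, MDB_NEXT)) == MDB_SUCCESS)
        printf("%.*s = %zu bytes\n", (int)key.mv_size, (char *)key.mv_data, data.mv_size);
    if (rc == MDB_NOTFOUND)
        rc = MDB_SUCCESS; /* clean end of iteration */
    mdb_cursor_close(cur);
    mdb_txn_abort(txn);
    return rc;
}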
+ */ +static int +mdb_cursor_touch(MDB_cursor *mc) +{ + int rc = MDB_SUCCESS; + + if (mc->mc_dbi >= CORE_DBS && !(*mc->mc_dbflag & DB_DIRTY)) { + MDB_cursor mc2; + MDB_xcursor mcx; + if (TXN_DBI_CHANGED(mc->mc_txn, mc->mc_dbi)) + return MDB_BAD_DBI; + mdb_cursor_init(&mc2, mc->mc_txn, MAIN_DBI, &mcx); + rc = mdb_page_search(&mc2, &mc->mc_dbx->md_name, MDB_PS_MODIFY); + if (rc) + return rc; + *mc->mc_dbflag |= DB_DIRTY; + } + mc->mc_top = 0; + if (mc->mc_snum) { + do { + rc = mdb_page_touch(mc); + } while (!rc && ++(mc->mc_top) < mc->mc_snum); + mc->mc_top = mc->mc_snum-1; + } + return rc; +} + +/** Do not spill pages to disk if txn is getting full, may fail instead */ +#define MDB_NOSPILL 0x8000 + +int +mdb_cursor_put(MDB_cursor *mc, MDB_val *key, MDB_val *data, + unsigned int flags) +{ + MDB_env *env; + MDB_node *leaf = NULL; + MDB_page *fp, *mp, *sub_root = NULL; + uint16_t fp_flags; + MDB_val xdata, *rdata, dkey, olddata; + MDB_db dummy; + int do_sub = 0, insert_key, insert_data; + unsigned int mcount = 0, dcount = 0, nospill; + size_t nsize; + int rc, rc2; + unsigned int nflags; + DKBUF; + + if (mc == NULL || key == NULL) + return EINVAL; + + env = mc->mc_txn->mt_env; + + /* Check this first so counter will always be zero on any + * early failures. + */ + if (flags & MDB_MULTIPLE) { + dcount = data[1].mv_size; + data[1].mv_size = 0; + if (!F_ISSET(mc->mc_db->md_flags, MDB_DUPFIXED)) + return MDB_INCOMPATIBLE; + } + + nospill = flags & MDB_NOSPILL; + flags &= ~MDB_NOSPILL; + + if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) + return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; + + if (key->mv_size-1 >= ENV_MAXKEY(env)) + return MDB_BAD_VALSIZE; + +#if SIZE_MAX > MAXDATASIZE + if (data->mv_size > ((mc->mc_db->md_flags & MDB_DUPSORT) ? ENV_MAXKEY(env) : MAXDATASIZE)) + return MDB_BAD_VALSIZE; +#else + if ((mc->mc_db->md_flags & MDB_DUPSORT) && data->mv_size > ENV_MAXKEY(env)) + return MDB_BAD_VALSIZE; +#endif + + DPRINTF(("==> put db %d key [%s], size %"Z"u, data size %"Z"u", + DDBI(mc), DKEY(key), key ? 
key->mv_size : 0, data->mv_size)); + + dkey.mv_size = 0; + + if (flags == MDB_CURRENT) { + if (!(mc->mc_flags & C_INITIALIZED)) + return EINVAL; + rc = MDB_SUCCESS; + } else if (mc->mc_db->md_root == P_INVALID) { + /* new database, cursor has nothing to point to */ + mc->mc_snum = 0; + mc->mc_top = 0; + mc->mc_flags &= ~C_INITIALIZED; + rc = MDB_NO_ROOT; + } else { + int exact = 0; + MDB_val d2; + if (flags & MDB_APPEND) { + MDB_val k2; + rc = mdb_cursor_last(mc, &k2, &d2); + if (rc == 0) { + rc = mc->mc_dbx->md_cmp(key, &k2); + if (rc > 0) { + rc = MDB_NOTFOUND; + mc->mc_ki[mc->mc_top]++; + } else { + /* new key is <= last key */ + rc = MDB_KEYEXIST; + } + } + } else { + rc = mdb_cursor_set(mc, key, &d2, MDB_SET, &exact); + } + if ((flags & MDB_NOOVERWRITE) && rc == 0) { + DPRINTF(("duplicate key [%s]", DKEY(key))); + *data = d2; + return MDB_KEYEXIST; + } + if (rc && rc != MDB_NOTFOUND) + return rc; + } + + if (mc->mc_flags & C_DEL) + mc->mc_flags ^= C_DEL; + + /* Cursor is positioned, check for room in the dirty list */ + if (!nospill) { + if (flags & MDB_MULTIPLE) { + rdata = &xdata; + xdata.mv_size = data->mv_size * dcount; + } else { + rdata = data; + } + if ((rc2 = mdb_page_spill(mc, key, rdata))) + return rc2; + } + + if (rc == MDB_NO_ROOT) { + MDB_page *np; + /* new database, write a root leaf page */ + DPUTS("allocating new root leaf page"); + if ((rc2 = mdb_page_new(mc, P_LEAF, 1, &np))) { + return rc2; + } + mdb_cursor_push(mc, np); + mc->mc_db->md_root = np->mp_pgno; + mc->mc_db->md_depth++; + *mc->mc_dbflag |= DB_DIRTY; + if ((mc->mc_db->md_flags & (MDB_DUPSORT|MDB_DUPFIXED)) + == MDB_DUPFIXED) + np->mp_flags |= P_LEAF2; + mc->mc_flags |= C_INITIALIZED; + } else { + /* make sure all cursor pages are writable */ + rc2 = mdb_cursor_touch(mc); + if (rc2) + return rc2; + } + + insert_key = insert_data = rc; + if (insert_key) { + /* The key does not exist */ + DPRINTF(("inserting key at index %i", mc->mc_ki[mc->mc_top])); + if ((mc->mc_db->md_flags & MDB_DUPSORT) && + LEAFSIZE(key, data) > env->me_nodemax) + { + /* Too big for a node, insert in sub-DB. Set up an empty + * "old sub-page" for prep_subDB to expand to a full page. + */ + fp_flags = P_LEAF|P_DIRTY; + fp = env->me_pbuf; + fp->mp_pad = data->mv_size; /* used if MDB_DUPFIXED */ + fp->mp_lower = fp->mp_upper = (PAGEHDRSZ-PAGEBASE); + olddata.mv_size = PAGEHDRSZ; + goto prep_subDB; + } + } else { + /* there's only a key anyway, so this is a no-op */ + if (IS_LEAF2(mc->mc_pg[mc->mc_top])) { + char *ptr; + unsigned int ksize = mc->mc_db->md_pad; + if (key->mv_size != ksize) + return MDB_BAD_VALSIZE; + ptr = LEAF2KEY(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top], ksize); + memcpy(ptr, key->mv_data, ksize); +fix_parent: + /* if overwriting slot 0 of leaf, need to + * update branch key if there is a parent page + */ + if (mc->mc_top && !mc->mc_ki[mc->mc_top]) { + unsigned short dtop = 1; + mc->mc_top--; + /* slot 0 is always an empty key, find real slot */ + while (mc->mc_top && !mc->mc_ki[mc->mc_top]) { + mc->mc_top--; + dtop++; + } + if (mc->mc_ki[mc->mc_top]) + rc2 = mdb_update_key(mc, key); + else + rc2 = MDB_SUCCESS; + mc->mc_top += dtop; + if (rc2) + return rc2; + } + return MDB_SUCCESS; + } + +more: + leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + olddata.mv_size = NODEDSZ(leaf); + olddata.mv_data = NODEDATA(leaf); + + /* DB has dups? */ + if (F_ISSET(mc->mc_db->md_flags, MDB_DUPSORT)) { + /* Prepare (sub-)page/sub-DB to accept the new item, + * if needed. fp: old sub-page or a header faking + * it. 
mp: new (sub-)page. offset: growth in page + * size. xdata: node data with new page or DB. + */ + unsigned i, offset = 0; + mp = fp = xdata.mv_data = env->me_pbuf; + mp->mp_pgno = mc->mc_pg[mc->mc_top]->mp_pgno; + + /* Was a single item before, must convert now */ + if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { + MDB_cmp_func *dcmp; + /* Just overwrite the current item */ + if (flags == MDB_CURRENT) + goto current; + dcmp = mc->mc_dbx->md_dcmp; +#if UINT_MAX < SIZE_MAX + if (dcmp == mdb_cmp_int && olddata.mv_size == sizeof(size_t)) + dcmp = mdb_cmp_clong; +#endif + /* does data match? */ + if (!dcmp(data, &olddata)) { + if (flags & MDB_NODUPDATA) + return MDB_KEYEXIST; + /* overwrite it */ + goto current; + } + + /* Back up original data item */ + dkey.mv_size = olddata.mv_size; + dkey.mv_data = memcpy(fp+1, olddata.mv_data, olddata.mv_size); + + /* Make sub-page header for the dup items, with dummy body */ + fp->mp_flags = P_LEAF|P_DIRTY|P_SUBP; + fp->mp_lower = (PAGEHDRSZ-PAGEBASE); + xdata.mv_size = PAGEHDRSZ + dkey.mv_size + data->mv_size; + if (mc->mc_db->md_flags & MDB_DUPFIXED) { + fp->mp_flags |= P_LEAF2; + fp->mp_pad = data->mv_size; + xdata.mv_size += 2 * data->mv_size; /* leave space for 2 more */ + } else { + xdata.mv_size += 2 * (sizeof(indx_t) + NODESIZE) + + (dkey.mv_size & 1) + (data->mv_size & 1); + } + fp->mp_upper = xdata.mv_size - PAGEBASE; + olddata.mv_size = xdata.mv_size; /* pretend olddata is fp */ + } else if (leaf->mn_flags & F_SUBDATA) { + /* Data is on sub-DB, just store it */ + flags |= F_DUPDATA|F_SUBDATA; + goto put_sub; + } else { + /* Data is on sub-page */ + fp = olddata.mv_data; + switch (flags) { + default: + if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) { + offset = EVEN(NODESIZE + sizeof(indx_t) + + data->mv_size); + break; + } + offset = fp->mp_pad; + if (SIZELEFT(fp) < offset) { + offset *= 4; /* space for 4 more */ + break; + } + /* FALLTHRU: Big enough MDB_DUPFIXED sub-page */ + case MDB_CURRENT: + fp->mp_flags |= P_DIRTY; + COPY_PGNO(fp->mp_pgno, mp->mp_pgno); + mc->mc_xcursor->mx_cursor.mc_pg[0] = fp; + flags |= F_DUPDATA; + goto put_sub; + } + xdata.mv_size = olddata.mv_size + offset; + } + + fp_flags = fp->mp_flags; + if (NODESIZE + NODEKSZ(leaf) + xdata.mv_size > env->me_nodemax) { + /* Too big for a sub-page, convert to sub-DB */ + fp_flags &= ~P_SUBP; +prep_subDB: + if (mc->mc_db->md_flags & MDB_DUPFIXED) { + fp_flags |= P_LEAF2; + dummy.md_pad = fp->mp_pad; + dummy.md_flags = MDB_DUPFIXED; + if (mc->mc_db->md_flags & MDB_INTEGERDUP) + dummy.md_flags |= MDB_INTEGERKEY; + } else { + dummy.md_pad = 0; + dummy.md_flags = 0; + } + dummy.md_depth = 1; + dummy.md_branch_pages = 0; + dummy.md_leaf_pages = 1; + dummy.md_overflow_pages = 0; + dummy.md_entries = NUMKEYS(fp); + xdata.mv_size = sizeof(MDB_db); + xdata.mv_data = &dummy; + if ((rc = mdb_page_alloc(mc, 1, &mp))) + return rc; + offset = env->me_psize - olddata.mv_size; + flags |= F_DUPDATA|F_SUBDATA; + dummy.md_root = mp->mp_pgno; + sub_root = mp; + } + if (mp != fp) { + mp->mp_flags = fp_flags | P_DIRTY; + mp->mp_pad = fp->mp_pad; + mp->mp_lower = fp->mp_lower; + mp->mp_upper = fp->mp_upper + offset; + if (fp_flags & P_LEAF2) { + memcpy(METADATA(mp), METADATA(fp), NUMKEYS(fp) * fp->mp_pad); + } else { + memcpy((char *)mp + mp->mp_upper + PAGEBASE, (char *)fp + fp->mp_upper + PAGEBASE, + olddata.mv_size - fp->mp_upper - PAGEBASE); + for (i=0; i<NUMKEYS(fp); i++) + mp->mp_ptrs[i] = fp->mp_ptrs[i] + offset; + } + } + + rdata = &xdata; + flags |= F_DUPDATA; + do_sub = 1; + if (!insert_key) + mdb_node_del(mc, 0); + goto
new_sub; + } +current: + /* LMDB passes F_SUBDATA in 'flags' to write a DB record */ + if ((leaf->mn_flags ^ flags) & F_SUBDATA) + return MDB_INCOMPATIBLE; + /* overflow page overwrites need special handling */ + if (F_ISSET(leaf->mn_flags, F_BIGDATA)) { + MDB_page *omp; + pgno_t pg; + int level, ovpages, dpages = OVPAGES(data->mv_size, env->me_psize); + + memcpy(&pg, olddata.mv_data, sizeof(pg)); + if ((rc2 = mdb_page_get(mc->mc_txn, pg, &omp, &level)) != 0) + return rc2; + ovpages = omp->mp_pages; + + /* Is the ov page large enough? */ + if (ovpages >= dpages) { + if (!(omp->mp_flags & P_DIRTY) && + (level || (env->me_flags & MDB_WRITEMAP))) + { + rc = mdb_page_unspill(mc->mc_txn, omp, &omp); + if (rc) + return rc; + level = 0; /* dirty in this txn or clean */ + } + /* Is it dirty? */ + if (omp->mp_flags & P_DIRTY) { + /* yes, overwrite it. Note in this case we don't + * bother to try shrinking the page if the new data + * is smaller than the overflow threshold. + */ + if (level > 1) { + /* It is writable only in a parent txn */ + size_t sz = (size_t) env->me_psize * ovpages, off; + MDB_page *np = mdb_page_malloc(mc->mc_txn, ovpages); + MDB_ID2 id2; + if (!np) + return ENOMEM; + id2.mid = pg; + id2.mptr = np; + /* Note - this page is already counted in parent's dirty_room */ + rc2 = mdb_mid2l_insert(mc->mc_txn->mt_u.dirty_list, &id2); + mdb_cassert(mc, rc2 == 0); + if (!(flags & MDB_RESERVE)) { + /* Copy end of page, adjusting alignment so + * compiler may copy words instead of bytes. + */ + off = (PAGEHDRSZ + data->mv_size) & -sizeof(size_t); + memcpy((size_t *)((char *)np + off), + (size_t *)((char *)omp + off), sz - off); + sz = PAGEHDRSZ; + } + memcpy(np, omp, sz); /* Copy beginning of page */ + omp = np; + } + SETDSZ(leaf, data->mv_size); + if (F_ISSET(flags, MDB_RESERVE)) + data->mv_data = METADATA(omp); + else + memcpy(METADATA(omp), data->mv_data, data->mv_size); + return MDB_SUCCESS; + } + } + if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS) + return rc2; + } else if (data->mv_size == olddata.mv_size) { + /* same size, just replace it. Note that we could + * also reuse this node if the new data is smaller, + * but instead we opt to shrink the node in that case. + */ + if (F_ISSET(flags, MDB_RESERVE)) + data->mv_data = olddata.mv_data; + else if (!(mc->mc_flags & C_SUB)) + memcpy(olddata.mv_data, data->mv_data, data->mv_size); + else { + memcpy(NODEKEY(leaf), key->mv_data, key->mv_size); + goto fix_parent; + } + return MDB_SUCCESS; + } + mdb_node_del(mc, 0); + } + + rdata = data; + +new_sub: + nflags = flags & NODE_ADD_FLAGS; + nsize = IS_LEAF2(mc->mc_pg[mc->mc_top]) ? key->mv_size : mdb_leaf_size(env, key, rdata); + if (SIZELEFT(mc->mc_pg[mc->mc_top]) < nsize) { + if (( flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA ) + nflags &= ~MDB_APPEND; /* sub-page may need room to grow */ + if (!insert_key) + nflags |= MDB_SPLIT_REPLACE; + rc = mdb_page_split(mc, key, rdata, P_INVALID, nflags); + } else { + /* There is room already in this leaf page. 
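/* Caller-side sketch of the write flags handled in mdb_cursor_put() above
 * (MDB_NOOVERWRITE and MDB_RESERVE). env, dbi and put_examples are assumed
 * placeholders; error handling is abbreviated.
 */
#include <string.h>
#include "lmdb.h"

static int put_examples(MDB_env *env, MDB_dbi dbi)
{
    MDB_txn *txn;
    MDB_val key, data;
    int rc;

    if ((rc = mdb_txn_begin(env, NULL, 0, &txn)) != MDB_SUCCESS)
        return rc;

    key.mv_size = 5; key.mv_data = "alpha";
    data.mv_size = 3; data.mv_data = "one";
    /* MDB_NOOVERWRITE: an existing key is left alone, MDB_KEYEXIST is
     * returned and data is pointed at the value already stored. */
    rc = mdb_put(txn, dbi, &key, &data, MDB_NOOVERWRITE);
    if (rc == MDB_KEYEXIST)
        rc = MDB_SUCCESS;

    if (rc == MDB_SUCCESS) {
        /* MDB_RESERVE: LMDB allocates the space and returns a pointer in
         * data.mv_data for the caller to fill before the txn ends. */
        key.mv_size = 4; key.mv_data = "blob";
        data.mv_size = 128; data.mv_data = NULL;
        rc = mdb_put(txn, dbi, &key, &data, MDB_RESERVE);
        if (rc == MDB_SUCCESS)
            memset(data.mv_data, 0, data.mv_size);
    }

    if (rc == MDB_SUCCESS)
        return mdb_txn_commit(txn);
    mdb_txn_abort(txn);
    return rc;
}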
*/ + rc = mdb_node_add(mc, mc->mc_ki[mc->mc_top], key, rdata, 0, nflags); + if (rc == 0) { + /* Adjust other cursors pointing to mp */ + MDB_cursor *m2, *m3; + MDB_dbi dbi = mc->mc_dbi; + unsigned i = mc->mc_top; + MDB_page *mp = mc->mc_pg[i]; + + for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { + if (mc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (m3 == mc || m3->mc_snum < mc->mc_snum || m3->mc_pg[i] != mp) continue; + if (m3->mc_ki[i] >= mc->mc_ki[i] && insert_key) { + m3->mc_ki[i]++; + } + if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) { + MDB_node *n2 = NODEPTR(mp, m3->mc_ki[i]); + if ((n2->mn_flags & (F_SUBDATA|F_DUPDATA)) == F_DUPDATA) + m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(n2); + } + } + } + } + + if (rc == MDB_SUCCESS) { + /* Now store the actual data in the child DB. Note that we're + * storing the user data in the keys field, so there are strict + * size limits on dupdata. The actual data fields of the child + * DB are all zero size. + */ + if (do_sub) { + int xflags, new_dupdata; + size_t ecount; +put_sub: + xdata.mv_size = 0; + xdata.mv_data = ""; + leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + if (flags & MDB_CURRENT) { + xflags = MDB_CURRENT|MDB_NOSPILL; + } else { + mdb_xcursor_init1(mc, leaf); + xflags = (flags & MDB_NODUPDATA) ? + MDB_NOOVERWRITE|MDB_NOSPILL : MDB_NOSPILL; + } + if (sub_root) + mc->mc_xcursor->mx_cursor.mc_pg[0] = sub_root; + new_dupdata = (int)dkey.mv_size; + /* converted, write the original data first */ + if (dkey.mv_size) { + rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, &dkey, &xdata, xflags); + if (rc) + goto bad_sub; + /* we've done our job */ + dkey.mv_size = 0; + } + if (!(leaf->mn_flags & F_SUBDATA) || sub_root) { + /* Adjust other cursors pointing to mp */ + MDB_cursor *m2; + MDB_xcursor *mx = mc->mc_xcursor; + unsigned i = mc->mc_top; + MDB_page *mp = mc->mc_pg[i]; + int nkeys = NUMKEYS(mp); + + for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { + if (m2 == mc || m2->mc_snum < mc->mc_snum) continue; + if (!(m2->mc_flags & C_INITIALIZED)) continue; + if (m2->mc_pg[i] == mp) { + if (m2->mc_ki[i] == mc->mc_ki[i]) { + mdb_xcursor_init2(m2, mx, new_dupdata); + } else if (!insert_key && m2->mc_ki[i] < nkeys) { + MDB_node *n2 = NODEPTR(mp, m2->mc_ki[i]); + if ((n2->mn_flags & (F_SUBDATA|F_DUPDATA)) == F_DUPDATA) + m2->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(n2); + } + } + } + } + ecount = mc->mc_xcursor->mx_db.md_entries; + if (flags & MDB_APPENDDUP) + xflags |= MDB_APPEND; + rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, data, &xdata, xflags); + if (flags & F_SUBDATA) { + void *db = NODEDATA(leaf); + memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db)); + } + insert_data = mc->mc_xcursor->mx_db.md_entries - ecount; + } + /* Increment count unless we just replaced an existing item. */ + if (insert_data) + mc->mc_db->md_entries++; + if (insert_key) { + /* Invalidate txn if we created an empty sub-DB */ + if (rc) + goto bad_sub; + /* If we succeeded and the key didn't exist before, + * make sure the cursor is marked valid. 
+ */ + mc->mc_flags |= C_INITIALIZED; + } + if (flags & MDB_MULTIPLE) { + if (!rc) { + mcount++; + /* let caller know how many succeeded, if any */ + data[1].mv_size = mcount; + if (mcount < dcount) { + data[0].mv_data = (char *)data[0].mv_data + data[0].mv_size; + insert_key = insert_data = 0; + goto more; + } + } + } + return rc; +bad_sub: + if (rc == MDB_KEYEXIST) /* should not happen, we deleted that item */ + rc = MDB_CORRUPTED; + } + mc->mc_txn->mt_flags |= MDB_TXN_ERROR; + return rc; +} + +int +mdb_cursor_del(MDB_cursor *mc, unsigned int flags) +{ + MDB_node *leaf; + MDB_page *mp; + int rc; + + if (mc->mc_txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) + return (mc->mc_txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; + + if (!(mc->mc_flags & C_INITIALIZED)) + return EINVAL; + + if (mc->mc_ki[mc->mc_top] >= NUMKEYS(mc->mc_pg[mc->mc_top])) + return MDB_NOTFOUND; + + if (!(flags & MDB_NOSPILL) && (rc = mdb_page_spill(mc, NULL, NULL))) + return rc; + + rc = mdb_cursor_touch(mc); + if (rc) + return rc; + + mp = mc->mc_pg[mc->mc_top]; + if (IS_LEAF2(mp)) + goto del_key; + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + + if (F_ISSET(leaf->mn_flags, F_DUPDATA)) { + if (flags & MDB_NODUPDATA) { + /* mdb_cursor_del0() will subtract the final entry */ + mc->mc_db->md_entries -= mc->mc_xcursor->mx_db.md_entries - 1; + mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED; + } else { + if (!F_ISSET(leaf->mn_flags, F_SUBDATA)) { + mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); + } + rc = mdb_cursor_del(&mc->mc_xcursor->mx_cursor, MDB_NOSPILL); + if (rc) + return rc; + /* If sub-DB still has entries, we're done */ + if (mc->mc_xcursor->mx_db.md_entries) { + if (leaf->mn_flags & F_SUBDATA) { + /* update subDB info */ + void *db = NODEDATA(leaf); + memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db)); + } else { + MDB_cursor *m2; + /* shrink fake page */ + mdb_node_shrink(mp, mc->mc_ki[mc->mc_top]); + leaf = NODEPTR(mp, mc->mc_ki[mc->mc_top]); + mc->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); + /* fix other sub-DB cursors pointed at fake pages on this page */ + for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) { + if (m2 == mc || m2->mc_snum < mc->mc_snum) continue; + if (!(m2->mc_flags & C_INITIALIZED)) continue; + if (m2->mc_pg[mc->mc_top] == mp) { + if (m2->mc_ki[mc->mc_top] == mc->mc_ki[mc->mc_top]) { + m2->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(leaf); + } else { + MDB_node *n2 = NODEPTR(mp, m2->mc_ki[mc->mc_top]); + if (!(n2->mn_flags & F_SUBDATA)) + m2->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(n2); + } + } + } + } + mc->mc_db->md_entries--; + return rc; + } else { + mc->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED; + } + /* otherwise fall thru and delete the sub-DB */ + } + + if (leaf->mn_flags & F_SUBDATA) { + /* add all the child DB's pages to the free list */ + rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0); + if (rc) + goto fail; + } + } + /* LMDB passes F_SUBDATA in 'flags' to delete a DB record */ + else if ((leaf->mn_flags ^ flags) & F_SUBDATA) { + rc = MDB_INCOMPATIBLE; + goto fail; + } + + /* add overflow pages to free list */ + if (F_ISSET(leaf->mn_flags, F_BIGDATA)) { + MDB_page *omp; + pgno_t pg; + + memcpy(&pg, NODEDATA(leaf), sizeof(pg)); + if ((rc = mdb_page_get(mc->mc_txn, pg, &omp, NULL)) || + (rc = mdb_ovpage_free(mc, omp))) + goto fail; + } + +del_key: + return mdb_cursor_del0(mc); + +fail: + mc->mc_txn->mt_flags |= MDB_TXN_ERROR; + return rc; +} + +/** Allocate and initialize new pages for a database. 
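/* Sketch of the delete entry points implemented above (mdb_cursor_del and,
 * further below, mdb_del). env, dbi, key and delete_key are assumed to be
 * provided by the caller.
 */
#include "lmdb.h"

static int delete_key(MDB_env *env, MDB_dbi dbi, MDB_val *key)
{
    MDB_txn *txn;
    MDB_cursor *cur;
    MDB_val data;
    int rc;

    if ((rc = mdb_txn_begin(env, NULL, 0, &txn)) != MDB_SUCCESS)
        return rc;
    if ((rc = mdb_cursor_open(txn, dbi, &cur)) != MDB_SUCCESS) {
        mdb_txn_abort(txn);
        return rc;
    }
    /* Position on the key, then delete the current item. Passing
     * MDB_NODUPDATA instead of 0 would drop all duplicates of the key. */
    rc = mdb_cursor_get(cur, key, &data, MDB_SET_KEY);
    if (rc == MDB_SUCCESS)
        rc = mdb_cursor_del(cur, 0);
    mdb_cursor_close(cur);
    if (rc == MDB_SUCCESS)
        return mdb_txn_commit(txn);
    mdb_txn_abort(txn);
    return rc;
}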
+ * @param[in] mc a cursor on the database being added to. + * @param[in] flags flags defining what type of page is being allocated. + * @param[in] num the number of pages to allocate. This is usually 1, + * unless allocating overflow pages for a large record. + * @param[out] mp Address of a page, or NULL on failure. + * @return 0 on success, non-zero on failure. + */ +static int +mdb_page_new(MDB_cursor *mc, uint32_t flags, int num, MDB_page **mp) +{ + MDB_page *np; + int rc; + + if ((rc = mdb_page_alloc(mc, num, &np))) + return rc; + DPRINTF(("allocated new mpage %"Z"u, page size %u", + np->mp_pgno, mc->mc_txn->mt_env->me_psize)); + np->mp_flags = flags | P_DIRTY; + np->mp_lower = (PAGEHDRSZ-PAGEBASE); + np->mp_upper = mc->mc_txn->mt_env->me_psize - PAGEBASE; + + if (IS_BRANCH(np)) + mc->mc_db->md_branch_pages++; + else if (IS_LEAF(np)) + mc->mc_db->md_leaf_pages++; + else if (IS_OVERFLOW(np)) { + mc->mc_db->md_overflow_pages += num; + np->mp_pages = num; + } + *mp = np; + + return 0; +} + +/** Calculate the size of a leaf node. + * The size depends on the environment's page size; if a data item + * is too large it will be put onto an overflow page and the node + * size will only include the key and not the data. Sizes are always + * rounded up to an even number of bytes, to guarantee 2-byte alignment + * of the #MDB_node headers. + * @param[in] env The environment handle. + * @param[in] key The key for the node. + * @param[in] data The data for the node. + * @return The number of bytes needed to store the node. + */ +static size_t +mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data) +{ + size_t sz; + + sz = LEAFSIZE(key, data); + if (sz > env->me_nodemax) { + /* put on overflow page */ + sz -= data->mv_size - sizeof(pgno_t); + } + + return EVEN(sz + sizeof(indx_t)); +} + +/** Calculate the size of a branch node. + * The size should depend on the environment's page size but since + * we currently don't support spilling large keys onto overflow + * pages, it's simply the size of the #MDB_node header plus the + * size of the key. Sizes are always rounded up to an even number + * of bytes, to guarantee 2-byte alignment of the #MDB_node headers. + * @param[in] env The environment handle. + * @param[in] key The key for the node. + * @return The number of bytes needed to store the node. + */ +static size_t +mdb_branch_size(MDB_env *env, MDB_val *key) +{ + size_t sz; + + sz = INDXSIZE(key); + if (sz > env->me_nodemax) { + /* put on overflow page */ + /* not implemented */ + /* sz -= key->size - sizeof(pgno_t); */ + } + + return sz + sizeof(indx_t); +} + +/** Add a node to the page pointed to by the cursor. + * @param[in] mc The cursor for this operation. + * @param[in] indx The index on the page where the new node should be added. + * @param[in] key The key for the new node. + * @param[in] data The data for the new node, if any. + * @param[in] pgno The page number, if adding a branch node. + * @param[in] flags Flags for the node. + * @return 0 on success, non-zero on failure. Possible errors are: + *
    + *
+ *	<li>ENOMEM - failed to allocate overflow pages for the node.
+ *	<li>MDB_PAGE_FULL - there is insufficient room in the page. This error + * should never happen since all callers already calculate the + * page's free space before calling this function. + * </ul>
+ */ +static int +mdb_node_add(MDB_cursor *mc, indx_t indx, + MDB_val *key, MDB_val *data, pgno_t pgno, unsigned int flags) +{ + unsigned int i; + size_t node_size = NODESIZE; + ssize_t room; + indx_t ofs; + MDB_node *node; + MDB_page *mp = mc->mc_pg[mc->mc_top]; + MDB_page *ofp = NULL; /* overflow page */ + void *ndata; + DKBUF; + + mdb_cassert(mc, mp->mp_upper >= mp->mp_lower); + + DPRINTF(("add to %s %spage %"Z"u index %i, data size %"Z"u key size %"Z"u [%s]", + IS_LEAF(mp) ? "leaf" : "branch", + IS_SUBP(mp) ? "sub-" : "", + mdb_dbg_pgno(mp), indx, data ? data->mv_size : 0, + key ? key->mv_size : 0, key ? DKEY(key) : "null")); + + if (IS_LEAF2(mp)) { + /* Move higher keys up one slot. */ + int ksize = mc->mc_db->md_pad, dif; + char *ptr = LEAF2KEY(mp, indx, ksize); + dif = NUMKEYS(mp) - indx; + if (dif > 0) + memmove(ptr+ksize, ptr, dif*ksize); + /* insert new key */ + memcpy(ptr, key->mv_data, ksize); + + /* Just using these for counting */ + mp->mp_lower += sizeof(indx_t); + mp->mp_upper -= ksize - sizeof(indx_t); + return MDB_SUCCESS; + } + + room = (ssize_t)SIZELEFT(mp) - (ssize_t)sizeof(indx_t); + if (key != NULL) + node_size += key->mv_size; + if (IS_LEAF(mp)) { + mdb_cassert(mc, key && data); + if (F_ISSET(flags, F_BIGDATA)) { + /* Data already on overflow page. */ + node_size += sizeof(pgno_t); + } else if (node_size + data->mv_size > mc->mc_txn->mt_env->me_nodemax) { + int ovpages = OVPAGES(data->mv_size, mc->mc_txn->mt_env->me_psize); + int rc; + /* Put data on overflow page. */ + DPRINTF(("data size is %"Z"u, node would be %"Z"u, put data on overflow page", + data->mv_size, node_size+data->mv_size)); + node_size = EVEN(node_size + sizeof(pgno_t)); + if ((ssize_t)node_size > room) + goto full; + if ((rc = mdb_page_new(mc, P_OVERFLOW, ovpages, &ofp))) + return rc; + DPRINTF(("allocated overflow page %"Z"u", ofp->mp_pgno)); + flags |= F_BIGDATA; + goto update; + } else { + node_size += data->mv_size; + } + } + node_size = EVEN(node_size); + if ((ssize_t)node_size > room) + goto full; + +update: + /* Move higher pointers up one slot. */ + for (i = NUMKEYS(mp); i > indx; i--) + mp->mp_ptrs[i] = mp->mp_ptrs[i - 1]; + + /* Adjust free space offsets. */ + ofs = mp->mp_upper - node_size; + mdb_cassert(mc, ofs >= mp->mp_lower + sizeof(indx_t)); + mp->mp_ptrs[indx] = ofs; + mp->mp_upper = ofs; + mp->mp_lower += sizeof(indx_t); + + /* Write the node data. */ + node = NODEPTR(mp, indx); + node->mn_ksize = (key == NULL) ? 0 : key->mv_size; + node->mn_flags = flags; + if (IS_LEAF(mp)) + SETDSZ(node,data->mv_size); + else + SETPGNO(node,pgno); + + if (key) + memcpy(NODEKEY(node), key->mv_data, key->mv_size); + + if (IS_LEAF(mp)) { + ndata = NODEDATA(node); + if (ofp == NULL) { + if (F_ISSET(flags, F_BIGDATA)) + memcpy(ndata, data->mv_data, sizeof(pgno_t)); + else if (F_ISSET(flags, MDB_RESERVE)) + data->mv_data = ndata; + else + memcpy(ndata, data->mv_data, data->mv_size); + } else { + memcpy(ndata, &ofp->mp_pgno, sizeof(pgno_t)); + ndata = METADATA(ofp); + if (F_ISSET(flags, MDB_RESERVE)) + data->mv_data = ndata; + else + memcpy(ndata, data->mv_data, data->mv_size); + } + } + + return MDB_SUCCESS; + +full: + DPRINTF(("not enough room in page %"Z"u, got %u ptrs", + mdb_dbg_pgno(mp), NUMKEYS(mp))); + DPRINTF(("upper-lower = %u - %u = %"Z"d", mp->mp_upper,mp->mp_lower,room)); + DPRINTF(("node size = %"Z"u", node_size)); + mc->mc_txn->mt_flags |= MDB_TXN_ERROR; + return MDB_PAGE_FULL; +} + +/** Delete the specified node from a page. + * @param[in] mc Cursor pointing to the node to delete. 
+ * @param[in] ksize The size of a node. Only used if the page is + * part of a #MDB_DUPFIXED database. + */ +static void +mdb_node_del(MDB_cursor *mc, int ksize) +{ + MDB_page *mp = mc->mc_pg[mc->mc_top]; + indx_t indx = mc->mc_ki[mc->mc_top]; + unsigned int sz; + indx_t i, j, numkeys, ptr; + MDB_node *node; + char *base; + + DPRINTF(("delete node %u on %s page %"Z"u", indx, + IS_LEAF(mp) ? "leaf" : "branch", mdb_dbg_pgno(mp))); + numkeys = NUMKEYS(mp); + mdb_cassert(mc, indx < numkeys); + + if (IS_LEAF2(mp)) { + int x = numkeys - 1 - indx; + base = LEAF2KEY(mp, indx, ksize); + if (x) + memmove(base, base + ksize, x * ksize); + mp->mp_lower -= sizeof(indx_t); + mp->mp_upper += ksize - sizeof(indx_t); + return; + } + + node = NODEPTR(mp, indx); + sz = NODESIZE + node->mn_ksize; + if (IS_LEAF(mp)) { + if (F_ISSET(node->mn_flags, F_BIGDATA)) + sz += sizeof(pgno_t); + else + sz += NODEDSZ(node); + } + sz = EVEN(sz); + + ptr = mp->mp_ptrs[indx]; + for (i = j = 0; i < numkeys; i++) { + if (i != indx) { + mp->mp_ptrs[j] = mp->mp_ptrs[i]; + if (mp->mp_ptrs[i] < ptr) + mp->mp_ptrs[j] += sz; + j++; + } + } + + base = (char *)mp + mp->mp_upper + PAGEBASE; + memmove(base + sz, base, ptr - mp->mp_upper); + + mp->mp_lower -= sizeof(indx_t); + mp->mp_upper += sz; +} + +/** Compact the main page after deleting a node on a subpage. + * @param[in] mp The main page to operate on. + * @param[in] indx The index of the subpage on the main page. + */ +static void +mdb_node_shrink(MDB_page *mp, indx_t indx) +{ + MDB_node *node; + MDB_page *sp, *xp; + char *base; + indx_t delta, nsize, len, ptr; + int i; + + node = NODEPTR(mp, indx); + sp = (MDB_page *)NODEDATA(node); + delta = SIZELEFT(sp); + nsize = NODEDSZ(node) - delta; + + /* Prepare to shift upward, set len = length(subpage part to shift) */ + if (IS_LEAF2(sp)) { + len = nsize; + if (nsize & 1) + return; /* do not make the node uneven-sized */ + } else { + xp = (MDB_page *)((char *)sp + delta); /* destination subpage */ + for (i = NUMKEYS(sp); --i >= 0; ) + xp->mp_ptrs[i] = sp->mp_ptrs[i] - delta; + len = PAGEHDRSZ; + } + sp->mp_upper = sp->mp_lower; + COPY_PGNO(sp->mp_pgno, mp->mp_pgno); + SETDSZ(node, nsize); + + /* Shift upward */ + base = (char *)mp + mp->mp_upper + PAGEBASE; + memmove(base + delta, base, (char *)sp + len - base); + + ptr = mp->mp_ptrs[indx]; + for (i = NUMKEYS(mp); --i >= 0; ) { + if (mp->mp_ptrs[i] <= ptr) + mp->mp_ptrs[i] += delta; + } + mp->mp_upper += delta; +} + +/** Initial setup of a sorted-dups cursor. + * Sorted duplicates are implemented as a sub-database for the given key. + * The duplicate data items are actually keys of the sub-database. + * Operations on the duplicate data items are performed using a sub-cursor + * initialized when the sub-database is first accessed. This function does + * the preliminary setup of the sub-cursor, filling in the fields that + * depend only on the parent DB. + * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized. 
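/* What the sorted-dups machinery described above looks like through the
 * public API: a database opened with MDB_DUPSORT keeps several data items
 * per key, and mdb_cursor_count()/MDB_NEXT_DUP walk that per-key
 * sub-database. The database name "events" and dup_demo are only examples,
 * and env is assumed to have been configured with mdb_env_set_maxdbs().
 */
#include "lmdb.h"

static int dup_demo(MDB_env *env)
{
    MDB_txn *txn;
    MDB_dbi dbi;
    MDB_cursor *cur;
    MDB_val key, data;
    size_t ndups;
    int rc;

    if ((rc = mdb_txn_begin(env, NULL, 0, &txn)) != MDB_SUCCESS)
        return rc;
    if ((rc = mdb_dbi_open(txn, "events", MDB_DUPSORT|MDB_CREATE, &dbi)) != MDB_SUCCESS)
        goto out;

    key.mv_size = 2; key.mv_data = "k1";
    data.mv_size = 1; data.mv_data = "a";
    if ((rc = mdb_put(txn, dbi, &key, &data, 0)) != MDB_SUCCESS)
        goto out;
    data.mv_data = "b"; /* second value under the same key */
    if ((rc = mdb_put(txn, dbi, &key, &data, 0)) != MDB_SUCCESS)
        goto out;

    if ((rc = mdb_cursor_open(txn, dbi, &cur)) != MDB_SUCCESS)
        goto out;
    rc = mdb_cursor_get(cur, &key, &data, MDB_SET_KEY);
    if (rc == MDB_SUCCESS)
        rc = mdb_cursor_count(cur, &ndups); /* 2 for a fresh database */
    mdb_cursor_close(cur);
out:
    if (rc == MDB_SUCCESS)
        return mdb_txn_commit(txn);
    mdb_txn_abort(txn);
    return rc;
}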
+ */ +static void +mdb_xcursor_init0(MDB_cursor *mc) +{ + MDB_xcursor *mx = mc->mc_xcursor; + + mx->mx_cursor.mc_xcursor = NULL; + mx->mx_cursor.mc_txn = mc->mc_txn; + mx->mx_cursor.mc_db = &mx->mx_db; + mx->mx_cursor.mc_dbx = &mx->mx_dbx; + mx->mx_cursor.mc_dbi = mc->mc_dbi; + mx->mx_cursor.mc_dbflag = &mx->mx_dbflag; + mx->mx_cursor.mc_snum = 0; + mx->mx_cursor.mc_top = 0; + mx->mx_cursor.mc_flags = C_SUB; + mx->mx_dbx.md_name.mv_size = 0; + mx->mx_dbx.md_name.mv_data = NULL; + mx->mx_dbx.md_cmp = mc->mc_dbx->md_dcmp; + mx->mx_dbx.md_dcmp = NULL; + mx->mx_dbx.md_rel = mc->mc_dbx->md_rel; +} + +/** Final setup of a sorted-dups cursor. + * Sets up the fields that depend on the data from the main cursor. + * @param[in] mc The main cursor whose sorted-dups cursor is to be initialized. + * @param[in] node The data containing the #MDB_db record for the + * sorted-dup database. + */ +static void +mdb_xcursor_init1(MDB_cursor *mc, MDB_node *node) +{ + MDB_xcursor *mx = mc->mc_xcursor; + + if (node->mn_flags & F_SUBDATA) { + memcpy(&mx->mx_db, NODEDATA(node), sizeof(MDB_db)); + mx->mx_cursor.mc_pg[0] = 0; + mx->mx_cursor.mc_snum = 0; + mx->mx_cursor.mc_top = 0; + mx->mx_cursor.mc_flags = C_SUB; + } else { + MDB_page *fp = NODEDATA(node); + mx->mx_db.md_pad = 0; + mx->mx_db.md_flags = 0; + mx->mx_db.md_depth = 1; + mx->mx_db.md_branch_pages = 0; + mx->mx_db.md_leaf_pages = 1; + mx->mx_db.md_overflow_pages = 0; + mx->mx_db.md_entries = NUMKEYS(fp); + COPY_PGNO(mx->mx_db.md_root, fp->mp_pgno); + mx->mx_cursor.mc_snum = 1; + mx->mx_cursor.mc_top = 0; + mx->mx_cursor.mc_flags = C_INITIALIZED|C_SUB; + mx->mx_cursor.mc_pg[0] = fp; + mx->mx_cursor.mc_ki[0] = 0; + if (mc->mc_db->md_flags & MDB_DUPFIXED) { + mx->mx_db.md_flags = MDB_DUPFIXED; + mx->mx_db.md_pad = fp->mp_pad; + if (mc->mc_db->md_flags & MDB_INTEGERDUP) + mx->mx_db.md_flags |= MDB_INTEGERKEY; + } + } + DPRINTF(("Sub-db -%u root page %"Z"u", mx->mx_cursor.mc_dbi, + mx->mx_db.md_root)); + mx->mx_dbflag = DB_VALID|DB_USRVALID|DB_DIRTY; /* DB_DIRTY guides mdb_cursor_touch */ +#if UINT_MAX < SIZE_MAX + if (mx->mx_dbx.md_cmp == mdb_cmp_int && mx->mx_db.md_pad == sizeof(size_t)) + mx->mx_dbx.md_cmp = mdb_cmp_clong; +#endif +} + + +/** Fixup a sorted-dups cursor due to underlying update. + * Sets up some fields that depend on the data from the main cursor. + * Almost the same as init1, but skips initialization steps if the + * xcursor had already been used. + * @param[in] mc The main cursor whose sorted-dups cursor is to be fixed up. + * @param[in] src_mx The xcursor of an up-to-date cursor. + * @param[in] new_dupdata True if converting from a non-#F_DUPDATA item. + */ +static void +mdb_xcursor_init2(MDB_cursor *mc, MDB_xcursor *src_mx, int new_dupdata) +{ + MDB_xcursor *mx = mc->mc_xcursor; + + if (new_dupdata) { + mx->mx_cursor.mc_snum = 1; + mx->mx_cursor.mc_top = 0; + mx->mx_cursor.mc_flags |= C_INITIALIZED; + mx->mx_cursor.mc_ki[0] = 0; + mx->mx_dbflag = DB_VALID|DB_USRVALID|DB_DIRTY; /* DB_DIRTY guides mdb_cursor_touch */ +#if UINT_MAX < SIZE_MAX + mx->mx_dbx.md_cmp = src_mx->mx_dbx.md_cmp; +#endif + } else if (!(mx->mx_cursor.mc_flags & C_INITIALIZED)) { + return; + } + mx->mx_db = src_mx->mx_db; + mx->mx_cursor.mc_pg[0] = src_mx->mx_cursor.mc_pg[0]; + DPRINTF(("Sub-db -%u root page %"Z"u", mx->mx_cursor.mc_dbi, + mx->mx_db.md_root)); +} + +/** Initialize a cursor for a given transaction and database. 
*/ +static void +mdb_cursor_init(MDB_cursor *mc, MDB_txn *txn, MDB_dbi dbi, MDB_xcursor *mx) +{ + mc->mc_next = NULL; + mc->mc_backup = NULL; + mc->mc_dbi = dbi; + mc->mc_txn = txn; + mc->mc_db = &txn->mt_dbs[dbi]; + mc->mc_dbx = &txn->mt_dbxs[dbi]; + mc->mc_dbflag = &txn->mt_dbflags[dbi]; + mc->mc_snum = 0; + mc->mc_top = 0; + mc->mc_pg[0] = 0; + mc->mc_ki[0] = 0; + mc->mc_flags = 0; + if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT) { + mdb_tassert(txn, mx != NULL); + mc->mc_xcursor = mx; + mdb_xcursor_init0(mc); + } else { + mc->mc_xcursor = NULL; + } + if (*mc->mc_dbflag & DB_STALE) { + mdb_page_search(mc, NULL, MDB_PS_ROOTONLY); + } +} + +int +mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **ret) +{ + MDB_cursor *mc; + size_t size = sizeof(MDB_cursor); + + if (!ret || !TXN_DBI_EXIST(txn, dbi, DB_VALID)) + return EINVAL; + + if (txn->mt_flags & MDB_TXN_BLOCKED) + return MDB_BAD_TXN; + + if (dbi == FREE_DBI && !F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) + return EINVAL; + + if (txn->mt_dbs[dbi].md_flags & MDB_DUPSORT) + size += sizeof(MDB_xcursor); + + if ((mc = malloc(size)) != NULL) { + mdb_cursor_init(mc, txn, dbi, (MDB_xcursor *)(mc + 1)); + if (txn->mt_cursors) { + mc->mc_next = txn->mt_cursors[dbi]; + txn->mt_cursors[dbi] = mc; + mc->mc_flags |= C_UNTRACK; + } + } else { + return ENOMEM; + } + + *ret = mc; + + return MDB_SUCCESS; +} + +int +mdb_cursor_renew(MDB_txn *txn, MDB_cursor *mc) +{ + if (!mc || !TXN_DBI_EXIST(txn, mc->mc_dbi, DB_VALID)) + return EINVAL; + + if ((mc->mc_flags & C_UNTRACK) || txn->mt_cursors) + return EINVAL; + + if (txn->mt_flags & MDB_TXN_BLOCKED) + return MDB_BAD_TXN; + + mdb_cursor_init(mc, txn, mc->mc_dbi, mc->mc_xcursor); + return MDB_SUCCESS; +} + +/* Return the count of duplicate data items for the current key */ +int +mdb_cursor_count(MDB_cursor *mc, size_t *countp) +{ + MDB_node *leaf; + + if (mc == NULL || countp == NULL) + return EINVAL; + + if (mc->mc_xcursor == NULL) + return MDB_INCOMPATIBLE; + + if (mc->mc_txn->mt_flags & MDB_TXN_BLOCKED) + return MDB_BAD_TXN; + + if (!(mc->mc_flags & C_INITIALIZED)) + return EINVAL; + + if (!mc->mc_snum || (mc->mc_flags & C_EOF)) + return MDB_NOTFOUND; + + leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]); + if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) { + *countp = 1; + } else { + if (!(mc->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) + return EINVAL; + + *countp = mc->mc_xcursor->mx_db.md_entries; + } + return MDB_SUCCESS; +} + +void +mdb_cursor_close(MDB_cursor *mc) +{ + if (mc && !mc->mc_backup) { + /* remove from txn, if tracked */ + if ((mc->mc_flags & C_UNTRACK) && mc->mc_txn->mt_cursors) { + MDB_cursor **prev = &mc->mc_txn->mt_cursors[mc->mc_dbi]; + while (*prev && *prev != mc) prev = &(*prev)->mc_next; + if (*prev == mc) + *prev = mc->mc_next; + } + free(mc); + } +} + +MDB_txn * +mdb_cursor_txn(MDB_cursor *mc) +{ + if (!mc) return NULL; + return mc->mc_txn; +} + +MDB_dbi +mdb_cursor_dbi(MDB_cursor *mc) +{ + return mc->mc_dbi; +} + +/** Replace the key for a branch node with a new key. + * @param[in] mc Cursor pointing to the node to operate on. + * @param[in] key The new key to use. + * @return 0 on success, non-zero on failure. 
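/* Lifecycle sketch for mdb_cursor_open()/mdb_cursor_renew()/mdb_cursor_close()
 * above: a cursor created in a read-only transaction survives that
 * transaction and can be re-bound with mdb_cursor_renew() instead of being
 * reallocated. env, dbi and reuse_cursor are assumed placeholders.
 */
#include "lmdb.h"

static int reuse_cursor(MDB_env *env, MDB_dbi dbi)
{
    MDB_txn *txn;
    MDB_cursor *cur = NULL;
    MDB_val key, data;
    int rc, pass;

    for (pass = 0; pass < 2; pass++) {
        if ((rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)) != MDB_SUCCESS)
            break;
        rc = cur ? mdb_cursor_renew(txn, cur)
                 : mdb_cursor_open(txn, dbi, &cur);
        if (rc == MDB_SUCCESS)
            rc = mdb_cursor_get(cur, &key, &data, MDB_FIRST);
        mdb_txn_abort(txn); /* read-only txns leave the cursor reusable */
        if (rc != MDB_SUCCESS && rc != MDB_NOTFOUND)
            break;
    }
    mdb_cursor_close(cur);
    return rc;
}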
+ */ +static int +mdb_update_key(MDB_cursor *mc, MDB_val *key) +{ + MDB_page *mp; + MDB_node *node; + char *base; + size_t len; + int delta, ksize, oksize; + indx_t ptr, i, numkeys, indx; + DKBUF; + + indx = mc->mc_ki[mc->mc_top]; + mp = mc->mc_pg[mc->mc_top]; + node = NODEPTR(mp, indx); + ptr = mp->mp_ptrs[indx]; +#if MDB_DEBUG + { + MDB_val k2; + char kbuf2[DKBUF_MAXKEYSIZE*2+1]; + k2.mv_data = NODEKEY(node); + k2.mv_size = node->mn_ksize; + DPRINTF(("update key %u (ofs %u) [%s] to [%s] on page %"Z"u", + indx, ptr, + mdb_dkey(&k2, kbuf2), + DKEY(key), + mp->mp_pgno)); + } +#endif + + /* Sizes must be 2-byte aligned. */ + ksize = EVEN(key->mv_size); + oksize = EVEN(node->mn_ksize); + delta = ksize - oksize; + + /* Shift node contents if EVEN(key length) changed. */ + if (delta) { + if (delta > 0 && SIZELEFT(mp) < delta) { + pgno_t pgno; + /* not enough space left, do a delete and split */ + DPRINTF(("Not enough room, delta = %d, splitting...", delta)); + pgno = NODEPGNO(node); + mdb_node_del(mc, 0); + return mdb_page_split(mc, key, NULL, pgno, MDB_SPLIT_REPLACE); + } + + numkeys = NUMKEYS(mp); + for (i = 0; i < numkeys; i++) { + if (mp->mp_ptrs[i] <= ptr) + mp->mp_ptrs[i] -= delta; + } + + base = (char *)mp + mp->mp_upper + PAGEBASE; + len = ptr - mp->mp_upper + NODESIZE; + memmove(base - delta, base, len); + mp->mp_upper -= delta; + + node = NODEPTR(mp, indx); + } + + /* But even if no shift was needed, update ksize */ + if (node->mn_ksize != key->mv_size) + node->mn_ksize = key->mv_size; + + if (key->mv_size) + memcpy(NODEKEY(node), key->mv_data, key->mv_size); + + return MDB_SUCCESS; +} + +static void +mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst); + +/** Perform \b act while tracking temporary cursor \b mn */ +#define WITH_CURSOR_TRACKING(mn, act) do { \ + MDB_cursor dummy, *tracked, **tp = &(mn).mc_txn->mt_cursors[mn.mc_dbi]; \ + if ((mn).mc_flags & C_SUB) { \ + dummy.mc_flags = C_INITIALIZED; \ + dummy.mc_xcursor = (MDB_xcursor *)&(mn); \ + tracked = &dummy; \ + } else { \ + tracked = &(mn); \ + } \ + tracked->mc_next = *tp; \ + *tp = tracked; \ + { act; } \ + *tp = tracked->mc_next; \ +} while (0) + +/** Move a node from csrc to cdst. + */ +static int +mdb_node_move(MDB_cursor *csrc, MDB_cursor *cdst, int fromleft) +{ + MDB_node *srcnode; + MDB_val key, data; + pgno_t srcpg; + MDB_cursor mn; + int rc; + unsigned short flags; + + DKBUF; + + /* Mark src and dst as dirty. 
*/ + if ((rc = mdb_page_touch(csrc)) || + (rc = mdb_page_touch(cdst))) + return rc; + + if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { + key.mv_size = csrc->mc_db->md_pad; + key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top], key.mv_size); + data.mv_size = 0; + data.mv_data = NULL; + srcpg = 0; + flags = 0; + } else { + srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], csrc->mc_ki[csrc->mc_top]); + mdb_cassert(csrc, !((size_t)srcnode & 1)); + srcpg = NODEPGNO(srcnode); + flags = srcnode->mn_flags; + if (csrc->mc_ki[csrc->mc_top] == 0 && IS_BRANCH(csrc->mc_pg[csrc->mc_top])) { + unsigned int snum = csrc->mc_snum; + MDB_node *s2; + /* must find the lowest key below src */ + rc = mdb_page_search_lowest(csrc); + if (rc) + return rc; + if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { + key.mv_size = csrc->mc_db->md_pad; + key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size); + } else { + s2 = NODEPTR(csrc->mc_pg[csrc->mc_top], 0); + key.mv_size = NODEKSZ(s2); + key.mv_data = NODEKEY(s2); + } + csrc->mc_snum = snum--; + csrc->mc_top = snum; + } else { + key.mv_size = NODEKSZ(srcnode); + key.mv_data = NODEKEY(srcnode); + } + data.mv_size = NODEDSZ(srcnode); + data.mv_data = NODEDATA(srcnode); + } + mn.mc_xcursor = NULL; + if (IS_BRANCH(cdst->mc_pg[cdst->mc_top]) && cdst->mc_ki[cdst->mc_top] == 0) { + unsigned int snum = cdst->mc_snum; + MDB_node *s2; + MDB_val bkey; + /* must find the lowest key below dst */ + mdb_cursor_copy(cdst, &mn); + rc = mdb_page_search_lowest(&mn); + if (rc) + return rc; + if (IS_LEAF2(mn.mc_pg[mn.mc_top])) { + bkey.mv_size = mn.mc_db->md_pad; + bkey.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, bkey.mv_size); + } else { + s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0); + bkey.mv_size = NODEKSZ(s2); + bkey.mv_data = NODEKEY(s2); + } + mn.mc_snum = snum--; + mn.mc_top = snum; + mn.mc_ki[snum] = 0; + rc = mdb_update_key(&mn, &bkey); + if (rc) + return rc; + } + + DPRINTF(("moving %s node %u [%s] on page %"Z"u to node %u on page %"Z"u", + IS_LEAF(csrc->mc_pg[csrc->mc_top]) ? "leaf" : "branch", + csrc->mc_ki[csrc->mc_top], + DKEY(&key), + csrc->mc_pg[csrc->mc_top]->mp_pgno, + cdst->mc_ki[cdst->mc_top], cdst->mc_pg[cdst->mc_top]->mp_pgno)); + + /* Add the node to the destination page. + */ + rc = mdb_node_add(cdst, cdst->mc_ki[cdst->mc_top], &key, &data, srcpg, flags); + if (rc != MDB_SUCCESS) + return rc; + + /* Delete the node from the source page. 
+ */ + mdb_node_del(csrc, key.mv_size); + + { + /* Adjust other cursors pointing to mp */ + MDB_cursor *m2, *m3; + MDB_dbi dbi = csrc->mc_dbi; + MDB_page *mpd, *mps; + + mps = csrc->mc_pg[csrc->mc_top]; + /* If we're adding on the left, bump others up */ + if (fromleft) { + mpd = cdst->mc_pg[csrc->mc_top]; + for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { + if (csrc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top) + continue; + if (m3 != cdst && + m3->mc_pg[csrc->mc_top] == mpd && + m3->mc_ki[csrc->mc_top] >= cdst->mc_ki[csrc->mc_top]) { + m3->mc_ki[csrc->mc_top]++; + } + if (m3 !=csrc && + m3->mc_pg[csrc->mc_top] == mps && + m3->mc_ki[csrc->mc_top] == csrc->mc_ki[csrc->mc_top]) { + m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top]; + m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top]; + m3->mc_ki[csrc->mc_top-1]++; + } + if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) && + IS_LEAF(mps)) { + MDB_node *node = NODEPTR(m3->mc_pg[csrc->mc_top], m3->mc_ki[csrc->mc_top]); + if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) + m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node); + } + } + } else + /* Adding on the right, bump others down */ + { + for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { + if (csrc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (m3 == csrc) continue; + if (!(m3->mc_flags & C_INITIALIZED) || m3->mc_top < csrc->mc_top) + continue; + if (m3->mc_pg[csrc->mc_top] == mps) { + if (!m3->mc_ki[csrc->mc_top]) { + m3->mc_pg[csrc->mc_top] = cdst->mc_pg[cdst->mc_top]; + m3->mc_ki[csrc->mc_top] = cdst->mc_ki[cdst->mc_top]; + m3->mc_ki[csrc->mc_top-1]--; + } else { + m3->mc_ki[csrc->mc_top]--; + } + if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) && + IS_LEAF(mps)) { + MDB_node *node = NODEPTR(m3->mc_pg[csrc->mc_top], m3->mc_ki[csrc->mc_top]); + if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) + m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node); + } + } + } + } + } + + /* Update the parent separators. 
+ */ + if (csrc->mc_ki[csrc->mc_top] == 0) { + if (csrc->mc_ki[csrc->mc_top-1] != 0) { + if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { + key.mv_data = LEAF2KEY(csrc->mc_pg[csrc->mc_top], 0, key.mv_size); + } else { + srcnode = NODEPTR(csrc->mc_pg[csrc->mc_top], 0); + key.mv_size = NODEKSZ(srcnode); + key.mv_data = NODEKEY(srcnode); + } + DPRINTF(("update separator for source page %"Z"u to [%s]", + csrc->mc_pg[csrc->mc_top]->mp_pgno, DKEY(&key))); + mdb_cursor_copy(csrc, &mn); + mn.mc_snum--; + mn.mc_top--; + /* We want mdb_rebalance to find mn when doing fixups */ + WITH_CURSOR_TRACKING(mn, + rc = mdb_update_key(&mn, &key)); + if (rc) + return rc; + } + if (IS_BRANCH(csrc->mc_pg[csrc->mc_top])) { + MDB_val nullkey; + indx_t ix = csrc->mc_ki[csrc->mc_top]; + nullkey.mv_size = 0; + csrc->mc_ki[csrc->mc_top] = 0; + rc = mdb_update_key(csrc, &nullkey); + csrc->mc_ki[csrc->mc_top] = ix; + mdb_cassert(csrc, rc == MDB_SUCCESS); + } + } + + if (cdst->mc_ki[cdst->mc_top] == 0) { + if (cdst->mc_ki[cdst->mc_top-1] != 0) { + if (IS_LEAF2(csrc->mc_pg[csrc->mc_top])) { + key.mv_data = LEAF2KEY(cdst->mc_pg[cdst->mc_top], 0, key.mv_size); + } else { + srcnode = NODEPTR(cdst->mc_pg[cdst->mc_top], 0); + key.mv_size = NODEKSZ(srcnode); + key.mv_data = NODEKEY(srcnode); + } + DPRINTF(("update separator for destination page %"Z"u to [%s]", + cdst->mc_pg[cdst->mc_top]->mp_pgno, DKEY(&key))); + mdb_cursor_copy(cdst, &mn); + mn.mc_snum--; + mn.mc_top--; + /* We want mdb_rebalance to find mn when doing fixups */ + WITH_CURSOR_TRACKING(mn, + rc = mdb_update_key(&mn, &key)); + if (rc) + return rc; + } + if (IS_BRANCH(cdst->mc_pg[cdst->mc_top])) { + MDB_val nullkey; + indx_t ix = cdst->mc_ki[cdst->mc_top]; + nullkey.mv_size = 0; + cdst->mc_ki[cdst->mc_top] = 0; + rc = mdb_update_key(cdst, &nullkey); + cdst->mc_ki[cdst->mc_top] = ix; + mdb_cassert(cdst, rc == MDB_SUCCESS); + } + } + + return MDB_SUCCESS; +} + +/** Merge one page into another. + * The nodes from the page pointed to by \b csrc will + * be copied to the page pointed to by \b cdst and then + * the \b csrc page will be freed. + * @param[in] csrc Cursor pointing to the source page. + * @param[in] cdst Cursor pointing to the destination page. + * @return 0 on success, non-zero on failure. + */ +static int +mdb_page_merge(MDB_cursor *csrc, MDB_cursor *cdst) +{ + MDB_page *psrc, *pdst; + MDB_node *srcnode; + MDB_val key, data; + unsigned nkeys; + int rc; + indx_t i, j; + + psrc = csrc->mc_pg[csrc->mc_top]; + pdst = cdst->mc_pg[cdst->mc_top]; + + DPRINTF(("merging page %"Z"u into %"Z"u", psrc->mp_pgno, pdst->mp_pgno)); + + mdb_cassert(csrc, csrc->mc_snum > 1); /* can't merge root page */ + mdb_cassert(csrc, cdst->mc_snum > 1); + + /* Mark dst as dirty. */ + if ((rc = mdb_page_touch(cdst))) + return rc; + + /* get dst page again now that we've touched it. */ + pdst = cdst->mc_pg[cdst->mc_top]; + + /* Move all nodes from src to dst. 
+ */ + j = nkeys = NUMKEYS(pdst); + if (IS_LEAF2(psrc)) { + key.mv_size = csrc->mc_db->md_pad; + key.mv_data = METADATA(psrc); + for (i = 0; i < NUMKEYS(psrc); i++, j++) { + rc = mdb_node_add(cdst, j, &key, NULL, 0, 0); + if (rc != MDB_SUCCESS) + return rc; + key.mv_data = (char *)key.mv_data + key.mv_size; + } + } else { + for (i = 0; i < NUMKEYS(psrc); i++, j++) { + srcnode = NODEPTR(psrc, i); + if (i == 0 && IS_BRANCH(psrc)) { + MDB_cursor mn; + MDB_node *s2; + mdb_cursor_copy(csrc, &mn); + mn.mc_xcursor = NULL; + /* must find the lowest key below src */ + rc = mdb_page_search_lowest(&mn); + if (rc) + return rc; + if (IS_LEAF2(mn.mc_pg[mn.mc_top])) { + key.mv_size = mn.mc_db->md_pad; + key.mv_data = LEAF2KEY(mn.mc_pg[mn.mc_top], 0, key.mv_size); + } else { + s2 = NODEPTR(mn.mc_pg[mn.mc_top], 0); + key.mv_size = NODEKSZ(s2); + key.mv_data = NODEKEY(s2); + } + } else { + key.mv_size = srcnode->mn_ksize; + key.mv_data = NODEKEY(srcnode); + } + + data.mv_size = NODEDSZ(srcnode); + data.mv_data = NODEDATA(srcnode); + rc = mdb_node_add(cdst, j, &key, &data, NODEPGNO(srcnode), srcnode->mn_flags); + if (rc != MDB_SUCCESS) + return rc; + } + } + + DPRINTF(("dst page %"Z"u now has %u keys (%.1f%% filled)", + pdst->mp_pgno, NUMKEYS(pdst), + (float)PAGEFILL(cdst->mc_txn->mt_env, pdst) / 10)); + + /* Unlink the src page from parent and add to free list. + */ + csrc->mc_top--; + mdb_node_del(csrc, 0); + if (csrc->mc_ki[csrc->mc_top] == 0) { + key.mv_size = 0; + rc = mdb_update_key(csrc, &key); + if (rc) { + csrc->mc_top++; + return rc; + } + } + csrc->mc_top++; + + psrc = csrc->mc_pg[csrc->mc_top]; + /* If not operating on FreeDB, allow this page to be reused + * in this txn. Otherwise just add to free list. + */ + rc = mdb_page_loose(csrc, psrc); + if (rc) + return rc; + if (IS_LEAF(psrc)) + csrc->mc_db->md_leaf_pages--; + else + csrc->mc_db->md_branch_pages--; + { + /* Adjust other cursors pointing to mp */ + MDB_cursor *m2, *m3; + MDB_dbi dbi = csrc->mc_dbi; + unsigned int top = csrc->mc_top; + + for (m2 = csrc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { + if (csrc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (m3 == csrc) continue; + if (m3->mc_snum < csrc->mc_snum) continue; + if (m3->mc_pg[top] == psrc) { + m3->mc_pg[top] = pdst; + m3->mc_ki[top] += nkeys; + m3->mc_ki[top-1] = cdst->mc_ki[top-1]; + } else if (m3->mc_pg[top-1] == csrc->mc_pg[top-1] && + m3->mc_ki[top-1] > csrc->mc_ki[top-1]) { + m3->mc_ki[top-1]--; + } + if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) && + IS_LEAF(psrc)) { + MDB_node *node = NODEPTR(m3->mc_pg[top], m3->mc_ki[top]); + if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) + m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node); + } + } + } + { + unsigned int snum = cdst->mc_snum; + uint16_t depth = cdst->mc_db->md_depth; + mdb_cursor_pop(cdst); + rc = mdb_rebalance(cdst); + /* Did the tree height change? */ + if (depth != cdst->mc_db->md_depth) + snum += cdst->mc_db->md_depth - depth; + cdst->mc_snum = snum; + cdst->mc_top = snum-1; + } + return rc; +} + +/** Copy the contents of a cursor. + * @param[in] csrc The cursor to copy from. + * @param[out] cdst The cursor to copy to. 
+ */ +static void +mdb_cursor_copy(const MDB_cursor *csrc, MDB_cursor *cdst) +{ + unsigned int i; + + cdst->mc_txn = csrc->mc_txn; + cdst->mc_dbi = csrc->mc_dbi; + cdst->mc_db = csrc->mc_db; + cdst->mc_dbx = csrc->mc_dbx; + cdst->mc_snum = csrc->mc_snum; + cdst->mc_top = csrc->mc_top; + cdst->mc_flags = csrc->mc_flags; + + for (i=0; i<csrc->mc_snum; i++) { + cdst->mc_pg[i] = csrc->mc_pg[i]; + cdst->mc_ki[i] = csrc->mc_ki[i]; + } +} + +/** Rebalance the tree after a delete operation. + * @param[in] mc Cursor pointing to the page where rebalancing + * should begin. + * @return 0 on success, non-zero on failure. + */ +static int +mdb_rebalance(MDB_cursor *mc) +{ + MDB_node *node; + int rc, fromleft; + unsigned int ptop, minkeys, thresh; + MDB_cursor mn; + indx_t oldki; + + if (IS_BRANCH(mc->mc_pg[mc->mc_top])) { + minkeys = 2; + thresh = 1; + } else { + minkeys = 1; + thresh = FILL_THRESHOLD; + } + DPRINTF(("rebalancing %s page %"Z"u (has %u keys, %.1f%% full)", + IS_LEAF(mc->mc_pg[mc->mc_top]) ? "leaf" : "branch", + mdb_dbg_pgno(mc->mc_pg[mc->mc_top]), NUMKEYS(mc->mc_pg[mc->mc_top]), + (float)PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) / 10)); + + if (PAGEFILL(mc->mc_txn->mt_env, mc->mc_pg[mc->mc_top]) >= thresh && + NUMKEYS(mc->mc_pg[mc->mc_top]) >= minkeys) { + DPRINTF(("no need to rebalance page %"Z"u, above fill threshold", + mdb_dbg_pgno(mc->mc_pg[mc->mc_top]))); + return MDB_SUCCESS; + } + + if (mc->mc_snum < 2) { + MDB_page *mp = mc->mc_pg[0]; + if (IS_SUBP(mp)) { + DPUTS("Can't rebalance a subpage, ignoring"); + return MDB_SUCCESS; + } + if (NUMKEYS(mp) == 0) { + DPUTS("tree is completely empty"); + mc->mc_db->md_root = P_INVALID; + mc->mc_db->md_depth = 0; + mc->mc_db->md_leaf_pages = 0; + rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno); + if (rc) + return rc; + /* Adjust cursors pointing to mp */ + mc->mc_snum = 0; + mc->mc_top = 0; + mc->mc_flags &= ~C_INITIALIZED; + { + MDB_cursor *m2, *m3; + MDB_dbi dbi = mc->mc_dbi; + + for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { + if (mc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (!(m3->mc_flags & C_INITIALIZED) || (m3->mc_snum < mc->mc_snum)) + continue; + if (m3->mc_pg[0] == mp) { + m3->mc_snum = 0; + m3->mc_top = 0; + m3->mc_flags &= ~C_INITIALIZED; + } + } + } + } else if (IS_BRANCH(mp) && NUMKEYS(mp) == 1) { + int i; + DPUTS("collapsing root page!"); + rc = mdb_midl_append(&mc->mc_txn->mt_free_pgs, mp->mp_pgno); + if (rc) + return rc; + mc->mc_db->md_root = NODEPGNO(NODEPTR(mp, 0)); + rc = mdb_page_get(mc->mc_txn,mc->mc_db->md_root,&mc->mc_pg[0],NULL); + if (rc) + return rc; + mc->mc_db->md_depth--; + mc->mc_db->md_branch_pages--; + mc->mc_ki[0] = mc->mc_ki[1]; + for (i = 1; i<mc->mc_db->md_depth; i++) { + mc->mc_pg[i] = mc->mc_pg[i+1]; + mc->mc_ki[i] = mc->mc_ki[i+1]; + } + { + /* Adjust other cursors pointing to mp */ + MDB_cursor *m2, *m3; + MDB_dbi dbi = mc->mc_dbi; + + for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { + if (mc->mc_flags & C_SUB) + m3 = &m2->mc_xcursor->mx_cursor; + else + m3 = m2; + if (m3 == mc) continue; + if (!(m3->mc_flags & C_INITIALIZED)) + continue; + if (m3->mc_pg[0] == mp) { + for (i=0; i<mc->mc_db->md_depth; i++) { + m3->mc_pg[i] = m3->mc_pg[i+1]; + m3->mc_ki[i] = m3->mc_ki[i+1]; + } + m3->mc_snum--; + m3->mc_top--; + } + } + } + } else + DPUTS("root page doesn't need rebalancing"); + return MDB_SUCCESS; + } + + /* The parent (branch page) must have at least 2 pointers, + * otherwise the tree is invalid.
+ */ + ptop = mc->mc_top-1; + mdb_cassert(mc, NUMKEYS(mc->mc_pg[ptop]) > 1); + + /* Leaf page fill factor is below the threshold. + * Try to move keys from left or right neighbor, or + * merge with a neighbor page. + */ + + /* Find neighbors. + */ + mdb_cursor_copy(mc, &mn); + mn.mc_xcursor = NULL; + + oldki = mc->mc_ki[mc->mc_top]; + if (mc->mc_ki[ptop] == 0) { + /* We're the leftmost leaf in our parent. + */ + DPUTS("reading right neighbor"); + mn.mc_ki[ptop]++; + node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]); + rc = mdb_page_get(mc->mc_txn,NODEPGNO(node),&mn.mc_pg[mn.mc_top],NULL); + if (rc) + return rc; + mn.mc_ki[mn.mc_top] = 0; + mc->mc_ki[mc->mc_top] = NUMKEYS(mc->mc_pg[mc->mc_top]); + fromleft = 0; + } else { + /* There is at least one neighbor to the left. + */ + DPUTS("reading left neighbor"); + mn.mc_ki[ptop]--; + node = NODEPTR(mc->mc_pg[ptop], mn.mc_ki[ptop]); + rc = mdb_page_get(mc->mc_txn,NODEPGNO(node),&mn.mc_pg[mn.mc_top],NULL); + if (rc) + return rc; + mn.mc_ki[mn.mc_top] = NUMKEYS(mn.mc_pg[mn.mc_top]) - 1; + mc->mc_ki[mc->mc_top] = 0; + fromleft = 1; + } + + DPRINTF(("found neighbor page %"Z"u (%u keys, %.1f%% full)", + mn.mc_pg[mn.mc_top]->mp_pgno, NUMKEYS(mn.mc_pg[mn.mc_top]), + (float)PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) / 10)); + + /* If the neighbor page is above threshold and has enough keys, + * move one key from it. Otherwise we should try to merge them. + * (A branch page must never have less than 2 keys.) + */ + if (PAGEFILL(mc->mc_txn->mt_env, mn.mc_pg[mn.mc_top]) >= thresh && NUMKEYS(mn.mc_pg[mn.mc_top]) > minkeys) { + rc = mdb_node_move(&mn, mc, fromleft); + if (fromleft) { + /* if we inserted on left, bump position up */ + oldki++; + } + } else { + if (!fromleft) { + rc = mdb_page_merge(&mn, mc); + } else { + oldki += NUMKEYS(mn.mc_pg[mn.mc_top]); + mn.mc_ki[mn.mc_top] += mc->mc_ki[mn.mc_top] + 1; + /* We want mdb_rebalance to find mn when doing fixups */ + WITH_CURSOR_TRACKING(mn, + rc = mdb_page_merge(mc, &mn)); + mdb_cursor_copy(&mn, mc); + } + mc->mc_flags &= ~C_EOF; + } + mc->mc_ki[mc->mc_top] = oldki; + return rc; +} + +/** Complete a delete operation started by #mdb_cursor_del(). */ +static int +mdb_cursor_del0(MDB_cursor *mc) +{ + int rc; + MDB_page *mp; + indx_t ki; + unsigned int nkeys; + MDB_cursor *m2, *m3; + MDB_dbi dbi = mc->mc_dbi; + + ki = mc->mc_ki[mc->mc_top]; + mp = mc->mc_pg[mc->mc_top]; + mdb_node_del(mc, mc->mc_db->md_pad); + mc->mc_db->md_entries--; + { + /* Adjust other cursors pointing to mp */ + for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) { + m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2; + if (! (m2->mc_flags & m3->mc_flags & C_INITIALIZED)) + continue; + if (m3 == mc || m3->mc_snum < mc->mc_snum) + continue; + if (m3->mc_pg[mc->mc_top] == mp) { + if (m3->mc_ki[mc->mc_top] == ki) { + m3->mc_flags |= C_DEL; + if (mc->mc_db->md_flags & MDB_DUPSORT) + m3->mc_xcursor->mx_cursor.mc_flags &= ~C_INITIALIZED; + } else if (m3->mc_ki[mc->mc_top] > ki) { + m3->mc_ki[mc->mc_top]--; + } + if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED)) { + MDB_node *node = NODEPTR(m3->mc_pg[mc->mc_top], m3->mc_ki[mc->mc_top]); + if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA) + m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node); + } + } + } + } + rc = mdb_rebalance(mc); + + if (rc == MDB_SUCCESS) { + /* DB is totally empty now, just bail out. + * Other cursors adjustments were already done + * by mdb_rebalance and aren't needed here. 
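/* Small application-side sketch of the bookkeeping mdb_rebalance() maintains:
 * after deletes, mdb_stat() reflects the reduced tree depth and page counts.
 * env, dbi and print_shape are assumed placeholders.
 */
#include <stdio.h>
#include "lmdb.h"

static int print_shape(MDB_env *env, MDB_dbi dbi)
{
    MDB_txn *txn;
    MDB_stat st;
    int rc;

    if ((rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)) != MDB_SUCCESS)
        return rc;
    if ((rc = mdb_stat(txn, dbi, &st)) == MDB_SUCCESS)
        printf("depth=%u branch=%zu leaf=%zu overflow=%zu entries=%zu\n",
               st.ms_depth, st.ms_branch_pages, st.ms_leaf_pages,
               st.ms_overflow_pages, st.ms_entries);
    mdb_txn_abort(txn);
    return rc;
}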
+ */ + if (!mc->mc_snum) + return rc; + + mp = mc->mc_pg[mc->mc_top]; + nkeys = NUMKEYS(mp); + + /* Adjust other cursors pointing to mp */ + for (m2 = mc->mc_txn->mt_cursors[dbi]; !rc && m2; m2=m2->mc_next) { + m3 = (mc->mc_flags & C_SUB) ? &m2->mc_xcursor->mx_cursor : m2; + if (! (m2->mc_flags & m3->mc_flags & C_INITIALIZED)) + continue; + if (m3->mc_snum < mc->mc_snum) + continue; + if (m3->mc_pg[mc->mc_top] == mp) { + /* if m3 points past last node in page, find next sibling */ + if (m3->mc_ki[mc->mc_top] >= nkeys) { + rc = mdb_cursor_sibling(m3, 1); + if (rc == MDB_NOTFOUND) { + m3->mc_flags |= C_EOF; + rc = MDB_SUCCESS; + } + } + } + } + mc->mc_flags |= C_DEL; + } + + if (rc) + mc->mc_txn->mt_flags |= MDB_TXN_ERROR; + return rc; +} + +int +mdb_del(MDB_txn *txn, MDB_dbi dbi, + MDB_val *key, MDB_val *data) +{ + if (!key || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) + return EINVAL; + + if (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) + return (txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; + + if (!F_ISSET(txn->mt_dbs[dbi].md_flags, MDB_DUPSORT)) { + /* must ignore any data */ + data = NULL; + } + + return mdb_del0(txn, dbi, key, data, 0); +} + +static int +mdb_del0(MDB_txn *txn, MDB_dbi dbi, + MDB_val *key, MDB_val *data, unsigned flags) +{ + MDB_cursor mc; + MDB_xcursor mx; + MDB_cursor_op op; + MDB_val rdata, *xdata; + int rc, exact = 0; + DKBUF; + + DPRINTF(("====> delete db %u key [%s]", dbi, DKEY(key))); + + mdb_cursor_init(&mc, txn, dbi, &mx); + + if (data) { + op = MDB_GET_BOTH; + rdata = *data; + xdata = &rdata; + } else { + op = MDB_SET; + xdata = NULL; + flags |= MDB_NODUPDATA; + } + rc = mdb_cursor_set(&mc, key, xdata, op, &exact); + if (rc == 0) { + /* let mdb_page_split know about this cursor if needed: + * delete will trigger a rebalance; if it needs to move + * a node from one page to another, it will have to + * update the parent's separator key(s). If the new sepkey + * is larger than the current one, the parent page may + * run out of space, triggering a split. We need this + * cursor to be consistent until the end of the rebalance. + */ + mc.mc_flags |= C_UNTRACK; + mc.mc_next = txn->mt_cursors[dbi]; + txn->mt_cursors[dbi] = &mc; + rc = mdb_cursor_del(&mc, flags); + txn->mt_cursors[dbi] = mc.mc_next; + } + return rc; +} + +/** Split a page and insert a new node. + * @param[in,out] mc Cursor pointing to the page and desired insertion index. + * The cursor will be updated to point to the actual page and index where + * the node got inserted after the split. + * @param[in] newkey The key for the newly inserted node. + * @param[in] newdata The data for the newly inserted node. + * @param[in] newpgno The page number, if the new node is a branch node. + * @param[in] nflags The #NODE_ADD_FLAGS for the new node. + * @return 0 on success, non-zero on failure. + */ +static int +mdb_page_split(MDB_cursor *mc, MDB_val *newkey, MDB_val *newdata, pgno_t newpgno, + unsigned int nflags) +{ + unsigned int flags; + int rc = MDB_SUCCESS, new_root = 0, did_split = 0; + indx_t newindx; + pgno_t pgno = 0; + int i, j, split_indx, nkeys, pmax; + MDB_env *env = mc->mc_txn->mt_env; + MDB_node *node; + MDB_val sepkey, rkey, xdata, *rdata = &xdata; + MDB_page *copy = NULL; + MDB_page *mp, *rp, *pp; + int ptop; + MDB_cursor mn; + DKBUF; + + mp = mc->mc_pg[mc->mc_top]; + newindx = mc->mc_ki[mc->mc_top]; + nkeys = NUMKEYS(mp); + + DPRINTF(("-----> splitting %s page %"Z"u and adding [%s] at index %i/%i", + IS_LEAF(mp) ? 
"leaf" : "branch", mp->mp_pgno, + DKEY(newkey), mc->mc_ki[mc->mc_top], nkeys)); + + /* Create a right sibling. */ + if ((rc = mdb_page_new(mc, mp->mp_flags, 1, &rp))) + return rc; + rp->mp_pad = mp->mp_pad; + DPRINTF(("new right sibling: page %"Z"u", rp->mp_pgno)); + + /* Usually when splitting the root page, the cursor + * height is 1. But when called from mdb_update_key, + * the cursor height may be greater because it walks + * up the stack while finding the branch slot to update. + */ + if (mc->mc_top < 1) { + if ((rc = mdb_page_new(mc, P_BRANCH, 1, &pp))) + goto done; + /* shift current top to make room for new parent */ + for (i=mc->mc_snum; i>0; i--) { + mc->mc_pg[i] = mc->mc_pg[i-1]; + mc->mc_ki[i] = mc->mc_ki[i-1]; + } + mc->mc_pg[0] = pp; + mc->mc_ki[0] = 0; + mc->mc_db->md_root = pp->mp_pgno; + DPRINTF(("root split! new root = %"Z"u", pp->mp_pgno)); + new_root = mc->mc_db->md_depth++; + + /* Add left (implicit) pointer. */ + if ((rc = mdb_node_add(mc, 0, NULL, NULL, mp->mp_pgno, 0)) != MDB_SUCCESS) { + /* undo the pre-push */ + mc->mc_pg[0] = mc->mc_pg[1]; + mc->mc_ki[0] = mc->mc_ki[1]; + mc->mc_db->md_root = mp->mp_pgno; + mc->mc_db->md_depth--; + goto done; + } + mc->mc_snum++; + mc->mc_top++; + ptop = 0; + } else { + ptop = mc->mc_top-1; + DPRINTF(("parent branch page is %"Z"u", mc->mc_pg[ptop]->mp_pgno)); + } + + mdb_cursor_copy(mc, &mn); + mn.mc_xcursor = NULL; + mn.mc_pg[mn.mc_top] = rp; + mn.mc_ki[ptop] = mc->mc_ki[ptop]+1; + + if (nflags & MDB_APPEND) { + mn.mc_ki[mn.mc_top] = 0; + sepkey = *newkey; + split_indx = newindx; + nkeys = 0; + } else { + + split_indx = (nkeys+1) / 2; + + if (IS_LEAF2(rp)) { + char *split, *ins; + int x; + unsigned int lsize, rsize, ksize; + /* Move half of the keys to the right sibling */ + x = mc->mc_ki[mc->mc_top] - split_indx; + ksize = mc->mc_db->md_pad; + split = LEAF2KEY(mp, split_indx, ksize); + rsize = (nkeys - split_indx) * ksize; + lsize = (nkeys - split_indx) * sizeof(indx_t); + mp->mp_lower -= lsize; + rp->mp_lower += lsize; + mp->mp_upper += rsize - lsize; + rp->mp_upper -= rsize - lsize; + sepkey.mv_size = ksize; + if (newindx == split_indx) { + sepkey.mv_data = newkey->mv_data; + } else { + sepkey.mv_data = split; + } + if (x<0) { + ins = LEAF2KEY(mp, mc->mc_ki[mc->mc_top], ksize); + memcpy(rp->mp_ptrs, split, rsize); + sepkey.mv_data = rp->mp_ptrs; + memmove(ins+ksize, ins, (split_indx - mc->mc_ki[mc->mc_top]) * ksize); + memcpy(ins, newkey->mv_data, ksize); + mp->mp_lower += sizeof(indx_t); + mp->mp_upper -= ksize - sizeof(indx_t); + } else { + if (x) + memcpy(rp->mp_ptrs, split, x * ksize); + ins = LEAF2KEY(rp, x, ksize); + memcpy(ins, newkey->mv_data, ksize); + memcpy(ins+ksize, split + x * ksize, rsize - x * ksize); + rp->mp_lower += sizeof(indx_t); + rp->mp_upper -= ksize - sizeof(indx_t); + mc->mc_ki[mc->mc_top] = x; + } + } else { + int psize, nsize, k; + /* Maximum free space in an empty page */ + pmax = env->me_psize - PAGEHDRSZ; + if (IS_LEAF(mp)) + nsize = mdb_leaf_size(env, newkey, newdata); + else + nsize = mdb_branch_size(env, newkey); + nsize = EVEN(nsize); + + /* grab a page to hold a temporary copy */ + copy = mdb_page_malloc(mc->mc_txn, 1); + if (copy == NULL) { + rc = ENOMEM; + goto done; + } + copy->mp_pgno = mp->mp_pgno; + copy->mp_flags = mp->mp_flags; + copy->mp_lower = (PAGEHDRSZ-PAGEBASE); + copy->mp_upper = env->me_psize - PAGEBASE; + + /* prepare to insert */ + for (i=0, j=0; imp_ptrs[j++] = 0; + } + copy->mp_ptrs[j++] = mp->mp_ptrs[i]; + } + + /* When items are relatively large the split point needs + 
	 * to be checked, because being off-by-one will make the
+	 * difference between success or failure in mdb_node_add.
+	 *
+	 * It's also relevant if a page happens to be laid out
+	 * such that one half of its nodes are all "small" and
+	 * the other half of its nodes are "large." If the new
+	 * item is also "large" and falls on the half with
+	 * "large" nodes, it also may not fit.
+	 *
+	 * As a final tweak, if the new item goes on the last
+	 * spot on the page (and thus, onto the new page), bias
+	 * the split so the new page is emptier than the old page.
+	 * This yields better packing during sequential inserts.
+	 */
+			if (nkeys < 20 || nsize > pmax/16 || newindx >= nkeys) {
+				/* Find split point */
+				psize = 0;
+				if (newindx <= split_indx || newindx >= nkeys) {
+					i = 0; j = 1;
+					k = newindx >= nkeys ? nkeys : split_indx+1+IS_LEAF(mp);
+				} else {
+					i = nkeys; j = -1;
+					k = split_indx-1;
+				}
+				for (; i!=k; i+=j) {
+					if (i == newindx) {
+						psize += nsize;
+						node = NULL;
+					} else {
+						node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE);
+						psize += NODESIZE + NODEKSZ(node) + sizeof(indx_t);
+						if (IS_LEAF(mp)) {
+							if (F_ISSET(node->mn_flags, F_BIGDATA))
+								psize += sizeof(pgno_t);
+							else
+								psize += NODEDSZ(node);
+						}
+						psize = EVEN(psize);
+					}
+					if (psize > pmax || i == k-j) {
+						split_indx = i + (j<0);
+						break;
+					}
+				}
+			}
+			if (split_indx == newindx) {
+				sepkey.mv_size = newkey->mv_size;
+				sepkey.mv_data = newkey->mv_data;
+			} else {
+				node = (MDB_node *)((char *)mp + copy->mp_ptrs[split_indx] + PAGEBASE);
+				sepkey.mv_size = node->mn_ksize;
+				sepkey.mv_data = NODEKEY(node);
+			}
+		}
+	}
+
+	DPRINTF(("separator is %d [%s]", split_indx, DKEY(&sepkey)));
+
+	/* Copy separator key to the parent.
+	 */
+	if (SIZELEFT(mn.mc_pg[ptop]) < mdb_branch_size(env, &sepkey)) {
+		int snum = mc->mc_snum;
+		mn.mc_snum--;
+		mn.mc_top--;
+		did_split = 1;
+		/* We want other splits to find mn when doing fixups */
+		WITH_CURSOR_TRACKING(mn,
+			rc = mdb_page_split(&mn, &sepkey, NULL, rp->mp_pgno, 0));
+		if (rc)
+			goto done;
+
+		/* root split? */
+		if (mc->mc_snum > snum) {
+			ptop++;
+		}
+		/* Right page might now have changed parent.
+		 * Check if left page also changed parent.
+		 */
+		if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
+		    mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
+			for (i=0; i<ptop; i++) {
+				mc->mc_pg[i] = mn.mc_pg[i];
+				mc->mc_ki[i] = mn.mc_ki[i];
+			}
+			mc->mc_pg[ptop] = mn.mc_pg[ptop];
+			if (mn.mc_ki[ptop]) {
+				mc->mc_ki[ptop] = mn.mc_ki[ptop] - 1;
+			} else {
+				/* find right page's left sibling */
+				mc->mc_ki[ptop] = mn.mc_ki[ptop];
+				mdb_cursor_sibling(mc, 0);
+			}
+		}
+	} else {
+		mn.mc_top--;
+		rc = mdb_node_add(&mn, mn.mc_ki[ptop], &sepkey, NULL, rp->mp_pgno, 0);
+		mn.mc_top++;
+	}
+	if (rc != MDB_SUCCESS) {
+		goto done;
+	}
+	if (nflags & MDB_APPEND) {
+		mc->mc_pg[mc->mc_top] = rp;
+		mc->mc_ki[mc->mc_top] = 0;
+		rc = mdb_node_add(mc, 0, newkey, newdata, newpgno, nflags);
+		if (rc)
+			goto done;
+		for (i=0; i<mc->mc_top; i++)
+			mc->mc_ki[i] = mn.mc_ki[i];
+	} else if (!IS_LEAF2(mp)) {
+		/* Move nodes */
+		mc->mc_pg[mc->mc_top] = rp;
+		i = split_indx;
+		j = 0;
+		do {
+			if (i == newindx) {
+				rkey.mv_data = newkey->mv_data;
+				rkey.mv_size = newkey->mv_size;
+				if (IS_LEAF(mp)) {
+					rdata = newdata;
+				} else
+					pgno = newpgno;
+				flags = nflags;
+				/* Update index for the new key. */
+				mc->mc_ki[mc->mc_top] = j;
+			} else {
+				node = (MDB_node *)((char *)mp + copy->mp_ptrs[i] + PAGEBASE);
+				rkey.mv_data = NODEKEY(node);
+				rkey.mv_size = node->mn_ksize;
+				if (IS_LEAF(mp)) {
+					xdata.mv_data = NODEDATA(node);
+					xdata.mv_size = NODEDSZ(node);
+					rdata = &xdata;
+				} else
+					pgno = NODEPGNO(node);
+				flags = node->mn_flags;
+			}
+
+			if (!IS_LEAF(mp) && j == 0) {
+				/* First branch index doesn't need key data. */
+				rkey.mv_size = 0;
+			}
+
+			rc = mdb_node_add(mc, j, &rkey, rdata, pgno, flags);
+			if (rc)
+				goto done;
+			if (i == nkeys) {
+				i = 0;
+				j = 0;
+				mc->mc_pg[mc->mc_top] = copy;
+			} else {
+				i++;
+				j++;
+			}
+		} while (i != split_indx);
+
+		nkeys = NUMKEYS(copy);
+		for (i=0; i<nkeys; i++)
+			mp->mp_ptrs[i] = copy->mp_ptrs[i];
+		mp->mp_lower = copy->mp_lower;
+		mp->mp_upper = copy->mp_upper;
+		memcpy(NODEPTR(mp, nkeys-1), NODEPTR(copy, nkeys-1),
+			env->me_psize - copy->mp_upper - PAGEBASE);
+
+		/* reset back to original page */
+		if (newindx < split_indx) {
+			mc->mc_pg[mc->mc_top] = mp;
+		} else {
+			mc->mc_pg[mc->mc_top] = rp;
+			mc->mc_ki[ptop]++;
+			/* Make sure mc_ki is still valid.
+			 */
+			if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
+				mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
+				for (i=0; i<=ptop; i++) {
+					mc->mc_pg[i] = mn.mc_pg[i];
+					mc->mc_ki[i] = mn.mc_ki[i];
+				}
+			}
+		}
+		if (nflags & MDB_RESERVE) {
+			node = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
+			if (!(node->mn_flags & F_BIGDATA))
+				newdata->mv_data = NODEDATA(node);
+		}
+	} else {
+		if (newindx >= split_indx) {
+			mc->mc_pg[mc->mc_top] = rp;
+			mc->mc_ki[ptop]++;
+			/* Make sure mc_ki is still valid.
+			 */
+			if (mn.mc_pg[ptop] != mc->mc_pg[ptop] &&
+				mc->mc_ki[ptop] >= NUMKEYS(mc->mc_pg[ptop])) {
+				for (i=0; i<=ptop; i++) {
+					mc->mc_pg[i] = mn.mc_pg[i];
+					mc->mc_ki[i] = mn.mc_ki[i];
+				}
+			}
+		}
+	}
+
+	{
+		/* Adjust other cursors pointing to mp */
+		MDB_cursor *m2, *m3;
+		MDB_dbi dbi = mc->mc_dbi;
+		nkeys = NUMKEYS(mp);
+
+		for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
+			if (mc->mc_flags & C_SUB)
+				m3 = &m2->mc_xcursor->mx_cursor;
+			else
+				m3 = m2;
+			if (m3 == mc)
+				continue;
+			if (!(m2->mc_flags & m3->mc_flags & C_INITIALIZED))
+				continue;
+			if (new_root) {
+				int k;
+				/* sub cursors may be on different DB */
+				if (m3->mc_pg[0] != mp)
+					continue;
+				/* root split */
+				for (k=new_root; k>=0; k--) {
+					m3->mc_ki[k+1] = m3->mc_ki[k];
+					m3->mc_pg[k+1] = m3->mc_pg[k];
+				}
+				if (m3->mc_ki[0] >= nkeys) {
+					m3->mc_ki[0] = 1;
+				} else {
+					m3->mc_ki[0] = 0;
+				}
+				m3->mc_pg[0] = mc->mc_pg[0];
+				m3->mc_snum++;
+				m3->mc_top++;
+			}
+			if (m3->mc_top >= mc->mc_top && m3->mc_pg[mc->mc_top] == mp) {
+				if (m3->mc_ki[mc->mc_top] >= newindx && !(nflags & MDB_SPLIT_REPLACE))
+					m3->mc_ki[mc->mc_top]++;
+				if (m3->mc_ki[mc->mc_top] >= nkeys) {
+					m3->mc_pg[mc->mc_top] = rp;
+					m3->mc_ki[mc->mc_top] -= nkeys;
+					for (i=0; i<mc->mc_top; i++) {
+						m3->mc_ki[i] = mn.mc_ki[i];
+						m3->mc_pg[i] = mn.mc_pg[i];
+					}
+				}
+			} else if (!did_split && m3->mc_top >= ptop && m3->mc_pg[ptop] == mc->mc_pg[ptop] &&
+				m3->mc_ki[ptop] >= mc->mc_ki[ptop]) {
+				m3->mc_ki[ptop]++;
+			}
+			if (m3->mc_xcursor && (m3->mc_xcursor->mx_cursor.mc_flags & C_INITIALIZED) &&
+				IS_LEAF(mp)) {
+				MDB_node *node = NODEPTR(m3->mc_pg[mc->mc_top], m3->mc_ki[mc->mc_top]);
+				if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA)
+					m3->mc_xcursor->mx_cursor.mc_pg[0] = NODEDATA(node);
+			}
+		}
+	}
+	DPRINTF(("mp left: %d, rp left: %d", SIZELEFT(mp), SIZELEFT(rp)));
+
+done:
+	if (copy)	/* tmp page */
+		mdb_page_free(env, copy);
+	if (rc)
+		mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
+	return rc;
+} + +int +mdb_put(MDB_txn *txn, MDB_dbi dbi, + MDB_val *key, MDB_val *data, unsigned int flags) +{ + MDB_cursor mc; + MDB_xcursor mx; + int rc; + + if (!key || !data || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) + return EINVAL; + + if (flags & ~(MDB_NOOVERWRITE|MDB_NODUPDATA|MDB_RESERVE|MDB_APPEND|MDB_APPENDDUP)) + return EINVAL; + + if (txn->mt_flags & (MDB_TXN_RDONLY|MDB_TXN_BLOCKED)) + return (txn->mt_flags & MDB_TXN_RDONLY) ? EACCES : MDB_BAD_TXN; + + mdb_cursor_init(&mc, txn, dbi, &mx); + mc.mc_next = txn->mt_cursors[dbi]; + txn->mt_cursors[dbi] = &mc; + rc = mdb_cursor_put(&mc, key, data, flags); + txn->mt_cursors[dbi] = mc.mc_next; + return rc; +} + +#ifndef MDB_WBUF +#define MDB_WBUF (1024*1024) +#endif + + /** State needed for a compacting copy. */ +typedef struct mdb_copy { + pthread_mutex_t mc_mutex; + pthread_cond_t mc_cond; + char *mc_wbuf[2]; + char *mc_over[2]; + MDB_env *mc_env; + MDB_txn *mc_txn; + int mc_wlen[2]; + int mc_olen[2]; + pgno_t mc_next_pgno; + HANDLE mc_fd; + int mc_status; + volatile int mc_new; + int mc_toggle; + +} mdb_copy; + + /** Dedicated writer thread for compacting copy. */ +static THREAD_RET ESECT CALL_CONV +mdb_env_copythr(void *arg) +{ + mdb_copy *my = arg; + char *ptr; + int toggle = 0, wsize, rc; +#ifdef _WIN32 + DWORD len; +#define DO_WRITE(rc, fd, ptr, w2, len) rc = WriteFile(fd, ptr, w2, &len, NULL) +#else + int len; +#define DO_WRITE(rc, fd, ptr, w2, len) len = write(fd, ptr, w2); rc = (len >= 0) +#endif + + pthread_mutex_lock(&my->mc_mutex); + my->mc_new = 0; + pthread_cond_signal(&my->mc_cond); + for(;;) { + while (!my->mc_new) + pthread_cond_wait(&my->mc_cond, &my->mc_mutex); + if (my->mc_new < 0) { + my->mc_new = 0; + break; + } + my->mc_new = 0; + wsize = my->mc_wlen[toggle]; + ptr = my->mc_wbuf[toggle]; +again: + while (wsize > 0) { + DO_WRITE(rc, my->mc_fd, ptr, wsize, len); + if (!rc) { + rc = ErrCode(); + break; + } else if (len > 0) { + rc = MDB_SUCCESS; + ptr += len; + wsize -= len; + continue; + } else { + rc = EIO; + break; + } + } + if (rc) { + my->mc_status = rc; + break; + } + /* If there's an overflow page tail, write it too */ + if (my->mc_olen[toggle]) { + wsize = my->mc_olen[toggle]; + ptr = my->mc_over[toggle]; + my->mc_olen[toggle] = 0; + goto again; + } + my->mc_wlen[toggle] = 0; + toggle ^= 1; + pthread_cond_signal(&my->mc_cond); + } + pthread_cond_signal(&my->mc_cond); + pthread_mutex_unlock(&my->mc_mutex); + return (THREAD_RET)0; +#undef DO_WRITE +} + + /** Tell the writer thread there's a buffer ready to write */ +static int ESECT +mdb_env_cthr_toggle(mdb_copy *my, int st) +{ + int toggle = my->mc_toggle ^ 1; + pthread_mutex_lock(&my->mc_mutex); + if (my->mc_status) { + pthread_mutex_unlock(&my->mc_mutex); + return my->mc_status; + } + while (my->mc_new == 1) + pthread_cond_wait(&my->mc_cond, &my->mc_mutex); + my->mc_new = st; + my->mc_toggle = toggle; + pthread_cond_signal(&my->mc_cond); + pthread_mutex_unlock(&my->mc_mutex); + return 0; +} + + /** Depth-first tree traversal for compacting copy. 
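+ * Used by #mdb_env_copyfd1(): first called with the main DB's root page,
+ * then re-entered for each sub-database node (F_SUBDATA) found in a leaf.
+ * @param[in] my The compacting-copy state shared with the writer thread.
+ * @param[in,out] pg Root page number of the tree to copy; on success it is
+ * updated to that root's new page number in the destination file.
+ * @param[in] flags F_DUPDATA when descending into a DUPSORT sub-database.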
+ */
+static int ESECT
+mdb_env_cwalk(mdb_copy *my, pgno_t *pg, int flags)
+{
+	MDB_cursor mc;
+	MDB_txn *txn = my->mc_txn;
+	MDB_node *ni;
+	MDB_page *mo, *mp, *leaf;
+	char *buf, *ptr;
+	int rc, toggle;
+	unsigned int i;
+
+	/* Empty DB, nothing to do */
+	if (*pg == P_INVALID)
+		return MDB_SUCCESS;
+
+	mc.mc_snum = 1;
+	mc.mc_top = 0;
+	mc.mc_txn = txn;
+
+	rc = mdb_page_get(my->mc_txn, *pg, &mc.mc_pg[0], NULL);
+	if (rc)
+		return rc;
+	rc = mdb_page_search_root(&mc, NULL, MDB_PS_FIRST);
+	if (rc)
+		return rc;
+
+	/* Make cursor pages writable */
+	buf = ptr = malloc(my->mc_env->me_psize * mc.mc_snum);
+	if (buf == NULL)
+		return ENOMEM;
+
+	for (i=0; i<mc.mc_snum; i++) {
+		mdb_page_copy((MDB_page *)ptr, mc.mc_pg[i], my->mc_env->me_psize);
+		mc.mc_pg[i] = (MDB_page *)ptr;
+		ptr += my->mc_env->me_psize;
+	}
+
+	/* This is writable space for a leaf page. Usually not needed. */
+	leaf = (MDB_page *)ptr;
+
+	toggle = my->mc_toggle;
+	while (mc.mc_snum > 0) {
+		unsigned n;
+		mp = mc.mc_pg[mc.mc_top];
+		n = NUMKEYS(mp);
+
+		if (IS_LEAF(mp)) {
+			if (!IS_LEAF2(mp) && !(flags & F_DUPDATA)) {
+				for (i=0; i<n; i++) {
+					ni = NODEPTR(mp, i);
+					if (ni->mn_flags & F_BIGDATA) {
+						MDB_page *omp;
+						pgno_t pg;
+
+						/* Need writable leaf */
+						if (mp != leaf) {
+							mc.mc_pg[mc.mc_top] = leaf;
+							mdb_page_copy(leaf, mp, my->mc_env->me_psize);
+							mp = leaf;
+							ni = NODEPTR(mp, i);
+						}
+
+						memcpy(&pg, NODEDATA(ni), sizeof(pg));
+						rc = mdb_page_get(txn, pg, &omp, NULL);
+						if (rc)
+							goto done;
+						if (my->mc_wlen[toggle] >= MDB_WBUF) {
+							rc = mdb_env_cthr_toggle(my, 1);
+							if (rc)
+								goto done;
+							toggle = my->mc_toggle;
+						}
+						mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]);
+						memcpy(mo, omp, my->mc_env->me_psize);
+						mo->mp_pgno = my->mc_next_pgno;
+						my->mc_next_pgno += omp->mp_pages;
+						my->mc_wlen[toggle] += my->mc_env->me_psize;
+						if (omp->mp_pages > 1) {
+							my->mc_olen[toggle] = my->mc_env->me_psize * (omp->mp_pages - 1);
+							my->mc_over[toggle] = (char *)omp + my->mc_env->me_psize;
+							rc = mdb_env_cthr_toggle(my, 1);
+							if (rc)
+								goto done;
+							toggle = my->mc_toggle;
+						}
+						memcpy(NODEDATA(ni), &mo->mp_pgno, sizeof(pgno_t));
+					} else if (ni->mn_flags & F_SUBDATA) {
+						MDB_db db;
+
+						/* Need writable leaf */
+						if (mp != leaf) {
+							mc.mc_pg[mc.mc_top] = leaf;
+							mdb_page_copy(leaf, mp, my->mc_env->me_psize);
+							mp = leaf;
+							ni = NODEPTR(mp, i);
+						}
+
+						memcpy(&db, NODEDATA(ni), sizeof(db));
+						my->mc_toggle = toggle;
+						rc = mdb_env_cwalk(my, &db.md_root, ni->mn_flags & F_DUPDATA);
+						if (rc)
+							goto done;
+						toggle = my->mc_toggle;
+						memcpy(NODEDATA(ni), &db, sizeof(db));
+					}
+				}
+			}
+		} else {
+			mc.mc_ki[mc.mc_top]++;
+			if (mc.mc_ki[mc.mc_top] < n) {
+				pgno_t pg;
+again:
+				ni = NODEPTR(mp, mc.mc_ki[mc.mc_top]);
+				pg = NODEPGNO(ni);
+				rc = mdb_page_get(txn, pg, &mp, NULL);
+				if (rc)
+					goto done;
+				mc.mc_top++;
+				mc.mc_snum++;
+				mc.mc_ki[mc.mc_top] = 0;
+				if (IS_BRANCH(mp)) {
+					/* Whenever we advance to a sibling branch page,
+					 * we must proceed all the way down to its first leaf.
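+					 * Pages are emitted on the way back up the stack, so each
+					 * child is assigned its new page number (and SETPGNO patches
+					 * it into the parent's copy) before the parent itself is written.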
+ */ + mdb_page_copy(mc.mc_pg[mc.mc_top], mp, my->mc_env->me_psize); + goto again; + } else + mc.mc_pg[mc.mc_top] = mp; + continue; + } + } + if (my->mc_wlen[toggle] >= MDB_WBUF) { + rc = mdb_env_cthr_toggle(my, 1); + if (rc) + goto done; + toggle = my->mc_toggle; + } + mo = (MDB_page *)(my->mc_wbuf[toggle] + my->mc_wlen[toggle]); + mdb_page_copy(mo, mp, my->mc_env->me_psize); + mo->mp_pgno = my->mc_next_pgno++; + my->mc_wlen[toggle] += my->mc_env->me_psize; + if (mc.mc_top) { + /* Update parent if there is one */ + ni = NODEPTR(mc.mc_pg[mc.mc_top-1], mc.mc_ki[mc.mc_top-1]); + SETPGNO(ni, mo->mp_pgno); + mdb_cursor_pop(&mc); + } else { + /* Otherwise we're done */ + *pg = mo->mp_pgno; + break; + } + } +done: + free(buf); + return rc; +} + + /** Copy environment with compaction. */ +static int ESECT +mdb_env_copyfd1(MDB_env *env, HANDLE fd) +{ + MDB_meta *mm; + MDB_page *mp; + mdb_copy my; + MDB_txn *txn = NULL; + pthread_t thr; + int rc; + +#ifdef _WIN32 + my.mc_mutex = CreateMutex(NULL, FALSE, NULL); + my.mc_cond = CreateEvent(NULL, FALSE, FALSE, NULL); + my.mc_wbuf[0] = _aligned_malloc(MDB_WBUF*2, env->me_os_psize); + if (my.mc_wbuf[0] == NULL) + return errno; +#else + pthread_mutex_init(&my.mc_mutex, NULL); + pthread_cond_init(&my.mc_cond, NULL); +#ifdef HAVE_MEMALIGN + my.mc_wbuf[0] = memalign(env->me_os_psize, MDB_WBUF*2); + if (my.mc_wbuf[0] == NULL) + return errno; +#else + rc = posix_memalign((void **)&my.mc_wbuf[0], env->me_os_psize, MDB_WBUF*2); + if (rc) + return rc; +#endif +#endif + memset(my.mc_wbuf[0], 0, MDB_WBUF*2); + my.mc_wbuf[1] = my.mc_wbuf[0] + MDB_WBUF; + my.mc_wlen[0] = 0; + my.mc_wlen[1] = 0; + my.mc_olen[0] = 0; + my.mc_olen[1] = 0; + my.mc_next_pgno = NUM_METAS; + my.mc_status = 0; + my.mc_new = 1; + my.mc_toggle = 0; + my.mc_env = env; + my.mc_fd = fd; + THREAD_CREATE(thr, mdb_env_copythr, &my); + + rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); + if (rc) + return rc; + + mp = (MDB_page *)my.mc_wbuf[0]; + memset(mp, 0, NUM_METAS * env->me_psize); + mp->mp_pgno = 0; + mp->mp_flags = P_META; + mm = (MDB_meta *)METADATA(mp); + mdb_env_init_meta0(env, mm); + mm->mm_address = env->me_metas[0]->mm_address; + + mp = (MDB_page *)(my.mc_wbuf[0] + env->me_psize); + mp->mp_pgno = 1; + mp->mp_flags = P_META; + *(MDB_meta *)METADATA(mp) = *mm; + mm = (MDB_meta *)METADATA(mp); + + /* Count the number of free pages, subtract from lastpg to find + * number of active pages + */ + { + MDB_ID freecount = 0; + MDB_cursor mc; + MDB_val key, data; + mdb_cursor_init(&mc, txn, FREE_DBI, NULL); + while ((rc = mdb_cursor_get(&mc, &key, &data, MDB_NEXT)) == 0) + freecount += *(MDB_ID *)data.mv_data; + freecount += txn->mt_dbs[FREE_DBI].md_branch_pages + + txn->mt_dbs[FREE_DBI].md_leaf_pages + + txn->mt_dbs[FREE_DBI].md_overflow_pages; + + /* Set metapage 1 */ + mm->mm_last_pg = txn->mt_next_pgno - freecount - 1; + mm->mm_dbs[MAIN_DBI] = txn->mt_dbs[MAIN_DBI]; + if (mm->mm_last_pg > NUM_METAS-1) { + mm->mm_dbs[MAIN_DBI].md_root = mm->mm_last_pg; + mm->mm_txnid = 1; + } else { + mm->mm_dbs[MAIN_DBI].md_root = P_INVALID; + } + } + my.mc_wlen[0] = env->me_psize * NUM_METAS; + my.mc_txn = txn; + pthread_mutex_lock(&my.mc_mutex); + while(my.mc_new) + pthread_cond_wait(&my.mc_cond, &my.mc_mutex); + pthread_mutex_unlock(&my.mc_mutex); + rc = mdb_env_cwalk(&my, &txn->mt_dbs[MAIN_DBI].md_root, 0); + if (rc == MDB_SUCCESS && my.mc_wlen[my.mc_toggle]) + rc = mdb_env_cthr_toggle(&my, 1); + mdb_env_cthr_toggle(&my, -1); + pthread_mutex_lock(&my.mc_mutex); + while(my.mc_new) + 
pthread_cond_wait(&my.mc_cond, &my.mc_mutex); + pthread_mutex_unlock(&my.mc_mutex); + THREAD_FINISH(thr); + + mdb_txn_abort(txn); +#ifdef _WIN32 + CloseHandle(my.mc_cond); + CloseHandle(my.mc_mutex); + _aligned_free(my.mc_wbuf[0]); +#else + pthread_cond_destroy(&my.mc_cond); + pthread_mutex_destroy(&my.mc_mutex); + free(my.mc_wbuf[0]); +#endif + return rc; +} + + /** Copy environment as-is. */ +static int ESECT +mdb_env_copyfd0(MDB_env *env, HANDLE fd) +{ + MDB_txn *txn = NULL; + mdb_mutexref_t wmutex = NULL; + int rc; + size_t wsize; + char *ptr; +#ifdef _WIN32 + DWORD len, w2; +#define DO_WRITE(rc, fd, ptr, w2, len) rc = WriteFile(fd, ptr, w2, &len, NULL) +#else + ssize_t len; + size_t w2; +#define DO_WRITE(rc, fd, ptr, w2, len) len = write(fd, ptr, w2); rc = (len >= 0) +#endif + + /* Do the lock/unlock of the reader mutex before starting the + * write txn. Otherwise other read txns could block writers. + */ + rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); + if (rc) + return rc; + + if (env->me_txns) { + /* We must start the actual read txn after blocking writers */ + mdb_txn_end(txn, MDB_END_RESET_TMP); + + /* Temporarily block writers until we snapshot the meta pages */ + wmutex = env->me_wmutex; + if (LOCK_MUTEX(rc, env, wmutex)) + goto leave; + + rc = mdb_txn_renew0(txn); + if (rc) { + UNLOCK_MUTEX(wmutex); + goto leave; + } + } + + wsize = env->me_psize * NUM_METAS; + ptr = env->me_map; + w2 = wsize; + while (w2 > 0) { + DO_WRITE(rc, fd, ptr, w2, len); + if (!rc) { + rc = ErrCode(); + break; + } else if (len > 0) { + rc = MDB_SUCCESS; + ptr += len; + w2 -= len; + continue; + } else { + /* Non-blocking or async handles are not supported */ + rc = EIO; + break; + } + } + if (wmutex) + UNLOCK_MUTEX(wmutex); + + if (rc) + goto leave; + + w2 = txn->mt_next_pgno * env->me_psize; + { + size_t fsize = 0; + if ((rc = mdb_fsize(env->me_fd, &fsize))) + goto leave; + if (w2 > fsize) + w2 = fsize; + } + wsize = w2 - wsize; + while (wsize > 0) { + if (wsize > MAX_WRITE) + w2 = MAX_WRITE; + else + w2 = wsize; + DO_WRITE(rc, fd, ptr, w2, len); + if (!rc) { + rc = ErrCode(); + break; + } else if (len > 0) { + rc = MDB_SUCCESS; + ptr += len; + wsize -= len; + continue; + } else { + rc = EIO; + break; + } + } + +leave: + mdb_txn_abort(txn); + return rc; +} + +int ESECT +mdb_env_copyfd2(MDB_env *env, HANDLE fd, unsigned int flags) +{ + if (flags & MDB_CP_COMPACT) + return mdb_env_copyfd1(env, fd); + else + return mdb_env_copyfd0(env, fd); +} + +int ESECT +mdb_env_copyfd(MDB_env *env, HANDLE fd) +{ + return mdb_env_copyfd2(env, fd, 0); +} + +int ESECT +mdb_env_copy2(MDB_env *env, const char *path, unsigned int flags) +{ + int rc, len; + char *lpath; + HANDLE newfd = INVALID_HANDLE_VALUE; +#ifdef _WIN32 + wchar_t *wpath; +#endif + + if (env->me_flags & MDB_NOSUBDIR) { + lpath = (char *)path; + } else { + len = strlen(path); + len += sizeof(DATANAME); + lpath = malloc(len); + if (!lpath) + return ENOMEM; + sprintf(lpath, "%s" DATANAME, path); + } + + /* The destination path must exist, but the destination file must not. + * We don't want the OS to cache the writes, since the source data is + * already in the OS cache. 
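+	 * A typical call from application code, assuming an already-created,
+	 * empty backup directory (the path is illustrative only):
+	 *
+	 *   rc = mdb_env_copy2(env, "/var/backups/mydb", MDB_CP_COMPACT);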
+ */ +#ifdef _WIN32 + utf8_to_utf16(lpath, -1, &wpath, NULL); + newfd = CreateFileW(wpath, GENERIC_WRITE, 0, NULL, CREATE_NEW, + FILE_FLAG_NO_BUFFERING|FILE_FLAG_WRITE_THROUGH, NULL); + free(wpath); +#else + newfd = open(lpath, O_WRONLY|O_CREAT|O_EXCL, 0666); +#endif + if (newfd == INVALID_HANDLE_VALUE) { + rc = ErrCode(); + goto leave; + } + + if (env->me_psize >= env->me_os_psize) { +#ifdef O_DIRECT + /* Set O_DIRECT if the file system supports it */ + if ((rc = fcntl(newfd, F_GETFL)) != -1) + (void) fcntl(newfd, F_SETFL, rc | O_DIRECT); +#endif +#ifdef F_NOCACHE /* __APPLE__ */ + rc = fcntl(newfd, F_NOCACHE, 1); + if (rc) { + rc = ErrCode(); + goto leave; + } +#endif + } + + rc = mdb_env_copyfd2(env, newfd, flags); + +leave: + if (!(env->me_flags & MDB_NOSUBDIR)) + free(lpath); + if (newfd != INVALID_HANDLE_VALUE) + if (close(newfd) < 0 && rc == MDB_SUCCESS) + rc = ErrCode(); + + return rc; +} + +int ESECT +mdb_env_copy(MDB_env *env, const char *path) +{ + return mdb_env_copy2(env, path, 0); +} + +int ESECT +mdb_env_set_flags(MDB_env *env, unsigned int flag, int onoff) +{ + if (flag & ~CHANGEABLE) + return EINVAL; + if (onoff) + env->me_flags |= flag; + else + env->me_flags &= ~flag; + return MDB_SUCCESS; +} + +int ESECT +mdb_env_get_flags(MDB_env *env, unsigned int *arg) +{ + if (!env || !arg) + return EINVAL; + + *arg = env->me_flags & (CHANGEABLE|CHANGELESS); + return MDB_SUCCESS; +} + +int ESECT +mdb_env_set_userctx(MDB_env *env, void *ctx) +{ + if (!env) + return EINVAL; + env->me_userctx = ctx; + return MDB_SUCCESS; +} + +void * ESECT +mdb_env_get_userctx(MDB_env *env) +{ + return env ? env->me_userctx : NULL; +} + +int ESECT +mdb_env_set_assert(MDB_env *env, MDB_assert_func *func) +{ + if (!env) + return EINVAL; +#ifndef NDEBUG + env->me_assert_func = func; +#endif + return MDB_SUCCESS; +} + +int ESECT +mdb_env_get_path(MDB_env *env, const char **arg) +{ + if (!env || !arg) + return EINVAL; + + *arg = env->me_path; + return MDB_SUCCESS; +} + +int ESECT +mdb_env_get_fd(MDB_env *env, mdb_filehandle_t *arg) +{ + if (!env || !arg) + return EINVAL; + + *arg = env->me_fd; + return MDB_SUCCESS; +} + +/** Common code for #mdb_stat() and #mdb_env_stat(). + * @param[in] env the environment to operate in. + * @param[in] db the #MDB_db record containing the stats to return. + * @param[out] arg the address of an #MDB_stat structure to receive the stats. + * @return 0, this function always succeeds. + */ +static int ESECT +mdb_stat0(MDB_env *env, MDB_db *db, MDB_stat *arg) +{ + arg->ms_psize = env->me_psize; + arg->ms_depth = db->md_depth; + arg->ms_branch_pages = db->md_branch_pages; + arg->ms_leaf_pages = db->md_leaf_pages; + arg->ms_overflow_pages = db->md_overflow_pages; + arg->ms_entries = db->md_entries; + + return MDB_SUCCESS; +} + +int ESECT +mdb_env_stat(MDB_env *env, MDB_stat *arg) +{ + MDB_meta *meta; + + if (env == NULL || arg == NULL) + return EINVAL; + + meta = mdb_env_pick_meta(env); + + return mdb_stat0(env, &meta->mm_dbs[MAIN_DBI], arg); +} + +int ESECT +mdb_env_info(MDB_env *env, MDB_envinfo *arg) +{ + MDB_meta *meta; + + if (env == NULL || arg == NULL) + return EINVAL; + + meta = mdb_env_pick_meta(env); + arg->me_mapaddr = meta->mm_address; + arg->me_last_pgno = meta->mm_last_pg; + arg->me_last_txnid = meta->mm_txnid; + + arg->me_mapsize = env->me_mapsize; + arg->me_maxreaders = env->me_maxreaders; + arg->me_numreaders = env->me_txns ? env->me_txns->mti_numreaders : 0; + return MDB_SUCCESS; +} + +/** Set the default comparison functions for a database. 
+ * Called immediately after a database is opened to set the defaults.
+ * The user can then override them with #mdb_set_compare() or
+ * #mdb_set_dupsort().
+ * @param[in] txn A transaction handle returned by #mdb_txn_begin()
+ * @param[in] dbi A database handle returned by #mdb_dbi_open()
+ */
+static void
+mdb_default_cmp(MDB_txn *txn, MDB_dbi dbi)
+{
+	uint16_t f = txn->mt_dbs[dbi].md_flags;
+
+	txn->mt_dbxs[dbi].md_cmp =
+		(f & MDB_REVERSEKEY) ? mdb_cmp_memnr :
+		(f & MDB_INTEGERKEY) ? mdb_cmp_cint : mdb_cmp_memn;
+
+	txn->mt_dbxs[dbi].md_dcmp =
+		!(f & MDB_DUPSORT) ? 0 :
+		((f & MDB_INTEGERDUP)
+		 ? ((f & MDB_DUPFIXED) ? mdb_cmp_int : mdb_cmp_cint)
+		 : ((f & MDB_REVERSEDUP) ? mdb_cmp_memnr : mdb_cmp_memn));
+}
+
+int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi)
+{
+	MDB_val key, data;
+	MDB_dbi i;
+	MDB_cursor mc;
+	MDB_db dummy;
+	int rc, dbflag, exact;
+	unsigned int unused = 0, seq;
+	size_t len;
+
+	if (flags & ~VALID_FLAGS)
+		return EINVAL;
+	if (txn->mt_flags & MDB_TXN_BLOCKED)
+		return MDB_BAD_TXN;
+
+	/* main DB? */
+	if (!name) {
+		*dbi = MAIN_DBI;
+		if (flags & PERSISTENT_FLAGS) {
+			uint16_t f2 = flags & PERSISTENT_FLAGS;
+			/* make sure flag changes get committed */
+			if ((txn->mt_dbs[MAIN_DBI].md_flags | f2) != txn->mt_dbs[MAIN_DBI].md_flags) {
+				txn->mt_dbs[MAIN_DBI].md_flags |= f2;
+				txn->mt_flags |= MDB_TXN_DIRTY;
+			}
+		}
+		mdb_default_cmp(txn, MAIN_DBI);
+		return MDB_SUCCESS;
+	}
+
+	if (txn->mt_dbxs[MAIN_DBI].md_cmp == NULL) {
+		mdb_default_cmp(txn, MAIN_DBI);
+	}
+
+	/* Is the DB already open? */
+	len = strlen(name);
+	for (i=CORE_DBS; i<txn->mt_numdbs; i++) {
+		if (!txn->mt_dbxs[i].md_name.mv_size) {
+			/* Remember this free slot */
+			if (!unused) unused = i;
+			continue;
+		}
+		if (len == txn->mt_dbxs[i].md_name.mv_size &&
+			!strncmp(name, txn->mt_dbxs[i].md_name.mv_data, len)) {
+			*dbi = i;
+			return MDB_SUCCESS;
+		}
+	}
+
+	/* If no free slot and max hit, fail */
+	if (!unused && txn->mt_numdbs >= txn->mt_env->me_maxdbs)
+		return MDB_DBS_FULL;
+
+	/* Cannot mix named databases with some mainDB flags */
+	if (txn->mt_dbs[MAIN_DBI].md_flags & (MDB_DUPSORT|MDB_INTEGERKEY))
+		return (flags & MDB_CREATE) ? MDB_INCOMPATIBLE : MDB_NOTFOUND;
+
+	/* Find the DB info */
+	dbflag = DB_NEW|DB_VALID|DB_USRVALID;
+	exact = 0;
+	key.mv_size = len;
+	key.mv_data = (void *)name;
+	mdb_cursor_init(&mc, txn, MAIN_DBI, NULL);
+	rc = mdb_cursor_set(&mc, &key, &data, MDB_SET, &exact);
+	if (rc == MDB_SUCCESS) {
+		/* make sure this is actually a DB */
+		MDB_node *node = NODEPTR(mc.mc_pg[mc.mc_top], mc.mc_ki[mc.mc_top]);
+		if ((node->mn_flags & (F_DUPDATA|F_SUBDATA)) != F_SUBDATA)
+			return MDB_INCOMPATIBLE;
+	} else if (rc == MDB_NOTFOUND && (flags & MDB_CREATE)) {
+		/* Create if requested */
+		data.mv_size = sizeof(MDB_db);
+		data.mv_data = &dummy;
+		memset(&dummy, 0, sizeof(dummy));
+		dummy.md_root = P_INVALID;
+		dummy.md_flags = flags & PERSISTENT_FLAGS;
+		rc = mdb_cursor_put(&mc, &key, &data, F_SUBDATA);
+		dbflag |= DB_DIRTY;
+	}
+
+	/* OK, got info, add to table */
+	if (rc == MDB_SUCCESS) {
+		unsigned int slot = unused ? unused : txn->mt_numdbs;
+		txn->mt_dbxs[slot].md_name.mv_data = strdup(name);
+		txn->mt_dbxs[slot].md_name.mv_size = len;
+		txn->mt_dbxs[slot].md_rel = NULL;
+		txn->mt_dbflags[slot] = dbflag;
+		/* txn-> and env-> are the same in read txns, use
+		 * tmp variable to avoid undefined assignment
+		 */
+		seq = ++txn->mt_env->me_dbiseqs[slot];
+		txn->mt_dbiseqs[slot] = seq;
+
+		memcpy(&txn->mt_dbs[slot], data.mv_data, sizeof(MDB_db));
+		*dbi = slot;
+		mdb_default_cmp(txn, slot);
+		if (!unused) {
+			txn->mt_numdbs++;
+		}
+	}
+
+	return rc;
+}
+
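+/* Caller-side sketch of the API above: open one named sub-database and store
+ * a single record. The database name, key/value contents and error handling
+ * are illustrative assumptions only, and the environment is expected to have
+ * been opened after mdb_env_set_maxdbs() allowed at least one named DB.
+ */
+#if 0
+static int
+example_dbi_put(MDB_env *env)
+{
+	MDB_txn *txn;
+	MDB_dbi dbi;
+	MDB_val key, data;
+	char kbuf[] = "widget-1", vbuf[] = "blue";
+	int rc;
+
+	if ((rc = mdb_txn_begin(env, NULL, 0, &txn)) != MDB_SUCCESS)
+		return rc;
+	rc = mdb_dbi_open(txn, "widgets", MDB_CREATE, &dbi);
+	if (rc == MDB_SUCCESS) {
+		key.mv_size = sizeof(kbuf)-1;	key.mv_data = kbuf;
+		data.mv_size = sizeof(vbuf)-1;	data.mv_data = vbuf;
+		rc = mdb_put(txn, dbi, &key, &data, 0);
+	}
+	if (rc == MDB_SUCCESS)
+		return mdb_txn_commit(txn);
+	mdb_txn_abort(txn);
+	return rc;
+}
+#endif
+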
+int ESECT
+mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *arg)
+{
+	if (!arg || !TXN_DBI_EXIST(txn, dbi, DB_VALID))
+		return EINVAL;
+
+	if (txn->mt_flags & MDB_TXN_BLOCKED)
+		return MDB_BAD_TXN;
+
+	if (txn->mt_dbflags[dbi] & DB_STALE) {
+		MDB_cursor mc;
+		MDB_xcursor mx;
+		/* Stale, must read the DB's root. cursor_init does it for us. */
+		mdb_cursor_init(&mc, txn, dbi, &mx);
+	}
+	return mdb_stat0(txn->mt_env, &txn->mt_dbs[dbi], arg);
+}
+
+void mdb_dbi_close(MDB_env *env, MDB_dbi dbi)
+{
+	char *ptr;
+	if (dbi < CORE_DBS || dbi >= env->me_maxdbs)
+		return;
+	ptr = env->me_dbxs[dbi].md_name.mv_data;
+	/* If there was no name, this was already closed */
+	if (ptr) {
+		env->me_dbxs[dbi].md_name.mv_data = NULL;
+		env->me_dbxs[dbi].md_name.mv_size = 0;
+		env->me_dbflags[dbi] = 0;
+		env->me_dbiseqs[dbi]++;
+		free(ptr);
+	}
+}
+
+int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags)
+{
+	/* We could return the flags for the FREE_DBI too but what's the point? */
+	if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID))
+		return EINVAL;
+	*flags = txn->mt_dbs[dbi].md_flags & PERSISTENT_FLAGS;
+	return MDB_SUCCESS;
+}
+
+/** Add all the DB's pages to the free list.
+ * @param[in] mc Cursor on the DB to free.
+ * @param[in] subs non-Zero to check for sub-DBs in this DB.
+ * @return 0 on success, non-zero on failure.
+ */
+static int
+mdb_drop0(MDB_cursor *mc, int subs)
+{
+	int rc;
+
+	rc = mdb_page_search(mc, NULL, MDB_PS_FIRST);
+	if (rc == MDB_SUCCESS) {
+		MDB_txn *txn = mc->mc_txn;
+		MDB_node *ni;
+		MDB_cursor mx;
+		unsigned int i;
+
+		/* DUPSORT sub-DBs have no ovpages/DBs. Omit scanning leaves.
+		 * This also avoids any P_LEAF2 pages, which have no nodes.
+		 */
+		if (mc->mc_flags & C_SUB)
+			mdb_cursor_pop(mc);
+
+		mdb_cursor_copy(mc, &mx);
+		while (mc->mc_snum > 0) {
+			MDB_page *mp = mc->mc_pg[mc->mc_top];
+			unsigned n = NUMKEYS(mp);
+			if (IS_LEAF(mp)) {
+				for (i=0; i<n; i++) {
+					ni = NODEPTR(mp, i);
+					if (ni->mn_flags & F_BIGDATA) {
+						MDB_page *omp;
+						pgno_t pg;
+						memcpy(&pg, NODEDATA(ni), sizeof(pg));
+						rc = mdb_page_get(txn, pg, &omp, NULL);
+						if (rc != 0)
+							goto done;
+						mdb_cassert(mc, IS_OVERFLOW(omp));
+						rc = mdb_midl_append_range(&txn->mt_free_pgs,
+							pg, omp->mp_pages);
+						if (rc)
+							goto done;
+					} else if (subs && (ni->mn_flags & F_SUBDATA)) {
+						mdb_xcursor_init1(mc, ni);
+						rc = mdb_drop0(&mc->mc_xcursor->mx_cursor, 0);
+						if (rc)
+							goto done;
+					}
+				}
+			} else {
+				if ((rc = mdb_midl_need(&txn->mt_free_pgs, n)) != 0)
+					goto done;
+				for (i=0; i<n; i++) {
+					pgno_t pg = NODEPGNO(NODEPTR(mp, i));
+					mdb_midl_xappend(&txn->mt_free_pgs, pg);
+				}
+			}
+			if (!mc->mc_top)
+				break;
+			mc->mc_ki[mc->mc_top] = i;
+			rc = mdb_cursor_sibling(mc, 1);
+			if (rc) {
+				if (rc != MDB_NOTFOUND)
+					goto done;
+				/* no more siblings, go back to beginning
+				 * of previous level.
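+				 * (mx holds the page stack saved before the walk, so the
+				 * path above this level can simply be restored from it.)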
+ */ + mdb_cursor_pop(mc); + mc->mc_ki[0] = 0; + for (i=1; imc_snum; i++) { + mc->mc_ki[i] = 0; + mc->mc_pg[i] = mx.mc_pg[i]; + } + } + } + /* free it */ + rc = mdb_midl_append(&txn->mt_free_pgs, mc->mc_db->md_root); +done: + if (rc) + txn->mt_flags |= MDB_TXN_ERROR; + } else if (rc == MDB_NOTFOUND) { + rc = MDB_SUCCESS; + } + mc->mc_flags &= ~C_INITIALIZED; + return rc; +} + +int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del) +{ + MDB_cursor *mc, *m2; + int rc; + + if ((unsigned)del > 1 || !TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) + return EINVAL; + + if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) + return EACCES; + + if (TXN_DBI_CHANGED(txn, dbi)) + return MDB_BAD_DBI; + + rc = mdb_cursor_open(txn, dbi, &mc); + if (rc) + return rc; + + rc = mdb_drop0(mc, mc->mc_db->md_flags & MDB_DUPSORT); + /* Invalidate the dropped DB's cursors */ + for (m2 = txn->mt_cursors[dbi]; m2; m2 = m2->mc_next) + m2->mc_flags &= ~(C_INITIALIZED|C_EOF); + if (rc) + goto leave; + + /* Can't delete the main DB */ + if (del && dbi >= CORE_DBS) { + rc = mdb_del0(txn, MAIN_DBI, &mc->mc_dbx->md_name, NULL, F_SUBDATA); + if (!rc) { + txn->mt_dbflags[dbi] = DB_STALE; + mdb_dbi_close(txn->mt_env, dbi); + } else { + txn->mt_flags |= MDB_TXN_ERROR; + } + } else { + /* reset the DB record, mark it dirty */ + txn->mt_dbflags[dbi] |= DB_DIRTY; + txn->mt_dbs[dbi].md_depth = 0; + txn->mt_dbs[dbi].md_branch_pages = 0; + txn->mt_dbs[dbi].md_leaf_pages = 0; + txn->mt_dbs[dbi].md_overflow_pages = 0; + txn->mt_dbs[dbi].md_entries = 0; + txn->mt_dbs[dbi].md_root = P_INVALID; + + txn->mt_flags |= MDB_TXN_DIRTY; + } +leave: + mdb_cursor_close(mc); + return rc; +} + +int mdb_set_compare(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp) +{ + if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) + return EINVAL; + + txn->mt_dbxs[dbi].md_cmp = cmp; + return MDB_SUCCESS; +} + +int mdb_set_dupsort(MDB_txn *txn, MDB_dbi dbi, MDB_cmp_func *cmp) +{ + if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) + return EINVAL; + + txn->mt_dbxs[dbi].md_dcmp = cmp; + return MDB_SUCCESS; +} + +int mdb_set_relfunc(MDB_txn *txn, MDB_dbi dbi, MDB_rel_func *rel) +{ + if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) + return EINVAL; + + txn->mt_dbxs[dbi].md_rel = rel; + return MDB_SUCCESS; +} + +int mdb_set_relctx(MDB_txn *txn, MDB_dbi dbi, void *ctx) +{ + if (!TXN_DBI_EXIST(txn, dbi, DB_USRVALID)) + return EINVAL; + + txn->mt_dbxs[dbi].md_relctx = ctx; + return MDB_SUCCESS; +} + +int ESECT +mdb_env_get_maxkeysize(MDB_env *env) +{ + return ENV_MAXKEY(env); +} + +int ESECT +mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx) +{ + unsigned int i, rdrs; + MDB_reader *mr; + char buf[64]; + int rc = 0, first = 1; + + if (!env || !func) + return -1; + if (!env->me_txns) { + return func("(no reader locks)\n", ctx); + } + rdrs = env->me_txns->mti_numreaders; + mr = env->me_txns->mti_readers; + for (i=0; i> 1; + cursor = base + pivot + 1; + val = pid - ids[cursor]; + + if( val < 0 ) { + n = pivot; + + } else if ( val > 0 ) { + base = cursor; + n -= pivot + 1; + + } else { + /* found, so it's a duplicate */ + return -1; + } + } + + if( val > 0 ) { + ++cursor; + } + ids[0]++; + for (n = ids[0]; n > cursor; n--) + ids[n] = ids[n-1]; + ids[n] = pid; + return 0; +} + +int ESECT +mdb_reader_check(MDB_env *env, int *dead) +{ + if (!env) + return EINVAL; + if (dead) + *dead = 0; + return env->me_txns ? mdb_reader_check0(env, 0, dead) : MDB_SUCCESS; +} + +/** As #mdb_reader_check(). rlocked = . 
*/ +static int ESECT +mdb_reader_check0(MDB_env *env, int rlocked, int *dead) +{ + mdb_mutexref_t rmutex = rlocked ? NULL : env->me_rmutex; + unsigned int i, j, rdrs; + MDB_reader *mr; + MDB_PID_T *pids, pid; + int rc = MDB_SUCCESS, count = 0; + + rdrs = env->me_txns->mti_numreaders; + pids = malloc((rdrs+1) * sizeof(MDB_PID_T)); + if (!pids) + return ENOMEM; + pids[0] = 0; + mr = env->me_txns->mti_readers; + for (i=0; ime_pid) { + if (mdb_pid_insert(pids, pid) == 0) { + if (!mdb_reader_pid(env, Pidcheck, pid)) { + /* Stale reader found */ + j = i; + if (rmutex) { + if ((rc = LOCK_MUTEX0(rmutex)) != 0) { + if ((rc = mdb_mutex_failed(env, rmutex, rc))) + break; + rdrs = 0; /* the above checked all readers */ + } else { + /* Recheck, a new process may have reused pid */ + if (mdb_reader_pid(env, Pidcheck, pid)) + j = rdrs; + } + } + for (; jme_rmutex); + if (!rlocked) { + /* Keep mti_txnid updated, otherwise next writer can + * overwrite data which latest meta page refers to. + */ + meta = mdb_env_pick_meta(env); + env->me_txns->mti_txnid = meta->mm_txnid; + /* env is hosed if the dead thread was ours */ + if (env->me_txn) { + env->me_flags |= MDB_FATAL_ERROR; + env->me_txn = NULL; + rc = MDB_PANIC; + } + } + DPRINTF(("%cmutex owner died, %s", (rlocked ? 'r' : 'w'), + (rc ? "this process' env is hosed" : "recovering"))); + rc2 = mdb_reader_check0(env, rlocked, NULL); + if (rc2 == 0) + rc2 = mdb_mutex_consistent(mutex); + if (rc || (rc = rc2)) { + DPRINTF(("LOCK_MUTEX recovery failed, %s", mdb_strerror(rc))); + UNLOCK_MUTEX(mutex); + } + } else { +#ifdef _WIN32 + rc = ErrCode(); +#endif + DPRINTF(("LOCK_MUTEX failed, %s", mdb_strerror(rc))); + } + + return rc; +} +#endif /* MDB_ROBUST_SUPPORTED */ +/** @} */ + +#if defined(_WIN32) +static int utf8_to_utf16(const char *src, int srcsize, wchar_t **dst, int *dstsize) +{ + int need; + wchar_t *result; + need = MultiByteToWideChar(CP_UTF8, 0, src, srcsize, NULL, 0); + if (need == 0xFFFD) + return EILSEQ; + if (need == 0) + return EINVAL; + result = malloc(sizeof(wchar_t) * need); + MultiByteToWideChar(CP_UTF8, 0, src, srcsize, result, need); + if (dstsize) + *dstsize = need; + *dst = result; + return 0; +} +#endif /* defined(_WIN32) */ diff --git a/deps/liblmdb/mdb_copy.1 b/deps/liblmdb/mdb_copy.1 new file mode 100644 index 00000000..1e2a9769 --- /dev/null +++ b/deps/liblmdb/mdb_copy.1 @@ -0,0 +1,54 @@ +.TH MDB_COPY 1 "2014/06/20" "LMDB 0.9.14" +.\" Copyright 2012-2015 Howard Chu, Symas Corp. All Rights Reserved. +.\" Copying restrictions apply. See COPYRIGHT/LICENSE. +.SH NAME +mdb_copy \- LMDB environment copy tool +.SH SYNOPSIS +.B mdb_copy +[\c +.BR \-V ] +[\c +.BR \-c ] +[\c +.BR \-n ] +.B srcpath +[\c +.BR dstpath ] +.SH DESCRIPTION +The +.B mdb_copy +utility copies an LMDB environment. The environment can +be copied regardless of whether it is currently in use. +No lockfile is created, since it gets recreated at need. + +If +.I dstpath +is specified it must be the path of an empty directory +for storing the backup. Otherwise, the backup will be +written to stdout. + +.SH OPTIONS +.TP +.BR \-V +Write the library version number to the standard output, and exit. +.TP +.BR \-c +Compact while copying. Only current data pages will be copied; freed +or unused pages will be omitted from the copy. This option will +slow down the backup process as it is more CPU-intensive. +.TP +.BR \-n +Open LDMB environment(s) which do not use subdirectories. + +.SH DIAGNOSTICS +Exit status is zero if no errors occur. 
+Errors result in a non-zero exit status and +a diagnostic message being written to standard error. +.SH CAVEATS +This utility can trigger significant file size growth if run +in parallel with write transactions, because pages which they +free during copying cannot be reused until the copy is done. +.SH "SEE ALSO" +.BR mdb_stat (1) +.SH AUTHOR +Howard Chu of Symas Corporation diff --git a/deps/liblmdb/mdb_copy.c b/deps/liblmdb/mdb_copy.c new file mode 100644 index 00000000..f37ccbcc --- /dev/null +++ b/deps/liblmdb/mdb_copy.c @@ -0,0 +1,82 @@ +/* mdb_copy.c - memory-mapped database backup tool */ +/* + * Copyright 2012-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ +#ifdef _WIN32 +#include +#define MDB_STDOUT GetStdHandle(STD_OUTPUT_HANDLE) +#else +#define MDB_STDOUT 1 +#endif +#include +#include +#include +#include "lmdb.h" + +static void +sighandle(int sig) +{ +} + +int main(int argc,char * argv[]) +{ + int rc; + MDB_env *env; + const char *progname = argv[0], *act; + unsigned flags = MDB_RDONLY; + unsigned cpflags = 0; + + for (; argc > 1 && argv[1][0] == '-'; argc--, argv++) { + if (argv[1][1] == 'n' && argv[1][2] == '\0') + flags |= MDB_NOSUBDIR; + else if (argv[1][1] == 'c' && argv[1][2] == '\0') + cpflags |= MDB_CP_COMPACT; + else if (argv[1][1] == 'V' && argv[1][2] == '\0') { + printf("%s\n", MDB_VERSION_STRING); + exit(0); + } else + argc = 0; + } + + if (argc<2 || argc>3) { + fprintf(stderr, "usage: %s [-V] [-c] [-n] srcpath [dstpath]\n", progname); + exit(EXIT_FAILURE); + } + +#ifdef SIGPIPE + signal(SIGPIPE, sighandle); +#endif +#ifdef SIGHUP + signal(SIGHUP, sighandle); +#endif + signal(SIGINT, sighandle); + signal(SIGTERM, sighandle); + + act = "opening environment"; + rc = mdb_env_create(&env); + if (rc == MDB_SUCCESS) { + rc = mdb_env_open(env, argv[1], flags, 0600); + } + if (rc == MDB_SUCCESS) { + act = "copying"; + if (argc == 2) + rc = mdb_env_copyfd2(env, MDB_STDOUT, cpflags); + else + rc = mdb_env_copy2(env, argv[2], cpflags); + } + if (rc) + fprintf(stderr, "%s: %s failed, error %d (%s)\n", + progname, act, rc, mdb_strerror(rc)); + mdb_env_close(env); + + return rc ? EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/deps/liblmdb/mdb_dump.1 b/deps/liblmdb/mdb_dump.1 new file mode 100644 index 00000000..5a647ba2 --- /dev/null +++ b/deps/liblmdb/mdb_dump.1 @@ -0,0 +1,75 @@ +.TH MDB_DUMP 1 "2014/06/20" "LMDB 0.9.14" +.\" Copyright 2014-2015 Howard Chu, Symas Corp. All Rights Reserved. +.\" Copying restrictions apply. See COPYRIGHT/LICENSE. +.SH NAME +mdb_dump \- LMDB environment export tool +.SH SYNOPSIS +.B mdb_dump +[\c +.BR \-V ] +[\c +.BI \-f \ file\fR] +[\c +.BR \-l ] +[\c +.BR \-n ] +[\c +.BR \-p ] +[\c +.BR \-a \ | +.BI \-s \ subdb\fR] +.BR \ envpath +.SH DESCRIPTION +The +.B mdb_dump +utility reads a database and writes its contents to the +standard output using a portable flat-text format +understood by the +.BR mdb_load (1) +utility. +.SH OPTIONS +.TP +.BR \-V +Write the library version number to the standard output, and exit. +.TP +.BR \-f \ file +Write to the specified file instead of to the standard output. +.TP +.BR \-l +List the databases stored in the environment. Just the +names will be listed, no data will be output. 
+.TP +.BR \-n +Dump an LMDB database which does not use subdirectories. +.TP +.BR \-p +If characters in either the key or data items are printing characters (as +defined by isprint(3)), output them directly. This option permits users to +use standard text editors and tools to modify the contents of databases. + +Note: different systems may have different notions about what characters +are considered printing characters, and databases dumped in this manner may +be less portable to external systems. +.TP +.BR \-a +Dump all of the subdatabases in the environment. +.TP +.BR \-s \ subdb +Dump a specific subdatabase. If no database is specified, only the main database is dumped. +.SH DIAGNOSTICS +Exit status is zero if no errors occur. +Errors result in a non-zero exit status and +a diagnostic message being written to standard error. + +Dumping and reloading databases that use user-defined comparison functions +will result in new databases that use the default comparison functions. +\fBIn this case it is quite likely that the reloaded database will be +damaged beyond repair permitting neither record storage nor retrieval.\fP + +The only available workaround is to modify the source for the +.BR mdb_load (1) +utility to load the database using the correct comparison functions. +.SH "SEE ALSO" +.BR mdb_load (1) +.SH AUTHOR +Howard Chu of Symas Corporation diff --git a/deps/liblmdb/mdb_dump.c b/deps/liblmdb/mdb_dump.c new file mode 100644 index 00000000..16c0aae1 --- /dev/null +++ b/deps/liblmdb/mdb_dump.c @@ -0,0 +1,317 @@ +/* mdb_dump.c - memory-mapped database dump tool */ +/* + * Copyright 2011-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include "lmdb.h" + +#ifdef _WIN32 +#define Z "I" +#else +#define Z "z" +#endif + +#define PRINT 1 +static int mode; + +typedef struct flagbit { + int bit; + char *name; +} flagbit; + +flagbit dbflags[] = { + { MDB_REVERSEKEY, "reversekey" }, + { MDB_DUPSORT, "dupsort" }, + { MDB_INTEGERKEY, "integerkey" }, + { MDB_DUPFIXED, "dupfixed" }, + { MDB_INTEGERDUP, "integerdup" }, + { MDB_REVERSEDUP, "reversedup" }, + { 0, NULL } +}; + +static volatile sig_atomic_t gotsig; + +static void dumpsig( int sig ) +{ + gotsig=1; +} + +static const char hexc[] = "0123456789abcdef"; + +static void hex(unsigned char c) +{ + putchar(hexc[c >> 4]); + putchar(hexc[c & 0xf]); +} + +static void text(MDB_val *v) +{ + unsigned char *c, *end; + + putchar(' '); + c = v->mv_data; + end = c + v->mv_size; + while (c < end) { + if (isprint(*c)) { + putchar(*c); + } else { + putchar('\\'); + hex(*c); + } + c++; + } + putchar('\n'); +} + +static void byte(MDB_val *v) +{ + unsigned char *c, *end; + + putchar(' '); + c = v->mv_data; + end = c + v->mv_size; + while (c < end) { + hex(*c++); + } + putchar('\n'); +} + +/* Dump in BDB-compatible format */ +static int dumpit(MDB_txn *txn, MDB_dbi dbi, char *name) +{ + MDB_cursor *mc; + MDB_stat ms; + MDB_val key, data; + MDB_envinfo info; + unsigned int flags; + int rc, i; + + rc = mdb_dbi_flags(txn, dbi, &flags); + if (rc) return rc; + + rc = mdb_stat(txn, dbi, &ms); + if (rc) return rc; + + rc = mdb_env_info(mdb_txn_env(txn), &info); + if (rc) return rc; + + printf("VERSION=3\n"); + printf("format=%s\n", mode & PRINT ? "print" : "bytevalue"); + if (name) + printf("database=%s\n", name); + printf("type=btree\n"); + printf("mapsize=%" Z "u\n", info.me_mapsize); + if (info.me_mapaddr) + printf("mapaddr=%p\n", info.me_mapaddr); + printf("maxreaders=%u\n", info.me_maxreaders); + + if (flags & MDB_DUPSORT) + printf("duplicates=1\n"); + + for (i=0; dbflags[i].bit; i++) + if (flags & dbflags[i].bit) + printf("%s=1\n", dbflags[i].name); + + printf("db_pagesize=%d\n", ms.ms_psize); + printf("HEADER=END\n"); + + rc = mdb_cursor_open(txn, dbi, &mc); + if (rc) return rc; + + while ((rc = mdb_cursor_get(mc, &key, &data, MDB_NEXT) == MDB_SUCCESS)) { + if (gotsig) { + rc = EINTR; + break; + } + if (mode & PRINT) { + text(&key); + text(&data); + } else { + byte(&key); + byte(&data); + } + } + printf("DATA=END\n"); + if (rc == MDB_NOTFOUND) + rc = MDB_SUCCESS; + + return rc; +} + +static void usage(char *prog) +{ + fprintf(stderr, "usage: %s [-V] [-f output] [-l] [-n] [-p] [-a|-s subdb] dbpath\n", prog); + exit(EXIT_FAILURE); +} + +int main(int argc, char *argv[]) +{ + int i, rc; + MDB_env *env; + MDB_txn *txn; + MDB_dbi dbi; + char *prog = argv[0]; + char *envname; + char *subname = NULL; + int alldbs = 0, envflags = 0, list = 0; + + if (argc < 2) { + usage(prog); + } + + /* -a: dump main DB and all subDBs + * -s: dump only the named subDB + * -n: use NOSUBDIR flag on env_open + * -p: use printable characters + * -f: write to file instead of stdout + * -V: print version and exit + * (default) dump only the main DB + */ + while ((i = getopt(argc, argv, "af:lnps:V")) != EOF) { + switch(i) { + case 'V': + printf("%s\n", MDB_VERSION_STRING); + exit(0); + break; + case 'l': + list = 1; + /*FALLTHROUGH*/; + case 'a': + if (subname) + usage(prog); + alldbs++; + break; + case 'f': + if (freopen(optarg, "w", stdout) == NULL) { + fprintf(stderr, "%s: %s: reopen: %s\n", + prog, optarg, strerror(errno)); + exit(EXIT_FAILURE); + } + 
break; + case 'n': + envflags |= MDB_NOSUBDIR; + break; + case 'p': + mode |= PRINT; + break; + case 's': + if (alldbs) + usage(prog); + subname = optarg; + break; + default: + usage(prog); + } + } + + if (optind != argc - 1) + usage(prog); + +#ifdef SIGPIPE + signal(SIGPIPE, dumpsig); +#endif +#ifdef SIGHUP + signal(SIGHUP, dumpsig); +#endif + signal(SIGINT, dumpsig); + signal(SIGTERM, dumpsig); + + envname = argv[optind]; + rc = mdb_env_create(&env); + if (rc) { + fprintf(stderr, "mdb_env_create failed, error %d %s\n", rc, mdb_strerror(rc)); + return EXIT_FAILURE; + } + + if (alldbs || subname) { + mdb_env_set_maxdbs(env, 2); + } + + rc = mdb_env_open(env, envname, envflags | MDB_RDONLY, 0664); + if (rc) { + fprintf(stderr, "mdb_env_open failed, error %d %s\n", rc, mdb_strerror(rc)); + goto env_close; + } + + rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); + if (rc) { + fprintf(stderr, "mdb_txn_begin failed, error %d %s\n", rc, mdb_strerror(rc)); + goto env_close; + } + + rc = mdb_open(txn, subname, 0, &dbi); + if (rc) { + fprintf(stderr, "mdb_open failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + + if (alldbs) { + MDB_cursor *cursor; + MDB_val key; + int count = 0; + + rc = mdb_cursor_open(txn, dbi, &cursor); + if (rc) { + fprintf(stderr, "mdb_cursor_open failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + while ((rc = mdb_cursor_get(cursor, &key, NULL, MDB_NEXT_NODUP)) == 0) { + char *str; + MDB_dbi db2; + if (memchr(key.mv_data, '\0', key.mv_size)) + continue; + count++; + str = malloc(key.mv_size+1); + memcpy(str, key.mv_data, key.mv_size); + str[key.mv_size] = '\0'; + rc = mdb_open(txn, str, 0, &db2); + if (rc == MDB_SUCCESS) { + if (list) { + printf("%s\n", str); + list++; + } else { + rc = dumpit(txn, db2, str); + if (rc) + break; + } + mdb_close(env, db2); + } + free(str); + if (rc) continue; + } + mdb_cursor_close(cursor); + if (!count) { + fprintf(stderr, "%s: %s does not contain multiple databases\n", prog, envname); + rc = MDB_NOTFOUND; + } else if (rc == MDB_NOTFOUND) { + rc = MDB_SUCCESS; + } + } else { + rc = dumpit(txn, dbi, subname); + } + if (rc && rc != MDB_NOTFOUND) + fprintf(stderr, "%s: %s: %s\n", prog, envname, mdb_strerror(rc)); + + mdb_close(env, dbi); +txn_abort: + mdb_txn_abort(txn); +env_close: + mdb_env_close(env); + + return rc ? EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/deps/liblmdb/mdb_load.1 b/deps/liblmdb/mdb_load.1 new file mode 100644 index 00000000..712ed054 --- /dev/null +++ b/deps/liblmdb/mdb_load.1 @@ -0,0 +1,77 @@ +.TH MDB_LOAD 1 "2014/06/20" "LMDB 0.9.14" +.\" Copyright 2014-2015 Howard Chu, Symas Corp. All Rights Reserved. +.\" Copying restrictions apply. See COPYRIGHT/LICENSE. +.SH NAME +mdb_load \- LMDB environment import tool +.SH SYNOPSIS +.B mdb_load +[\c +.BR \-V ] +[\c +.BI \-f \ file\fR] +[\c +.BR \-n ] +[\c +.BI \-s \ subdb\fR] +[\c +.BR \-N ] +[\c +.BR \-T ] +.BR \ envpath +.SH DESCRIPTION +The +.B mdb_load +utility reads from the standard input and loads it into the +LMDB environment +.BR envpath . + +The input to +.B mdb_load +must be in the output format specified by the +.BR mdb_dump (1) +utility or as specified by the +.B -T +option below. +.SH OPTIONS +.TP +.BR \-V +Write the library version number to the standard output, and exit. +.TP +.BR \-f \ file +Read from the specified file instead of from the standard input. +.TP +.BR \-n +Load an LMDB database which does not use subdirectories. +.TP +.BR \-s \ subdb +Load a specific subdatabase. 
If no database is specified, data is loaded into the main database. +.TP +.BR \-N +Don't overwrite existing records when loading into an already existing database; just skip them. +.TP +.BR \-T +Load data from simple text files. The input must be paired lines of text, where the first +line of the pair is the key item, and the second line of the pair is its corresponding +data item. + +A simple escape mechanism, where newline and backslash (\\) characters are special, is +applied to the text input. Newline characters are interpreted as record separators. +Backslash characters in the text will be interpreted in one of two ways: If the backslash +character precedes another backslash character, the pair will be interpreted as a literal +backslash. If the backslash character precedes any other character, the two characters +following the backslash will be interpreted as a hexadecimal specification of a single +character; for example, \\0a is a newline character in the ASCII character set. + +For this reason, any backslash or newline characters that naturally occur in the text +input must be escaped to avoid misinterpretation by +.BR mdb_load . + +.SH DIAGNOSTICS +Exit status is zero if no errors occur. +Errors result in a non-zero exit status and +a diagnostic message being written to standard error. + +.SH "SEE ALSO" +.BR mdb_dump (1) +.SH AUTHOR +Howard Chu of Symas Corporation diff --git a/deps/liblmdb/mdb_load.c b/deps/liblmdb/mdb_load.c new file mode 100644 index 00000000..d2f09681 --- /dev/null +++ b/deps/liblmdb/mdb_load.c @@ -0,0 +1,456 @@ +/* mdb_load.c - memory-mapped database load tool */ +/* + * Copyright 2011-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . 
+ */ +#include +#include +#include +#include +#include +#include +#include "lmdb.h" + +#define PRINT 1 +#define NOHDR 2 +static int mode; + +static char *subname = NULL; + +static size_t lineno; +static int version; + +static int flags; + +static char *prog; + +static int Eof; + +static MDB_envinfo info; + +static MDB_val kbuf, dbuf; + +#ifdef _WIN32 +#define Z "I" +#else +#define Z "z" +#endif + +#define STRLENOF(s) (sizeof(s)-1) + +typedef struct flagbit { + int bit; + char *name; + int len; +} flagbit; + +#define S(s) s, STRLENOF(s) + +flagbit dbflags[] = { + { MDB_REVERSEKEY, S("reversekey") }, + { MDB_DUPSORT, S("dupsort") }, + { MDB_INTEGERKEY, S("integerkey") }, + { MDB_DUPFIXED, S("dupfixed") }, + { MDB_INTEGERDUP, S("integerdup") }, + { MDB_REVERSEDUP, S("reversedup") }, + { 0, NULL, 0 } +}; + +static void readhdr(void) +{ + char *ptr; + + while (fgets(dbuf.mv_data, dbuf.mv_size, stdin) != NULL) { + lineno++; + if (!strncmp(dbuf.mv_data, "VERSION=", STRLENOF("VERSION="))) { + version=atoi((char *)dbuf.mv_data+STRLENOF("VERSION=")); + if (version > 3) { + fprintf(stderr, "%s: line %" Z "d: unsupported VERSION %d\n", + prog, lineno, version); + exit(EXIT_FAILURE); + } + } else if (!strncmp(dbuf.mv_data, "HEADER=END", STRLENOF("HEADER=END"))) { + break; + } else if (!strncmp(dbuf.mv_data, "format=", STRLENOF("format="))) { + if (!strncmp((char *)dbuf.mv_data+STRLENOF("FORMAT="), "print", STRLENOF("print"))) + mode |= PRINT; + else if (strncmp((char *)dbuf.mv_data+STRLENOF("FORMAT="), "bytevalue", STRLENOF("bytevalue"))) { + fprintf(stderr, "%s: line %" Z "d: unsupported FORMAT %s\n", + prog, lineno, (char *)dbuf.mv_data+STRLENOF("FORMAT=")); + exit(EXIT_FAILURE); + } + } else if (!strncmp(dbuf.mv_data, "database=", STRLENOF("database="))) { + ptr = memchr(dbuf.mv_data, '\n', dbuf.mv_size); + if (ptr) *ptr = '\0'; + if (subname) free(subname); + subname = strdup((char *)dbuf.mv_data+STRLENOF("database=")); + } else if (!strncmp(dbuf.mv_data, "type=", STRLENOF("type="))) { + if (strncmp((char *)dbuf.mv_data+STRLENOF("type="), "btree", STRLENOF("btree"))) { + fprintf(stderr, "%s: line %" Z "d: unsupported type %s\n", + prog, lineno, (char *)dbuf.mv_data+STRLENOF("type=")); + exit(EXIT_FAILURE); + } + } else if (!strncmp(dbuf.mv_data, "mapaddr=", STRLENOF("mapaddr="))) { + int i; + ptr = memchr(dbuf.mv_data, '\n', dbuf.mv_size); + if (ptr) *ptr = '\0'; + i = sscanf((char *)dbuf.mv_data+STRLENOF("mapaddr="), "%p", &info.me_mapaddr); + if (i != 1) { + fprintf(stderr, "%s: line %" Z "d: invalid mapaddr %s\n", + prog, lineno, (char *)dbuf.mv_data+STRLENOF("mapaddr=")); + exit(EXIT_FAILURE); + } + } else if (!strncmp(dbuf.mv_data, "mapsize=", STRLENOF("mapsize="))) { + int i; + ptr = memchr(dbuf.mv_data, '\n', dbuf.mv_size); + if (ptr) *ptr = '\0'; + i = sscanf((char *)dbuf.mv_data+STRLENOF("mapsize="), "%" Z "u", &info.me_mapsize); + if (i != 1) { + fprintf(stderr, "%s: line %" Z "d: invalid mapsize %s\n", + prog, lineno, (char *)dbuf.mv_data+STRLENOF("mapsize=")); + exit(EXIT_FAILURE); + } + } else if (!strncmp(dbuf.mv_data, "maxreaders=", STRLENOF("maxreaders="))) { + int i; + ptr = memchr(dbuf.mv_data, '\n', dbuf.mv_size); + if (ptr) *ptr = '\0'; + i = sscanf((char *)dbuf.mv_data+STRLENOF("maxreaders="), "%u", &info.me_maxreaders); + if (i != 1) { + fprintf(stderr, "%s: line %" Z "d: invalid maxreaders %s\n", + prog, lineno, (char *)dbuf.mv_data+STRLENOF("maxreaders=")); + exit(EXIT_FAILURE); + } + } else { + int i; + for (i=0; dbflags[i].bit; i++) { + if (!strncmp(dbuf.mv_data, 
dbflags[i].name, dbflags[i].len) && + ((char *)dbuf.mv_data)[dbflags[i].len] == '=') { + flags |= dbflags[i].bit; + break; + } + } + if (!dbflags[i].bit) { + ptr = memchr(dbuf.mv_data, '=', dbuf.mv_size); + if (!ptr) { + fprintf(stderr, "%s: line %" Z "d: unexpected format\n", + prog, lineno); + exit(EXIT_FAILURE); + } else { + *ptr = '\0'; + fprintf(stderr, "%s: line %" Z "d: unrecognized keyword ignored: %s\n", + prog, lineno, (char *)dbuf.mv_data); + } + } + } + } +} + +static void badend(void) +{ + fprintf(stderr, "%s: line %" Z "d: unexpected end of input\n", + prog, lineno); +} + +static int unhex(unsigned char *c2) +{ + int x, c; + x = *c2++ & 0x4f; + if (x & 0x40) + x -= 55; + c = x << 4; + x = *c2 & 0x4f; + if (x & 0x40) + x -= 55; + c |= x; + return c; +} + +static int readline(MDB_val *out, MDB_val *buf) +{ + unsigned char *c1, *c2, *end; + size_t len, l2; + int c; + + if (!(mode & NOHDR)) { + c = fgetc(stdin); + if (c == EOF) { + Eof = 1; + return EOF; + } + if (c != ' ') { + lineno++; + if (fgets(buf->mv_data, buf->mv_size, stdin) == NULL) { +badend: + Eof = 1; + badend(); + return EOF; + } + if (c == 'D' && !strncmp(buf->mv_data, "ATA=END", STRLENOF("ATA=END"))) + return EOF; + goto badend; + } + } + if (fgets(buf->mv_data, buf->mv_size, stdin) == NULL) { + Eof = 1; + return EOF; + } + lineno++; + + c1 = buf->mv_data; + len = strlen((char *)c1); + l2 = len; + + /* Is buffer too short? */ + while (c1[len-1] != '\n') { + buf->mv_data = realloc(buf->mv_data, buf->mv_size*2); + if (!buf->mv_data) { + Eof = 1; + fprintf(stderr, "%s: line %" Z "d: out of memory, line too long\n", + prog, lineno); + return EOF; + } + c1 = buf->mv_data; + c1 += l2; + if (fgets((char *)c1, buf->mv_size+1, stdin) == NULL) { + Eof = 1; + badend(); + return EOF; + } + buf->mv_size *= 2; + len = strlen((char *)c1); + l2 += len; + } + c1 = c2 = buf->mv_data; + len = l2; + c1[--len] = '\0'; + end = c1 + len; + + if (mode & PRINT) { + while (c2 < end) { + if (*c2 == '\\') { + if (c2[1] == '\\') { + c1++; c2 += 2; + } else { + if (c2+3 > end || !isxdigit(c2[1]) || !isxdigit(c2[2])) { + Eof = 1; + badend(); + return EOF; + } + *c1++ = unhex(++c2); + c2 += 2; + } + } else { + c1++; c2++; + } + } + } else { + /* odd length not allowed */ + if (len & 1) { + Eof = 1; + badend(); + return EOF; + } + while (c2 < end) { + if (!isxdigit(*c2) || !isxdigit(c2[1])) { + Eof = 1; + badend(); + return EOF; + } + *c1++ = unhex(c2); + c2 += 2; + } + } + c2 = out->mv_data = buf->mv_data; + out->mv_size = c1 - c2; + + return 0; +} + +static void usage(void) +{ + fprintf(stderr, "usage: %s [-V] [-f input] [-n] [-s name] [-N] [-T] dbpath\n", prog); + exit(EXIT_FAILURE); +} + +int main(int argc, char *argv[]) +{ + int i, rc; + MDB_env *env; + MDB_txn *txn; + MDB_cursor *mc; + MDB_dbi dbi; + char *envname; + int envflags = 0, putflags = 0; + int dohdr = 0; + + prog = argv[0]; + + if (argc < 2) { + usage(); + } + + /* -f: load file instead of stdin + * -n: use NOSUBDIR flag on env_open + * -s: load into named subDB + * -N: use NOOVERWRITE on puts + * -T: read plaintext + * -V: print version and exit + */ + while ((i = getopt(argc, argv, "f:ns:NTV")) != EOF) { + switch(i) { + case 'V': + printf("%s\n", MDB_VERSION_STRING); + exit(0); + break; + case 'f': + if (freopen(optarg, "r", stdin) == NULL) { + fprintf(stderr, "%s: %s: reopen: %s\n", + prog, optarg, strerror(errno)); + exit(EXIT_FAILURE); + } + break; + case 'n': + envflags |= MDB_NOSUBDIR; + break; + case 's': + subname = strdup(optarg); + break; + case 'N': + putflags = 
MDB_NOOVERWRITE|MDB_NODUPDATA; + break; + case 'T': + mode |= NOHDR | PRINT; + break; + default: + usage(); + } + } + + if (optind != argc - 1) + usage(); + + dbuf.mv_size = 4096; + dbuf.mv_data = malloc(dbuf.mv_size); + + if (!(mode & NOHDR)) + readhdr(); + + envname = argv[optind]; + rc = mdb_env_create(&env); + if (rc) { + fprintf(stderr, "mdb_env_create failed, error %d %s\n", rc, mdb_strerror(rc)); + return EXIT_FAILURE; + } + + mdb_env_set_maxdbs(env, 2); + + if (info.me_maxreaders) + mdb_env_set_maxreaders(env, info.me_maxreaders); + + if (info.me_mapsize) + mdb_env_set_mapsize(env, info.me_mapsize); + + if (info.me_mapaddr) + envflags |= MDB_FIXEDMAP; + + rc = mdb_env_open(env, envname, envflags, 0664); + if (rc) { + fprintf(stderr, "mdb_env_open failed, error %d %s\n", rc, mdb_strerror(rc)); + goto env_close; + } + + kbuf.mv_size = mdb_env_get_maxkeysize(env) * 2 + 2; + kbuf.mv_data = malloc(kbuf.mv_size); + + while(!Eof) { + MDB_val key, data; + int batch = 0; + flags = 0; + + if (!dohdr) { + dohdr = 1; + } else if (!(mode & NOHDR)) + readhdr(); + + rc = mdb_txn_begin(env, NULL, 0, &txn); + if (rc) { + fprintf(stderr, "mdb_txn_begin failed, error %d %s\n", rc, mdb_strerror(rc)); + goto env_close; + } + + rc = mdb_open(txn, subname, flags|MDB_CREATE, &dbi); + if (rc) { + fprintf(stderr, "mdb_open failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + + rc = mdb_cursor_open(txn, dbi, &mc); + if (rc) { + fprintf(stderr, "mdb_cursor_open failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + + while(1) { + rc = readline(&key, &kbuf); + if (rc) /* rc == EOF */ + break; + + rc = readline(&data, &dbuf); + if (rc) { + fprintf(stderr, "%s: line %" Z "d: failed to read key value\n", prog, lineno); + goto txn_abort; + } + + rc = mdb_cursor_put(mc, &key, &data, putflags); + if (rc == MDB_KEYEXIST && putflags) + continue; + if (rc) { + fprintf(stderr, "mdb_cursor_put failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + batch++; + if (batch == 100) { + rc = mdb_txn_commit(txn); + if (rc) { + fprintf(stderr, "%s: line %" Z "d: txn_commit: %s\n", + prog, lineno, mdb_strerror(rc)); + goto env_close; + } + rc = mdb_txn_begin(env, NULL, 0, &txn); + if (rc) { + fprintf(stderr, "mdb_txn_begin failed, error %d %s\n", rc, mdb_strerror(rc)); + goto env_close; + } + rc = mdb_cursor_open(txn, dbi, &mc); + if (rc) { + fprintf(stderr, "mdb_cursor_open failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + batch = 0; + } + } + rc = mdb_txn_commit(txn); + txn = NULL; + if (rc) { + fprintf(stderr, "%s: line %" Z "d: txn_commit: %s\n", + prog, lineno, mdb_strerror(rc)); + goto env_close; + } + mdb_dbi_close(env, dbi); + } + +txn_abort: + mdb_txn_abort(txn); +env_close: + mdb_env_close(env); + + return rc ? EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/deps/liblmdb/mdb_stat.1 b/deps/liblmdb/mdb_stat.1 new file mode 100644 index 00000000..351c0175 --- /dev/null +++ b/deps/liblmdb/mdb_stat.1 @@ -0,0 +1,64 @@ +.TH MDB_STAT 1 "2014/06/20" "LMDB 0.9.14" +.\" Copyright 2012-2015 Howard Chu, Symas Corp. All Rights Reserved. +.\" Copying restrictions apply. See COPYRIGHT/LICENSE. +.SH NAME +mdb_stat \- LMDB environment status tool +.SH SYNOPSIS +.B mdb_stat +[\c +.BR \-V ] +[\c +.BR \-e ] +[\c +.BR \-f [ f [ f ]]] +[\c +.BR \-n ] +[\c +.BR \-r [ r ]] +[\c +.BR \-a \ | +.BI \-s \ subdb\fR] +.BR \ envpath +.SH DESCRIPTION +The +.B mdb_stat +utility displays the status of an LMDB environment. 
+.SH OPTIONS +.TP +.BR \-V +Write the library version number to the standard output, and exit. +.TP +.BR \-e +Display information about the database environment. +.TP +.BR \-f +Display information about the environment freelist. +If \fB\-ff\fP is given, summarize each freelist entry. +If \fB\-fff\fP is given, display the full list of page IDs in the freelist. +.TP +.BR \-n +Display the status of an LMDB database which does not use subdirectories. +.TP +.BR \-r +Display information about the environment reader table. +Shows the process ID, thread ID, and transaction ID for each active +reader slot. The process ID and transaction ID are in decimal, the +thread ID is in hexadecimal. The transaction ID is displayed as "-" +if the reader does not currently have a read transaction open. +If \fB\-rr\fP is given, check for stale entries in the reader +table and clear them. The reader table will be printed again +after the check is performed. +.TP +.BR \-a +Display the status of all of the subdatabases in the environment. +.TP +.BR \-s \ subdb +Display the status of a specific subdatabase. +.SH DIAGNOSTICS +Exit status is zero if no errors occur. +Errors result in a non-zero exit status and +a diagnostic message being written to standard error. +.SH "SEE ALSO" +.BR mdb_copy (1) +.SH AUTHOR +Howard Chu of Symas Corporation diff --git a/deps/liblmdb/mdb_stat.c b/deps/liblmdb/mdb_stat.c new file mode 100644 index 00000000..6f4b3ee4 --- /dev/null +++ b/deps/liblmdb/mdb_stat.c @@ -0,0 +1,263 @@ +/* mdb_stat.c - memory-mapped database status tool */ +/* + * Copyright 2011-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . 
+ */ +#include +#include +#include +#include +#include "lmdb.h" + +#ifdef _WIN32 +#define Z "I" +#else +#define Z "z" +#endif + +static void prstat(MDB_stat *ms) +{ +#if 0 + printf(" Page size: %u\n", ms->ms_psize); +#endif + printf(" Tree depth: %u\n", ms->ms_depth); + printf(" Branch pages: %"Z"u\n", ms->ms_branch_pages); + printf(" Leaf pages: %"Z"u\n", ms->ms_leaf_pages); + printf(" Overflow pages: %"Z"u\n", ms->ms_overflow_pages); + printf(" Entries: %"Z"u\n", ms->ms_entries); +} + +static void usage(char *prog) +{ + fprintf(stderr, "usage: %s [-V] [-n] [-e] [-r[r]] [-f[f[f]]] [-a|-s subdb] dbpath\n", prog); + exit(EXIT_FAILURE); +} + +int main(int argc, char *argv[]) +{ + int i, rc; + MDB_env *env; + MDB_txn *txn; + MDB_dbi dbi; + MDB_stat mst; + MDB_envinfo mei; + char *prog = argv[0]; + char *envname; + char *subname = NULL; + int alldbs = 0, envinfo = 0, envflags = 0, freinfo = 0, rdrinfo = 0; + + if (argc < 2) { + usage(prog); + } + + /* -a: print stat of main DB and all subDBs + * -s: print stat of only the named subDB + * -e: print env info + * -f: print freelist info + * -r: print reader info + * -n: use NOSUBDIR flag on env_open + * -V: print version and exit + * (default) print stat of only the main DB + */ + while ((i = getopt(argc, argv, "Vaefnrs:")) != EOF) { + switch(i) { + case 'V': + printf("%s\n", MDB_VERSION_STRING); + exit(0); + break; + case 'a': + if (subname) + usage(prog); + alldbs++; + break; + case 'e': + envinfo++; + break; + case 'f': + freinfo++; + break; + case 'n': + envflags |= MDB_NOSUBDIR; + break; + case 'r': + rdrinfo++; + break; + case 's': + if (alldbs) + usage(prog); + subname = optarg; + break; + default: + usage(prog); + } + } + + if (optind != argc - 1) + usage(prog); + + envname = argv[optind]; + rc = mdb_env_create(&env); + if (rc) { + fprintf(stderr, "mdb_env_create failed, error %d %s\n", rc, mdb_strerror(rc)); + return EXIT_FAILURE; + } + + if (alldbs || subname) { + mdb_env_set_maxdbs(env, 4); + } + + rc = mdb_env_open(env, envname, envflags | MDB_RDONLY, 0664); + if (rc) { + fprintf(stderr, "mdb_env_open failed, error %d %s\n", rc, mdb_strerror(rc)); + goto env_close; + } + + if (envinfo) { + (void)mdb_env_stat(env, &mst); + (void)mdb_env_info(env, &mei); + printf("Environment Info\n"); + printf(" Map address: %p\n", mei.me_mapaddr); + printf(" Map size: %"Z"u\n", mei.me_mapsize); + printf(" Page size: %u\n", mst.ms_psize); + printf(" Max pages: %"Z"u\n", mei.me_mapsize / mst.ms_psize); + printf(" Number of pages used: %"Z"u\n", mei.me_last_pgno+1); + printf(" Last transaction ID: %"Z"u\n", mei.me_last_txnid); + printf(" Max readers: %u\n", mei.me_maxreaders); + printf(" Number of readers used: %u\n", mei.me_numreaders); + } + + if (rdrinfo) { + printf("Reader Table Status\n"); + rc = mdb_reader_list(env, (MDB_msg_func *)fputs, stdout); + if (rdrinfo > 1) { + int dead; + mdb_reader_check(env, &dead); + printf(" %d stale readers cleared.\n", dead); + rc = mdb_reader_list(env, (MDB_msg_func *)fputs, stdout); + } + if (!(subname || alldbs || freinfo)) + goto env_close; + } + + rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); + if (rc) { + fprintf(stderr, "mdb_txn_begin failed, error %d %s\n", rc, mdb_strerror(rc)); + goto env_close; + } + + if (freinfo) { + MDB_cursor *cursor; + MDB_val key, data; + size_t pages = 0, *iptr; + + printf("Freelist Status\n"); + dbi = 0; + rc = mdb_cursor_open(txn, dbi, &cursor); + if (rc) { + fprintf(stderr, "mdb_cursor_open failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + rc = 
mdb_stat(txn, dbi, &mst); + if (rc) { + fprintf(stderr, "mdb_stat failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + prstat(&mst); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_NEXT)) == 0) { + iptr = data.mv_data; + pages += *iptr; + if (freinfo > 1) { + char *bad = ""; + size_t pg, prev; + ssize_t i, j, span = 0; + j = *iptr++; + for (i = j, prev = 1; --i >= 0; ) { + pg = iptr[i]; + if (pg <= prev) + bad = " [bad sequence]"; + prev = pg; + pg += span; + for (; i >= span && iptr[i-span] == pg; span++, pg++) ; + } + printf(" Transaction %"Z"u, %"Z"d pages, maxspan %"Z"d%s\n", + *(size_t *)key.mv_data, j, span, bad); + if (freinfo > 2) { + for (--j; j >= 0; ) { + pg = iptr[j]; + for (span=1; --j >= 0 && iptr[j] == pg+span; span++) ; + printf(span>1 ? " %9"Z"u[%"Z"d]\n" : " %9"Z"u\n", + pg, span); + } + } + } + } + mdb_cursor_close(cursor); + printf(" Free pages: %"Z"u\n", pages); + } + + rc = mdb_open(txn, subname, 0, &dbi); + if (rc) { + fprintf(stderr, "mdb_open failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + + rc = mdb_stat(txn, dbi, &mst); + if (rc) { + fprintf(stderr, "mdb_stat failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + printf("Status of %s\n", subname ? subname : "Main DB"); + prstat(&mst); + + if (alldbs) { + MDB_cursor *cursor; + MDB_val key; + + rc = mdb_cursor_open(txn, dbi, &cursor); + if (rc) { + fprintf(stderr, "mdb_cursor_open failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + while ((rc = mdb_cursor_get(cursor, &key, NULL, MDB_NEXT_NODUP)) == 0) { + char *str; + MDB_dbi db2; + if (memchr(key.mv_data, '\0', key.mv_size)) + continue; + str = malloc(key.mv_size+1); + memcpy(str, key.mv_data, key.mv_size); + str[key.mv_size] = '\0'; + rc = mdb_open(txn, str, 0, &db2); + if (rc == MDB_SUCCESS) + printf("Status of %s\n", str); + free(str); + if (rc) continue; + rc = mdb_stat(txn, db2, &mst); + if (rc) { + fprintf(stderr, "mdb_stat failed, error %d %s\n", rc, mdb_strerror(rc)); + goto txn_abort; + } + prstat(&mst); + mdb_close(env, db2); + } + mdb_cursor_close(cursor); + } + + if (rc == MDB_NOTFOUND) + rc = MDB_SUCCESS; + + mdb_close(env, dbi); +txn_abort: + mdb_txn_abort(txn); +env_close: + mdb_env_close(env); + + return rc ? EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/deps/liblmdb/midl.c b/deps/liblmdb/midl.c new file mode 100644 index 00000000..57a9d492 --- /dev/null +++ b/deps/liblmdb/midl.c @@ -0,0 +1,358 @@ +/** @file midl.c + * @brief ldap bdb back-end ID List functions */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 2000-2015 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +#include +#include +#include +#include +#include +#include "midl.h" + +/** @defgroup internal LMDB Internals + * @{ + */ +/** @defgroup idls ID List Management + * @{ + */ +#define CMP(x,y) ( (x) < (y) ? 
-1 : (x) > (y) ) + +unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id ) +{ + /* + * binary search of id in ids + * if found, returns position of id + * if not found, returns first position greater than id + */ + unsigned base = 0; + unsigned cursor = 1; + int val = 0; + unsigned n = ids[0]; + + while( 0 < n ) { + unsigned pivot = n >> 1; + cursor = base + pivot + 1; + val = CMP( ids[cursor], id ); + + if( val < 0 ) { + n = pivot; + + } else if ( val > 0 ) { + base = cursor; + n -= pivot + 1; + + } else { + return cursor; + } + } + + if( val > 0 ) { + ++cursor; + } + return cursor; +} + +#if 0 /* superseded by append/sort */ +int mdb_midl_insert( MDB_IDL ids, MDB_ID id ) +{ + unsigned x, i; + + x = mdb_midl_search( ids, id ); + assert( x > 0 ); + + if( x < 1 ) { + /* internal error */ + return -2; + } + + if ( x <= ids[0] && ids[x] == id ) { + /* duplicate */ + assert(0); + return -1; + } + + if ( ++ids[0] >= MDB_IDL_DB_MAX ) { + /* no room */ + --ids[0]; + return -2; + + } else { + /* insert id */ + for (i=ids[0]; i>x; i--) + ids[i] = ids[i-1]; + ids[x] = id; + } + + return 0; +} +#endif + +MDB_IDL mdb_midl_alloc(int num) +{ + MDB_IDL ids = malloc((num+2) * sizeof(MDB_ID)); + if (ids) { + *ids++ = num; + *ids = 0; + } + return ids; +} + +void mdb_midl_free(MDB_IDL ids) +{ + if (ids) + free(ids-1); +} + +void mdb_midl_shrink( MDB_IDL *idp ) +{ + MDB_IDL ids = *idp; + if (*(--ids) > MDB_IDL_UM_MAX && + (ids = realloc(ids, (MDB_IDL_UM_MAX+1) * sizeof(MDB_ID)))) + { + *ids++ = MDB_IDL_UM_MAX; + *idp = ids; + } +} + +static int mdb_midl_grow( MDB_IDL *idp, int num ) +{ + MDB_IDL idn = *idp-1; + /* grow it */ + idn = realloc(idn, (*idn + num + 2) * sizeof(MDB_ID)); + if (!idn) + return ENOMEM; + *idn++ += num; + *idp = idn; + return 0; +} + +int mdb_midl_need( MDB_IDL *idp, unsigned num ) +{ + MDB_IDL ids = *idp; + num += ids[0]; + if (num > ids[-1]) { + num = (num + num/4 + (256 + 2)) & -256; + if (!(ids = realloc(ids-1, num * sizeof(MDB_ID)))) + return ENOMEM; + *ids++ = num - 2; + *idp = ids; + } + return 0; +} + +int mdb_midl_append( MDB_IDL *idp, MDB_ID id ) +{ + MDB_IDL ids = *idp; + /* Too big? */ + if (ids[0] >= ids[-1]) { + if (mdb_midl_grow(idp, MDB_IDL_UM_MAX)) + return ENOMEM; + ids = *idp; + } + ids[0]++; + ids[ids[0]] = id; + return 0; +} + +int mdb_midl_append_list( MDB_IDL *idp, MDB_IDL app ) +{ + MDB_IDL ids = *idp; + /* Too big? */ + if (ids[0] + app[0] >= ids[-1]) { + if (mdb_midl_grow(idp, app[0])) + return ENOMEM; + ids = *idp; + } + memcpy(&ids[ids[0]+1], &app[1], app[0] * sizeof(MDB_ID)); + ids[0] += app[0]; + return 0; +} + +int mdb_midl_append_range( MDB_IDL *idp, MDB_ID id, unsigned n ) +{ + MDB_ID *ids = *idp, len = ids[0]; + /* Too big? 
*/ + if (len + n > ids[-1]) { + if (mdb_midl_grow(idp, n | MDB_IDL_UM_MAX)) + return ENOMEM; + ids = *idp; + } + ids[0] = len + n; + ids += len; + while (n) + ids[n--] = id++; + return 0; +} + +void mdb_midl_xmerge( MDB_IDL idl, MDB_IDL merge ) +{ + MDB_ID old_id, merge_id, i = merge[0], j = idl[0], k = i+j, total = k; + idl[0] = (MDB_ID)-1; /* delimiter for idl scan below */ + old_id = idl[j]; + while (i) { + merge_id = merge[i--]; + for (; old_id < merge_id; old_id = idl[--j]) + idl[k--] = old_id; + idl[k--] = merge_id; + } + idl[0] = total; +} + +/* Quicksort + Insertion sort for small arrays */ + +#define SMALL 8 +#define MIDL_SWAP(a,b) { itmp=(a); (a)=(b); (b)=itmp; } + +void +mdb_midl_sort( MDB_IDL ids ) +{ + /* Max possible depth of int-indexed tree * 2 items/level */ + int istack[sizeof(int)*CHAR_BIT * 2]; + int i,j,k,l,ir,jstack; + MDB_ID a, itmp; + + ir = (int)ids[0]; + l = 1; + jstack = 0; + for(;;) { + if (ir - l < SMALL) { /* Insertion sort */ + for (j=l+1;j<=ir;j++) { + a = ids[j]; + for (i=j-1;i>=1;i--) { + if (ids[i] >= a) break; + ids[i+1] = ids[i]; + } + ids[i+1] = a; + } + if (jstack == 0) break; + ir = istack[jstack--]; + l = istack[jstack--]; + } else { + k = (l + ir) >> 1; /* Choose median of left, center, right */ + MIDL_SWAP(ids[k], ids[l+1]); + if (ids[l] < ids[ir]) { + MIDL_SWAP(ids[l], ids[ir]); + } + if (ids[l+1] < ids[ir]) { + MIDL_SWAP(ids[l+1], ids[ir]); + } + if (ids[l] < ids[l+1]) { + MIDL_SWAP(ids[l], ids[l+1]); + } + i = l+1; + j = ir; + a = ids[l+1]; + for(;;) { + do i++; while(ids[i] > a); + do j--; while(ids[j] < a); + if (j < i) break; + MIDL_SWAP(ids[i],ids[j]); + } + ids[l+1] = ids[j]; + ids[j] = a; + jstack += 2; + if (ir-i+1 >= j-l) { + istack[jstack] = ir; + istack[jstack-1] = i; + ir = j-1; + } else { + istack[jstack] = j-1; + istack[jstack-1] = l; + l = i; + } + } + } +} + +unsigned mdb_mid2l_search( MDB_ID2L ids, MDB_ID id ) +{ + /* + * binary search of id in ids + * if found, returns position of id + * if not found, returns first position greater than id + */ + unsigned base = 0; + unsigned cursor = 1; + int val = 0; + unsigned n = (unsigned)ids[0].mid; + + while( 0 < n ) { + unsigned pivot = n >> 1; + cursor = base + pivot + 1; + val = CMP( id, ids[cursor].mid ); + + if( val < 0 ) { + n = pivot; + + } else if ( val > 0 ) { + base = cursor; + n -= pivot + 1; + + } else { + return cursor; + } + } + + if( val > 0 ) { + ++cursor; + } + return cursor; +} + +int mdb_mid2l_insert( MDB_ID2L ids, MDB_ID2 *id ) +{ + unsigned x, i; + + x = mdb_mid2l_search( ids, id->mid ); + + if( x < 1 ) { + /* internal error */ + return -2; + } + + if ( x <= ids[0].mid && ids[x].mid == id->mid ) { + /* duplicate */ + return -1; + } + + if ( ids[0].mid >= MDB_IDL_UM_MAX ) { + /* too big */ + return -2; + + } else { + /* insert id */ + ids[0].mid++; + for (i=(unsigned)ids[0].mid; i>x; i--) + ids[i] = ids[i-1]; + ids[x] = *id; + } + + return 0; +} + +int mdb_mid2l_append( MDB_ID2L ids, MDB_ID2 *id ) +{ + /* Too big? */ + if (ids[0].mid >= MDB_IDL_UM_MAX) { + return -2; + } + ids[0].mid++; + ids[ids[0].mid] = *id; + return 0; +} + +/** @} */ +/** @} */ diff --git a/deps/liblmdb/midl.h b/deps/liblmdb/midl.h new file mode 100644 index 00000000..2331e783 --- /dev/null +++ b/deps/liblmdb/midl.h @@ -0,0 +1,185 @@ +/** @file midl.h + * @brief LMDB ID List header file. + * + * This file was originally part of back-bdb but has been + * modified for use in libmdb. Most of the macros defined + * in this file are unused, just left over from the original. 
+ * + * This file is only used internally in libmdb and its definitions + * are not exposed publicly. + */ +/* $OpenLDAP$ */ +/* This work is part of OpenLDAP Software . + * + * Copyright 2000-2015 The OpenLDAP Foundation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +#ifndef _MDB_MIDL_H_ +#define _MDB_MIDL_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** @defgroup internal LMDB Internals + * @{ + */ + +/** @defgroup idls ID List Management + * @{ + */ + /** A generic unsigned ID number. These were entryIDs in back-bdb. + * Preferably it should have the same size as a pointer. + */ +typedef size_t MDB_ID; + + /** An IDL is an ID List, a sorted array of IDs. The first + * element of the array is a counter for how many actual + * IDs are in the list. In the original back-bdb code, IDLs are + * sorted in ascending order. For libmdb IDLs are sorted in + * descending order. + */ +typedef MDB_ID *MDB_IDL; + +/* IDL sizes - likely should be even bigger + * limiting factors: sizeof(ID), thread stack size + */ +#define MDB_IDL_LOGN 16 /* DB_SIZE is 2^16, UM_SIZE is 2^17 */ +#define MDB_IDL_DB_SIZE (1<. + */ +#include +#include +#include +#include "lmdb.h" + +#define E(expr) CHECK((rc = (expr)) == MDB_SUCCESS, #expr) +#define RES(err, expr) ((rc = expr) == (err) || (CHECK(!rc, #expr), 0)) +#define CHECK(test, msg) ((test) ? (void)0 : ((void)fprintf(stderr, \ + "%s:%d: %s: %s\n", __FILE__, __LINE__, msg, mdb_strerror(rc)), abort())) + +int main(int argc,char * argv[]) +{ + int i = 0, j = 0, rc; + MDB_env *env; + MDB_dbi dbi; + MDB_val key, data; + MDB_txn *txn; + MDB_stat mst; + MDB_cursor *cursor, *cur2; + MDB_cursor_op op; + int count; + int *values; + char sval[32] = ""; + + srand(time(NULL)); + + count = (rand()%384) + 64; + values = (int *)malloc(count*sizeof(int)); + + for(i = 0;i in each iteration, since MDB_NOOVERWRITE may modify it */ + data.mv_size = sizeof(sval); + data.mv_data = sval; + if (RES(MDB_KEYEXIST, mdb_put(txn, dbi, &key, &data, MDB_NOOVERWRITE))) { + j++; + data.mv_size = sizeof(sval); + data.mv_data = sval; + } + } + if (j) printf("%d duplicates skipped\n", j); + E(mdb_txn_commit(txn)); + E(mdb_env_stat(env, &mst)); + + E(mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)); + E(mdb_cursor_open(txn, dbi, &cursor)); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_NEXT)) == 0) { + printf("key: %p %.*s, data: %p %.*s\n", + key.mv_data, (int) key.mv_size, (char *) key.mv_data, + data.mv_data, (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + mdb_cursor_close(cursor); + mdb_txn_abort(txn); + + j=0; + key.mv_data = sval; + for (i= count - 1; i > -1; i-= (rand()%5)) { + j++; + txn=NULL; + E(mdb_txn_begin(env, NULL, 0, &txn)); + sprintf(sval, "%03x ", values[i]); + if (RES(MDB_NOTFOUND, mdb_del(txn, dbi, &key, NULL))) { + j--; + mdb_txn_abort(txn); + } else { + E(mdb_txn_commit(txn)); + } + } + free(values); + printf("Deleted %d values\n", j); + + E(mdb_env_stat(env, &mst)); + E(mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)); + E(mdb_cursor_open(txn, dbi, &cursor)); + printf("Cursor next\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_NEXT)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + 
(int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + printf("Cursor last\n"); + E(mdb_cursor_get(cursor, &key, &data, MDB_LAST)); + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + printf("Cursor prev\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_PREV)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + printf("Cursor last/prev\n"); + E(mdb_cursor_get(cursor, &key, &data, MDB_LAST)); + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + E(mdb_cursor_get(cursor, &key, &data, MDB_PREV)); + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + + mdb_cursor_close(cursor); + mdb_txn_abort(txn); + + printf("Deleting with cursor\n"); + E(mdb_txn_begin(env, NULL, 0, &txn)); + E(mdb_cursor_open(txn, dbi, &cur2)); + for (i=0; i<50; i++) { + if (RES(MDB_NOTFOUND, mdb_cursor_get(cur2, &key, &data, MDB_NEXT))) + break; + printf("key: %p %.*s, data: %p %.*s\n", + key.mv_data, (int) key.mv_size, (char *) key.mv_data, + data.mv_data, (int) data.mv_size, (char *) data.mv_data); + E(mdb_del(txn, dbi, &key, NULL)); + } + + printf("Restarting cursor in txn\n"); + for (op=MDB_FIRST, i=0; i<=32; op=MDB_NEXT, i++) { + if (RES(MDB_NOTFOUND, mdb_cursor_get(cur2, &key, &data, op))) + break; + printf("key: %p %.*s, data: %p %.*s\n", + key.mv_data, (int) key.mv_size, (char *) key.mv_data, + data.mv_data, (int) data.mv_size, (char *) data.mv_data); + } + mdb_cursor_close(cur2); + E(mdb_txn_commit(txn)); + + printf("Restarting cursor outside txn\n"); + E(mdb_txn_begin(env, NULL, 0, &txn)); + E(mdb_cursor_open(txn, dbi, &cursor)); + for (op=MDB_FIRST, i=0; i<=32; op=MDB_NEXT, i++) { + if (RES(MDB_NOTFOUND, mdb_cursor_get(cursor, &key, &data, op))) + break; + printf("key: %p %.*s, data: %p %.*s\n", + key.mv_data, (int) key.mv_size, (char *) key.mv_data, + data.mv_data, (int) data.mv_size, (char *) data.mv_data); + } + mdb_cursor_close(cursor); + mdb_txn_abort(txn); + + mdb_dbi_close(env, dbi); + mdb_env_close(env); + + return 0; +} diff --git a/deps/liblmdb/mtest2.c b/deps/liblmdb/mtest2.c new file mode 100644 index 00000000..eacbe59d --- /dev/null +++ b/deps/liblmdb/mtest2.c @@ -0,0 +1,124 @@ +/* mtest2.c - memory-mapped database tester/toy */ +/* + * Copyright 2011-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* Just like mtest.c, but using a subDB instead of the main DB */ + +#include +#include +#include +#include "lmdb.h" + +#define E(expr) CHECK((rc = (expr)) == MDB_SUCCESS, #expr) +#define RES(err, expr) ((rc = expr) == (err) || (CHECK(!rc, #expr), 0)) +#define CHECK(test, msg) ((test) ? 
(void)0 : ((void)fprintf(stderr, \ + "%s:%d: %s: %s\n", __FILE__, __LINE__, msg, mdb_strerror(rc)), abort())) + +int main(int argc,char * argv[]) +{ + int i = 0, j = 0, rc; + MDB_env *env; + MDB_dbi dbi; + MDB_val key, data; + MDB_txn *txn; + MDB_stat mst; + MDB_cursor *cursor; + int count; + int *values; + char sval[32] = ""; + + srand(time(NULL)); + + count = (rand()%384) + 64; + values = (int *)malloc(count*sizeof(int)); + + for(i = 0;i -1; i-= (rand()%5)) { + j++; + txn=NULL; + E(mdb_txn_begin(env, NULL, 0, &txn)); + sprintf(sval, "%03x ", values[i]); + if (RES(MDB_NOTFOUND, mdb_del(txn, dbi, &key, NULL))) { + j--; + mdb_txn_abort(txn); + } else { + E(mdb_txn_commit(txn)); + } + } + free(values); + printf("Deleted %d values\n", j); + + E(mdb_env_stat(env, &mst)); + E(mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)); + E(mdb_cursor_open(txn, dbi, &cursor)); + printf("Cursor next\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_NEXT)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + printf("Cursor prev\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_PREV)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + mdb_cursor_close(cursor); + mdb_txn_abort(txn); + + mdb_dbi_close(env, dbi); + mdb_env_close(env); + return 0; +} diff --git a/deps/liblmdb/mtest3.c b/deps/liblmdb/mtest3.c new file mode 100644 index 00000000..9db79e62 --- /dev/null +++ b/deps/liblmdb/mtest3.c @@ -0,0 +1,133 @@ +/* mtest3.c - memory-mapped database tester/toy */ +/* + * Copyright 2011-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* Tests for sorted duplicate DBs */ +#include +#include +#include +#include +#include "lmdb.h" + +#define E(expr) CHECK((rc = (expr)) == MDB_SUCCESS, #expr) +#define RES(err, expr) ((rc = expr) == (err) || (CHECK(!rc, #expr), 0)) +#define CHECK(test, msg) ((test) ? 
(void)0 : ((void)fprintf(stderr, \ + "%s:%d: %s: %s\n", __FILE__, __LINE__, msg, mdb_strerror(rc)), abort())) + +int main(int argc,char * argv[]) +{ + int i = 0, j = 0, rc; + MDB_env *env; + MDB_dbi dbi; + MDB_val key, data; + MDB_txn *txn; + MDB_stat mst; + MDB_cursor *cursor; + int count; + int *values; + char sval[32]; + char kval[sizeof(int)]; + + srand(time(NULL)); + + memset(sval, 0, sizeof(sval)); + + count = (rand()%384) + 64; + values = (int *)malloc(count*sizeof(int)); + + for(i = 0;i -1; i-= (rand()%5)) { + j++; + txn=NULL; + E(mdb_txn_begin(env, NULL, 0, &txn)); + sprintf(kval, "%03x", values[i & ~0x0f]); + sprintf(sval, "%03x %d foo bar", values[i], values[i]); + key.mv_size = sizeof(int); + key.mv_data = kval; + data.mv_size = sizeof(sval); + data.mv_data = sval; + if (RES(MDB_NOTFOUND, mdb_del(txn, dbi, &key, &data))) { + j--; + mdb_txn_abort(txn); + } else { + E(mdb_txn_commit(txn)); + } + } + free(values); + printf("Deleted %d values\n", j); + + E(mdb_env_stat(env, &mst)); + E(mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)); + E(mdb_cursor_open(txn, dbi, &cursor)); + printf("Cursor next\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_NEXT)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + printf("Cursor prev\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_PREV)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + mdb_cursor_close(cursor); + mdb_txn_abort(txn); + + mdb_dbi_close(env, dbi); + mdb_env_close(env); + return 0; +} diff --git a/deps/liblmdb/mtest4.c b/deps/liblmdb/mtest4.c new file mode 100644 index 00000000..6df890e2 --- /dev/null +++ b/deps/liblmdb/mtest4.c @@ -0,0 +1,168 @@ +/* mtest4.c - memory-mapped database tester/toy */ +/* + * Copyright 2011-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* Tests for sorted duplicate DBs with fixed-size keys */ +#include +#include +#include +#include +#include "lmdb.h" + +#define E(expr) CHECK((rc = (expr)) == MDB_SUCCESS, #expr) +#define RES(err, expr) ((rc = expr) == (err) || (CHECK(!rc, #expr), 0)) +#define CHECK(test, msg) ((test) ? 
(void)0 : ((void)fprintf(stderr, \ + "%s:%d: %s: %s\n", __FILE__, __LINE__, msg, mdb_strerror(rc)), abort())) + +int main(int argc,char * argv[]) +{ + int i = 0, j = 0, rc; + MDB_env *env; + MDB_dbi dbi; + MDB_val key, data; + MDB_txn *txn; + MDB_stat mst; + MDB_cursor *cursor; + int count; + int *values; + char sval[8]; + char kval[sizeof(int)]; + + memset(sval, 0, sizeof(sval)); + + count = 510; + values = (int *)malloc(count*sizeof(int)); + + for(i = 0;i -1; i-= (rand()%3)) { + j++; + txn=NULL; + E(mdb_txn_begin(env, NULL, 0, &txn)); + sprintf(sval, "%07x", values[i]); + key.mv_size = sizeof(int); + key.mv_data = kval; + data.mv_size = sizeof(sval); + data.mv_data = sval; + if (RES(MDB_NOTFOUND, mdb_del(txn, dbi, &key, &data))) { + j--; + mdb_txn_abort(txn); + } else { + E(mdb_txn_commit(txn)); + } + } + free(values); + printf("Deleted %d values\n", j); + + E(mdb_env_stat(env, &mst)); + E(mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)); + E(mdb_cursor_open(txn, dbi, &cursor)); + printf("Cursor next\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_NEXT)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + printf("Cursor prev\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_PREV)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + mdb_cursor_close(cursor); + mdb_txn_abort(txn); + + mdb_dbi_close(env, dbi); + mdb_env_close(env); + return 0; +} diff --git a/deps/liblmdb/mtest5.c b/deps/liblmdb/mtest5.c new file mode 100644 index 00000000..14e3c0da --- /dev/null +++ b/deps/liblmdb/mtest5.c @@ -0,0 +1,135 @@ +/* mtest5.c - memory-mapped database tester/toy */ +/* + * Copyright 2011-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* Tests for sorted duplicate DBs using cursor_put */ +#include +#include +#include +#include +#include "lmdb.h" + +#define E(expr) CHECK((rc = (expr)) == MDB_SUCCESS, #expr) +#define RES(err, expr) ((rc = expr) == (err) || (CHECK(!rc, #expr), 0)) +#define CHECK(test, msg) ((test) ? 
(void)0 : ((void)fprintf(stderr, \ + "%s:%d: %s: %s\n", __FILE__, __LINE__, msg, mdb_strerror(rc)), abort())) + +int main(int argc,char * argv[]) +{ + int i = 0, j = 0, rc; + MDB_env *env; + MDB_dbi dbi; + MDB_val key, data; + MDB_txn *txn; + MDB_stat mst; + MDB_cursor *cursor; + int count; + int *values; + char sval[32]; + char kval[sizeof(int)]; + + srand(time(NULL)); + + memset(sval, 0, sizeof(sval)); + + count = (rand()%384) + 64; + values = (int *)malloc(count*sizeof(int)); + + for(i = 0;i -1; i-= (rand()%5)) { + j++; + txn=NULL; + E(mdb_txn_begin(env, NULL, 0, &txn)); + sprintf(kval, "%03x", values[i & ~0x0f]); + sprintf(sval, "%03x %d foo bar", values[i], values[i]); + key.mv_size = sizeof(int); + key.mv_data = kval; + data.mv_size = sizeof(sval); + data.mv_data = sval; + if (RES(MDB_NOTFOUND, mdb_del(txn, dbi, &key, &data))) { + j--; + mdb_txn_abort(txn); + } else { + E(mdb_txn_commit(txn)); + } + } + free(values); + printf("Deleted %d values\n", j); + + E(mdb_env_stat(env, &mst)); + E(mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)); + E(mdb_cursor_open(txn, dbi, &cursor)); + printf("Cursor next\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_NEXT)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + printf("Cursor prev\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_PREV)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + mdb_cursor_close(cursor); + mdb_txn_abort(txn); + + mdb_dbi_close(env, dbi); + mdb_env_close(env); + return 0; +} diff --git a/deps/liblmdb/mtest6.c b/deps/liblmdb/mtest6.c new file mode 100644 index 00000000..ae3c7f26 --- /dev/null +++ b/deps/liblmdb/mtest6.c @@ -0,0 +1,141 @@ +/* mtest6.c - memory-mapped database tester/toy */ +/* + * Copyright 2011-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ + +/* Tests for DB splits and merges */ +#include +#include +#include +#include +#include "lmdb.h" + +#define E(expr) CHECK((rc = (expr)) == MDB_SUCCESS, #expr) +#define RES(err, expr) ((rc = expr) == (err) || (CHECK(!rc, #expr), 0)) +#define CHECK(test, msg) ((test) ? 
(void)0 : ((void)fprintf(stderr, \ + "%s:%d: %s: %s\n", __FILE__, __LINE__, msg, mdb_strerror(rc)), abort())) + +char dkbuf[1024]; + +int main(int argc,char * argv[]) +{ + int i = 0, j = 0, rc; + MDB_env *env; + MDB_dbi dbi; + MDB_val key, data, sdata; + MDB_txn *txn; + MDB_stat mst; + MDB_cursor *cursor; + int count; + int *values; + long kval; + char *sval; + + srand(time(NULL)); + + E(mdb_env_create(&env)); + E(mdb_env_set_mapsize(env, 10485760)); + E(mdb_env_set_maxdbs(env, 4)); + E(mdb_env_open(env, "./testdb", MDB_FIXEDMAP|MDB_NOSYNC, 0664)); + + E(mdb_txn_begin(env, NULL, 0, &txn)); + E(mdb_dbi_open(txn, "id6", MDB_CREATE|MDB_INTEGERKEY, &dbi)); + E(mdb_cursor_open(txn, dbi, &cursor)); + E(mdb_stat(txn, dbi, &mst)); + + sval = calloc(1, mst.ms_psize / 4); + key.mv_size = sizeof(long); + key.mv_data = &kval; + sdata.mv_size = mst.ms_psize / 4 - 30; + sdata.mv_data = sval; + + printf("Adding 12 values, should yield 3 splits\n"); + for (i=0;i<12;i++) { + kval = i*5; + sprintf(sval, "%08x", kval); + data = sdata; + (void)RES(MDB_KEYEXIST, mdb_cursor_put(cursor, &key, &data, MDB_NOOVERWRITE)); + } + printf("Adding 12 more values, should yield 3 splits\n"); + for (i=0;i<12;i++) { + kval = i*5+4; + sprintf(sval, "%08x", kval); + data = sdata; + (void)RES(MDB_KEYEXIST, mdb_cursor_put(cursor, &key, &data, MDB_NOOVERWRITE)); + } + printf("Adding 12 more values, should yield 3 splits\n"); + for (i=0;i<12;i++) { + kval = i*5+1; + sprintf(sval, "%08x", kval); + data = sdata; + (void)RES(MDB_KEYEXIST, mdb_cursor_put(cursor, &key, &data, MDB_NOOVERWRITE)); + } + E(mdb_cursor_get(cursor, &key, &data, MDB_FIRST)); + + do { + printf("key: %p %s, data: %p %.*s\n", + key.mv_data, mdb_dkey(&key, dkbuf), + data.mv_data, (int) data.mv_size, (char *) data.mv_data); + } while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_NEXT)) == 0); + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + mdb_cursor_close(cursor); + mdb_txn_commit(txn); + +#if 0 + j=0; + + for (i= count - 1; i > -1; i-= (rand()%5)) { + j++; + txn=NULL; + E(mdb_txn_begin(env, NULL, 0, &txn)); + sprintf(kval, "%03x", values[i & ~0x0f]); + sprintf(sval, "%03x %d foo bar", values[i], values[i]); + key.mv_size = sizeof(int); + key.mv_data = kval; + data.mv_size = sizeof(sval); + data.mv_data = sval; + if (RES(MDB_NOTFOUND, mdb_del(txn, dbi, &key, &data))) { + j--; + mdb_txn_abort(txn); + } else { + E(mdb_txn_commit(txn)); + } + } + free(values); + printf("Deleted %d values\n", j); + + E(mdb_env_stat(env, &mst)); + E(mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)); + E(mdb_cursor_open(txn, dbi, &cursor)); + printf("Cursor next\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_NEXT)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + printf("Cursor prev\n"); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_PREV)) == 0) { + printf("key: %.*s, data: %.*s\n", + (int) key.mv_size, (char *) key.mv_data, + (int) data.mv_size, (char *) data.mv_data); + } + CHECK(rc == MDB_NOTFOUND, "mdb_cursor_get"); + mdb_cursor_close(cursor); + mdb_txn_abort(txn); + + mdb_dbi_close(env, dbi); +#endif + mdb_env_close(env); + + return 0; +} diff --git a/deps/liblmdb/sample-bdb.txt b/deps/liblmdb/sample-bdb.txt new file mode 100644 index 00000000..563807a2 --- /dev/null +++ b/deps/liblmdb/sample-bdb.txt @@ -0,0 +1,73 @@ +/* sample-bdb.txt - BerkeleyDB toy/sample + * + * Do a line-by-line comparison of this and sample-mdb.txt + */ 
+/* + * Copyright 2012-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . + */ +#include +#include +#include + +int main(int argc,char * argv[]) +{ + int rc; + DB_ENV *env; + DB *dbi; + DBT key, data; + DB_TXN *txn; + DBC *cursor; + char sval[32], kval[32]; + + /* Note: Most error checking omitted for simplicity */ + +#define FLAGS (DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_INIT_MPOOL|DB_CREATE|DB_THREAD) + rc = db_env_create(&env, 0); + rc = env->open(env, "./testdb", FLAGS, 0664); + rc = db_create(&dbi, env, 0); + rc = env->txn_begin(env, NULL, &txn, 0); + rc = dbi->open(dbi, txn, "test.bdb", NULL, DB_BTREE, DB_CREATE, 0664); + + memset(&key, 0, sizeof(DBT)); + memset(&data, 0, sizeof(DBT)); + key.size = sizeof(int); + key.data = sval; + data.size = sizeof(sval); + data.data = sval; + + sprintf(sval, "%03x %d foo bar", 32, 3141592); + rc = dbi->put(dbi, txn, &key, &data, 0); + rc = txn->commit(txn, 0); + if (rc) { + fprintf(stderr, "txn->commit: (%d) %s\n", rc, db_strerror(rc)); + goto leave; + } + rc = env->txn_begin(env, NULL, &txn, 0); + rc = dbi->cursor(dbi, txn, &cursor, 0); + key.flags = DB_DBT_USERMEM; + key.data = kval; + key.ulen = sizeof(kval); + data.flags = DB_DBT_USERMEM; + data.data = sval; + data.ulen = sizeof(sval); + while ((rc = cursor->c_get(cursor, &key, &data, DB_NEXT)) == 0) { + printf("key: %p %.*s, data: %p %.*s\n", + key.data, (int) key.size, (char *) key.data, + data.data, (int) data.size, (char *) data.data); + } + rc = cursor->c_close(cursor); + rc = txn->abort(txn); +leave: + rc = dbi->close(dbi, 0); + rc = env->close(env, 0); + return rc; +} diff --git a/deps/liblmdb/sample-mdb.txt b/deps/liblmdb/sample-mdb.txt new file mode 100644 index 00000000..10a25687 --- /dev/null +++ b/deps/liblmdb/sample-mdb.txt @@ -0,0 +1,62 @@ +/* sample-mdb.txt - MDB toy/sample + * + * Do a line-by-line comparison of this and sample-bdb.txt + */ +/* + * Copyright 2012-2015 Howard Chu, Symas Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted only as authorized by the OpenLDAP + * Public License. + * + * A copy of this license is available in the file LICENSE in the + * top-level directory of the distribution or, alternatively, at + * . 
+ */ +#include +#include "lmdb.h" + +int main(int argc,char * argv[]) +{ + int rc; + MDB_env *env; + MDB_dbi dbi; + MDB_val key, data; + MDB_txn *txn; + MDB_cursor *cursor; + char sval[32]; + + /* Note: Most error checking omitted for simplicity */ + + rc = mdb_env_create(&env); + rc = mdb_env_open(env, "./testdb", 0, 0664); + rc = mdb_txn_begin(env, NULL, 0, &txn); + rc = mdb_dbi_open(txn, NULL, 0, &dbi); + + key.mv_size = sizeof(int); + key.mv_data = sval; + data.mv_size = sizeof(sval); + data.mv_data = sval; + + sprintf(sval, "%03x %d foo bar", 32, 3141592); + rc = mdb_put(txn, dbi, &key, &data, 0); + rc = mdb_txn_commit(txn); + if (rc) { + fprintf(stderr, "mdb_txn_commit: (%d) %s\n", rc, mdb_strerror(rc)); + goto leave; + } + rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn); + rc = mdb_cursor_open(txn, dbi, &cursor); + while ((rc = mdb_cursor_get(cursor, &key, &data, MDB_NEXT)) == 0) { + printf("key: %p %.*s, data: %p %.*s\n", + key.mv_data, (int) key.mv_size, (char *) key.mv_data, + data.mv_data, (int) data.mv_size, (char *) data.mv_data); + } + mdb_cursor_close(cursor); + mdb_txn_abort(txn); +leave: + mdb_dbi_close(env, dbi); + mdb_env_close(env); + return 0; +} diff --git a/deps/liblmdb/tooltag b/deps/liblmdb/tooltag new file mode 100644 index 00000000..229bf16b --- /dev/null +++ b/deps/liblmdb/tooltag @@ -0,0 +1,22 @@ + + + mdb_copy_1 + mdb_copy - environment copy tool + mdb_copy.1 + + + mdb_dump_1 + mdb_dump - environment export tool + mdb_dump.1 + + + mdb_load_1 + mdb_load - environment import tool + mdb_load.1 + + + mdb_stat_1 + mdb_stat - environment status tool + mdb_stat.1 + + diff --git a/deps/win32/unistd.h b/deps/win32/unistd.h new file mode 100644 index 00000000..663aacf5 --- /dev/null +++ b/deps/win32/unistd.h @@ -0,0 +1,8 @@ +#ifndef INCLUDED_unistd +#define INCLUDED_unistd + +#if defined (WIN32) +typedef int32_t ssize_t; +#endif + +#endif diff --git a/packages/cory/about/about.js b/packages/cory/about/about.js new file mode 100644 index 00000000..33b5b8b3 --- /dev/null +++ b/packages/cory/about/about.js @@ -0,0 +1,57 @@ +"use strict"; + +var kMessages = [ + [ + " _ _ _ ", + " / \\ | |__ ___ _ _| |_ ", + " / _ \\ | '_ \\ / _ \\| | | | __|", + " / ___ \\| |_) | (_) | |_| | |_ ", + "/_/ \\_\\_.__/ \\___/ \\__,_|\\__|", + "", + "Tilde Friends: Webapps that anyone can download, modify, run, and share.", + "", + "You are looking at a web site running on a JavaScript and C++ web server that uses Google V8 to let visitors author webapps.", + "", + ["Full source is here <", + {href: "https://www.unprompted.com/projects/browser/sandboxos/trunk/"}, + ">, but it is probably more fun and useful to poke around the ", + {href: "/~cory/index", value: "existing webapps"}, + ". A ", + {href: "https://www.unprompted.com/projects/wiki/Projects/SandboxOS", value: "prebuilt Windows .zip"}, + " is available as well. ", + ], + "", + [ + "Use the links at the top of the page to explore existing apps. When you are ready, click edit and start making your own. 
See the ", + {href: "/~cory/documentation", value: "documentation"}, + " for more information.", + ], + ], +]; +var gIndex = 0; + +function printNextMessage() { + if (gIndex < kMessages.length) { + var block = kMessages[gIndex]; + for (var i = 0; i < block.length; i++) { + terminal.print(block[i]); + } + terminal.print(""); + } + if (gIndex < kMessages.length) { + gIndex++; + if (gIndex < kMessages.length) { + terminal.print("(press enter to continue, \"exit\" to exit)"); + } + } +} + +core.register("onInput", function(input) { + if (input == "exit") { + exit(); + } else { + printNextMessage(); + } +}); + +printNextMessage(); diff --git a/packages/cory/administration/administration.js b/packages/cory/administration/administration.js new file mode 100644 index 00000000..39a3fc29 --- /dev/null +++ b/packages/cory/administration/administration.js @@ -0,0 +1,137 @@ +"use strict"; + +//! {"permissions": ["administration"]} + +terminal.print("Administration"); +if (core.user.credentials.permissions && + core.user.credentials.permissions.administration) { + core.register("onInput", onInput); + terminal.print("Welcome, administrator."); + terminal.print("Usage:"); + let kCommands = [ + [ + "set", + "List all global settings.", + ], + [ + ["set ", {class: "cyan", value: "key value"}], + ["Set global setting key to value."], + ], + [ + "permission list", + "List all permissions." + ], + [ + ["permission add ", {class: "cyan", value: "user action1 action2 ..."}], + ["Grant permission for ", {class: "cyan", value: "action1"}, ", ", {class: "cyan", value: "action2"}, ", ", {class: "cyan", value: "..."}, " to ", {class: "cyan", value: "user"}, "."], + ], + [ + ["permission remove ", {class: "cyan", value: "user action1 action2 ..."}], + ["Revoke permission for ", {class: "cyan", value: "action1"}, ", ", {class: "cyan", value: "action2"}, ", ", {class: "cyan", value: "..."}, " from ", {class: "cyan", value: "user"}, "."], + ], + [ + "statistics", "List statistics." 
+ ], + ]; + for (var i = 0; i < kCommands.length; i++) { + terminal.print({class: "yellow", value: kCommands[i][0]}); + terminal.print({style: "display: block; margin-left: 2em", value: kCommands[i][1]}); + } +} else { + terminal.print("You are not an administrator."); +} + +var kSimpleSettings = [ + 'httpPort', + 'httpsPort', + 'index', +]; + +function printSettings(settings) { + terminal.print("Current settings:"); + for (let i = 0; i < kSimpleSettings.length; i++) { + terminal.print(" ", {class: "magenta", value: kSimpleSettings[i]}, " = ", {class: "yellow", value: settings[kSimpleSettings[i]]}); + } +} + +function printPermissions(settings) { + terminal.print("Current permissions:"); + let permissions = settings.permissions || {}; + for (let entry in permissions) { + terminal.print(" ", {class: "magenta", value: entry}, ": ", {class: "yellow", value: permissions[entry].join(" ")}); + } +} + +function onInput(input) { + try { + let match; + if (input == "set") { + administration.getGlobalSettings().then(printSettings); + } else if (input == "statistics") { + administration.getStatistics().then(function(s) { + for (var i in s) { + terminal.print(" ".repeat(16 - s[i].toString().length), s[i].toString(), " ", i); + } + }); + } else if (match = /^\s*set\s+(\w+)\s+(.*)/.exec(input)) { + var key = match[1]; + var value = match[2]; + administration.getGlobalSettings().then(function(settings) { + if (kSimpleSettings.indexOf(key) != -1) { + settings[key] = value; + administration.setGlobalSettings(settings).then(function() { + administration.getGlobalSettings().then(printSettings); + }).catch(function(error) { + terminal.print("Error updating settings: " + JSON.stringify(error)); + }); + } else { + terminal.print("Unknown setting: " + key); + } + }); + } else if (match = /^\s*permission\s+(\w+)(?:\s+(.*))?/.exec(input)) { + var command = match[1]; + var remaining = (match[2] || "").split(/\s+/); + if (command == "list") { + administration.getGlobalSettings().then(printPermissions); + } else if (command == "add") { + var user = remaining[0]; + administration.getGlobalSettings().then(function(settings) { + settings.permissions = settings.permissions || {}; + settings.permissions[user] = settings.permissions[user] || []; + for (var i = 1; i < remaining.length; i++) { + if (settings.permissions[user].indexOf(remaining[i]) == -1) { + settings.permissions[user].push(remaining[i]); + } + } + settings.permissions[user].sort(); + administration.setGlobalSettings(settings).then(function() { + administration.getGlobalSettings().then(printPermissions); + }).catch(function(error) { + terminal.print("Error updating permissions: " + JSON.stringify(error)); + }); + }); + } else if (command == "remove") { + var user = remaining[0]; + administration.getGlobalSettings().then(function(settings) { + if (settings.permissions && settings.permissions[user]) { + for (var i = 1; i < remaining.length; i++) { + settings.permissions[user] = settings.permissions[user].filter(x => x != remaining[i]); + } + if (settings.permissions[user].length == 0) { + delete settings.permissions[user]; + } + } + administration.setGlobalSettings(settings).then(function() { + administration.getGlobalSettings().then(printPermissions); + }).catch(function(error) { + terminal.print("Error updating permissions: " + JSON.stringify(error)); + }); + }); + } + } else if (typeof input == "string") { + terminal.print("I didn't understand that."); + } + } catch (error) { + terminal.print("error: " + error); + } +} diff --git a/packages/cory/bbs/bbs.js 
b/packages/cory/bbs/bbs.js new file mode 100644 index 00000000..eaa8ef92 --- /dev/null +++ b/packages/cory/bbs/bbs.js @@ -0,0 +1,315 @@ +"use strict"; +var gOnInput = null; + +var kMaxHistory = 20; +var kShowHistory = 20; + +var lastTimestamp = null; + +if (imports.terminal) { + core.register("onMessage", function(sender, message) { + if (message.message && message.when) { + printMessage(message, true); + } + }); + core.register("onSessionBegin", function(user) { + if (user.packageName === core.user.packageName && + user.index !== core.user.index) { + listUsers(user.name + " has joined the BBS. "); + } + }); + core.register("onSessionEnd", function(user) { + if (user.packageName === core.user.packageName && + user.index !== core.user.index) { + listUsers(user.name + " has left the BBS. "); + } + }); +} else { + // Chat service process. + core.register("onMessage", function(sender, message) { + if (message.message && message.when) { + message.sender = sender; + return database.get("board").catch(function() { + return null; + }).then(function(data) { + try { + data = JSON.parse(data); + } catch(error) { + data = []; + } + data.push(message); + while (data.length > kMaxHistory) { + data.shift(); + } + return saveBoard(data); + }).then(function() { + return core.broadcast(message); + }); + } + }); +} + +function listUsers() { + return core.getUsers(core.user.packageOwner, core.user.packageName).then(function(users) { + terminal.select("users"); + terminal.clear(); + terminal.print("Users:"); + var counts = {}; + for (var i = 0; i < users.length; i++) { + counts[users[i].name] = (counts[users[i].name] || 0) + 1; + } + var names = Object.keys(counts).sort(); + for (var i = 0; i < names.length; i++) { + var name = names[i]; + var message = []; + if (message.length > 1) { + message.push(", "); + } + message.push({class: "orange", value: name}); + if (counts[name] > 1) { + message.push({class: "base01", value: "(x" + counts[name] + ")"}); + } + terminal.print(message); + } + terminal.select("terminal"); + }); +} + +function saveBoard(data) { + return database.set("board", JSON.stringify(data)).catch(function(error) { + if (error.message.indexOf("MDB_MAP_FULL") != -1) { + data.shift(); + return saveBoard(data); + } else { + throw error; + } + }); +} + +core.register("onInput", function(input) { + if (gOnInput && typeof input == "string") { + gOnInput(input); + } +}); + +function logo() { + terminal.clear(); + terminal.print(""); + terminal.print(""); + terminal.print('Welcome to'); + terminal.print(' ______ _ ____ ____ _____'); + terminal.print(' / ____/___ _______ _( )_____ / __ )/ __ ) ___/'); + terminal.print(' / / / __ \\/ ___/ / / /// ___/ / __ / __ \\__ \\ '); + terminal.print('/ /___/ /_/ / / / /_/ / (__ ) / /_/ / /_/ /__/ / '); + terminal.print('\\____/\\____/_/ \\__, / /____/ /_____/_____/____/ '); + terminal.print(' /____/ '); + terminal.print(' yesterday\'s technology...today!'); + terminal.print(""); +} + +function welcome() { + logo(); + chat(); +} + +function main() { + terminal.clear(); + logo(); + terminal.print(""); + terminal.print("Main menu commands:"); + terminal.print(" ", {command: "chat"}, " chat message board"); + terminal.print(" ", {command: "guess"}, " guess the number game"); + terminal.print(" ", {command: "exit"}, " back to that sweet logo"); + gOnInput = function(input) { + input = input.toLowerCase(); + if (input == "chat") { + chat(); + } else if (input == "guess") { + guess(); + } else if (input == "exit") { + terminal.print("Goodbye."); + exit(0); + } else { + 
terminal.print("I didn't understand that: " + input); + main(); + } + }; +} + +function formatMessage(message) { + var result; + if (typeof message == "string") { + result = []; + var regex = /(\w+:\/*\S+?)(?=(?:[\.!?])?(?:$|\s))/gi; + var match; + var lastIndex = 0; + while ((match = regex.exec(message)) !== null) { + result.push({class: "base1", value: message.substring(lastIndex, match.index)}); + result.push({href: match[0]}); + lastIndex = regex.lastIndex; + } + result.push({class: "base1", value: message.substring(lastIndex)}); + } else { + result = message; + } + return result; +} + +function niceTime(lastTime, thisTime) { + if (!lastTime) { + return thisTime; + } + let result = []; + let lastParts = lastTime.split(" "); + let thisParts = thisTime.split(" "); + for (let i = 0; i < thisParts.length; i++) { + if (thisParts[i] !== lastParts[i]) { + result.push(thisParts[i]); + } + } + return result.join(" "); +} + +function printMessage(message, notify) { + terminal.print( + {class: "base0", value: niceTime(lastTimestamp, message.when)}, + " ", + {class: "base00", value: "<"}, + {class: "base3", value: (message.sender ? message.sender.name : "unknown")}, + {class: "base00", value: ">"}, + " ", + formatMessage(message.message)); + lastTimestamp = message.when; + if (notify) { + return core.getUser().then(function(user) { + if (message.message.indexOf("!") != -1) { + return terminal.notify("SOMEONE IS SHOUTING!", {body: "<" + (message.sender ? message.sender.name : "unknown") + "> " + message.message}); + } else if (message.message.indexOf(user.name + ":") != -1) { + return terminal.notify("Someone is talking at you.", {body: "<" + (message.sender ? message.sender.name : "unknown") + "> " + message.message}); + } + }); + } +} + +function chat() { + terminal.setEcho(false); + terminal.print(""); + terminal.print("You are now in a chat. Anything you type will be broadcast to everyone else connected. To leave, say ", {command: "exit"}, "."); + listUsers(); + database.get("board").catch(function() { + return null; + }).then(function(board) { + try { + board = JSON.parse(board); + } catch (error) { + board = []; + } + + for (let i = Math.max(0, board.length - kShowHistory); i < board.length; i++) { + printMessage(board[i], false); + } + }); + gOnInput = function(input) { + if (input == "exit") { + terminal.setEcho(true); + main(); + } else { + core.getService("chat").then(function(chatService) { + return chatService.postMessage({when: new Date().toString(), message: input}); + }).catch(function(error) { + terminal.print("ERROR: " + JSON.stringify(error)); + }); + } + }; +} + +function guess() { + terminal.clear(); + var number = Math.round(Math.random() * 100); + var guesses = 0; + terminal.print("OK, I have a number in mind. What do you think it is? Use ", {command: "exit"}, " to stop."); + gOnInput = function(input) { + if (input == "exit") { + main(); + } else { + var guess = parseInt(input); + guesses++; + if (input != guess.toString()) { + terminal.print("I'm not sure that's an integer. Please guess only integers."); + } else { + if (guess < number) { + terminal.print("Too low."); + } else if (guess > number) { + terminal.print("Too high."); + } else if (guess == number) { + terminal.print("Wow, you got it in " + guesses + " guesses! 
It was " + number + "."); + guessEnd(guesses); + } + } + } + }; +} + +function guessEnd(guesses) { + terminal.print("What's your name, for the high score table?"); + gOnInput = function(name) { + var entry = {'guesses': guesses, 'name': name, 'when': new Date().toString()}; + database.get("guessHighScores").then(function(data) { + data = JSON.parse(data); + var index = data.length; + for (var i in data) { + if (guesses < data[i].guesses) { + index = i; + break; + } + } + data.splice(index, 0, entry); + printHighScores(data); + database.set("guessHighScores", JSON.stringify(data)); + gOnInput = function() { + main(); + }; + }).catch(function() { + var data = [entry]; + printHighScores(data); + database.set("guessHighScores", JSON.stringify(data)); + main(); + }); + }; +} + +function printTable(data) { + var widths = []; + for (var i in data) { + var row = data[i]; + for (var c in row) { + widths[c] = Math.max(widths[c] || 0, row[c].length); + } + } + + for (var i in data) { + var row = data[i]; + var line = ""; + for (var c in row) { + line += row[c]; + line += " ".repeat(widths[c] - row[c].length + 2); + } + terminal.print(line); + } +} + +function printHighScores(data) { + printTable([["Name", "Guesses", "Date"]].concat(data.map(function(entry) { + return [entry.name, entry.guesses.toString(), entry.when]; + }))); +} + +if (imports.terminal) { + terminal.split([ + {type: "horizontal", children: [ + {name: "terminal", grow: 1}, + {name: "users", grow: 0}, + ]}, + ]); + welcome(); +} diff --git a/packages/cory/documentation/documentation.js b/packages/cory/documentation/documentation.js new file mode 100644 index 00000000..2e4df34d --- /dev/null +++ b/packages/cory/documentation/documentation.js @@ -0,0 +1,93 @@ +"use strict"; + +let kDocumentation = { + "core.broadcast": ["message", "Broadcast a message to every other instance of the same app. Messages will be received through the \"onMessage\" event."], + "core.getService": ["name", "Get a reference to a long-running service process identified by name. A process will be started if it is not already running. Useful for coordinating between client processes."], + "core.getPackages": ["", "Get a list of all available applications."], + "core.getUser": ["", "Gets information about the current user."], + "core.getUsers": ["packageOwner, packageName", "Get a list of all online users, restricted to a package if specified."], + "core.register": ["eventName, handlerFunction", "Register a callback function for the given event."], + "database.get": ["key", "Retrieve the database value associated with the given key."], + "database.set": ["key, value", "Sets the database value for the given key, overwriting any existing value."], + "database.getAll": ["", "Retrieve a list of all key names."], + "database.remove": ["key", "Remove the database entry for the given key."], + "terminal.print": ["arguments...", `Print to the terminal. Multiple arguments and lists are all expanded. The following special values are supported: + {href: "http://www..."} => Create a link to the href value. Text will be the href value or 'value' if specified. + {iframe: "...", width: 640, height: 480} => Create an iframe with the given srcdoc. + {style: "color: #f00", value: "Hello, world!"} => Create styled text. 
+ {command: "exit", value: "get out of here"} => Create a link that when clicked will act as if the user typed the given command.`], + "terminal.clear": ["", "Remove all terminal output."], + "terminal.readLine": ["", "Produces the next line of text from user input."], + "terminal.setEcho": ["echo", "Controls whether the terminal will automatically echo user input (default=true)."], + "terminal.setTitle": ["title", "Sets the browser window/tab title."], + "terminal.setPrompt": ["prompt", "Sets the terminal prompt (default \">\")."], + "terminal.setPassword": ["enabled", "Controls whether the terminal input box is set as a password input and obscures the entered text."], + "terminal.setHash": ["hash", "Sets the URL #hash, typically so that the user can copy / bookmark it and return to a similar state."], + "terminal.postMessageToIframe": ["name, message", "Sends the message to the iframe that was created with the given name using window.postMessage."], + "terminal.notify": ["body, {title, icon}", ["Produces an ", {href: "https://developer.mozilla.org/en-US/docs/Web/API/notification", value: "HTML5 Notification"}, ". Arguments are the same as the Notification constructor."]], +}; + +terminal.print("V8 Version ", version); +terminal.print(""); + +heading("API Documentation"); +dumpDocumentation("imports", imports); + +heading("Notes"); +terminal.print(`All API functions are invoked asynchronously. They +immediately return a `, +{href: "https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise", value: "Promise"}, +` object. If you want to do +something with the result, you most likely want to call them +like this: + + database.get(key).then(function(value) { + doSomethingWithTheResult(value); + });`); +terminal.print(""); +heading("Colors (CSS class names)"); +dumpColors(); + +function heading(text) { + terminal.print({class: "green", value: "+" + "-".repeat(text.length + 2) + "+"}); + terminal.print({class: "green", value: "| " + text + " |"}); + terminal.print({class: "green", value: "+" + "-".repeat(text.length + 2) + "+"}); +} + +function dumpDocumentation(prefix, object, depth) { + if (typeof object == "function") { + let documentation = kDocumentation[prefix.substring("imports.".length)] || ["", ""]; + terminal.print( + {class: "yellow", value: prefix.substring("imports.".length)}, + "(", + {class: "base0", value: documentation[0]}, + ")"); + terminal.print({style: "display: block; margin-left: 2em", value: documentation[1]}); + terminal.print(""); + } else if (object && typeof object != "string") { + for (let i in object) { + dumpDocumentation(prefix + "." 
+ i, object[i], (depth || 0) + 1); + } + } +} + +function dumpColors() { + var kColors = [ + "base03", + "base02", + "base01", + "base00", + "base0", + "base1", + "base2", + "base3", + "yellow", + "red", + "magenta", + "violet", + "blue", + "cyan", + "green", + ]; + terminal.print({style: "background-color: #000", value: kColors.map(function(color) { return [" ", {class: color, value: color}, " "]; })}); +} diff --git a/packages/cory/index/index.js b/packages/cory/index/index.js new file mode 100644 index 00000000..00efdb24 --- /dev/null +++ b/packages/cory/index/index.js @@ -0,0 +1,53 @@ +"use strict"; + +core.register("onSessionBegin", index); +core.register("onSessionEnd", index); + +function index() { + Promise.all([core.getPackages(), core.getUsers()]).then(function(values) { + let packages = values[0]; + let users = values[1]; + let usersByApp = {}; + for (let i in users) { + let user = users[i]; + if (!usersByApp["/~" + user.packageOwner + "/" + user.packageName]) { + usersByApp["/~" + user.packageOwner + "/" + user.packageName] = []; + } + usersByApp["/~" + user.packageOwner + "/" + user.packageName].push(user.name); + } + + terminal.clear(); + terminal.print("Available applications [active users]:"); + packages.sort(function(x, y) { + return Math.sign(x.owner.localeCompare(y.owner)) * 10 + Math.sign(x.name.localeCompare(y.name)) * 1; + }).forEach(function(app) { + let users = usersByApp["/~" + app.owner + "/" + app.name]; + let message = []; + if (users) { + message.push(" ["); + let counts = {}; + for (let i = 0; i < users.length; i++) { + counts[users[i]] = (counts[users[i]] || 0) + 1; + } + let names = Object.keys(counts).sort(); + for (let i = 0; i < names.length; i++) { + var name = names[i]; + if (message.length > 1) { + message.push(", "); + } + message.push({class: "orange", value: name}); + if (counts[name] > 1) { + message.push({class: "base01", value: "(x" + counts[name] + ")"}); + } + } + message.push("]"); + } + terminal.print( + "* ", + {href: "/~" + app.owner + "/" + app.name}, + message); + }); + }); +} + +index(); diff --git a/packages/cory/mmoturtle/mmoturtle.js b/packages/cory/mmoturtle/mmoturtle.js new file mode 100644 index 00000000..f1b9d603 --- /dev/null +++ b/packages/cory/mmoturtle/mmoturtle.js @@ -0,0 +1,124 @@ +"use strict"; + +// This script runs server-side, once for each client session. + +if (imports.terminal) { + terminal.setEcho(false); + terminal.split([ + {name: "graphics", basis: "520px", shrink: "0", grow: "0"}, + {name: "text"}, + ]); + + // Request a callback every time the user hits enter at the terminal prompt. + core.register("onInput", function(input) { + // Ask a persistent service session to broadcast our message. We'll also get a copy back. + return core.getService("turtle").then(function(service) { + return service.postMessage(input); + }); + }); + + // Request a callback for every message that is broadcast. + core.register("onMessage", function(sender, message) { + if (message.history) { + for (var i = 0; i < message.history.length; i++) { + // Pass the message on to the iframe in the client. + terminal.postMessageToIframe("turtle", message.history[i]); + } + } else { + // Pass the message on to the iframe in the client. + terminal.postMessageToIframe("turtle", message); + } + }); + + core.register("onWindowMessage", function(data) { + terminal.print(data.message); + }); + + terminal.select("graphics"); + terminal.print("MMO Turtle Graphics using ", {href: "http://codeheartjs.com/turtle/"}, "."); + + // Add an iframe to the terminal. 
This is how we sandbox code running on the client. + var contents = ` + + + ` + terminal.print({iframe: contents, width: 640, height: 480, name: "turtle"}); + + terminal.select("text"); + terminal.print("Supported commands: ", ["fd ", "bk ", "rt ", "lt ", "pu", "pd", "home", "reset", "clear"].join(", ")); + + // Get the party started by asking for the history of commands (the turtle party). + setTimeout(function() { + core.getService("turtle").then(function(service) { + return service.postMessage("sync"); + }); + }, 1000); +} else { + var gHistory = null; + + function ensureHistoryLoaded() { + if (!gHistory) { + return database.get("history").then(function(data) { + gHistory = JSON.parse(data); + return gHistory; + }).catch(function(error) { + gHistory = []; + return gHistory; + }); + } else { + return new Promise(function(resolve, reject) { resolve(gHistory); }); + } + } + + core.register("onMessage", function(sender, message) { + return ensureHistoryLoaded().then(function(history) { + if (message == "reset") { + history.length = 0; + database.set("history", JSON.stringify(history)); + return core.broadcast(message); + } else if (message == "sync") { + sender.postMessage({history: history}); + } else { + history.push(message); + database.set("history", JSON.stringify(history)); + return core.broadcast(message); + } + }); + }); +} \ No newline at end of file diff --git a/packages/cory/smtp/smtp.js b/packages/cory/smtp/smtp.js new file mode 100644 index 00000000..12affced --- /dev/null +++ b/packages/cory/smtp/smtp.js @@ -0,0 +1,74 @@ +"use strict"; + +//! {"permissions": ["network"]} + +terminal.print("Hello, world!"); + +let kFrom = core.user.name + "@unprompted.com"; +let kTo = "test@unprompted.com"; +let kSubject = "Hello, world!"; +let kBody = "This is the body of the email." 
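+
+// A sketch of the SMTP exchange the handlers below walk through, for
+// orientation only. It assumes the plain, unauthenticated port-25 server that
+// the connect("localhost", 25) call at the bottom targets; reply text varies
+// by server, and only the three-digit codes are inspected here.
+//
+//   S: 220 (greeting)      C: HELO rowlf.unprompted.com
+//   S: 250 (ok)            C: MAIL FROM: kFrom
+//   S: 250 (ok)            C: RCPT TO: kTo
+//   S: 250 (ok)            C: DATA
+//   S: 354 (go ahead)      C: Subject header, blank line, kBody, then "." alone
+//   S: 250 (ok)            C: QUIT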
+ +let inBuffer = ""; +let sentFrom = false; +let sentTo = false; +let sentData = false; + +function lineReceived(socket, line) { + terminal.print("> ", line); + let parts = line.split(" ", 1); + terminal.print(JSON.stringify(parts)); + if (parts[0] == "220") { + socket.write("HELO rowlf.unprompted.com\r\n"); + } else if (parts[0] == "250") { + if (!sentFrom) { + terminal.print("FROM"); + socket.write("MAIL FROM: " + kFrom + "\r\n"); + sentFrom = true; + } else if (!sentTo) { + terminal.print("TO"); + socket.write("RCPT TO: " + kTo + "\r\n"); + sentTo = true; + } else if (!sentData) { + terminal.print("DATA"); + socket.write("DATA\r\n"); + sentData = true; + } else { + terminal.print("QUIT"); + socket.write("QUIT\r\n"); + } + } else if (parts[0] == "354") { + terminal.print("MESSAGE"); + socket.write("Subject: " + kSubject + "\r\n\r\n" + kBody + "\r\n.\r\n"); + } +} + +function dataReceived(socket, data) { + if (data === null) { + return; + } + terminal.print(data); + inBuffer += data; + let again = true; + while (again) { + again = false; + let end = inBuffer.indexOf("\n"); + if (end != -1) { + again = true; + let line = inBuffer.substring(0, end); + inBuffer = inBuffer.substring(end + 1); + lineReceived(socket, line); + } + } +} + +network.newConnection().then(function(socket) { + socket.read(function(data) { + try { + dataReceived(socket, data); + } catch (error) { + terminal.print("ERROR: ", error.message); + } + }); + socket.connect("localhost", 25); +}); \ No newline at end of file diff --git a/packages/cory/todo/todo.js b/packages/cory/todo/todo.js new file mode 100644 index 00000000..e7909120 --- /dev/null +++ b/packages/cory/todo/todo.js @@ -0,0 +1,204 @@ +"use strict"; + +var kUnchecked = "☐"; +var kChecked = "☑"; + +let activeList = null; +let confirmRemove; + +terminal.setPrompt("Add Item>"); + +core.register("onInput", function(command) { + if (typeof command == "string" && command.substring(0, "action:".length) == "action:") { + command = JSON.parse(command.substring("action:".length)); + if (confirmRemove && command.action != "reallyRemoveList" && command.action != "reallyRemove") { + confirmRemove = false; + } + if (command.action == "set") { + setItem(command.key, command.item, command.value).then(notifyChanged).then(redisplay); + } else if (command.action == "remove") { + confirmRemove = command; + redisplay(); + } else if (command.action == "reallyRemove") { + confirmRemove = false; + removeItem(command.key, command.item).then(notifyChanged).then(redisplay); + } else if (command.action == "editList") { + activeList = command.key; + terminal.setHash(activeList); + redisplay(); + } else if (command.action == "lists") { + activeList = null; + redisplay(); + } else if (command.action == "removeList") { + confirmRemove = true; + redisplay(); + } else if (command.action == "reallyRemoveList") { + confirmRemove = false; + activeList = null; + database.remove(command.key).then(notifyChanged).then(redisplay).catch(function(error) { + terminal.print(JSON.stringify(error)); + terminal.print(command.key); + }); + } + } else if (typeof command == "string") { + if (activeList) { + addItem(activeList, command).then(notifyChanged).then(redisplay); + } else { + activeList = makePrivateKey(command); + writeList(activeList, {name: command, items: []}).then(notifyChanged).then(redisplay); + } + } else if (command.hash) { + activeList = command.hash; + if (activeList.charAt(0) == "#") { + activeList = activeList.substring(1); + } + redisplay(); + } +}); + +core.register("onMessage", 
function(message) { + return redisplay(); +}); + +function notifyChanged() { + return core.broadcast({changed: true}); +} + +function readList(key) { + return database.get(key).catch(function(error) { + return null; + }).then(function(todo) { + try { + todo = JSON.parse(todo); + } catch (error) { + todo = {name: "TODO", items: []}; + } + return todo; + }); +} + +function writeList(key, todo) { + return database.set(key, JSON.stringify(todo)); +} + +function addItem(key, name) { + return readList(key).then(function(todo) { + todo.items.push({name: name, value: false}); + return writeList(key, todo); + }); +} + +function setItem(key, name, value) { + return readList(key).then(function(todo) { + for (var i = 0; i < todo.items.length; i++) { + if (todo.items[i].name == name) { + todo.items[i].value = value; + } + } + return writeList(key, todo); + }); +} + +function removeItem(key, name) { + return readList(key).then(function(todo) { + todo.items = todo.items.filter(function(item) { + return item.name != name; + }); + return writeList(key, todo); + }); +} + +function printList(name, key, items) { + terminal.print(name, + " - ", + {command: "action:" + JSON.stringify({action: "lists"}), value: "back"}, + " - ", + {command: "action:" + JSON.stringify({action: (confirmRemove === true ? "reallyRemoveList" : "removeList"), key: key}), value: (confirmRemove === true ? "confirm remove" : "remove")}); + terminal.print("=".repeat(name.length)); + for (var i = 0; i < items.length; i++) { + var isChecked = items[i].value; + var style = ["", "text-decoration: line-through"]; + terminal.print( + {command: "action:" + JSON.stringify({action: "set", key: key, item: items[i].name, value: !isChecked}), value: isChecked ? kChecked : kUnchecked}, + " ", + {style: style[isChecked ? 1 : 0], value: items[i].name}, + " (", + {command: "action:" + JSON.stringify({ + action: (confirmRemove && confirmRemove.item == items[i].name ? "reallyRemove" : "remove"), + key: key, + item: items[i].name}), value: (confirmRemove && confirmRemove.item == items[i].name ? 
"confirm remove" : "remove")}, + ")"); + } +} + +function redisplay() { + terminal.clear(); + terminal.setEcho(false); + if (activeList) { + readList(activeList).then(function(data) { + printList(getName(activeList), activeList, data.items); + }).catch(function(error) { + terminal.print("error: " + error); + }); + } else { + printListOfLists(); + } +} + +function makeId() { + var alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; + var result = ""; + for (var i = 0; i < 32; i++) { + result += alphabet.charAt(Math.floor(Math.random() * alphabet.length)); + } + return result; +} + +function makePublicKey(name) { + return JSON.stringify({public: true, id: makeId(), name: name}); +} + +function makePrivateKey(name) { + return JSON.stringify({public: false, id: makeId(), name: name, user: core.user.name}); +} + +function hasPermission(key) { + let result = false; + try { + let data = JSON.parse(key); + result = data.public || data.user == core.user.name; + } catch (error) { + result = true; + } + return result; +} + +function getName(key) { + let name = "TODO"; + try { + name = JSON.parse(key).name || name; + } catch (error) { + } + return name; +} + +function getVisibleLists() { + return database.getAll().then(function(data) { + return data.filter(hasPermission); + }); +} + +function printListOfLists() { + terminal.print("TODO Lists:"); + getVisibleLists().then(function(keys) { + for (var i = 0; i < keys.length; i++) { + let key = keys[i]; + terminal.print({ + command: "action:" + JSON.stringify({action: "editList", key: key}), + value: getName(key), + }); + } + }); +} + +redisplay(); diff --git a/packages/cory/turtle/turtle.js b/packages/cory/turtle/turtle.js new file mode 100644 index 00000000..7a8894e9 --- /dev/null +++ b/packages/cory/turtle/turtle.js @@ -0,0 +1,41 @@ +"use strict"; + +// Start at bottom left facing up. +// Height = 20. Width = 10. +// 10 between. 
+ +var letters = { + A: 'fd(20); rt(90); fd(10); rt(90); fd(10); rt(90); fd(10); pu(); bk(10); lt(90); pd(); fd(10); pu(); lt(90); fd(10); lt(90); pd();', + D: 'fd(20); rt(90); fd(10); rt(70); fd(11); rt(40); fd(11); rt(70); fd(10); pu(); bk(20); rt(90); pd();', + E: 'pu(); fd(20); rt(90); fd(10); lt(180); pd(); fd(10); lt(90); fd(10); lt(90); fd(8); pu(); rt(180); fd(8); lt(90); pd(); fd(10); lt(90); fd(10); pu(); fd(10); lt(90); pd()', + H: 'fd(20); pu(); bk(10); pd(); rt(90); fd(10); lt(90); pu(); fd(10); rt(180); pd(); fd(20); pu(); lt(90); fd(10); lt(90); pd();', + L: 'pu(); fd(20); rt(180); pd(); fd(20); lt(90); fd(10); pu(); fd(10); lt(90); pd();', + O: 'fd(20); rt(90); fd(10); rt(90); fd(20); rt(90); fd(10); pu(); bk(20); rt(90); pd();', + R: 'fd(20); rt(90); fd(10); rt(90); fd(10); rt(90); fd(10); pu(); bk(8); lt(90); pd(); fd(10); pu(); lt(90); fd(12); lt(90); pd();', + W: 'pu(); fd(20); rt(180); pd(); fd(20); lt(90); fd(5); lt(90); fd(12); rt(180); pu(); fd(12); pd(); lt(90); fd(5); lt(90); fd(20); pu(); bk(20); rt(90); fd(10); lt(90); pd();', + ' ': 'pu(); rt(90); fd(20); lt(90); pd();', +}; + +function render(text) { + terminal.clear(); + terminal.print(text, " using ", {href: "http://codeheartjs.com/turtle/"}, "."); + var contents = '\n"; + terminal.print({iframe: contents, width: 640, height: 480}); + terminal.print("Type text and the letters ", {style: "color: #ff0", value: Object.keys(letters).join("")}, " in it will be drawn."); +} + +render("Hello, world!"); + +core.register("onInput", render); \ No newline at end of file diff --git a/packages/cory/xmpp/xmpp.js b/packages/cory/xmpp/xmpp.js new file mode 100644 index 00000000..248680e7 --- /dev/null +++ b/packages/cory/xmpp/xmpp.js @@ -0,0 +1,952 @@ +"use strict"; + +//! {"permissions": ["network"]} + +// md5.js + +/* + * JavaScript MD5 1.0.1 + * https://github.com/blueimp/JavaScript-MD5 + * + * Copyright 2011, Sebastian Tschan + * https://blueimp.net + * + * Licensed under the MIT license: + * http://www.opensource.org/licenses/MIT + * + * Based on + * A JavaScript implementation of the RSA Data Security, Inc. MD5 Message + * Digest Algorithm, as defined in RFC 1321. + * Version 2.2 Copyright (C) Paul Johnston 1999 - 2009 + * Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet + * Distributed under the BSD License + * See http://pajhome.org.uk/crypt/md5 for more info. + */ + +/*jslint bitwise: true */ +/*global unescape, define */ + +'use strict'; + +/* + * Add integers, wrapping at 2^32. This uses 16-bit operations internally + * to work around bugs in some JS interpreters. + */ +function safe_add(x, y) { + var lsw = (x & 0xFFFF) + (y & 0xFFFF), + msw = (x >> 16) + (y >> 16) + (lsw >> 16); + return (msw << 16) | (lsw & 0xFFFF); +} + +/* + * Bitwise rotate a 32-bit number to the left. + */ +function bit_rol(num, cnt) { + return (num << cnt) | (num >>> (32 - cnt)); +} + +/* + * These functions implement the four basic operations the algorithm uses. 
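+ * (FF, GG, HH and II below correspond to MD5's four rounds; each step mixes
+ * one message word into the running state through a round-specific boolean
+ * function, an addition, and a left rotate.)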
+ */ +function md5_cmn(q, a, b, x, s, t) { + return safe_add(bit_rol(safe_add(safe_add(a, q), safe_add(x, t)), s), b); +} +function md5_ff(a, b, c, d, x, s, t) { + return md5_cmn((b & c) | ((~b) & d), a, b, x, s, t); +} +function md5_gg(a, b, c, d, x, s, t) { + return md5_cmn((b & d) | (c & (~d)), a, b, x, s, t); +} +function md5_hh(a, b, c, d, x, s, t) { + return md5_cmn(b ^ c ^ d, a, b, x, s, t); +} +function md5_ii(a, b, c, d, x, s, t) { + return md5_cmn(c ^ (b | (~d)), a, b, x, s, t); +} + +/* + * Calculate the MD5 of an array of little-endian words, and a bit length. + */ +function binl_md5(x, len) { + /* append padding */ + x[len >> 5] |= 0x80 << (len % 32); + x[(((len + 64) >>> 9) << 4) + 14] = len; + + var i, olda, oldb, oldc, oldd, + a = 1732584193, + b = -271733879, + c = -1732584194, + d = 271733878; + + for (i = 0; i < x.length; i += 16) { + olda = a; + oldb = b; + oldc = c; + oldd = d; + + a = md5_ff(a, b, c, d, x[i], 7, -680876936); + d = md5_ff(d, a, b, c, x[i + 1], 12, -389564586); + c = md5_ff(c, d, a, b, x[i + 2], 17, 606105819); + b = md5_ff(b, c, d, a, x[i + 3], 22, -1044525330); + a = md5_ff(a, b, c, d, x[i + 4], 7, -176418897); + d = md5_ff(d, a, b, c, x[i + 5], 12, 1200080426); + c = md5_ff(c, d, a, b, x[i + 6], 17, -1473231341); + b = md5_ff(b, c, d, a, x[i + 7], 22, -45705983); + a = md5_ff(a, b, c, d, x[i + 8], 7, 1770035416); + d = md5_ff(d, a, b, c, x[i + 9], 12, -1958414417); + c = md5_ff(c, d, a, b, x[i + 10], 17, -42063); + b = md5_ff(b, c, d, a, x[i + 11], 22, -1990404162); + a = md5_ff(a, b, c, d, x[i + 12], 7, 1804603682); + d = md5_ff(d, a, b, c, x[i + 13], 12, -40341101); + c = md5_ff(c, d, a, b, x[i + 14], 17, -1502002290); + b = md5_ff(b, c, d, a, x[i + 15], 22, 1236535329); + + a = md5_gg(a, b, c, d, x[i + 1], 5, -165796510); + d = md5_gg(d, a, b, c, x[i + 6], 9, -1069501632); + c = md5_gg(c, d, a, b, x[i + 11], 14, 643717713); + b = md5_gg(b, c, d, a, x[i], 20, -373897302); + a = md5_gg(a, b, c, d, x[i + 5], 5, -701558691); + d = md5_gg(d, a, b, c, x[i + 10], 9, 38016083); + c = md5_gg(c, d, a, b, x[i + 15], 14, -660478335); + b = md5_gg(b, c, d, a, x[i + 4], 20, -405537848); + a = md5_gg(a, b, c, d, x[i + 9], 5, 568446438); + d = md5_gg(d, a, b, c, x[i + 14], 9, -1019803690); + c = md5_gg(c, d, a, b, x[i + 3], 14, -187363961); + b = md5_gg(b, c, d, a, x[i + 8], 20, 1163531501); + a = md5_gg(a, b, c, d, x[i + 13], 5, -1444681467); + d = md5_gg(d, a, b, c, x[i + 2], 9, -51403784); + c = md5_gg(c, d, a, b, x[i + 7], 14, 1735328473); + b = md5_gg(b, c, d, a, x[i + 12], 20, -1926607734); + + a = md5_hh(a, b, c, d, x[i + 5], 4, -378558); + d = md5_hh(d, a, b, c, x[i + 8], 11, -2022574463); + c = md5_hh(c, d, a, b, x[i + 11], 16, 1839030562); + b = md5_hh(b, c, d, a, x[i + 14], 23, -35309556); + a = md5_hh(a, b, c, d, x[i + 1], 4, -1530992060); + d = md5_hh(d, a, b, c, x[i + 4], 11, 1272893353); + c = md5_hh(c, d, a, b, x[i + 7], 16, -155497632); + b = md5_hh(b, c, d, a, x[i + 10], 23, -1094730640); + a = md5_hh(a, b, c, d, x[i + 13], 4, 681279174); + d = md5_hh(d, a, b, c, x[i], 11, -358537222); + c = md5_hh(c, d, a, b, x[i + 3], 16, -722521979); + b = md5_hh(b, c, d, a, x[i + 6], 23, 76029189); + a = md5_hh(a, b, c, d, x[i + 9], 4, -640364487); + d = md5_hh(d, a, b, c, x[i + 12], 11, -421815835); + c = md5_hh(c, d, a, b, x[i + 15], 16, 530742520); + b = md5_hh(b, c, d, a, x[i + 2], 23, -995338651); + + a = md5_ii(a, b, c, d, x[i], 6, -198630844); + d = md5_ii(d, a, b, c, x[i + 7], 10, 1126891415); + c = md5_ii(c, d, a, b, x[i + 14], 15, -1416354905); + b 
= md5_ii(b, c, d, a, x[i + 5], 21, -57434055); + a = md5_ii(a, b, c, d, x[i + 12], 6, 1700485571); + d = md5_ii(d, a, b, c, x[i + 3], 10, -1894986606); + c = md5_ii(c, d, a, b, x[i + 10], 15, -1051523); + b = md5_ii(b, c, d, a, x[i + 1], 21, -2054922799); + a = md5_ii(a, b, c, d, x[i + 8], 6, 1873313359); + d = md5_ii(d, a, b, c, x[i + 15], 10, -30611744); + c = md5_ii(c, d, a, b, x[i + 6], 15, -1560198380); + b = md5_ii(b, c, d, a, x[i + 13], 21, 1309151649); + a = md5_ii(a, b, c, d, x[i + 4], 6, -145523070); + d = md5_ii(d, a, b, c, x[i + 11], 10, -1120210379); + c = md5_ii(c, d, a, b, x[i + 2], 15, 718787259); + b = md5_ii(b, c, d, a, x[i + 9], 21, -343485551); + + a = safe_add(a, olda); + b = safe_add(b, oldb); + c = safe_add(c, oldc); + d = safe_add(d, oldd); + } + return [a, b, c, d]; +} + +/* + * Convert an array of little-endian words to a string + */ +function binl2rstr(input) { + var i, + output = ''; + for (i = 0; i < input.length * 32; i += 8) { + output += String.fromCharCode((input[i >> 5] >>> (i % 32)) & 0xFF); + } + return output; +} + +/* + * Convert a raw string to an array of little-endian words + * Characters >255 have their high-byte silently ignored. + */ +function rstr2binl(input) { + var i, + output = []; + output[(input.length >> 2) - 1] = undefined; + for (i = 0; i < output.length; i += 1) { + output[i] = 0; + } + for (i = 0; i < input.length * 8; i += 8) { + output[i >> 5] |= (input.charCodeAt(i / 8) & 0xFF) << (i % 32); + } + return output; +} + +/* + * Calculate the MD5 of a raw string + */ +function rstr_md5(s) { + return binl2rstr(binl_md5(rstr2binl(s), s.length * 8)); +} + +/* + * Calculate the HMAC-MD5, of a key and some data (raw strings) + */ +function rstr_hmac_md5(key, data) { + var i, + bkey = rstr2binl(key), + ipad = [], + opad = [], + hash; + ipad[15] = opad[15] = undefined; + if (bkey.length > 16) { + bkey = binl_md5(bkey, key.length * 8); + } + for (i = 0; i < 16; i += 1) { + ipad[i] = bkey[i] ^ 0x36363636; + opad[i] = bkey[i] ^ 0x5C5C5C5C; + } + hash = binl_md5(ipad.concat(rstr2binl(data)), 512 + data.length * 8); + return binl2rstr(binl_md5(opad.concat(hash), 512 + 128)); +} + +/* + * Convert a raw string to a hex string + */ +function rstr2hex(input) { + var hex_tab = '0123456789abcdef', + output = '', + x, + i; + for (i = 0; i < input.length; i += 1) { + x = input.charCodeAt(i); + output += hex_tab.charAt((x >>> 4) & 0x0F) + + hex_tab.charAt(x & 0x0F); + } + return output; +} + +/* + * Encode a string as utf-8 + */ +function str2rstr_utf8(input) { + return unescape(input); +} + +/* + * Take string arguments and return either raw or hex encoded strings + */ +function raw_md5(s) { + return rstr_md5(str2rstr_utf8(s)); +} +function hex_md5(s) { + return rstr2hex(raw_md5(s)); +} +function raw_hmac_md5(k, d) { + return rstr_hmac_md5(str2rstr_utf8(k), str2rstr_utf8(d)); +} +function hex_hmac_md5(k, d) { + return rstr2hex(raw_hmac_md5(k, d)); +} + +function md5(string, key, raw) { + if (!key) { + if (!raw) { + return hex_md5(string); + } + return raw_md5(string); + } + if (!raw) { + return hex_hmac_md5(key, string); + } + return raw_hmac_md5(key, string); +} + +// end md5.js + +// base64.js +/** +* +* Base64 encode / decode +* http://www.webtoolkit.info/ +* +**/ + +// private property +var _keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; + +var Base64 = { + +// public method for encoding +encode : function (input) { + var output = ""; + var chr1, chr2, chr3, enc1, enc2, enc3, enc4; + var i = 0; + + input = 
Base64._utf8_encode(input); + + while (i < input.length) { + + chr1 = input.charCodeAt(i++); + chr2 = input.charCodeAt(i++); + chr3 = input.charCodeAt(i++); + + enc1 = chr1 >> 2; + enc2 = ((chr1 & 3) << 4) | (chr2 >> 4); + enc3 = ((chr2 & 15) << 2) | (chr3 >> 6); + enc4 = chr3 & 63; + + if (isNaN(chr2)) { + enc3 = enc4 = 64; + } else if (isNaN(chr3)) { + enc4 = 64; + } + + output = output + + _keyStr.charAt(enc1) + _keyStr.charAt(enc2) + + _keyStr.charAt(enc3) + _keyStr.charAt(enc4); + + } + + return output; +}, + +// public method for decoding +decode : function (input) { + var output = ""; + var chr1, chr2, chr3; + var enc1, enc2, enc3, enc4; + var i = 0; + + input = input.replace(/[^A-Za-z0-9\+\/\=]/g, ""); + + while (i < input.length) { + + enc1 = _keyStr.indexOf(input.charAt(i++)); + enc2 = _keyStr.indexOf(input.charAt(i++)); + enc3 = _keyStr.indexOf(input.charAt(i++)); + enc4 = _keyStr.indexOf(input.charAt(i++)); + + chr1 = (enc1 << 2) | (enc2 >> 4); + chr2 = ((enc2 & 15) << 4) | (enc3 >> 2); + chr3 = ((enc3 & 3) << 6) | enc4; + + output = output + String.fromCharCode(chr1); + + if (enc3 != 64) { + output = output + String.fromCharCode(chr2); + } + if (enc4 != 64) { + output = output + String.fromCharCode(chr3); + } + + } + + output = Base64._utf8_decode(output); + + return output; + +}, + +// private method for UTF-8 encoding +_utf8_encode : function (string) { + string = string.replace(/\r\n/g,"\n"); + var utftext = ""; + + for (var n = 0; n < string.length; n++) { + + var c = string.charCodeAt(n); + + if (c < 128) { + utftext += String.fromCharCode(c); + } + else if((c > 127) && (c < 2048)) { + utftext += String.fromCharCode((c >> 6) | 192); + utftext += String.fromCharCode((c & 63) | 128); + } + else { + utftext += String.fromCharCode((c >> 12) | 224); + utftext += String.fromCharCode(((c >> 6) & 63) | 128); + utftext += String.fromCharCode((c & 63) | 128); + } + + } + + return utftext; +}, + +// private method for UTF-8 decoding +_utf8_decode : function (utftext) { + var string = ""; + var i = 0; + var c = 0; + var c1 = 0; + var c2 = 0; + + while ( i < utftext.length ) { + + c = utftext.charCodeAt(i); + + if (c < 128) { + string += String.fromCharCode(c); + i++; + } + else if((c > 191) && (c < 224)) { + c2 = utftext.charCodeAt(i+1); + string += String.fromCharCode(((c & 31) << 6) | (c2 & 63)); + i += 2; + } + else { + c2 = utftext.charCodeAt(i+1); + c3 = utftext.charCodeAt(i+2); + string += String.fromCharCode(((c & 15) << 12) | ((c2 & 63) << 6) | (c3 & 63)); + i += 3; + } + + } + + return string; +} + +} + +// end base64.js + +function xmlEncode(text) { + return text.replace(/([\&"'<>])/g, function(x, item) { + return {'&': '&', '"': '"', '<': '<', '>': '>', "'": '''}[item]; + }); +} +function xmlDecode(xml) { + return xml.replace(/("|<|>|&|')/g, function(x, item) { + return {'&': '&', '"': '"', '<': '<', '>': '>', ''': "'"}[item]; + }); +} + +// xmpp.js +function XmlStreamParser() { + this.buffer = ""; + this._parsed = []; + this.reset(); + return this; +} + +XmlStreamParser.kText = "text"; +XmlStreamParser.kElement = "element"; +XmlStreamParser.kEndElement = "endElement"; +XmlStreamParser.kAttributeName = "attributeName"; +XmlStreamParser.kAttributeValue = "attributeValue"; + +XmlStreamParser.prototype.reset = function() { + this._state = XmlStreamParser.kText; + this._attributes = {}; + this._attributeName = ""; + this._attributeValue = ""; + this._attributeEquals = false; + this._attributeQuote = ""; + this._slash = false; + this._value = ""; + this._decl = false; +} + 
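+
+// parse() below runs each character of an incoming chunk through the state
+// machine above and returns a flat list of nodes, shaped roughly like:
+//   {type: "element", value: "stream:features", attributes: {...}}
+//   {type: "endElement", value: "stream:features"}
+//   {type: "text", value: "..."}
+// (node shapes read off flush() above; the tag name is just an illustrative
+// XMPP example). State is kept on the instance, so a tag split across two
+// socket reads still parses correctly on the next call.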
+XmlStreamParser.prototype.parse = function(data) { + this._parsed = []; + + for (var i = 0; i < data.length; i++) { + var c = data.charAt(i); + this.parseCharacter(c); + } + + return this._parsed; +} + +XmlStreamParser.prototype.flush = function() { + var node = {type: this._state}; + if (this._value) { + node.value = xmlDecode(this._value); + } + if (this._attributes.length || this._state == XmlStreamParser.kElement) { + node.attributes = this._attributes; + } + if (this._state != XmlStreamParser.kText || this._value) { + this.emit(node); + } + this.reset(); +} + +XmlStreamParser.prototype.parseCharacter = function(c) { + switch (this._state) { + case XmlStreamParser.kText: + if (c == '<') { + this.flush(); + this._state = XmlStreamParser.kElement; + } else { + this._value += c; + } + break; + case XmlStreamParser.kElement: + case XmlStreamParser.kEndElement: + switch (c) { + case '>': + this.finishElement(); + break; + case '/': + if (!this._value) { + this._state = XmlStreamParser.kEndElement; + } else if (!this._slash) { + this._slash = true; + } else { + this._value += c; + } + break; + case '?': + if (!this._value) { + this._decl = true; + } else { + this._value += '?'; + } + break; + case ' ': + case '\t': + case '\r': + case '\n': + this._state = XmlStreamParser.kAttributeName; + break; + default: + if (this._slash) { + this._slash = false; + this._value += '/'; + } + this._value += c; + break; + } + break; + case XmlStreamParser.kAttributeName: + switch (c) { + case ' ': + case '\t': + case '\r': + case '\n': + if (this._attributeName) { + this._state = XmlStreamParser.kAttributeValue; + } + break; + case '/': + if (!this._slash) { + this._slash = true; + } else { + this._value += '/'; + } + break; + case '=': + this._state = XmlStreamParser.kAttributeValue; + break; + case '>': + if (this._attributeName) { + this._attributes[this._attributeName] = null; + } + this._state = XmlStreamParser.kElement; + this.finishElement(); + break; + default: + this._attributeName += c; + break; + } + break; + case XmlStreamParser.kAttributeValue: + switch (c) { + case ' ': + case '\t': + case '\r': + case '\n': + if (this._attributeValue) { + this._state = XmlStreamParser.kAttributeName; + } + break; + case '"': + case "'": + if (!this._attributeValue && !this._attributeQuote) { + this._attributeQuote = c; + } else if (this._attributeQuote == c) { + this._attributes[this._attributeName] = this._attributeValue; + this._attributeName = ""; + this._attributeValue = ""; + this._attributeQuote = ""; + this._state = XmlStreamParser.kAttributeName; + } else { + this._attributeValue += c; + } + break; + case '>': + this.finishElement(); + break; + default: + this._attributeValue += c; + break; + } + break; + } +} + +XmlStreamParser.prototype.finishElement = function() { + if (this._decl) { + this.reset(); + } else { + var value = this._value; + var slash = this._slash; + this.flush(); + if (slash) { + this._state = XmlStreamParser.kEndElement; + this._value = value; + this.flush(); + } + } + this._state = XmlStreamParser.kText; +} + +XmlStreamParser.prototype.emit = function(node) { + this._parsed.push(node); +} + +function XmlStanzaParser(depth) { + this._depth = depth || 0; + this._parsed = []; + this._stack = []; + this._stream = new XmlStreamParser(); + return this; +} + +XmlStanzaParser.prototype.reset = function() { + this._parsed = []; + this._stack = []; + this._stream.reset(); +} + +XmlStanzaParser.prototype.emit = function(stanza) { + this._parsed.push(stanza); +} + 
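+
+// parse() below consumes the flat node stream from XmlStreamParser, pushing
+// each opening element onto a stack and emitting a complete tree of
+// {name, attributes, children, text} whenever an element at the configured
+// depth closes. connect() further down constructs this with depth 1, so
+// (assuming the usual XMPP framing) the outer stream element stays open for
+// the whole session and each child stanza is delivered whole.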
+XmlStanzaParser.prototype.parse = function(data) { + this._parsed = []; + var nodes = this._stream.parse(data); + for (var i = 0; i < nodes.length; i++) { + this.parseNode(nodes[i]); + } + return this._parsed; +} + +XmlStanzaParser.prototype.parseNode = function(node) { + switch (node.type) { + case XmlStreamParser.kElement: + this._stack.push({name: node.value, attributes: node.attributes, children: [], text: ""}); + break; + case XmlStreamParser.kEndElement: + if (this._stack.length == 1 + this._depth) { + this.emit(this._stack.pop()); + } else { + var last = this._stack.pop(); + this._stack[this._stack.length - 1].children.push(last); + } + break; + case XmlStreamParser.kText: + if (this._stack) { + this._stack[this._stack.length - 1].text += node.value; + } + break; + } +} + +// end xmpp.js + +var gFocus = false; +var gUnread = 0; + +function updateTitle() { + if (gUnread) { + terminal.setTitle("(" + gUnread.toString() + ") ~Friends XMPP"); + } else { + terminal.setTitle("~Friends XMPP"); + } +} + +terminal.print("~Friends XMPP"); +updateTitle(); +terminal.setEcho(false); +terminal.setPrompt("Username:"); +terminal.readLine().then(function(userName) { + terminal.setPassword(true); + terminal.setPrompt("Password:"); + terminal.readLine().then(function(password) { + terminal.setPrompt(">"); + terminal.setPassword(false); + network.newConnection().then(function(socket) { + connect(socket, userName, password); + }).catch(function(error) { + terminal.print(error); + }); + }); +}); + +function niceTime(lastTime, thisTime) { + if (!lastTime) { + return thisTime; + } + let result = []; + let lastParts = lastTime.split(" "); + let thisParts = thisTime.split(" "); + for (let i = 0; i < thisParts.length; i++) { + if (thisParts[i] !== lastParts[i]) { + result.push(thisParts[i]); + } + } + return result.join(" "); +} + +function formatMessage(message) { + var result; + if (typeof message == "string") { + result = []; + var regex = /(\w+:\/*\S+?)(?=(?:[\.!?])?(?:$|\s))/gi; + var match; + var lastIndex = 0; + while ((match = regex.exec(message)) !== null) { + result.push({class: "base1", value: message.substring(lastIndex, match.index)}); + result.push({href: match[0]}); + lastIndex = regex.lastIndex; + } + result.push({class: "base1", value: message.substring(lastIndex)}); + } else { + result = message; + } + return result; +} + +var lastTimestamp = null; +function printMessage(stanza) { + var body; + var delayed = false; + var now = new Date().toString(); + for (var i in stanza.children) { + if (stanza.children[i].name == "body") { + body = stanza.children[i].text; + } + if (stanza.children[i].name == "delay") { + delayed = true; + now = new Date(stanza.children[i].attributes.stamp).toString(); + } + } + + var from = stanza.attributes.from || "unknown"; + if (from && from.indexOf('/') != -1) { + from = from.split("/")[1]; + } + + terminal.print( + {class: "base0", value: niceTime(lastTimestamp, now)}, + " ", + {class: "base00", value: "<"}, + {class: "base3", value: from}, + {class: "base00", value: ">"}, + " ", + formatMessage(body)); + lastTimestamp = now; +} + +var gRecent = []; + +core.register("focus", function() { + gFocus = true; + gUnread = 0; + updateTitle(); +}); + +core.register("blur", function() { + gFocus = false; +}); + +var gPingCount = 0; + +function schedulePing(socket) { + setTimeout(function() { + socket.write(""); + schedulePing(socket); + }, 60000); +} + +terminal.split([ + {type: "horizontal", children: [ + {name: "terminal", grow: 1}, + {name: "users", grow: 0}, + ]}, +]); 
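+
+// Same two-pane layout as the BBS app: a "terminal" pane that grows to fill
+// the window for the conversation, and a fixed "users" pane that
+// refreshUsers() below redraws whenever a presence stanza arrives.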
+terminal.select("terminal"); + +var gPresence = {}; + +function refreshUsers() { + terminal.select("users"); + terminal.clear(); + for (var i in gPresence) { + terminal.print(i); + } + terminal.select("terminal"); +} + +function connect(socket, userName, password) { + var kTrustedCertificate = "-----BEGIN CERTIFICATE-----\n" + + "MIICqjCCAhOgAwIBAgIJAPEhMguftPdoMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV\n" + + "BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEPMA0GA1UEBwwGQXVzdGluMRswGQYDVQQK\n" + + "DBJUcm91YmxlIEltcGFjdCBMTEMxITAfBgNVBAMMGGphYmJlci50cm91YmxlaW1w\n" + + "YWN0LmNvbTAeFw0xNDEyMjYwMzU5NDRaFw0yNDEyMjMwMzU5NDRaMG4xCzAJBgNV\n" + + "BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEPMA0GA1UEBwwGQXVzdGluMRswGQYDVQQK\n" + + "DBJUcm91YmxlIEltcGFjdCBMTEMxITAfBgNVBAMMGGphYmJlci50cm91YmxlaW1w\n" + + "YWN0LmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAueniASgCpF7mQFGt\n" + + "TycOhMt9VMetFwwDkwVglvO+VKq8JWxWkJaCWm8YYacG6+zn4RlV3zVQhrAcReTU\n" + + "pPQAe+28wJdqVt/HPyfcwJtLKUEL7Nk5N8mY2s6yyBVvMn9e7Yt/fnv7pOCpcmBi\n" + + "kuLlwSGEfMnDskt8kH4coidP4w0CAwEAAaNQME4wHQYDVR0OBBYEFOztZhuuqXrN\n" + + "yUnPo/9aoNNb/o2CMB8GA1UdIwQYMBaAFOztZhuuqXrNyUnPo/9aoNNb/o2CMAwG\n" + + "A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADgYEAgK/7yoGEHeG95i6E1A8ZBkeL\n" + + "monKMys3RxnJciuFdBrUcvymsgOTrAGvatPXatNbHQ/eY8LnkKHtf0pCCs0B/xST\n" + + "DTO3KdlNCXApMUieFPjVggRzikbmbPCvtTt2BzqQKzVqubf9eM+kbsD7Pkgycm5+\n" + + "q46TZws0oz5lAvklIgo=\n" + + "-----END CERTIFICATE-----"; + var resource = "tildefriend" + core.user.index; + socket.connect("jabber.troubleimpact.com", 5222).then(function() { + var parse = new XmlStanzaParser(1); + socket.write(""); + socket.write(""); + + var started = false; + var authenticated = false; + socket.onError(function(error) { + terminal.print("SOCKET ERROR"); + terminal.print(JSON.stringify(error)); + terminal.print(error); + terminal.print(error.message); + }); + socket.read(function(data) { + try { + gRecent.push(data); + while (gRecent.length > 10) { + gRecent.shift(); + } + if (data === undefined) { + terminal.print(JSON.stringify(data)); + terminal.print("Disconnected."); + terminal.print("Recent data:"); + for (let i = 0; i < gRecent.length; i++) { + terminal.print(JSON.stringify(gRecent[i])); + } + return; + } + parse.parse(data).forEach(function(stanza) { + if (stanza.name == "stream:features") { + if (!started) { + socket.write(""); + } else if (!authenticated) { + socket.write(""); + } else { + socket.write("" + resource + ""); + } + } else if (stanza.name == "proceed") { + if (!started) { + started = true; + socket.addTrustedCertificate(kTrustedCertificate); + socket.startTls().then(function() { + parse.reset(); + socket.write(""); + }).catch(function(e) { + terminal.print("TLS FAILED: " + e); + }); + } + } else if (stanza.name == "success") { + authenticated = true; + socket.write(""); + socket.write(""); + parse.reset(); + } else if (stanza.name == "iq") { + if (stanza.attributes.id == "bind0") { + socket.write(""); + } else if (stanza.attributes.id == "session0") { + socket.write("1"); + core.register("onInput", function(input) { + socket.write("" + xmlEncode(input) + ""); + }); + schedulePing(socket); + } else if (stanza.attributes.id == "ping" + gPingCount) { + // Ping response. 
+ } else { + terminal.print(JSON.stringify(stanza)); + } + } else if (stanza.name == "message") { + printMessage(stanza); + if (!gFocus) { + ++gUnread; + updateTitle(); + } + } else if (stanza.name == "challenge") { + var challenge = Base64.decode(stanza.text); + var parts = challenge.split(','); + challenge = {}; + for (var i = 0; i < parts.length; i++) { + var equals = parts[i].indexOf("="); + if (equals != -1) { + var key = parts[i].substring(0, equals); + var value = parts[i].substring(equals + 1); + if (value.length > 2 && value.charAt(0) == '"' && value.charAt(value.length - 1) == '"') { + value = value.substring(1, value.length - 1); + } + challenge[key] = value; + } + } + if (challenge.rspauth) { + socket.write(""); + } else { + var realm = "jabber.troubleimpact.com"; + var cnonce = Base64.encode(new Date().toString()); + var x = userName + ":" + realm + ":" + password; + var y = raw_md5(x); + var a1 = y + ":" + challenge.nonce + ":" + cnonce; + var digestUri = "xmpp/" + realm; + var a2 = "AUTHENTICATE:" + digestUri; + var ha1 = md5(a1); + var ha2 = md5(a2); + var nc = "00000001"; + var kd = ha1 + ":" + challenge.nonce + ":" + nc + ":" + cnonce + ":" + challenge.qop + ":" + ha2; + var response = md5(kd); + var value = Base64.encode('username="' + userName + '",realm="' + realm + '",nonce="' + challenge.nonce + '",cnonce="' + cnonce + '",nc=' + nc + ',qop=' + challenge.qop + ',digest-uri="' + digestUri + '",response=' + response + ',charset=utf-8'); + socket.write("" + value + ""); + } + } else if (stanza.name == "presence") { + var name = stanza.attributes.from.split('/', 2)[1]; + if (stanza.attributes.type == "unavailable") { + terminal.print(name + " has left the room."); + delete gPresence[name]; + } else { + if (!gPresence[name]) { + terminal.print(name + " has joined the room."); + } + gPresence[name] = stanza; + } + refreshUsers(); + } else { + terminal.print(data); + } + }); + } catch (error) { + terminal.print("ERROR: " + error); + terminal.print("ERROR: " + JSON.stringify(error)); + terminal.print("ERROR: " + error.message); + } + }); + }).catch(function(e) { + terminal.print("connect failed: ", e); + }); +} diff --git a/src/Database.cpp b/src/Database.cpp new file mode 100644 index 00000000..ffba7724 --- /dev/null +++ b/src/Database.cpp @@ -0,0 +1,190 @@ +#include "Database.h" + +#include "Task.h" + +#include +#include + +int Database::_count = 0; + +Database::Database(Task* task) { + ++_count; + + _task = task; + + v8::Handle data = v8::External::New(task->getIsolate(), this); + + v8::Local databaseTemplate = v8::ObjectTemplate::New(task->getIsolate()); + databaseTemplate->SetInternalFieldCount(1); + databaseTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "get"), v8::FunctionTemplate::New(task->getIsolate(), get, data)); + databaseTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "set"), v8::FunctionTemplate::New(task->getIsolate(), set, data)); + databaseTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "remove"), v8::FunctionTemplate::New(task->getIsolate(), remove, data)); + databaseTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "getAll"), v8::FunctionTemplate::New(task->getIsolate(), getAll, data)); + + v8::Local databaseObject = databaseTemplate->NewInstance(); + databaseObject->SetInternalField(0, v8::External::New(task->getIsolate(), this)); + _object = v8::Persistent >(task->getIsolate(), databaseObject); +} + +Database::~Database() { + --_count; +} + +void Database::create(const v8::FunctionCallbackInfo& args) { + 
v8::HandleScope handleScope(args.GetIsolate()); + if (Database* database = new Database(Task::get(args.GetIsolate()))) { + if (database->open(args.GetIsolate(), *v8::String::Utf8Value(args[0].As()))) { + v8::Handle result = v8::Local::New(args.GetIsolate(), database->_object); + args.GetReturnValue().Set(result); + } + database->release(); + } +} + +bool Database::checkError(const char* command, int result) { + bool isError = false; + if (result != MDB_SUCCESS) { + isError = true; + + std::ostringstream buffer; + buffer << command << " failed (" << result << "): " << mdb_strerror(result); + _task->getIsolate()->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8(_task->getIsolate(), buffer.str().c_str()))); + } + return isError; +} + +bool Database::open(v8::Isolate* isolate, const char* path) { + int result = mdb_env_create(&_environment); + if (checkError("mdb_env_create", result)) { + return false; + } + + result = mdb_env_set_maxdbs(_environment, 10); + checkError("mdb_env_set_maxdbs", result); + + result = mdb_env_open(_environment, path, 0, 0644); + if (!checkError("mdb_env_open", result)) { + result = mdb_txn_begin(_environment, 0, 0, &_transaction); + if (!checkError("mdb_txn_begin", result)) { + result = mdb_dbi_open(_transaction, NULL, MDB_CREATE, &_database); + if (!checkError("mdb_dbi_open", result)) { + result = mdb_txn_commit(_transaction); + checkError("mdb_txn_commit", result); + } + } + + if (result != MDB_SUCCESS) { + mdb_txn_abort(_transaction); + } + } + + if (result != MDB_SUCCESS) { + mdb_env_close(_environment); + } + + return result == MDB_SUCCESS; +} + +void Database::get(const v8::FunctionCallbackInfo& args) { + if (Database* database = Database::get(args.Data())) { + int result = mdb_txn_begin(database->_environment, 0, MDB_RDONLY, &database->_transaction); + if (!database->checkError("mdb_txn_begin", result)) { + MDB_val key; + MDB_val value; + v8::String::Utf8Value keyString(args[0].As()); + key.mv_data = *keyString; + key.mv_size = keyString.length(); + if (mdb_get(database->_transaction, database->_database, &key, &value) == MDB_SUCCESS) { + args.GetReturnValue().Set(v8::String::NewFromUtf8(args.GetIsolate(), reinterpret_cast(value.mv_data), v8::String::kNormalString, value.mv_size)); + } + mdb_txn_reset(database->_transaction); + } + } +} + +void Database::set(const v8::FunctionCallbackInfo& args) { + if (Database* database = Database::get(args.Data())) { + int result = mdb_txn_begin(database->_environment, 0, 0, &database->_transaction); + if (!database->checkError("mdb_txn_begin", result)) { + MDB_val key; + MDB_val data; + v8::String::Utf8Value keyString(args[0].As()); + key.mv_data = *keyString; + key.mv_size = keyString.length(); + v8::String::Utf8Value valueString(args[1]->ToString(args.GetIsolate())); + data.mv_data = *valueString; + data.mv_size = valueString.length(); + result = mdb_put(database->_transaction, database->_database, &key, &data, 0); + database->checkError("mdb_put", result); + mdb_txn_commit(database->_transaction); + } + } +} + +void Database::remove(const v8::FunctionCallbackInfo& args) { + if (Database* database = Database::get(args.Data())) { + int result = mdb_txn_begin(database->_environment, 0, 0, &database->_transaction); + if (!database->checkError("mdb_txn_begin", result)) { + MDB_val key; + v8::String::Utf8Value keyString(args[0].As()); + key.mv_data = *keyString; + key.mv_size = keyString.length(); + result = mdb_del(database->_transaction, database->_database, &key, 0); + database->checkError("mdb_del", result); + 
mdb_txn_commit(database->_transaction); + } + } +} + +void Database::getAll(const v8::FunctionCallbackInfo& args) { + if (Database* database = Database::get(args.Data())) { + int result = mdb_txn_begin(database->_environment, 0, MDB_RDONLY, &database->_transaction); + if (!database->checkError("mdb_txn_begin", result)) { + MDB_cursor* cursor; + result = mdb_cursor_open(database->_transaction, database->_database, &cursor); + if (!database->checkError("mdb_cursor_open", result)) { + int expectedCount = 0; + MDB_stat statistics; + if (mdb_stat(database->_transaction, database->_database, &statistics) == 0) { + expectedCount = statistics.ms_entries; + } + v8::Local array = v8::Array::New(args.GetIsolate(), expectedCount); + + MDB_val key; + int index = 0; + while ((result = mdb_cursor_get(cursor, &key, 0, MDB_NEXT)) == 0) { + array->Set(index++, v8::String::NewFromUtf8(args.GetIsolate(), reinterpret_cast(key.mv_data), v8::String::kNormalString, key.mv_size)); + } + if (result == MDB_NOTFOUND) { + args.GetReturnValue().Set(array); + } else { + database->checkError("mdb_cursor_get", result); + } + mdb_cursor_close(cursor); + } + mdb_txn_reset(database->_transaction); + } + } +} + +void Database::onRelease(const v8::WeakCallbackData& data) { + data.GetParameter()->_object.Reset(); + delete data.GetParameter(); +} + +void Database::ref() { + if (++_refCount == 1) { + _object.ClearWeak(); + } +} + +void Database::release() { + assert(_refCount >= 1); + if (--_refCount == 0) { + _object.SetWeak(this, onRelease); + } +} + +Database* Database::get(v8::Handle databaseObject) { + return reinterpret_cast(v8::Handle::Cast(databaseObject)->Value()); +} diff --git a/src/Database.h b/src/Database.h new file mode 100644 index 00000000..e4bd5d5a --- /dev/null +++ b/src/Database.h @@ -0,0 +1,44 @@ +#ifndef INCLUDED_Database +#define INCLUDED_Database + +#include +#include + +class Task; + +class Database { +public: + static void create(const v8::FunctionCallbackInfo& args); + static int getCount() { return _count; } + +private: + Database(Task* task); + ~Database(); + + Task* _task; + int _refCount = 1; + v8::Persistent > _object; + + MDB_env* _environment; + MDB_dbi _database; + MDB_txn* _transaction; + + static int _count; + + static Database* get(v8::Handle databaseObject); + static void onRelease(const v8::WeakCallbackData& data); + + static void get(const v8::FunctionCallbackInfo& args); + static void set(const v8::FunctionCallbackInfo& args); + static void remove(const v8::FunctionCallbackInfo& args); + static void getAll(const v8::FunctionCallbackInfo& args); + + bool open(v8::Isolate* isolate, const char* path); + + bool checkError(const char* command, int result); + + void ref(); + void release(); +}; + +#endif diff --git a/src/File.cpp b/src/File.cpp new file mode 100644 index 00000000..28a6d878 --- /dev/null +++ b/src/File.cpp @@ -0,0 +1,129 @@ +#include "File.h" + +#include "Task.h" + +#include +#include +#include +#include + +#ifdef _WIN32 +#include +#else +#include +#include +#endif + +void File::configure(v8::Isolate* isolate, v8::Handle global) { + v8::Local fileTemplate = v8::ObjectTemplate::New(isolate); + fileTemplate->Set(v8::String::NewFromUtf8(isolate, "readFile"), v8::FunctionTemplate::New(isolate, readFile)); + fileTemplate->Set(v8::String::NewFromUtf8(isolate, "readDirectory"), v8::FunctionTemplate::New(isolate, readDirectory)); + fileTemplate->Set(v8::String::NewFromUtf8(isolate, "makeDirectory"), v8::FunctionTemplate::New(isolate, makeDirectory)); + 
fileTemplate->Set(v8::String::NewFromUtf8(isolate, "writeFile"), v8::FunctionTemplate::New(isolate, writeFile)); + fileTemplate->Set(v8::String::NewFromUtf8(isolate, "renameFile"), v8::FunctionTemplate::New(isolate, renameFile)); + fileTemplate->Set(v8::String::NewFromUtf8(isolate, "unlinkFile"), v8::FunctionTemplate::New(isolate, unlinkFile)); + global->Set(v8::String::NewFromUtf8(isolate, "File"), fileTemplate); +} + +void File::readFile(const v8::FunctionCallbackInfo& args) { + v8::HandleScope scope(args.GetIsolate()); + v8::Handle fileName = args[0]->ToString(); + + v8::String::Utf8Value utf8FileName(fileName); + std::ifstream file(*utf8FileName, std::ios_base::in | std::ios_base::binary | std::ios_base::ate); + std::streampos fileSize = file.tellg(); + if (fileSize >= 0 && fileSize < 4 * 1024 * 1024) { + file.seekg(0, std::ios_base::beg); + char* buffer = new char[fileSize]; + file.read(buffer, fileSize); + std::string contents(buffer, buffer + fileSize); + args.GetReturnValue().Set(v8::String::NewFromOneByte(args.GetIsolate(), reinterpret_cast(buffer), v8::String::kNormalString, fileSize)); + delete[] buffer; + } +} + +void File::writeFile(const v8::FunctionCallbackInfo& args) { + v8::HandleScope scope(args.GetIsolate()); + v8::Handle fileName = args[0]->ToString(); + v8::Handle contents = args[1]->ToString(); + + v8::String::Utf8Value utf8FileName(fileName); + std::ofstream file(*utf8FileName, std::ios_base::out | std::ios_base::binary); + + if (contents->ContainsOnlyOneByte()) { + std::vector bytes(contents->Length()); + contents->WriteOneByte(bytes.data(), 0, bytes.size(), v8::String::NO_NULL_TERMINATION); + if (!file.write(reinterpret_cast(bytes.data()), bytes.size())) { + args.GetReturnValue().Set(v8::Integer::New(args.GetIsolate(), -1)); + } + } else { + v8::String::Utf8Value utf8Contents(contents); + if (!file.write(*utf8Contents, utf8Contents.length())) { + args.GetReturnValue().Set(v8::Integer::New(args.GetIsolate(), -1)); + } + } +} + +void File::renameFile(const v8::FunctionCallbackInfo& args) { + Task* task = reinterpret_cast(args.GetIsolate()->GetData(0)); + v8::HandleScope scope(args.GetIsolate()); + + v8::String::Utf8Value oldName(args[0]->ToString()); + v8::String::Utf8Value newName(args[1]->ToString()); + + uv_fs_t req; + int result = uv_fs_rename(task->getLoop(), &req, *oldName, *newName, 0); + args.GetReturnValue().Set(v8::Integer::New(args.GetIsolate(), result)); +} + +void File::unlinkFile(const v8::FunctionCallbackInfo& args) { + Task* task = reinterpret_cast(args.GetIsolate()->GetData(0)); + v8::HandleScope scope(args.GetIsolate()); + + v8::String::Utf8Value fileName(args[0]->ToString()); + + uv_fs_t req; + int result = uv_fs_unlink(task->getLoop(), &req, *fileName, 0); + args.GetReturnValue().Set(v8::Integer::New(args.GetIsolate(), result)); +} + +void File::readDirectory(const v8::FunctionCallbackInfo& args) { + v8::HandleScope scope(args.GetIsolate()); + v8::Handle directory = args[0]->ToString(); + + v8::Handle array = v8::Array::New(args.GetIsolate(), 0); + +#ifdef _WIN32 + WIN32_FIND_DATA find; + std::string pattern = *v8::String::Utf8Value(directory); + pattern += "\\*"; + HANDLE handle = FindFirstFile(pattern.c_str(), &find); + if (handle != INVALID_HANDLE_VALUE) { + int index = 0; + do { + array->Set(v8::Integer::New(args.GetIsolate(), index++), v8::String::NewFromUtf8(args.GetIsolate(), find.cFileName)); + } while (FindNextFile(handle, &find) != 0); + FindClose(handle); + } +#else + if (DIR* dir = opendir(*v8::String::Utf8Value(directory))) { + int 
index = 0; + while (struct dirent* entry = readdir(dir)) { + array->Set(v8::Integer::New(args.GetIsolate(), index++), v8::String::NewFromUtf8(args.GetIsolate(), entry->d_name)); + } + closedir(dir); + } +#endif + + args.GetReturnValue().Set(array); +} + +void File::makeDirectory(const v8::FunctionCallbackInfo& args) { + Task* task = Task::get(args.GetIsolate()); + v8::HandleScope scope(args.GetIsolate()); + v8::Handle directory = args[0]->ToString(); + + uv_fs_t req; + int result = uv_fs_mkdir(task->getLoop(), &req, *v8::String::Utf8Value(directory), 0755, 0); + args.GetReturnValue().Set(result); +} diff --git a/src/File.h b/src/File.h new file mode 100644 index 00000000..7b693eb1 --- /dev/null +++ b/src/File.h @@ -0,0 +1,19 @@ +#ifndef INCLUDED_File +#define INCLUDED_File + +#include + +class File { +public: + static void configure(v8::Isolate* isolate, v8::Handle global); + +private: + static void readFile(const v8::FunctionCallbackInfo& args); + static void writeFile(const v8::FunctionCallbackInfo& args); + static void readDirectory(const v8::FunctionCallbackInfo& args); + static void makeDirectory(const v8::FunctionCallbackInfo& args); + static void unlinkFile(const v8::FunctionCallbackInfo& args); + static void renameFile(const v8::FunctionCallbackInfo& args); +}; + +#endif diff --git a/src/Mutex.cpp b/src/Mutex.cpp new file mode 100644 index 00000000..2b913a6a --- /dev/null +++ b/src/Mutex.cpp @@ -0,0 +1,32 @@ +#include "Mutex.h" + +#include +#include + +Mutex::Mutex() { + int result = uv_mutex_init(&_mutex); + if (result != 0) { + assert("Mutex lock failed."); + } +} + +Mutex::~Mutex() { + uv_mutex_destroy(&_mutex); +} + +void Mutex::lock() { + uv_mutex_lock(&_mutex); +} + +void Mutex::unlock() { + uv_mutex_unlock(&_mutex); +} + +Lock::Lock(Mutex& mutex) +: _mutex(mutex) { + _mutex.lock(); +} + +Lock::~Lock() { + _mutex.unlock(); +} diff --git a/src/Mutex.h b/src/Mutex.h new file mode 100644 index 00000000..8e574ba7 --- /dev/null +++ b/src/Mutex.h @@ -0,0 +1,26 @@ +#ifndef INCLUDED_Mutex +#define INCLUDED_Mutex + +#include + +class Mutex { +public: + Mutex(); + ~Mutex(); + + void lock(); + void unlock(); + +private: + uv_mutex_t _mutex; +}; + +class Lock { +public: + Lock(Mutex& mutex); + ~Lock(); +private: + Mutex& _mutex; +}; + +#endif diff --git a/src/PacketStream.cpp b/src/PacketStream.cpp new file mode 100644 index 00000000..13013e01 --- /dev/null +++ b/src/PacketStream.cpp @@ -0,0 +1,85 @@ +#include "PacketStream.h" + +#include +#include + +PacketStream::PacketStream() +: _onReceive(0), + _onReceiveUserData(0) { +} + +PacketStream::~PacketStream() { + _onReceive = 0; + _onReceiveUserData = 0; + close(); +} + +void PacketStream::close() { + if (!uv_is_closing(reinterpret_cast(&_stream))) { + uv_close(reinterpret_cast(&_stream), 0); + } +} + +void PacketStream::start() { + _stream.data = this; + uv_read_start(reinterpret_cast(&_stream), onAllocate, onRead); +} + +void PacketStream::send(int packetType, char* begin, size_t length) { + size_t bufferLength = sizeof(uv_write_t) + sizeof(packetType) + sizeof(length) + length; + char* buffer = new char[bufferLength]; + uv_write_t* request = reinterpret_cast(buffer); + buffer += sizeof(uv_write_t); + memcpy(buffer, &packetType, sizeof(packetType)); + memcpy(buffer + sizeof(packetType), &length, sizeof(length)); + memcpy(buffer + sizeof(packetType) + sizeof(length), begin, length); + uv_buf_t writeBuffer; + writeBuffer.base = buffer; + writeBuffer.len = sizeof(packetType) + sizeof(length) + length; + uv_write(request, 
reinterpret_cast(&_stream), &writeBuffer, 1, onWrite); +} + +void PacketStream::setOnReceive(OnReceive* onReceiveCallback, void* userData) { + _onReceive = onReceiveCallback; + _onReceiveUserData = userData; +} + +void PacketStream::onWrite(uv_write_t* request, int status) { + delete[] reinterpret_cast(request); +} + +void PacketStream::onAllocate(uv_handle_t* handle, size_t suggestedSize, uv_buf_t* buffer) { + buffer->base = new char[suggestedSize]; + buffer->len = suggestedSize; +} + +void PacketStream::onRead(uv_stream_t* handle, ssize_t count, const uv_buf_t* buffer) { + PacketStream* owner = reinterpret_cast(handle->data); + if (count >= 0) { + if (count > 0) { + owner->_buffer.insert(owner->_buffer.end(), buffer->base, buffer->base + count); + owner->processMessages(); + } + delete[] reinterpret_cast(buffer->base); + } else { + owner->close(); + } +} + +void PacketStream::processMessages() { + int packetType = 0; + size_t length = 0; + while (_buffer.size() >= sizeof(packetType) + sizeof(length)) { + memcpy(&packetType, &*_buffer.begin(), sizeof(packetType)); + memcpy(&length, &*_buffer.begin() + sizeof(packetType), sizeof(length)); + + if (_buffer.size() >= sizeof(packetType) + sizeof(length) + length) { + if (_onReceive) { + _onReceive(packetType, &*_buffer.begin() + sizeof(length) + sizeof(packetType), length, _onReceiveUserData); + } + _buffer.erase(_buffer.begin(), _buffer.begin() + sizeof(length) + sizeof(packetType) + length); + } else { + break; + } + } +} diff --git a/src/PacketStream.h b/src/PacketStream.h new file mode 100644 index 00000000..4afafe1e --- /dev/null +++ b/src/PacketStream.h @@ -0,0 +1,34 @@ +#ifndef INCLUDED_PacketStream +#define INCLUDED_PacketStream + +#include +#include + +class PacketStream { +public: + PacketStream(); + ~PacketStream(); + + void start(); + + typedef void (OnReceive)(int packetType, const char* begin, size_t length, void* userData); + void send(int packetType, char* begin, size_t length); + void setOnReceive(OnReceive* onReceiveCallback, void* userData); + void close(); + + uv_pipe_t& getStream() { return _stream; } + +private: + OnReceive* _onReceive; + void* _onReceiveUserData; + uv_pipe_t _stream; + std::vector _buffer; + + void processMessages(); + + static void onAllocate(uv_handle_t* handle, size_t suggestedSize, uv_buf_t* buffer); + static void onRead(uv_stream_t* handle, ssize_t count, const uv_buf_t* buffer); + static void onWrite(uv_write_t* request, int status); +}; + +#endif diff --git a/src/Serialize.cpp b/src/Serialize.cpp new file mode 100644 index 00000000..483b0301 --- /dev/null +++ b/src/Serialize.cpp @@ -0,0 +1,210 @@ +#include "Serialize.h" + +#include "Task.h" +#include "TaskStub.h" + +#include + +void Serialize::writeInt8(std::vector& buffer, int8_t value) { + buffer.insert(buffer.end(), value); +} + +void Serialize::writeInt32(std::vector& buffer, int32_t value) { + const char* p = reinterpret_cast(&value); + buffer.insert(buffer.end(), p, p + sizeof(value)); +} + +void Serialize::writeUint32(std::vector& buffer, uint32_t value) { + const char* p = reinterpret_cast(&value); + buffer.insert(buffer.end(), p, p + sizeof(value)); +} + +void Serialize::writeDouble(std::vector& buffer, double value) { + const char* p = reinterpret_cast(&value); + buffer.insert(buffer.end(), p, p + sizeof(value)); +} + +int8_t Serialize::readInt8(const std::vector& buffer, int& offset) { + int8_t result; + std::memcpy(&result, &*buffer.begin() + offset, sizeof(result)); + offset += sizeof(result); + return result; +} + +int32_t 
Serialize::readInt32(const std::vector& buffer, int& offset) { + int32_t result; + std::memcpy(&result, &*buffer.begin() + offset, sizeof(result)); + offset += sizeof(result); + return result; +} + +uint32_t Serialize::readUint32(const std::vector& buffer, int& offset) { + uint32_t result; + std::memcpy(&result, &*buffer.begin() + offset, sizeof(result)); + offset += sizeof(result); + return result; +} + +double Serialize::readDouble(const std::vector& buffer, int& offset) { + double result; + std::memcpy(&result, &*buffer.begin() + offset, sizeof(result)); + offset += sizeof(result); + return result; +} + +bool Serialize::store(Task* task, std::vector& buffer, v8::Handle value) { + return storeInternal(task, buffer, value, 0); +} + +bool Serialize::storeInternal(Task* task, std::vector& buffer, v8::Handle value, int depth) { + if (value.IsEmpty()) { + return false; + } else if (value->IsUndefined()) { + writeInt32(buffer, kUndefined); + } else if (value->IsNull()) { + writeInt32(buffer, kNull); + } else if (value->IsBoolean()) { + writeInt32(buffer, kBoolean); + writeInt8(buffer, value->IsTrue() ? 1 : 0); + } else if (value->IsInt32()) { + writeInt32(buffer, kInt32); + writeInt32(buffer, value->Int32Value()); + } else if (value->IsUint32()) { + writeInt32(buffer, kUint32); + writeInt32(buffer, value->Uint32Value()); + } else if (value->IsNumber()) { + writeInt32(buffer, kNumber); + writeDouble(buffer, value->NumberValue()); + } else if (value->IsString()) { + writeInt32(buffer, kString); + v8::String::Utf8Value utf8(value->ToString()); + writeInt32(buffer, utf8.length()); + buffer.insert(buffer.end(), *utf8, *utf8 + utf8.length()); + } else if (value->IsArray()) { + writeInt32(buffer, kArray); + v8::Handle array = v8::Handle::Cast(value); + writeInt32(buffer, array->Length()); + for (size_t i = 0; i < array->Length(); ++i) { + storeInternal(task, buffer, array->Get(i), depth + 1); + } + } else if (value->IsFunction()) { + writeInt32(buffer, kFunction); + exportid_t exportId = task->exportFunction(v8::Handle::Cast(value)); + writeInt32(buffer, exportId); + } else if (value->IsNativeError()) { + storeInternal(task, buffer, storeMessage(task, v8::Exception::CreateMessage(value)), depth); + } else if (value->IsObject()) { + writeInt32(buffer, kObject); + v8::Handle object = value->ToObject(); + + // XXX: For some reason IsNativeError isn't working reliably. Catch an + // object that still looks like an error object and treat it as such. 
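+		// Heuristic: an object that reports no own property names but still yields a value for "stackTrace" is treated as error-like and re-serialized through storeMessage so its message and stack survive the round trip.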
+ if (object->GetOwnPropertyNames()->Length() == 0 + && !object->Get(v8::String::NewFromUtf8(task->getIsolate(), "stackTrace")).IsEmpty()) { + object = v8::Handle::Cast(storeMessage(task, v8::Exception::CreateMessage(value))); + } + + v8::Handle keys = object->GetOwnPropertyNames(); + writeInt32(buffer, keys->Length()); + for (size_t i = 0; i < keys->Length(); ++i) { + v8::Handle key = keys->Get(i); + storeInternal(task, buffer, key, depth + 1); + storeInternal(task, buffer, object->Get(key), depth + 1); + } + } else { + writeInt32(buffer, kString); + v8::String::Utf8Value utf8(value->ToString()); + writeInt32(buffer, utf8.length()); + buffer.insert(buffer.end(), *utf8, *utf8 + utf8.length()); + } + + return true; +} + +v8::Handle Serialize::store(Task* task, v8::TryCatch& tryCatch) { + return storeMessage(task, tryCatch.Message()); +} + +v8::Handle Serialize::storeMessage(Task* task, v8::Handle message) { + v8::Handle error = v8::Object::New(task->getIsolate()); + error->Set(v8::String::NewFromUtf8(task->getIsolate(), "message"), message->Get()); + error->Set(v8::String::NewFromUtf8(task->getIsolate(), "fileName"), message->GetScriptResourceName()); + error->Set(v8::String::NewFromUtf8(task->getIsolate(), "lineNumber"), v8::Integer::New(task->getIsolate(), message->GetLineNumber())); + error->Set(v8::String::NewFromUtf8(task->getIsolate(), "sourceLine"), message->GetSourceLine()); + if (!message->GetStackTrace().IsEmpty()) { + error->Set(v8::String::NewFromUtf8(task->getIsolate(), "stackTrace"), message->GetStackTrace()->AsArray()); + } + return error; +} + +v8::Handle Serialize::load(Task* task, TaskStub* from, const std::vector& buffer) { + int offset = 0; + return loadInternal(task, from, buffer, offset, 0); +} + +v8::Handle Serialize::loadInternal(Task* task, TaskStub* from, const std::vector& buffer, int& offset, int depth) { + if (static_cast(offset) >= buffer.size()) { + return v8::Undefined(task->getIsolate()); + } else { + int32_t type = readInt32(buffer, offset); + v8::Handle result; + + switch (type) { + case kUndefined: + result = v8::Undefined(task->getIsolate()); + break; + case kNull: + result = v8::Null(task->getIsolate()); + break; + case kBoolean: + result = v8::Boolean::New(task->getIsolate(), readInt8(buffer, offset) != 0); + break; + case kInt32: + result = v8::Int32::New(task->getIsolate(), readInt32(buffer, offset)); + break; + case kUint32: + result = v8::Uint32::New(task->getIsolate(), readUint32(buffer, offset)); + break; + case kNumber: + result = v8::Number::New(task->getIsolate(), readDouble(buffer, offset)); + break; + case kString: + { + int32_t length = readInt32(buffer, offset); + result = v8::String::NewFromUtf8(task->getIsolate(), &*buffer.begin() + offset, v8::String::kNormalString, length); + offset += length; + } + break; + case kArray: + { + int32_t length = readInt32(buffer, offset); + v8::Handle array = v8::Array::New(task->getIsolate()); + for (int i = 0; i < length; ++i) { + v8::Handle value = loadInternal(task, from, buffer, offset, depth + 1); + array->Set(i, value); + } + result = array; + } + break; + case kFunction: + { + exportid_t exportId = readInt32(buffer, offset); + result = task->addImport(from->getId(), exportId); + } + break; + case kObject: + { + int32_t length = readInt32(buffer, offset); + v8::Handle object = v8::Object::New(task->getIsolate()); + for (int i = 0; i < length; ++i) { + v8::Handle key = loadInternal(task, from, buffer, offset, depth + 1); + v8::Handle value = loadInternal(task, from, buffer, offset, depth + 1); + 
object->Set(key, value); + } + result = object; + } + break; + } + return result; + } +} diff --git a/src/Serialize.h b/src/Serialize.h new file mode 100644 index 00000000..9bd82d34 --- /dev/null +++ b/src/Serialize.h @@ -0,0 +1,47 @@ +#ifndef INCLUDED_Serialize +#define INCLUDED_Serialize + +#include +#include + +class Task; +class TaskStub; + +class Serialize { +public: + static bool store(Task* task, std::vector& buffer, v8::Handle value); + static v8::Handle load(Task* task, TaskStub* from, const std::vector& buffer); + + static v8::Handle store(Task* task, v8::TryCatch& tryCatch); + static v8::Handle storeMessage(Task* task, v8::Handle message); + +private: + static bool storeInternal(Task* task, std::vector& buffer, v8::Handle value, int depth); + static v8::Handle loadInternal(Task* task, TaskStub* from, const std::vector& buffer, int& offse, int deptht); + + static void writeInt8(std::vector& buffer, int8_t value); + static void writeInt32(std::vector& buffer, int32_t value); + static void writeUint32(std::vector& buffer, uint32_t value); + static void writeDouble(std::vector& buffer, double value); + + static int8_t readInt8(const std::vector& buffer, int& offset); + static int32_t readInt32(const std::vector& buffer, int& offset); + static uint32_t readUint32(const std::vector& buffer, int& offset); + static double readDouble(const std::vector& buffer, int& offset); + + enum Types { + kUndefined, + kNull, + kBoolean, + kInt32, + kUint32, + kNumber, + kString, + kArray, + kObject, + kFunction, + kError, + }; +}; + +#endif diff --git a/src/Socket.cpp b/src/Socket.cpp new file mode 100644 index 00000000..a6cde6f8 --- /dev/null +++ b/src/Socket.cpp @@ -0,0 +1,653 @@ +#include "Socket.h" + +#include "Task.h" +#include "TaskTryCatch.h" +#include "Tls.h" +#include "TlsContextWrapper.h" + +#include +#include +#include + +int Socket::_count = 0; +int Socket::_openCount = 0; +TlsContext* Socket::_defaultTlsContext = 0; + +struct SocketResolveData { + uv_getaddrinfo_t resolver; + Socket* socket; + promiseid_t promise; +}; + +Socket::Socket(Task* task) { + v8::HandleScope scope(task->getIsolate()); + ++_count; + + v8::Handle data = v8::External::New(task->getIsolate(), this); + + v8::Local socketTemplate = v8::ObjectTemplate::New(task->getIsolate()); + socketTemplate->SetInternalFieldCount(1); + socketTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "bind"), v8::FunctionTemplate::New(task->getIsolate(), bind, data)); + socketTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "connect"), v8::FunctionTemplate::New(task->getIsolate(), connect, data)); + socketTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "listen"), v8::FunctionTemplate::New(task->getIsolate(), listen, data)); + socketTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "accept"), v8::FunctionTemplate::New(task->getIsolate(), accept, data)); + socketTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "startTls"), v8::FunctionTemplate::New(task->getIsolate(), startTls, data)); + socketTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "stopTls"), v8::FunctionTemplate::New(task->getIsolate(), stopTls, data)); + socketTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "shutdown"), v8::FunctionTemplate::New(task->getIsolate(), shutdown, data)); + socketTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "close"), v8::FunctionTemplate::New(task->getIsolate(), close, data)); + socketTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "read"), 
v8::FunctionTemplate::New(task->getIsolate(), read, data)); + socketTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "onError"), v8::FunctionTemplate::New(task->getIsolate(), onError, data)); + socketTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "write"), v8::FunctionTemplate::New(task->getIsolate(), write, data)); + socketTemplate->SetAccessor(v8::String::NewFromUtf8(task->getIsolate(), "peerName"), getPeerName, 0, data); + socketTemplate->SetAccessor(v8::String::NewFromUtf8(task->getIsolate(), "peerCertificate"), getPeerCertificate, 0, data); + socketTemplate->SetAccessor(v8::String::NewFromUtf8(task->getIsolate(), "isConnected"), isConnected, 0, data); + + v8::Local socketObject = socketTemplate->NewInstance(); + socketObject->SetInternalField(0, v8::External::New(task->getIsolate(), this)); + _object = v8::Persistent >(task->getIsolate(), socketObject); + + uv_tcp_init(task->getLoop(), &_socket); + ++_openCount; + _socket.data = this; + _task = task; +} + +Socket::~Socket() { + if (_tls) { + delete _tls; + _tls = 0; + } + --_count; +} + +void Socket::close() { + if (!uv_is_closing(reinterpret_cast(&_socket))) { + if (!_onRead.IsEmpty()) { + _onRead.Reset(); + } + uv_close(reinterpret_cast(&_socket), onClose); + } +} + +void Socket::reportError(const char* error) { + v8::Handle exception = v8::Exception::Error(v8::String::NewFromUtf8(_task->getIsolate(), error)); + if (!_onError.IsEmpty()) { + v8::Handle callback = v8::Local::New(_task->getIsolate(), _onError); + callback->Call(callback, 1, &exception); + } else { + _task->getIsolate()->ThrowException(exception); + } +} + +void Socket::reportTlsErrors() { + char buffer[4096]; + while (_tls && _tls->getError(buffer, sizeof(buffer))) { + reportError(buffer); + } +} + +void Socket::startTls(const v8::FunctionCallbackInfo& args) { + if (Socket* socket = Socket::get(args.Data())) { + if (!socket->_tls) { + TlsContext* context = 0; + + if (args.Length() > 0 && !args[0].IsEmpty() && !args[0]->IsUndefined()) { + if (TlsContextWrapper* wrapper = TlsContextWrapper::get(args[0])) { + context = wrapper->getContext(); + } + } else { + if (!_defaultTlsContext) { + _defaultTlsContext = TlsContext::create(); + } + context = _defaultTlsContext; + } + + if (context) { + socket->_tls = context->createSession(); + } + + if (socket->_tls) { + socket->_tls->setHostname(socket->_peerName.c_str()); + if (socket->_direction == kAccept) { + socket->_tls->startAccept(); + } else if (socket->_direction == kConnect) { + socket->_tls->startConnect(); + } + socket->_startTlsPromise = socket->_task->allocatePromise(); + socket->processOutgoingTls(); + args.GetReturnValue().Set(socket->_task->getPromise(socket->_startTlsPromise)); + } else { + args.GetIsolate()->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8(args.GetIsolate(), "Failed to get TLS context"))); + } + } else { + args.GetIsolate()->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8(args.GetIsolate(), "startTls with TLS already started"))); + } + } +} + +void Socket::stopTls(const v8::FunctionCallbackInfo& args) { + if (Socket* socket = Socket::get(args.Data())) { + if (socket->_tls) { + socket->processOutgoingTls(); + delete socket->_tls; + socket->_tls = 0; + } else { + args.GetIsolate()->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8(args.GetIsolate(), "stopTls with TLS already stopped"))); + } + } +} + +bool Socket::processSomeOutgoingTls(promiseid_t promise, uv_write_cb callback) { + char buffer[8192]; + int result = _tls->readEncrypted(buffer, 
sizeof(buffer)); + if (result > 0) { + char* rawBuffer = new char[sizeof(uv_write_t) + result]; + uv_write_t* request = reinterpret_cast(rawBuffer); + std::memset(request, 0, sizeof(*request)); + request->data = reinterpret_cast(promise); + rawBuffer += sizeof(uv_write_t); + std::memcpy(rawBuffer, buffer, result); + + uv_buf_t writeBuffer; + writeBuffer.base = rawBuffer; + writeBuffer.len = result; + + int writeResult = uv_write(request, reinterpret_cast(&_socket), &writeBuffer, 1, callback); + if (writeResult != 0) { + std::string error = "uv_write: " + std::string(uv_strerror(writeResult)); + reportError(error.c_str()); + } + } else { + reportTlsErrors(); + } + return result > 0; +} + +void Socket::processOutgoingTls() { + while (processSomeOutgoingTls(-1, onWrite)) {} +} + +void Socket::bind(const v8::FunctionCallbackInfo& args) { + if (Socket* socket = Socket::get(args.Data())) { + v8::String::Utf8Value node(args[0]->ToString()); + v8::String::Utf8Value port(args[1]->ToString()); + + SocketResolveData* data = new SocketResolveData(); + std::memset(data, 0, sizeof(*data)); + struct addrinfo hints; + hints.ai_family = PF_INET; + hints.ai_socktype = SOCK_STREAM; + hints.ai_protocol = IPPROTO_TCP; + hints.ai_flags = 0; + data->resolver.data = data; + data->socket = socket; + data->promise = socket->_task->allocatePromise(); + + int result = uv_getaddrinfo(socket->_task->getLoop(), &data->resolver, onResolvedForBind, *node, *port, &hints); + if (result != 0) { + std::string error = "uv_getaddrinfo: " + std::string(uv_strerror(result)); + socket->_task->rejectPromise(data->promise, v8::Exception::Error(v8::String::NewFromUtf8(args.GetIsolate(), error.c_str()))); + delete data; + } + + args.GetReturnValue().Set(socket->_task->getPromise(data->promise)); + } +} + +void Socket::onResolvedForBind(uv_getaddrinfo_t* resolver, int status, struct addrinfo* result) { + SocketResolveData* data = reinterpret_cast(resolver->data); + if (status != 0) { + std::string error = "uv_getaddrinfo: " + std::string(uv_strerror(status)); + data->socket->_task->rejectPromise(data->promise, v8::Exception::Error(v8::String::NewFromUtf8(data->socket->_task->getIsolate(), error.c_str()))); + } else { + int bindResult = uv_tcp_bind(&data->socket->_socket, result->ai_addr, 0); + if (bindResult != 0) { + std::string error = "uv_tcp_bind: " + std::string(uv_strerror(bindResult)); + data->socket->_task->rejectPromise(data->promise, v8::Exception::Error(v8::String::NewFromUtf8(data->socket->_task->getIsolate(), error.c_str()))); + } else { + data->socket->_task->resolvePromise(data->promise, v8::Undefined(data->socket->_task->getIsolate())); + } + } + delete data; +} + +void Socket::connect(const v8::FunctionCallbackInfo& args) { + if (Socket* socket = Socket::get(args.Data())) { + socket->_direction = kConnect; + v8::String::Utf8Value node(args[0]->ToString()); + v8::String::Utf8Value port(args[1]->ToString()); + + socket->_peerName = *node; + + promiseid_t promise = socket->_task->allocatePromise(); + + SocketResolveData* data = new SocketResolveData(); + std::memset(data, 0, sizeof(*data)); + struct addrinfo hints; + hints.ai_family = PF_INET; + hints.ai_socktype = SOCK_STREAM; + hints.ai_protocol = IPPROTO_TCP; + hints.ai_flags = 0; + data->resolver.data = data; + data->socket = socket; + data->promise = promise; + + int result = uv_getaddrinfo(socket->_task->getLoop(), &data->resolver, onResolvedForConnect, *node, *port, &hints); + if (result != 0) { + std::string error = "uv_getaddrinfo: " + 
std::string(uv_strerror(result)); + socket->_task->rejectPromise(promise, v8::Exception::Error(v8::String::NewFromUtf8(args.GetIsolate(), error.c_str()))); + delete data; + } + + args.GetReturnValue().Set(socket->_task->getPromise(promise)); + } +} + +void Socket::onResolvedForConnect(uv_getaddrinfo_t* resolver, int status, struct addrinfo* result) { + SocketResolveData* data = reinterpret_cast(resolver->data); + if (status != 0) { + std::string error = "uv_getaddrinfo: " + std::string(uv_strerror(status)); + data->socket->_task->rejectPromise(data->promise, v8::Exception::Error(v8::String::NewFromUtf8(data->socket->_task->getIsolate(), error.c_str()))); + } else { + uv_connect_t* request = new uv_connect_t(); + std::memset(request, 0, sizeof(*request)); + request->data = reinterpret_cast(data->promise); + int connectResult = uv_tcp_connect(request, &data->socket->_socket, result->ai_addr, onConnect); + if (connectResult != 0) { + std::string error("uv_tcp_connect: " + std::string(uv_strerror(connectResult))); + data->socket->_task->rejectPromise(data->promise, v8::Exception::Error(v8::String::NewFromUtf8(data->socket->_task->getIsolate(), error.c_str()))); + } + } + delete data; +} + + +void Socket::onConnect(uv_connect_t* request, int status) { + promiseid_t promise = reinterpret_cast(request->data); + if (promise != -1) { + Socket* socket = reinterpret_cast(request->handle->data); + if (status == 0) { + socket->_connected = true; + socket->_task->resolvePromise(promise, v8::Integer::New(socket->_task->getIsolate(), status)); + } else { + std::string error("uv_tcp_connect: " + std::string(uv_strerror(status))); + socket->_task->rejectPromise(promise, v8::String::NewFromUtf8(socket->_task->getIsolate(), error.c_str())); + } + } +} + +void Socket::listen(const v8::FunctionCallbackInfo& args) { + if (Socket* socket = Socket::get(args.Data())) { + int backlog = args[0]->ToInteger()->Value(); + if (socket->_onConnect.IsEmpty()) { + v8::Persistent > callback(args.GetIsolate(), args[1].As()); + socket->_onConnect = callback; + int result = uv_listen(reinterpret_cast(&socket->_socket), backlog, onNewConnection); + if (result != 0) { + std::string error = "uv_listen: " + std::string(uv_strerror(result)); + args.GetIsolate()->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8(args.GetIsolate(), error.c_str(), v8::String::kNormalString, error.size()))); + } + args.GetReturnValue().Set(v8::Integer::New(args.GetIsolate(), result)); + } else { + args.GetIsolate()->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8(args.GetIsolate(), "listen: Already listening."))); + } + } +} + +void Socket::onNewConnection(uv_stream_t* server, int status) { + if (Socket* socket = reinterpret_cast(server->data)) { + v8::HandleScope handleScope(socket->_task->getIsolate()); + TaskTryCatch tryCatch(socket->_task); + if (!socket->_onConnect.IsEmpty()) { + v8::Handle callback = v8::Local::New(socket->_task->getIsolate(), socket->_onConnect); + callback->Call(callback, 0, 0); + } + } +} + +void Socket::accept(const v8::FunctionCallbackInfo& args) { + if (Socket* socket = Socket::get(args.Data())) { + v8::HandleScope handleScope(args.GetIsolate()); + Socket* client = new Socket(socket->_task); + client->_direction = kAccept; + promiseid_t promise = socket->_task->allocatePromise(); + v8::Handle promiseObject = socket->_task->getPromise(promise); + v8::Handle result = v8::Local::New(args.GetIsolate(), client->_object); + int status = uv_accept(reinterpret_cast(&socket->_socket), 
reinterpret_cast(&client->_socket)); + if (status == 0) { + client->_connected = true; + socket->_task->resolvePromise(promise, result); + } else { + std::string error = "uv_accept: " + std::string(uv_strerror(status)); + socket->_task->rejectPromise(promise, v8::String::NewFromUtf8(args.GetIsolate(), error.c_str(), v8::String::kNormalString, error.size())); + } + args.GetReturnValue().Set(promiseObject); + client->release(); + } +} + +void Socket::close(const v8::FunctionCallbackInfo& args) { + if (Socket* socket = Socket::get(args.Data())) { + if (socket->_closePromise == -1) { + socket->_closePromise = socket->_task->allocatePromise(); + args.GetReturnValue().Set(socket->_task->getPromise(socket->_closePromise)); + socket->close(); + } + } +} + +void Socket::shutdown(const v8::FunctionCallbackInfo& args) { + if (Socket* socket = Socket::get(args.Data())) { + if (socket->_tls) { + promiseid_t promise = socket->_task->allocatePromise(); + socket->processTlsShutdown(promise); + args.GetReturnValue().Set(socket->_task->getPromise(promise)); + } else { + promiseid_t promise = socket->_task->allocatePromise(); + socket->shutdownInternal(promise); + args.GetReturnValue().Set(socket->_task->getPromise(promise)); + } + } +} + +void Socket::shutdownInternal(promiseid_t promise) { + uv_shutdown_t* request = new uv_shutdown_t(); + std::memset(request, 0, sizeof(*request)); + request->data = reinterpret_cast(promise); + int result = uv_shutdown(request, reinterpret_cast(&_socket), onShutdown); + if (result != 0) { + std::string error = "uv_shutdown: " + std::string(uv_strerror(result)); + _task->rejectPromise(promise, v8::Exception::Error(v8::String::NewFromUtf8(_task->getIsolate(), error.c_str()))); + delete request; + } +} + +void Socket::processTlsShutdown(promiseid_t promise) { + _tls->shutdown(); + if (!processSomeOutgoingTls(promise, onTlsShutdown)) { + shutdownInternal(promise); + } +} + +void Socket::onTlsShutdown(uv_write_t* request, int status) { + if (Socket* socket = reinterpret_cast(request->handle->data)) { + promiseid_t promise = reinterpret_cast(request->data); + socket->processTlsShutdown(promise); + } +} + +void Socket::onError(const v8::FunctionCallbackInfo& args) { + if (Socket* socket = Socket::get(args.Data())) { + v8::Persistent > callback(args.GetIsolate(), args[0].As()); + socket->_onError = callback; + } +} + +void Socket::read(const v8::FunctionCallbackInfo& args) { + if (Socket* socket = Socket::get(args.Data())) { + v8::Persistent > callback(args.GetIsolate(), args[0].As()); + socket->_onRead = callback; + int result = uv_read_start(reinterpret_cast(&socket->_socket), allocateBuffer, onRead); + promiseid_t promise = socket->_task->allocatePromise(); + if (result != 0) { + std::string error = "uv_read_start: " + std::string(uv_strerror(result)); + socket->_task->rejectPromise(promise, v8::String::NewFromUtf8(socket->_task->getIsolate(), error.c_str(), v8::String::kNormalString, error.size())); + } else { + socket->_task->resolvePromise(promise, v8::Undefined(socket->_task->getIsolate())); + } + } +} + +void Socket::allocateBuffer(uv_handle_t* handle, size_t suggestedSize, uv_buf_t* buf) { + *buf = uv_buf_init(new char[suggestedSize], suggestedSize); +} + +void Socket::onRead(uv_stream_t* stream, ssize_t readSize, const uv_buf_t* buffer) { + if (Socket* socket = reinterpret_cast(stream->data)) { + v8::HandleScope handleScope(socket->_task->getIsolate()); + TaskTryCatch tryCatch(socket->_task); + v8::Handle data; + + if (readSize <= 0) { + socket->_connected = false; + 
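+			// A zero or negative read means EOF or a read error: the read callback is invoked once with undefined, then the socket is closed.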
v8::Local callback = v8::Local::New(socket->_task->getIsolate(), socket->_onRead); + if (!callback.IsEmpty()) { + data = v8::Undefined(socket->_task->getIsolate()); + callback->Call(callback, 1, &data); + } + socket->close(); + } else { + if (socket->_tls) { + socket->reportTlsErrors(); + socket->_tls->writeEncrypted(buffer->base, readSize); + if (socket->_startTlsPromise != -1) { + TlsSession::HandshakeResult result = socket->_tls->handshake(); + if (result == TlsSession::kDone) { + promiseid_t promise = socket->_startTlsPromise; + socket->_startTlsPromise = -1; + socket->_task->resolvePromise(promise, v8::Undefined(socket->_task->getIsolate())); + } else if (result == TlsSession::kFailed) { + promiseid_t promise = socket->_startTlsPromise; + socket->_startTlsPromise = -1; + char buffer[8192]; + if (socket->_tls->getError(buffer, sizeof(buffer))) { + socket->_task->rejectPromise(promise, v8::String::NewFromUtf8(socket->_task->getIsolate(), buffer)); + } else { + socket->_task->rejectPromise(promise, v8::Undefined(socket->_task->getIsolate())); + } + } + } + + while (true) { + char plain[8192]; + int result = socket->_tls->readPlain(plain, sizeof(plain)); + if (result > 0) { + v8::Local callback = v8::Local::New(socket->_task->getIsolate(), socket->_onRead); + if (!callback.IsEmpty()) { + data = v8::String::NewFromOneByte(socket->_task->getIsolate(), reinterpret_cast(plain), v8::String::kNormalString, result); + callback->Call(callback, 1, &data); + } + } else if (result == TlsSession::kReadFailed) { + socket->reportTlsErrors(); + socket->close(); + break; + } else if (result == TlsSession::kReadZero) { + v8::Local callback = v8::Local::New(socket->_task->getIsolate(), socket->_onRead); + if (!callback.IsEmpty()) { + data = v8::Undefined(socket->_task->getIsolate()); + callback->Call(callback, 1, &data); + } + break; + } else { + break; + } + } + if (socket->_tls) { + socket->processOutgoingTls(); + } + } else { + v8::Local callback = v8::Local::New(socket->_task->getIsolate(), socket->_onRead); + if (!callback.IsEmpty()) { + data = v8::String::NewFromOneByte(socket->_task->getIsolate(), reinterpret_cast(buffer->base), v8::String::kNormalString, readSize); + callback->Call(callback, 1, &data); + } + } + } + } + delete[] buffer->base; +} + +void Socket::write(const v8::FunctionCallbackInfo& args) { + if (Socket* socket = Socket::get(args.Data())) { + promiseid_t promise = socket->_task->allocatePromise(); + args.GetReturnValue().Set(socket->_task->getPromise(promise)); + v8::Handle value = args[0].As(); + if (!value.IsEmpty() && value->IsString()) { + if (socket->_tls) { + socket->reportTlsErrors(); + int result; + int length; + if (value->ContainsOnlyOneByte()) { + length = value->Length(); + std::vector bytes(length); + value->WriteOneByte(bytes.data(), 0, bytes.size(), v8::String::NO_NULL_TERMINATION); + result = socket->_tls->writePlain(reinterpret_cast(bytes.data()), bytes.size()); + } else { + v8::String::Utf8Value utf8(value); + length = utf8.length(); + result = socket->_tls->writePlain(*utf8, utf8.length()); + } + char buffer[8192]; + if (result <= 0 && socket->_tls->getError(buffer, sizeof(buffer))) { + socket->_task->rejectPromise(promise, v8::String::NewFromUtf8(args.GetIsolate(), buffer)); + } else if (result < length) { + socket->_task->rejectPromise(promise, v8::Integer::New(socket->_task->getIsolate(), result)); + } else { + socket->_task->resolvePromise(promise, v8::Integer::New(socket->_task->getIsolate(), result)); + } + socket->processOutgoingTls(); + } else { + 
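+				// Plain (non-TLS) write: the string bytes are copied into one allocation that also holds the uv_write_t header; onWrite later frees it and settles the promise.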
v8::String::Utf8Value utf8(value); + int length; + char* rawBuffer = 0; + if (value->ContainsOnlyOneByte()) { + length = value->Length(); + rawBuffer = new char[sizeof(uv_write_t) + length]; + value->WriteOneByte(reinterpret_cast(rawBuffer) + sizeof(uv_write_t), 0, length, v8::String::NO_NULL_TERMINATION); + } else { + v8::String::Utf8Value utf8(value); + length = utf8.length(); + rawBuffer = new char[sizeof(uv_write_t) + length]; + std::memcpy(rawBuffer + sizeof(uv_write_t), *utf8, length); + } + uv_write_t* request = reinterpret_cast(rawBuffer); + + uv_buf_t buffer; + buffer.base = rawBuffer + sizeof(uv_write_t); + buffer.len = length; + + request->data = reinterpret_cast(promise); + int result = uv_write(request, reinterpret_cast(&socket->_socket), &buffer, 1, onWrite); + if (result != 0) { + std::string error = "uv_write: " + std::string(uv_strerror(result)); + socket->_task->rejectPromise(promise, v8::String::NewFromUtf8(args.GetIsolate(), error.c_str(), v8::String::kNormalString, error.size())); + } + } + } else { + socket->_task->rejectPromise(promise, v8::Integer::New(args.GetIsolate(), -2)); + } + } +} + +void Socket::onWrite(uv_write_t* request, int status) { + if (Socket* socket = reinterpret_cast(request->handle->data)) { + v8::HandleScope handleScope(socket->_task->getIsolate()); + promiseid_t promise = reinterpret_cast(request->data); + if (promise != -1) { + if (status == 0) { + socket->_task->resolvePromise(promise, v8::Integer::New(socket->_task->getIsolate(), status)); + } else { + std::string error = "uv_write: " + std::string(uv_strerror(status)); + socket->_task->rejectPromise(promise, v8::String::NewFromUtf8(socket->_task->getIsolate(), error.c_str(), v8::String::kNormalString, error.size())); + } + } + } + delete[] reinterpret_cast(request); +} + +void Socket::onClose(uv_handle_t* handle) { + --_openCount; + if (Socket* socket = reinterpret_cast(handle->data)) { + if (socket->_closePromise != -1) { + v8::HandleScope scope(socket->_task->getIsolate()); + promiseid_t promise = socket->_closePromise; + socket->_closePromise = -1; + socket->_connected = false; + socket->_task->resolvePromise(promise, v8::Integer::New(socket->_task->getIsolate(), 0)); + } + if (socket->_object.IsEmpty()) { + delete socket; + } + } +} + +void Socket::onShutdown(uv_shutdown_t* request, int status) { + if (Socket* socket = reinterpret_cast(request->handle->data)) { + promiseid_t promise = reinterpret_cast(request->data); + if (status == 0) { + socket->_task->resolvePromise(promise, v8::Undefined(socket->_task->getIsolate())); + } else { + std::string error = "uv_shutdown: " + std::string(uv_strerror(status)); + socket->_task->rejectPromise(promise, v8::Exception::Error(v8::String::NewFromUtf8(socket->_task->getIsolate(), error.c_str()))); + } + } + delete request; +} + +void Socket::getPeerName(v8::Local property, const v8::PropertyCallbackInfo& info) { + if (Socket* socket = Socket::get(info.Data())) { + struct sockaddr_in6 addr; + int nameLength = sizeof(addr); + if (uv_tcp_getpeername(&socket->_socket, reinterpret_cast(&addr), &nameLength) == 0) { + char name[1024]; + if (static_cast(nameLength) > sizeof(struct sockaddr_in)) { + if (uv_ip6_name(&addr, name, sizeof(name)) == 0) { + info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), name)); + } + } else { + if (uv_ip4_name(reinterpret_cast(&addr), name, sizeof(name)) == 0) { + info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), name)); + } + } + } + } +} + +void Socket::getPeerCertificate(v8::Local 
property, const v8::PropertyCallbackInfo& info) { + if (Socket* socket = Socket::get(info.Data())) { + if (socket->_tls) { + std::vector buffer(128 * 1024); + int result = socket->_tls->getPeerCertificate(buffer.data(), buffer.size()); + if (result > 0) { + info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), buffer.data(), v8::String::kNormalString, result)); + } + } + } +} + +void Socket::isConnected(v8::Local property, const v8::PropertyCallbackInfo& info) { + if (Socket* socket = Socket::get(info.Data())) { + info.GetReturnValue().Set(v8::Boolean::New(socket->_task->getIsolate(), socket->_connected)); + } +} + +void Socket::create(const v8::FunctionCallbackInfo& args) { + v8::HandleScope handleScope(args.GetIsolate()); + if (Socket* socket = new Socket(Task::get(args.GetIsolate()))) { + v8::Handle result = v8::Local::New(args.GetIsolate(), socket->_object); + args.GetReturnValue().Set(result); + socket->release(); + } +} + +Socket* Socket::get(v8::Handle socketObject) { + return reinterpret_cast(v8::Handle::Cast(socketObject)->Value()); +} + +void Socket::ref() { + if (++_refCount == 1) { + _object.ClearWeak(); + } +} + +void Socket::release() { + assert(_refCount >= 1); + if (--_refCount == 0) { + _object.SetWeak(this, onRelease); + } +} + +void Socket::onRelease(const v8::WeakCallbackData& data) { + data.GetParameter()->_object.Reset(); + data.GetParameter()->close(); +} diff --git a/src/Socket.h b/src/Socket.h new file mode 100644 index 00000000..b2fa0b25 --- /dev/null +++ b/src/Socket.h @@ -0,0 +1,90 @@ +#ifndef INCLUDED_Socket +#define INCLUDED_Socket + +#include +#include +#include + +typedef int promiseid_t; +class Task; +class TlsContext; +class TlsSession; + +class Socket { +public: + static void create(const v8::FunctionCallbackInfo& args); + + void close(); + + static int getCount() { return _count; } + static int getOpenCount() { return _openCount; } + +private: + Socket(Task* task); + ~Socket(); + + Task* _task; + uv_tcp_t _socket; + TlsSession* _tls = 0; + promiseid_t _startTlsPromise = -1; + promiseid_t _closePromise = -1; + int _refCount = 1; + bool _connected = false; + std::string _peerName; + + enum Direction { kUndetermined, kAccept, kConnect }; + Direction _direction = kUndetermined; + + static int _count; + static int _openCount; + + static TlsContext* _defaultTlsContext; + + v8::Persistent > _object; + + v8::Persistent > _onConnect; + v8::Persistent > _onRead; + v8::Persistent > _onError; + + static void startTls(const v8::FunctionCallbackInfo& args); + static void stopTls(const v8::FunctionCallbackInfo& args); + static void bind(const v8::FunctionCallbackInfo& args); + static void connect(const v8::FunctionCallbackInfo& args); + static void listen(const v8::FunctionCallbackInfo& args); + static void accept(const v8::FunctionCallbackInfo& args); + static void close(const v8::FunctionCallbackInfo& args); + static void shutdown(const v8::FunctionCallbackInfo& args); + static void read(const v8::FunctionCallbackInfo& args); + static void onError(const v8::FunctionCallbackInfo& args); + static void write(const v8::FunctionCallbackInfo& args); + static void getPeerName(v8::Local property, const v8::PropertyCallbackInfo& info); + static void getPeerCertificate(v8::Local property, const v8::PropertyCallbackInfo& info); + static void isConnected(v8::Local property, const v8::PropertyCallbackInfo& info); + + static Socket* get(v8::Handle socketObject); + static void onClose(uv_handle_t* handle); + static void onShutdown(uv_shutdown_t* request, int status); 
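+	// libuv completion callbacks for DNS resolution, connect, incoming connections, and stream I/O follow, plus the V8 weak-handle callback (onRelease) that closes the socket once the script no longer references it.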
+ static void onResolvedForBind(uv_getaddrinfo_t* resolver, int status, struct addrinfo* result); + static void onResolvedForConnect(uv_getaddrinfo_t* resolver, int status, struct addrinfo* result); + static void onConnect(uv_connect_t* request, int status); + static void onNewConnection(uv_stream_t* server, int status); + + static void allocateBuffer(uv_handle_t* handle, size_t suggestedSize, uv_buf_t* buffer); + static void onRead(uv_stream_t* stream, ssize_t readSize, const uv_buf_t* buffer); + static void onWrite(uv_write_t* request, int status); + static void onRelease(const v8::WeakCallbackData& data); + + void processTlsShutdown(promiseid_t promise); + static void onTlsShutdown(uv_write_t* request, int status); + void shutdownInternal(promiseid_t promise); + + bool processSomeOutgoingTls(promiseid_t promise, uv_write_cb callback); + void processOutgoingTls(); + void reportTlsErrors(); + void reportError(const char* error); + + void ref(); + void release(); +}; + +#endif diff --git a/src/Task.cpp b/src/Task.cpp new file mode 100644 index 00000000..e39c3055 --- /dev/null +++ b/src/Task.cpp @@ -0,0 +1,749 @@ +#include "Task.h" + +#include "Database.h" +#include "File.h" +#include "Serialize.h" +#include "Socket.h" +#include "TaskStub.h" +#include "TaskTryCatch.h" +#include "TlsContextWrapper.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +static const int STDIN_FILENO = 0; +#else +#include +#endif + +extern v8::Platform* gPlatform; +int gNextTaskId = 1; + +int Task::_count; + +struct ExportRecord { + v8::Persistent > _persistent; + int _useCount; + + ExportRecord(v8::Isolate* isolate, v8::Handle function) + : _persistent(isolate, function), + _useCount(0) { + } + + void ref() { + ++_useCount; + } + + bool release() { + return --_useCount == 0; + } +}; + +struct ImportRecord { + v8::Persistent > _persistent; + exportid_t _export; + taskid_t _task; + Task* _owner; + int _useCount; + + ImportRecord(v8::Isolate* isolate, v8::Handle function, exportid_t exportId, taskid_t taskId, Task* owner) + : _persistent(isolate, function), + _export(exportId), + _task(taskId), + _owner(owner), + _useCount(0) { + _persistent.SetWeak(this, ImportRecord::onRelease); + } + + void ref() { + if (_useCount++ == 0) { + // Make a strong ref again until an in-flight function call is finished. + _persistent.ClearWeak(); + } + } + + void release() { + if (--_useCount == 0) { + // All in-flight calls are finished. Make weak. 
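+			// Once weak, onRelease fires when V8 collects the imported function; it then calls releaseExport on the owning Task for the remote export and removes this record from the owner's _imports list.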
+ _persistent.SetWeak(this, ImportRecord::onRelease); + } + } + + static void onRelease(const v8::WeakCallbackData& data) { + ImportRecord* import = data.GetParameter(); + import->_owner->releaseExport(import->_task, import->_export); + for (size_t i = 0; i < import->_owner->_imports.size(); ++i) { + if (import->_owner->_imports[i] == import) { + import->_owner->_imports.erase(import->_owner->_imports.begin() + i); + break; + } + } + import->_persistent.Reset(); + delete import; + } +}; + +Task::Task() { + _loop = uv_loop_new(); + ++_count; + v8::Isolate::CreateParams options; + options.array_buffer_allocator = &_allocator; + _isolate = v8::Isolate::New(options); + _isolate->SetData(0, this); + _isolate->SetCaptureStackTraceForUncaughtExceptions(true, 16); +} + +Task::~Task() { + { + v8::Isolate::Scope isolateScope(_isolate); + v8::HandleScope handleScope(_isolate); + _context.Reset(); + } + + _isolate->Dispose(); + _isolate = 0; + + uv_loop_delete(_loop); + --_count; +} + +v8::Handle Task::getContext() { + return v8::Local::New(_isolate, _context); +} + +void Task::run() { + { + v8::Isolate::Scope isolateScope(_isolate); + v8::HandleScope handleScope(_isolate); + v8::Context::Scope contextScope(v8::Local::New(_isolate, _context)); + uv_run(_loop, UV_RUN_DEFAULT); + } + _promises.clear(); + _exports.clear(); + _imports.clear(); +} + +v8::Handle Task::loadFile(v8::Isolate* isolate, const char* fileName) { + v8::Handle value; + std::ifstream file(fileName, std::ios_base::in | std::ios_base::binary | std::ios_base::ate); + std::streampos fileSize = file.tellg(); + if (fileSize >= 0) { + file.seekg(0, std::ios_base::beg); + char* buffer = new char[fileSize]; + file.read(buffer, fileSize); + std::string contents(buffer, buffer + fileSize); + value = v8::String::NewFromOneByte(isolate, reinterpret_cast(buffer), v8::String::kNormalString, fileSize); + delete[] buffer; + } + return value; +} + +void Task::activate() { + v8::Isolate::Scope isolateScope(_isolate); + v8::HandleScope handleScope(_isolate); + + v8::Handle global = v8::ObjectTemplate::New(); + + if (!_importObject.IsEmpty()) { + v8::Local imports(_importObject.Get(_isolate)); + v8::Handle keys = imports->GetOwnPropertyNames(); + for (size_t i = 0; i < keys->Length(); ++i) { + global->SetAccessor(keys->Get(i).As(), getImportProperty); + } + } + + global->Set(v8::String::NewFromUtf8(_isolate, "print"), v8::FunctionTemplate::New(_isolate, print)); + global->Set(v8::String::NewFromUtf8(_isolate, "setTimeout"), v8::FunctionTemplate::New(_isolate, setTimeout)); + global->Set(v8::String::NewFromUtf8(_isolate, "require"), v8::FunctionTemplate::New(_isolate, require)); + global->SetAccessor(v8::String::NewFromUtf8(_isolate, "parent"), parent); + global->Set(v8::String::NewFromUtf8(_isolate, "exit"), v8::FunctionTemplate::New(_isolate, exit)); + global->Set(v8::String::NewFromUtf8(_isolate, "utf8Length"), v8::FunctionTemplate::New(_isolate, utf8Length)); + global->SetAccessor(v8::String::NewFromUtf8(_isolate, "exports"), getExports, setExports); + global->SetAccessor(v8::String::NewFromUtf8(_isolate, "imports"), getImports); + global->SetAccessor(v8::String::NewFromUtf8(_isolate, "version"), version); + global->SetAccessor(v8::String::NewFromUtf8(_isolate, "statistics"), statistics); + if (_trusted) { + global->Set(v8::String::NewFromUtf8(_isolate, "Database"), v8::FunctionTemplate::New(_isolate, Database::create)); + global->Set(v8::String::NewFromUtf8(_isolate, "Socket"), v8::FunctionTemplate::New(_isolate, Socket::create)); + 
global->Set(v8::String::NewFromUtf8(_isolate, "Task"), v8::FunctionTemplate::New(_isolate, TaskStub::create)); + global->Set(v8::String::NewFromUtf8(_isolate, "TlsContext"), v8::FunctionTemplate::New(_isolate, TlsContextWrapper::create)); + File::configure(_isolate, global); + } + + v8::Local context = v8::Context::New(_isolate, 0, global); + _context = v8::Persistent >(_isolate, context); +} + +void Task::activate(const v8::FunctionCallbackInfo& args) { + Task* task = Task::get(args.GetIsolate()); + task->activate(); +} + +void Task::print(const v8::FunctionCallbackInfo& args) { + v8::HandleScope scope(args.GetIsolate()); + v8::Local context = args.GetIsolate()->GetCurrentContext(); + v8::Handle json = context->Global()->Get(v8::String::NewFromUtf8(args.GetIsolate(), "JSON"))->ToObject(); + v8::Handle stringify = v8::Handle::Cast(json->Get(v8::String::NewFromUtf8(args.GetIsolate(), "stringify"))); + Task* task = reinterpret_cast(args.GetIsolate()->GetData(0)); + TaskTryCatch tryCatch(task); + std::cout << "Task[" << task << ':' << task->_scriptName << "]>"; + for (int i = 0; i < args.Length(); i++) { + std::cout << ' '; + v8::Handle arg = args[i]; + if (arg->IsNativeError()) { + arg = Serialize::storeMessage(task, v8::Exception::CreateMessage(arg)); + } + v8::String::Utf8Value value(stringify->Call(json, 1, &arg)); + std::cout << (*value ? *value : "(null)"); + } + std::cout << '\n'; +} + +struct TimeoutData { + Task* _task; + v8::Persistent > _callback; +}; + +void Task::setTimeout(const v8::FunctionCallbackInfo& args) { + v8::HandleScope scope(args.GetIsolate()); + Task* task = reinterpret_cast(args.GetIsolate()->GetData(0)); + + TimeoutData* timeout = new TimeoutData(); + timeout->_task = task; + + v8::Persistent > function(args.GetIsolate(), v8::Handle::Cast(args[0])); + timeout->_callback = function; + + uv_timer_t* timer = new uv_timer_t(); + uv_timer_init(task->_loop, timer); + timer->data = timeout; + uv_timer_start(timer, timeoutCallback, static_cast(args[1].As()->Value()), 0); +} + +void Task::timeoutCallback(uv_timer_t* handle) { + TimeoutData* timeout = reinterpret_cast(handle->data); + TaskTryCatch tryCatch(timeout->_task); + v8::HandleScope scope(timeout->_task->_isolate); + v8::Handle function = v8::Local::New(timeout->_task->_isolate, timeout->_callback); + function->Call(v8::Undefined(timeout->_task->_isolate), 0, 0); + delete timeout; +} + +void Task::utf8Length(const v8::FunctionCallbackInfo& args) { + Task* task = reinterpret_cast(args.GetIsolate()->GetData(0)); + TaskTryCatch tryCatch(task); + v8::HandleScope scope(task->_isolate); + args.GetReturnValue().Set(v8::Integer::New(args.GetIsolate(), args[0].As()->Utf8Length())); +} + +void Task::exit(const v8::FunctionCallbackInfo& args) { + ::exit(args[0]->Int32Value()); +} + +void Task::kill() { + if (!_killed && _isolate) { + _killed = true; + v8::V8::TerminateExecution(_isolate); + } +} + +void Task::execute(const char* fileName) { + v8::Isolate::Scope isolateScope(_isolate); + v8::HandleScope handleScope(_isolate); + v8::Context::Scope contextScope(v8::Local::New(_isolate, _context)); + + v8::Handle name = v8::String::NewFromUtf8(_isolate, fileName); + + v8::Handle source = loadFile(_isolate, fileName); + std::cout << "Running script " << fileName << "\n"; + if (!_scriptName.size()) { + _scriptName = fileName; + } + if (!source.IsEmpty()) { + v8::Handle script = v8::Script::Compile(source, name); + if (!script.IsEmpty()) { + script->Run(); + std::cout << "Script " << fileName << " completed\n"; + } else { + std::cerr 
<< "Failed to compile: " << fileName << ".\n"; + } + } else { + std::string message; + message = "Failed to load file: "; + message += fileName; + _isolate->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8(_isolate, message.c_str()))); + } +} + +void Task::invokeExport(const v8::FunctionCallbackInfo& args) { + Task* sender = Task::get(args.GetIsolate()); + TaskTryCatch tryCatch(sender); + v8::Handle data = v8::Handle::Cast(args.Data()); + exportid_t exportId = data->Get(v8::String::NewFromUtf8(args.GetIsolate(), "export"))->Int32Value(); + taskid_t recipientId = data->Get(v8::String::NewFromUtf8(args.GetIsolate(), "task"))->Int32Value(); + + for (size_t i = 0; i < sender->_imports.size(); ++i) { + if (sender->_imports[i]->_task == recipientId && sender->_imports[i]->_export == exportId) { + sender->_imports[i]->ref(); + break; + } + } + + v8::Local array = v8::Array::New(args.GetIsolate(), args.Length() + 1); + array->Set(0, args.This()); + for (int i = 0; i < args.Length(); ++i) { + array->Set(i + 1, args[i]); + } + + if (TaskStub* recipient = sender->get(recipientId)) { + promiseid_t promise = sender->allocatePromise(); + sendPromiseExportMessage(sender, recipient, kInvokeExport, promise, exportId, array); + args.GetReturnValue().Set(sender->getPromise(promise)); + } else { + args.GetReturnValue().Set(args.GetIsolate()->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8(args.GetIsolate(), "Invoking a function on a nonexistant task.")))); + } +} + +v8::Handle Task::invokeExport(TaskStub* from, Task* to, exportid_t exportId, const std::vector& buffer) { + v8::Handle result; + if (to->_exports[exportId]) { + v8::Handle arguments = v8::Handle::Cast(Serialize::load(to, from, buffer)); + std::vector > argumentArray; + for (size_t i = 1; i < arguments->Length(); ++i) { + argumentArray.push_back(arguments->Get(i)); + } + v8::Handle function = v8::Local::New(to->_isolate, to->_exports[exportId]->_persistent); + v8::Handle* argumentPointer = 0; + if (argumentArray.size()) { + argumentPointer = &*argumentArray.begin(); + } + result = function->Call(v8::Handle::Cast(arguments->Get(0)), argumentArray.size(), argumentPointer); + } else { + std::cout << to->_scriptName << ": That's not an export we have (exportId=" << exportId << ", exports = " << to->_exports.size() << ")\n"; + } + from->getStream().send(kReleaseImport, reinterpret_cast(&exportId), sizeof(exportId)); + return result; +} + +void Task::sendPromiseResolve(Task* from, TaskStub* to, promiseid_t promise, v8::Handle result) { + if (!result.IsEmpty() && result->IsPromise()) { + // We're not going to serialize/deserialize a promise... + v8::Handle data = v8::Object::New(from->_isolate); + data->Set(v8::String::NewFromUtf8(from->_isolate, "task"), v8::Int32::New(from->_isolate, to->getId())); + data->Set(v8::String::NewFromUtf8(from->_isolate, "promise"), v8::Int32::New(from->_isolate, promise)); + v8::Handle promise = v8::Handle::Cast(result); + v8::Handle then = v8::Function::New(from->_isolate, invokeThen, data); + promise->Then(then); + v8::Handle catchCallback = v8::Function::New(from->_isolate, invokeCatch, data); + promise->Catch(catchCallback); + from->_isolate->RunMicrotasks(); + } else { + sendPromiseMessage(from, to, kResolvePromise, promise, result); + } +} + +void Task::sendPromiseReject(Task* from, TaskStub* to, promiseid_t promise, v8::Handle result) { + if (!result.IsEmpty() && result->IsPromise()) { + // We're not going to serialize/deserialize a promise... 
+ v8::Handle data = v8::Object::New(from->_isolate); + data->Set(v8::String::NewFromUtf8(from->_isolate, "task"), v8::Int32::New(from->_isolate, to->getId())); + data->Set(v8::String::NewFromUtf8(from->_isolate, "promise"), v8::Int32::New(from->_isolate, promise)); + v8::Handle promise = v8::Handle::Cast(result); + v8::Handle then = v8::Function::New(from->_isolate, invokeThen, data); + promise->Then(then); + v8::Handle catchCallback = v8::Function::New(from->_isolate, invokeCatch, data); + promise->Catch(catchCallback); + from->_isolate->RunMicrotasks(); + } else { + sendPromiseMessage(from, to, kRejectPromise, promise, result); + } +} + +void Task::sendPromiseMessage(Task* from, TaskStub* to, MessageType messageType, promiseid_t promise, v8::Handle result) { + if (to) { + std::vector buffer; + buffer.insert(buffer.end(), reinterpret_cast(&promise), reinterpret_cast(&promise) + sizeof(promise)); + if (!result.IsEmpty() && !result->IsUndefined() && !result->IsNull()) { + Serialize::store(from, buffer, result); + } + to->getStream().send(messageType, &*buffer.begin(), buffer.size()); + } else { + std::cerr << "Sending to a NULL task.\n"; + } +} + +void Task::sendPromiseExportMessage(Task* from, TaskStub* to, MessageType messageType, promiseid_t promise, exportid_t exportId, v8::Handle result) { + std::vector buffer; + buffer.insert(buffer.end(), reinterpret_cast(&promise), reinterpret_cast(&promise) + sizeof(promise)); + buffer.insert(buffer.end(), reinterpret_cast(&exportId), reinterpret_cast(&exportId) + sizeof(exportId)); + if (!result.IsEmpty() && !result->IsUndefined() && !result->IsNull()) { + Serialize::store(from, buffer, result); + } + to->getStream().send(messageType, &*buffer.begin(), buffer.size()); +} + +TaskStub* Task::get(taskid_t taskId) { + return taskId == kParentId ? 
_parent : _children[taskId]; +} + +void Task::invokeThen(const v8::FunctionCallbackInfo& args) { + Task* from = reinterpret_cast(args.GetIsolate()->GetData(0)); + v8::Handle data = v8::Handle::Cast(args.Data()); + TaskStub* to = from->get(data->Get(v8::String::NewFromUtf8(args.GetIsolate(), "task"))->Int32Value()); + promiseid_t promise = data->Get(v8::String::NewFromUtf8(args.GetIsolate(), "promise"))->Int32Value(); + sendPromiseMessage(from, to, kResolvePromise, promise, args[0]); +} + +void Task::invokeCatch(const v8::FunctionCallbackInfo& args) { + Task* from = reinterpret_cast(args.GetIsolate()->GetData(0)); + v8::Handle data = v8::Handle::Cast(args.Data()); + TaskStub* to = from->get(data->Get(v8::String::NewFromUtf8(args.GetIsolate(), "task"))->Int32Value()); + promiseid_t promise = data->Get(v8::String::NewFromUtf8(args.GetIsolate(), "promise"))->Int32Value(); + sendPromiseMessage(from, to, kRejectPromise, promise, args[0]); +} + +void Task::parent(v8::Local property, const v8::PropertyCallbackInfo& args) { + Task* task = reinterpret_cast(args.GetIsolate()->GetData(0)); + if (task->_parent) { + args.GetReturnValue().Set(task->_parent->getTaskObject()); + } else { + args.GetReturnValue().Set(v8::Undefined(task->_isolate)); + } +} + +void Task::version(v8::Local property, const v8::PropertyCallbackInfo& args) { + Task* task = reinterpret_cast(args.GetIsolate()->GetData(0)); + args.GetReturnValue().Set(v8::String::NewFromUtf8(task->_isolate, v8::V8::GetVersion())); +} + +void Task::getImportProperty(v8::Local property, const v8::PropertyCallbackInfo& args) { + v8::Local imports = Task::get(args.GetIsolate())->_importObject.Get(args.GetIsolate()); + args.GetReturnValue().Set(imports->Get(property)); +} + +void Task::getImports(v8::Local property, const v8::PropertyCallbackInfo& args) { + args.GetReturnValue().Set(v8::Local::New(args.GetIsolate(), Task::get(args.GetIsolate())->_importObject)); +} + +void Task::getExports(v8::Local property, const v8::PropertyCallbackInfo& args) { + args.GetReturnValue().Set(v8::Local::New(args.GetIsolate(), Task::get(args.GetIsolate())->_exportObject)); +} + +void Task::setExports(v8::Local property, v8::Local value, const v8::PropertyCallbackInfo& args) { + Task::get(args.GetIsolate())->_exportObject = v8::Persistent >(args.GetIsolate(), v8::Handle::Cast(value)); +} + +Task* Task::get(v8::Isolate* isolate) { + return reinterpret_cast(isolate->GetData(0)); +} + +promiseid_t Task::allocatePromise() { + promiseid_t promiseId; + do { + promiseId = _nextPromise++; + } while (_promises.find(promiseId) != _promises.end()); + v8::Persistent > promise(_isolate, v8::Promise::Resolver::New(_isolate)); + _promises[promiseId] = promise; + return promiseId; +} + +v8::Handle Task::getPromise(promiseid_t promise) { + v8::Handle result; + if (!_promises[promise].IsEmpty()) { + result = v8::Local::New(_isolate, _promises[promise]); + } + return result; +} + +void Task::resolvePromise(promiseid_t promise, v8::Handle value) { + TaskTryCatch tryCatch(this); + if (!_promises[promise].IsEmpty()) { + v8::HandleScope handleScope(_isolate); + v8::Handle resolver = v8::Local::New(_isolate, _promises[promise]); + resolver->Resolve(value); + _isolate->RunMicrotasks(); + _promises[promise].Reset(); + _promises.erase(promise); + } +} + +void Task::rejectPromise(promiseid_t promise, v8::Handle value) { + TaskTryCatch tryCatch(this); + if (!_promises[promise].IsEmpty()) { + v8::HandleScope handleScope(_isolate); + v8::Handle resolver = v8::Local::New(_isolate, _promises[promise]); + 
resolver->Reject(value); + _isolate->RunMicrotasks(); + _promises[promise].Reset(); + _promises.erase(promise); + } +} + +exportid_t Task::exportFunction(v8::Handle function) { + exportid_t exportId = -1; + v8::Handle exportName = v8::String::NewFromUtf8(_isolate, "export"); + + v8::Local value = function->GetHiddenValue(exportName); + if (!value.IsEmpty() && value->IsNumber()) + { + exportid_t foundId = value->ToInteger(_isolate)->Int32Value(); + if (_exports[foundId]) { + exportId = foundId; + } + } + + if (exportId == -1) { + do { + exportId = _nextExport++; + } while (_exports[_nextExport]); + ExportRecord* record = new ExportRecord(_isolate, function); + function->SetHiddenValue(exportName, v8::Integer::New(_isolate, exportId)); + _exports[exportId] = record; + } + + if (_exports[exportId]) { + _exports[exportId]->ref(); + } + + return exportId; +} + +void Task::releaseExport(taskid_t taskId, exportid_t exportId) { + if (TaskStub* task = get(taskId)) { + std::vector buffer; + buffer.insert(buffer.end(), reinterpret_cast(&exportId), reinterpret_cast(&exportId) + sizeof(exportId)); + task->getStream().send(kReleaseExport, &*buffer.begin(), buffer.size()); + } +} + +v8::Handle Task::addImport(taskid_t taskId, exportid_t exportId) { + v8::Local data = v8::Object::New(_isolate); + data->Set(v8::String::NewFromUtf8(_isolate, "export"), v8::Int32::New(_isolate, exportId)); + data->Set(v8::String::NewFromUtf8(_isolate, "task"), v8::Int32::New(_isolate, taskId)); + v8::Local function = v8::Function::New(_isolate, Task::invokeExport, data); + _imports.push_back(new ImportRecord(_isolate, function, exportId, taskId, this)); + return function; +} + +void Task::statistics(v8::Local property, const v8::PropertyCallbackInfo& args) { + Task* task = reinterpret_cast(args.GetIsolate()->GetData(0)); + args.GetReturnValue().Set(task->getStatistics()); +} + +v8::Handle Task::getStatistics() { + v8::Handle result = v8::Object::New(_isolate); + result->Set(v8::String::NewFromUtf8(_isolate, "sockets"), v8::Integer::New(_isolate, Socket::getCount())); + result->Set(v8::String::NewFromUtf8(_isolate, "openSockets"), v8::Integer::New(_isolate, Socket::getOpenCount())); + result->Set(v8::String::NewFromUtf8(_isolate, "promises"), v8::Integer::New(_isolate, _promises.size())); + result->Set(v8::String::NewFromUtf8(_isolate, "exports"), v8::Integer::New(_isolate, _exports.size())); + result->Set(v8::String::NewFromUtf8(_isolate, "imports"), v8::Integer::New(_isolate, _imports.size())); + result->Set(v8::String::NewFromUtf8(_isolate, "tlsContexts"), v8::Integer::New(_isolate, TlsContextWrapper::getCount())); + + uv_rusage_t usage; + if (uv_getrusage(&usage) == 0) { + result->Set(v8::String::NewFromUtf8(_isolate, "utime"), v8::Number::New(_isolate, usage.ru_utime.tv_sec + usage.ru_utime.tv_usec / 1000000.0)); + result->Set(v8::String::NewFromUtf8(_isolate, "stime"), v8::Number::New(_isolate, usage.ru_stime.tv_sec + usage.ru_stime.tv_usec / 1000000.0)); + result->Set(v8::String::NewFromUtf8(_isolate, "maxrss"), v8::Number::New(_isolate, usage.ru_maxrss)); + } + return result; +} + +void Task::onReceivePacket(int packetType, const char* begin, size_t length, void* userData) { + TaskStub* stub = reinterpret_cast(userData); + TaskStub* from = stub; + Task* to = stub->getOwner(); + + TaskTryCatch tryCatch(to); + v8::HandleScope scope(to->_isolate); + + switch (static_cast(packetType)) { + case kStatistics: + { + promiseid_t promise; + std::memcpy(&promise, begin, sizeof(promise)); + v8::Handle result = 
to->getStatistics(); + sendPromiseResolve(to, from, promise, result); + } + break; + case kInvokeExport: + { + promiseid_t promise; + exportid_t exportId; + std::memcpy(&promise, begin, sizeof(promise)); + std::memcpy(&exportId, begin + sizeof(promise), sizeof(exportId)); + + v8::TryCatch tryCatch; + v8::Handle result = invokeExport(from, to, exportId, std::vector(begin + sizeof(promiseid_t) + sizeof(exportid_t), begin + length)); + if (tryCatch.HasCaught()) { + sendPromiseReject(to, from, promise, Serialize::store(to, tryCatch)); + } else { + sendPromiseResolve(to, from, promise, result); + } + } + break; + case kResolvePromise: + case kRejectPromise: + { + v8::Handle arg; + promiseid_t promise; + std::memcpy(&promise, begin, sizeof(promiseid_t)); + if (length > sizeof(promiseid_t)) { + arg = Serialize::load(to, from, std::vector(begin + sizeof(promiseid_t), begin + length)); + } + else { + arg = v8::Undefined(to->_isolate); + } + if (static_cast(packetType) == kResolvePromise) { + to->resolvePromise(promise, arg); + } + else { + to->rejectPromise(promise, arg); + } + } + break; + case kReleaseExport: + assert(length == sizeof(exportid_t)); + exportid_t exportId; + memcpy(&exportId, begin, sizeof(exportId)); + if (to->_exports[exportId]) { + if (to->_exports[exportId]->release()) { + to->_exports.erase(exportId); + } + } + break; + case kReleaseImport: + { + assert(length == sizeof(exportid_t)); + exportid_t exportId; + memcpy(&exportId, begin, sizeof(exportId)); + for (size_t i = 0; i < to->_imports.size(); ++i) { + if (to->_imports[i]->_task == from->getId() && to->_imports[i]->_export == exportId) { + to->_imports[i]->release(); + break; + } + } + } + break; + case kSetTrusted: + { + assert(length == sizeof(bool)); + bool trusted = false; + memcpy(&trusted, begin, sizeof(bool)); + to->_trusted = trusted; + } + break; + case kActivate: + to->activate(); + break; + case kExecute: + { + assert(length >= sizeof(promiseid_t)); + v8::Handle arg; + promiseid_t promise; + std::memcpy(&promise, begin, sizeof(promiseid_t)); + arg = Serialize::load(to, from, std::vector(begin + sizeof(promiseid_t), begin + length)); + v8::TryCatch tryCatch(to->_isolate); + tryCatch.SetCaptureMessage(true); + tryCatch.SetVerbose(true); + to->execute(*v8::String::Utf8Value(arg)); + if (tryCatch.HasCaught()) { + sendPromiseReject(to, from, promise, Serialize::store(to, tryCatch)); + } + else { + sendPromiseResolve(to, from, promise, v8::Undefined(to->_isolate)); + } + } + break; + case kKill: + ::exit(1); + break; + case kSetImports: + { + v8::Handle result = v8::Handle::Cast(Serialize::load(to, from, std::vector(begin, begin + length))); + to->_importObject = v8::Persistent >(to->_isolate, result); + } + break; + case kGetExports: + promiseid_t promise; + assert(length == sizeof(promise)); + std::memcpy(&promise, begin, sizeof(promiseid_t)); + v8::Handle result = v8::Local::New(to->_isolate, to->_exportObject); + sendPromiseResolve(to, from, promise, result); + break; + } +} + +void Task::configureFromStdin() { + _parent = TaskStub::createParent(this, STDIN_FILENO); +} + +std::string Task::resolveRequire(const std::string& require) { + std::string result; + std::string path = _scriptName; + size_t position = path.rfind('/'); + if (position != std::string::npos) { + path.resize(position + 1); + std::cout << "Looking in " << path << " for " << require << "\n"; + if (require.find("..") == std::string::npos && require.find('/') == std::string::npos) { + result = path + require; + } + if (result.size() && 
require.rfind(".js") != require.size() - 3) { + result += ".js"; + } + } + return result; +} + +void Task::require(const v8::FunctionCallbackInfo& args) { + v8::HandleScope scope(args.GetIsolate()); + Task* task = Task::get(args.GetIsolate()); + v8::String::Utf8Value pathValue(args[0]); + if (*pathValue) { + std::string unresolved(*pathValue, *pathValue + pathValue.length()); + std::string path = task->resolveRequire(unresolved); + if (!path.size()) { + args.GetReturnValue().Set(args.GetIsolate()->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8(args.GetIsolate(), ("require(): Unable to resolve module: " + unresolved).c_str())))); + } else { + ScriptExportMap::iterator it = task->_scriptExports.find(path); + if (it != task->_scriptExports.end()) { + v8::Handle exports = v8::Local::New(args.GetIsolate(), it->second); + args.GetReturnValue().Set(exports); + } else { + v8::Handle exports = v8::Object::New(args.GetIsolate()); + task->_scriptExports[path] = v8::Persistent >(args.GetIsolate(), exports); + + v8::Handle name = v8::String::NewFromUtf8(args.GetIsolate(), path.c_str()); + v8::Handle source = loadFile(args.GetIsolate(), path.c_str()); + std::cout << "Requiring script " << path << "\n"; + if (!source.IsEmpty()) { + v8::Handle global = args.GetIsolate()->GetCurrentContext()->Global(); + v8::Handle oldExports = global->Get(v8::String::NewFromUtf8(args.GetIsolate(), "exports")); + global->Set(v8::String::NewFromUtf8(args.GetIsolate(), "exports"), exports); + v8::Handle script = v8::Script::Compile(source, name); + if (!script.IsEmpty()) { + script->Run(); + std::cout << "Script " << path << " completed\n"; + } else { + std::cerr << "Failed to compile script.\n"; + } + global->Set(v8::String::NewFromUtf8(args.GetIsolate(), "exports"), oldExports); + args.GetReturnValue().Set(exports); + } else { + std::cerr << "Failed to load " << path << ".\n"; + } + } + } + } else { + args.GetReturnValue().Set(args.GetIsolate()->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8(args.GetIsolate(), "require(): No module specified.")))); + } +} diff --git a/src/Task.h b/src/Task.h new file mode 100644 index 00000000..d21d0bee --- /dev/null +++ b/src/Task.h @@ -0,0 +1,163 @@ +#ifndef INCLUDED_Task +#define INCLUDED_Task + +#include "PacketStream.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +struct ExportRecord; +struct ImportRecord; +class Task; +class TaskStub; + +struct uv_loop_s; +typedef struct uv_loop_s uv_loop_t; + +typedef int taskid_t; +typedef int promiseid_t; +typedef int exportid_t; + +enum MessageType { + kResolvePromise, + kRejectPromise, + kInvokeExport, + kReleaseExport, + kReleaseImport, + kSetTrusted, + kActivate, + kExecute, + kKill, + kStatistics, + kSetImports, + kGetExports, +}; + +class NewArrayBufferAllocator : public v8::ArrayBuffer::Allocator { +public: + void* Allocate(size_t length) { + char* bytes = new char[length]; + std::memset(bytes, 0, length); + return bytes; + } + + void* AllocateUninitialized(size_t length) { + return new char[length]; + } + + void Free(void* data, size_t length) { + delete[] reinterpret_cast(data); + } +}; + +class Task { +public: + Task(); + ~Task(); + + const std::string& getName() const { return _scriptName; } + v8::Isolate* getIsolate() { return _isolate; } + uv_loop_t* getLoop() { return _loop; } + v8::Handle getContext(); + void kill(); + + promiseid_t allocatePromise(); + v8::Handle getPromise(promiseid_t promise); + void resolvePromise(promiseid_t promise, v8::Handle value); + void 
rejectPromise(promiseid_t promise, v8::Handle value); + + void configureFromStdin(); + void setTrusted(bool trusted) { _trusted = trusted; } + void execute(const char* fileName); + void activate(); + void run(); + + static int getCount() { return _count; } + static Task* get(v8::Isolate* isolate); + TaskStub* get(taskid_t taskId); + + exportid_t exportFunction(v8::Handle function); + static void invokeExport(const v8::FunctionCallbackInfo& args); + v8::Handle addImport(taskid_t taskId, exportid_t exportId); + void releaseExport(taskid_t taskId, exportid_t exportId); + +private: + static int _count; + + TaskStub* _stub = 0; + TaskStub* _parent = 0; + taskid_t _nextTask = 1; + static const taskid_t kParentId = 0; + std::map _children; + + typedef std::map > > ScriptExportMap; + ScriptExportMap _scriptExports; + + bool _trusted = false; + bool _killed = false; + std::string _scriptName; + NewArrayBufferAllocator _allocator; + v8::Isolate* _isolate = 0; + + std::map > > _promises; + promiseid_t _nextPromise = 0; + uv_loop_t* _loop = 0; + + std::map _exports; + exportid_t _nextExport = 0; + + v8::Persistent > _context; + + std::vector _imports; + + v8::Persistent > _importObject; + v8::Persistent > _exportObject; + + v8::Handle getStatistics(); + + std::string resolveRequire(const std::string& require); + + static void activate(const v8::FunctionCallbackInfo& args); + static void exit(const v8::FunctionCallbackInfo& args); + static void print(const v8::FunctionCallbackInfo& args); + static void require(const v8::FunctionCallbackInfo& args); + + static void setTimeout(const v8::FunctionCallbackInfo& args); + static void timeoutCallback(uv_timer_t* handle); + + static void invokeThen(const v8::FunctionCallbackInfo& args); + static void invokeCatch(const v8::FunctionCallbackInfo& args); + + static void parent(v8::Local property, const v8::PropertyCallbackInfo& args); + static void version(v8::Local property, const v8::PropertyCallbackInfo& args); + static void statistics(v8::Local property, const v8::PropertyCallbackInfo& args); + + static void utf8Length(const v8::FunctionCallbackInfo& args); + + static void getImportProperty(v8::Local property, const v8::PropertyCallbackInfo& args); + static void getImports(v8::Local property, const v8::PropertyCallbackInfo& args); + static void getExports(v8::Local property, const v8::PropertyCallbackInfo& args); + static void setExports(v8::Local property, v8::Local value, const v8::PropertyCallbackInfo& args); + + static v8::Handle invokeExport(TaskStub* from, Task* to, exportid_t exportId, const std::vector& buffer); + static void sendPromiseResolve(Task* from, TaskStub* to, promiseid_t promise, v8::Handle result); + static void sendPromiseReject(Task* from, TaskStub* to, promiseid_t promise, v8::Handle result); + + static void onReceivePacket(int packetType, const char* begin, size_t length, void* userData); + + static void sendPromiseMessage(Task* from, TaskStub* to, MessageType messageType, promiseid_t promise, v8::Handle result); + static void sendPromiseExportMessage(Task* from, TaskStub* to, MessageType messageType, promiseid_t promiseId, exportid_t exportId, v8::Handle result); + + static v8::Handle loadFile(v8::Isolate* isolate, const char* fileName); + + friend struct ImportRecord; + friend class TaskStub; +}; + +#endif diff --git a/src/TaskStub.cpp b/src/TaskStub.cpp new file mode 100644 index 00000000..80e07bc1 --- /dev/null +++ b/src/TaskStub.cpp @@ -0,0 +1,254 @@ +#include "TaskStub.h" + +#include "PacketStream.h" +#include "Serialize.h" 
+#include "Task.h" +#include "TaskTryCatch.h" + +#include + +#ifdef _WIN32 +#include +#include +#include +static const int STDIN_FILENO = 0; +static const int STDOUT_FILENO = 1; +static const int STDERR_FILENO = 2; +#else +#include +#endif + +bool TaskStub::_determinedExecutable = false; +char TaskStub::_executable[1024]; + +void TaskStub::initialize() { + if (!_determinedExecutable) { + size_t size = sizeof(_executable); + uv_exepath(_executable, &size); + _determinedExecutable = true; + } +} + +TaskStub::TaskStub() { + initialize(); + std::memset(&_process, 0, sizeof(_process)); +} + +void TaskStub::ref() { + if (++_refCount == 1) { + _taskObject.ClearWeak(); + } +} + +void TaskStub::release() { + if (--_refCount == 0) { + _taskObject.SetWeak(this, onRelease); + } +} + +TaskStub* TaskStub::createParent(Task* task, uv_file file) { + v8::Isolate::Scope isolateScope(task->_isolate); + v8::HandleScope scope(task->_isolate); + + v8::Local context = v8::Context::New(task->_isolate, 0); + context->Enter(); + + v8::Handle parentTemplate = v8::ObjectTemplate::New(task->_isolate); + parentTemplate->SetInternalFieldCount(1); + + v8::Handle parentObject = parentTemplate->NewInstance(); + TaskStub* parentStub = new TaskStub(); + parentStub->_taskObject.Reset(task->_isolate, v8::Local::New(task->_isolate, parentObject)); + parentObject->SetInternalField(0, v8::External::New(task->_isolate, parentStub)); + parentStub->_owner = task; + parentStub->_id = Task::kParentId; + + if (uv_pipe_init(task->_loop, &parentStub->_stream.getStream(), 1) != 0) { + std::cerr << "uv_pipe_init failed\n"; + } + parentStub->_stream.setOnReceive(Task::onReceivePacket, parentStub); + if (uv_pipe_open(&parentStub->_stream.getStream(), file) != 0) { + std::cerr << "uv_pipe_open failed\n"; + } + parentStub->_stream.start(); + + return parentStub; +} + +void TaskStub::create(const v8::FunctionCallbackInfo& args) { + Task* parent = Task::get(args.GetIsolate()); + v8::HandleScope scope(args.GetIsolate()); + + TaskStub* stub = new TaskStub(); + v8::Handle data = v8::External::New(args.GetIsolate(), stub); + + v8::Handle taskTemplate = v8::ObjectTemplate::New(args.GetIsolate()); + taskTemplate->SetAccessor(v8::String::NewFromUtf8(args.GetIsolate(), "trusted"), getTrusted, setTrusted, data); + taskTemplate->Set(v8::String::NewFromUtf8(args.GetIsolate(), "setImports"), v8::FunctionTemplate::New(args.GetIsolate(), setImports, data)); + taskTemplate->Set(v8::String::NewFromUtf8(args.GetIsolate(), "getExports"), v8::FunctionTemplate::New(args.GetIsolate(), getExports, data)); + taskTemplate->SetAccessor(v8::String::NewFromUtf8(args.GetIsolate(), "onExit"), getOnExit, setOnExit, data); + taskTemplate->Set(v8::String::NewFromUtf8(args.GetIsolate(), "activate"), v8::FunctionTemplate::New(args.GetIsolate(), TaskStub::activate, data)); + taskTemplate->Set(v8::String::NewFromUtf8(args.GetIsolate(), "execute"), v8::FunctionTemplate::New(args.GetIsolate(), TaskStub::execute, data)); + taskTemplate->Set(v8::String::NewFromUtf8(args.GetIsolate(), "kill"), v8::FunctionTemplate::New(args.GetIsolate(), TaskStub::kill, data)); + taskTemplate->Set(v8::String::NewFromUtf8(args.GetIsolate(), "statistics"), v8::FunctionTemplate::New(args.GetIsolate(), TaskStub::statistics, data)); + taskTemplate->SetInternalFieldCount(1); + + v8::Handle taskObject = taskTemplate->NewInstance(); + stub->_taskObject.Reset(args.GetIsolate(), taskObject); + taskObject->SetInternalField(0, v8::External::New(args.GetIsolate(), stub)); + stub->_owner = parent; + + taskid_t id = 
0; + if (parent) { + do { + id = parent->_nextTask++; + if (parent->_nextTask == Task::kParentId) { + ++parent->_nextTask; + } + } while (parent->_children.find(id) != parent->_children.end()); + parent->_children[id] = stub; + } + stub->_id = id; + + char arg1[] = "--child"; + char* argv[] = { _executable, arg1, 0 }; + + uv_pipe_t* pipe = reinterpret_cast(&stub->_stream.getStream()); + std::memset(pipe, 0, sizeof(*pipe)); + if (uv_pipe_init(parent->getLoop(), pipe, 1) != 0) { + std::cerr << "uv_pipe_init failed\n"; + } + + uv_stdio_container_t io[3]; + io[0].flags = static_cast(UV_CREATE_PIPE | UV_READABLE_PIPE | UV_WRITABLE_PIPE); + io[0].data.stream = reinterpret_cast(pipe); + io[1].flags = UV_INHERIT_FD; + io[1].data.fd = STDOUT_FILENO; + io[2].flags = UV_INHERIT_FD; + io[2].data.fd = STDERR_FILENO; + + uv_process_options_t options = {0}; + options.args = argv; + options.exit_cb = onProcessExit; + options.stdio = io; + options.stdio_count = sizeof(io) / sizeof(*io); + options.file = argv[0]; + + stub->_process.data = stub; + int result = uv_spawn(parent->getLoop(), &stub->_process, &options); + if (result == 0) { + stub->_stream.setOnReceive(Task::onReceivePacket, stub); + stub->_stream.start(); + + args.GetReturnValue().Set(taskObject); + } else { + std::cerr << "uv_spawn failed: " << uv_strerror(result) << "\n"; + } +} + +void TaskStub::onProcessExit(uv_process_t* process, int64_t status, int terminationSignal) { + TaskStub* stub = reinterpret_cast(process->data); + if (!stub->_onExit.IsEmpty()) { + TaskTryCatch tryCatch(stub->_owner); + v8::HandleScope scope(stub->_owner->_isolate); + v8::Handle callback = v8::Local::New(stub->_owner->_isolate, stub->_onExit); + v8::Handle args[2]; + args[0] = v8::Integer::New(stub->_owner->_isolate, status); + args[1] = v8::Integer::New(stub->_owner->_isolate, terminationSignal); + callback->Call(callback, 2, &args[0]); + } + stub->_stream.close(); + stub->_owner->_children.erase(stub->_id); + uv_close(reinterpret_cast(process), 0); +} + +void TaskStub::onRelease(const v8::WeakCallbackData& data) { +} + +void TaskStub::getTrusted(v8::Local property, const v8::PropertyCallbackInfo& args) { + args.GetReturnValue().Set(v8::Boolean::New(args.GetIsolate(), false)); +} + +void TaskStub::setTrusted(v8::Local property, v8::Local value, const v8::PropertyCallbackInfo& args) { + if (TaskStub* stub = TaskStub::get(args.Data())) { + bool trusted = value->BooleanValue(); + stub->_stream.send(kSetTrusted, reinterpret_cast(&trusted), sizeof(trusted)); + } +} + +void TaskStub::getExports(const v8::FunctionCallbackInfo& args) { + if (TaskStub* stub = TaskStub::get(args.Data())) { + TaskTryCatch tryCatch(stub->_owner); + v8::HandleScope scope(args.GetIsolate()); + + promiseid_t promise = stub->_owner->allocatePromise(); + Task::sendPromiseMessage(stub->_owner, stub, kGetExports, promise, v8::Undefined(args.GetIsolate())); + args.GetReturnValue().Set(stub->_owner->getPromise(promise)); + } +} + +void TaskStub::setImports(const v8::FunctionCallbackInfo& args) { + if (TaskStub* stub = TaskStub::get(args.Data())) { + std::vector buffer; + Serialize::store(Task::get(args.GetIsolate()), buffer, args[0]); + stub->_stream.send(kSetImports, &*buffer.begin(), buffer.size()); + } +} + +void TaskStub::getOnExit(v8::Local property, const v8::PropertyCallbackInfo& args) { + TaskTryCatch tryCatch(TaskStub::get(args.Data())->_owner); + v8::HandleScope scope(args.GetIsolate()); + args.GetReturnValue().Set(v8::Local::New(args.GetIsolate(), TaskStub::get(args.Data())->_onExit)); +} + 
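// Every promise-returning TaskStub method in this file (execute, statistics,
// getExports) follows the same round trip: allocate a promise on the owning
// Task, send a packet to the peer over the PacketStream, return the
// v8::Promise to script, and let Task::onReceivePacket resolve it when the
// kResolvePromise reply arrives.  As an illustrative sketch only (kPing is
// hypothetical and not a MessageType defined in this patch), a new request
// would take the same shape:
//
//   void TaskStub::ping(const v8::FunctionCallbackInfo<v8::Value>& args) {
//       if (TaskStub* stub = TaskStub::get(args.Data())) {
//           TaskTryCatch tryCatch(stub->_owner);
//           v8::HandleScope scope(args.GetIsolate());
//
//           // Allocate a promise, send the request, and hand the promise to script.
//           promiseid_t promise = stub->_owner->allocatePromise();
//           Task::sendPromiseMessage(stub->_owner, stub, kPing, promise, v8::Undefined(args.GetIsolate()));
//           args.GetReturnValue().Set(stub->_owner->getPromise(promise));
//       }
//   }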
+void TaskStub::setOnExit(v8::Local property, v8::Local value, const v8::PropertyCallbackInfo& args) { + TaskTryCatch tryCatch(TaskStub::get(args.Data())->_owner); + v8::HandleScope scope(args.GetIsolate()); + v8::Persistent > function(args.GetIsolate(), v8::Handle::Cast(value)); + TaskStub::get(args.Data())->_onExit = function; +} + +TaskStub* TaskStub::get(v8::Handle object) { + return reinterpret_cast(v8::Handle::Cast(object)->Value()); +} + +v8::Handle TaskStub::getTaskObject() { + return v8::Local::New(_owner->getIsolate(), _taskObject); +} + +void TaskStub::activate(const v8::FunctionCallbackInfo& args) { + if (TaskStub* stub = TaskStub::get(args.Data())) { + TaskTryCatch tryCatch(stub->_owner); + v8::HandleScope scope(args.GetIsolate()); + v8::String::Utf8Value fileName(args[0]->ToString(args.GetIsolate())); + stub->_stream.send(kActivate, 0, 0); + } +} + +void TaskStub::execute(const v8::FunctionCallbackInfo& args) { + if (TaskStub* stub = TaskStub::get(args.Data())) { + TaskTryCatch tryCatch(stub->_owner); + v8::HandleScope scope(args.GetIsolate()); + + promiseid_t promise = stub->_owner->allocatePromise(); + Task::sendPromiseMessage(stub->_owner, stub, kExecute, promise, args[0]); + args.GetReturnValue().Set(stub->_owner->getPromise(promise)); + } +} + +void TaskStub::kill(const v8::FunctionCallbackInfo& args) { + if (TaskStub* stub = TaskStub::get(args.Data())) { + uv_process_kill(&stub->_process, SIGTERM); + } +} + +void TaskStub::statistics(const v8::FunctionCallbackInfo& args) { + if (TaskStub* stub = TaskStub::get(args.Data())) { + TaskTryCatch tryCatch(stub->_owner); + v8::HandleScope scope(args.GetIsolate()); + + promiseid_t promise = stub->_owner->allocatePromise(); + Task::sendPromiseMessage(stub->_owner, stub, kStatistics, promise, v8::Undefined(args.GetIsolate())); + args.GetReturnValue().Set(stub->_owner->getPromise(promise)); + } +} diff --git a/src/TaskStub.h b/src/TaskStub.h new file mode 100644 index 00000000..9a8ed64e --- /dev/null +++ b/src/TaskStub.h @@ -0,0 +1,63 @@ +#ifndef INCLUDED_TaskStub +#define INCLUDED_TaskStub + +#include "PacketStream.h" + +#include + +class Task; + +typedef int taskid_t; + +class TaskStub { +public: + void ref(); + void release(); + + static void create(const v8::FunctionCallbackInfo& args); + static TaskStub* createParent(Task* task, uv_file file); + static void initialize(); + + taskid_t getId() { return _id; } + Task* getOwner() { return _owner; } + v8::Handle getTaskObject(); + PacketStream& getStream() { return _stream; } + +private: + v8::Persistent _taskObject; + int _refCount = 1; + + Task* _owner = 0; + PacketStream _stream; + taskid_t _id = -1; + uv_process_t _process; + + v8::Persistent > _onExit; + + static bool _determinedExecutable; + static char _executable[1024]; + + TaskStub(); + + static TaskStub* get(v8::Handle object); + + static void getTrusted(v8::Local property, const v8::PropertyCallbackInfo& args); + static void setTrusted(v8::Local property, v8::Local value, const v8::PropertyCallbackInfo& args); + + static void getExports(const v8::FunctionCallbackInfo& args); + static void setImports(const v8::FunctionCallbackInfo& args); + + static void getOnExit(v8::Local property, const v8::PropertyCallbackInfo& args); + static void setOnExit(v8::Local property, v8::Local value, const v8::PropertyCallbackInfo& args); + + static void activate(const v8::FunctionCallbackInfo& args); + static void execute(const v8::FunctionCallbackInfo& args); + static void kill(const v8::FunctionCallbackInfo& args); + static void 
statistics(const v8::FunctionCallbackInfo& args); + + static void onRelease(const v8::WeakCallbackData& data); + + static void onProcessExit(uv_process_t* process, int64_t status, int terminationSignal); +}; + +#endif diff --git a/src/TaskTryCatch.cpp b/src/TaskTryCatch.cpp new file mode 100644 index 00000000..03d59df4 --- /dev/null +++ b/src/TaskTryCatch.cpp @@ -0,0 +1,57 @@ +#include "TaskTryCatch.h" + +#include "Task.h" + +#include + +const char* TaskTryCatch::toString(const v8::String::Utf8Value& value) { + return *value ? *value : "(null)"; +} + +TaskTryCatch::TaskTryCatch(Task* task) { + _tryCatch.SetCaptureMessage(true); + _tryCatch.SetVerbose(true); +} + +TaskTryCatch::~TaskTryCatch() { + if (_tryCatch.HasCaught()) { + if (v8::Isolate* isolate = v8::Isolate::GetCurrent()) { + if (Task* task = reinterpret_cast(isolate->GetData(0))) { + std::cerr << "Task[" << task << ':' << task->getName() << "] "; + } + } + std::cerr << "Exception:\n"; + + v8::Handle message(_tryCatch.Message()); + if (!message.IsEmpty()) { + std::cerr + << toString(v8::String::Utf8Value(message->GetScriptResourceName())) + << ':' + << message->GetLineNumber() + << ": " + << toString(v8::String::Utf8Value(_tryCatch.Exception())) + << '\n'; + std::cerr << toString(v8::String::Utf8Value(message->GetSourceLine())) << '\n'; + + for (int i = 0; i < message->GetStartColumn(); ++i) { + std::cerr << ' '; + } + for (int i = message->GetStartColumn(); i < message->GetEndColumn(); ++i) { + std::cerr << '^'; + } + if (!message->GetStackTrace().IsEmpty()) { + for (int i = 0; i < message->GetStackTrace()->GetFrameCount(); ++i) { + std::cerr << "oops " << i << "\n"; + } + } + std::cerr << '\n'; + } else { + std::cerr << toString(v8::String::Utf8Value(_tryCatch.Exception())) << '\n'; + } + + v8::String::Utf8Value stackTrace(_tryCatch.StackTrace()); + if (stackTrace.length() > 0) { + std::cerr << *stackTrace << '\n'; + } + } +} diff --git a/src/TaskTryCatch.h b/src/TaskTryCatch.h new file mode 100644 index 00000000..41bdd1e7 --- /dev/null +++ b/src/TaskTryCatch.h @@ -0,0 +1,18 @@ +#ifndef INCLUDED_TaskTryCatch +#define INCLUDED_TaskTryCatch + +#include + +class Task; + +class TaskTryCatch { +public: + TaskTryCatch(Task* task); + ~TaskTryCatch(); + +private: + v8::TryCatch _tryCatch; + static const char* toString(const v8::String::Utf8Value& value); +}; + +#endif diff --git a/src/Tls.cpp b/src/Tls.cpp new file mode 100644 index 00000000..18df8852 --- /dev/null +++ b/src/Tls.cpp @@ -0,0 +1,1126 @@ +#include "Tls.h" + +#if !defined (_WIN32) && !defined (__MACH__) +#include +#include +#include +#include +#include +#include +#include +#include + +class TlsContext_openssl : public TlsContext { +public: + TlsContext_openssl(); + ~TlsContext_openssl() override; + TlsSession* createSession() override; + bool setCertificate(const char* certificate) override; + bool setPrivateKey(const char* privateKey) override; + bool addTrustedCertificate(const char* certificate) override; + + SSL_CTX* getContext() { return _context; } + +private: + SSL_CTX* _context = 0; +}; + +class TlsSession_openssl : public TlsSession { +public: + TlsSession_openssl(TlsContext_openssl* context); + ~TlsSession_openssl(); + + void setHostname(const char* hostname) override; + void startConnect() override; + void startAccept() override; + + int getPeerCertificate(char* buffer, size_t size) override; + + void shutdown() override; + HandshakeResult handshake() override; + + int readPlain(char* buffer, size_t bytes) override; + int writePlain(const char* buffer, size_t 
bytes) override; + + int readEncrypted(char* buffer, size_t bytes) override; + int writeEncrypted(const char* buffer, size_t bytes) override; + + bool getError(char* buffer, size_t bytes) override; + +private: + bool verifyPeerCertificate(); + bool verifyHostname(X509* certificate, const char* hostname); + bool wildcardMatch(const char* pattern, const char* name); + + TlsContext_openssl* _context = 0; + BIO* _bioIn = 0; + BIO* _bioOut = 0; + SSL* _ssl = 0; + std::string _hostname; + enum { kUndetermined, kAccept, kConnect } _direction = kUndetermined; +}; + +TlsSession* TlsContext_openssl::createSession() { + return new TlsSession_openssl(this); +} + +TlsContext_openssl::TlsContext_openssl() { + SSL_library_init(); + SSL_load_error_strings(); + + _context = SSL_CTX_new(SSLv23_method()); + SSL_CTX_set_default_verify_paths(_context); +} + +TlsContext_openssl::~TlsContext_openssl() { + SSL_CTX_free(_context); +} + +bool TlsContext_openssl::setCertificate(const char* certificate) { + int result = 0; + BIO* bio = BIO_new(BIO_s_mem()); + BIO_puts(bio, certificate); + X509* x509 = PEM_read_bio_X509(bio, 0, 0, 0); + result = SSL_CTX_use_certificate(_context, x509); + BIO_free(bio); + return result == 1; +} + +bool TlsContext_openssl::setPrivateKey(const char* privateKey) { + int result = 0; + BIO* bio = BIO_new(BIO_s_mem()); + BIO_puts(bio, privateKey); + EVP_PKEY* key = PEM_read_bio_PrivateKey(bio, 0, 0, 0); + result = SSL_CTX_use_PrivateKey(_context, key); + BIO_free(bio); + return result == 1; +} + +bool TlsContext_openssl::addTrustedCertificate(const char* certificate) { + bool result = false; + BIO* bio = BIO_new_mem_buf(const_cast(certificate), -1); + X509* x509 = PEM_read_bio_X509(bio, 0, 0, 0); + BIO_free(bio); + + if (x509) { + X509_STORE* store = SSL_CTX_get_cert_store(_context); + if (store && X509_STORE_add_cert(store, x509) == 1) { + result = true; + } + X509_free(x509); + } + + return result; +} + +TlsContext* TlsContext::create() { + return new TlsContext_openssl(); +} + +TlsSession_openssl::TlsSession_openssl(TlsContext_openssl* context) { + _context = context; + _bioIn = BIO_new(BIO_s_mem()); + _bioOut = BIO_new(BIO_s_mem()); +} + +TlsSession_openssl::~TlsSession_openssl() { + if (_ssl) { + SSL_free(_ssl); + } +} + +void TlsSession_openssl::setHostname(const char* hostname) { + _hostname = hostname; +} + +void TlsSession_openssl::startAccept() { + _direction = kAccept; + _ssl = SSL_new(_context->getContext()); + SSL_set_bio(_ssl, _bioIn, _bioOut); + SSL_accept(_ssl); + handshake(); +} + +void TlsSession_openssl::startConnect() { + _direction = kConnect; + _ssl = SSL_new(_context->getContext()); + SSL_set_bio(_ssl, _bioIn, _bioOut); + + SSL_connect(_ssl); + handshake(); +} + +void TlsSession_openssl::shutdown() { + SSL_shutdown(_ssl); +} + +TlsSession::HandshakeResult TlsSession_openssl::handshake() { + TlsSession::HandshakeResult result = kDone; + if (!SSL_is_init_finished(_ssl)) { + int value = SSL_do_handshake(_ssl); + if (value <= 0) { + int error = SSL_get_error(_ssl, value); + if (error != SSL_ERROR_WANT_READ && error != SSL_ERROR_WANT_WRITE) { + result = kFailed; + } else { + result = kMore; + } + } + } + if (result == kDone && _direction == kConnect && !verifyPeerCertificate()) { + result = kFailed; + } + return result; +} + +int TlsSession_openssl::readPlain(char* buffer, size_t bytes) { + int result = SSL_read(_ssl, buffer, bytes); + if (result <= 0) { + int error = SSL_get_error(_ssl, result); + if (error == SSL_ERROR_WANT_READ || error == SSL_ERROR_WANT_WRITE) { + 
result = 0; + } else if (error == SSL_ERROR_ZERO_RETURN) { + if ((SSL_get_shutdown(_ssl) & SSL_RECEIVED_SHUTDOWN) != 0) { + result = kReadZero; + } else { + result = 0; + } + } else { + result = kReadFailed; + } + } + return result; +} + +int TlsSession_openssl::writePlain(const char* buffer, size_t bytes) { + return SSL_write(_ssl, buffer, bytes); +} + +int TlsSession_openssl::readEncrypted(char* buffer, size_t bytes) { + return BIO_read(_bioOut, buffer, bytes); +} + +int TlsSession_openssl::writeEncrypted(const char* buffer, size_t bytes) { + return BIO_write(_bioIn, buffer, bytes); +} + +int TlsSession_openssl::getPeerCertificate(char* buffer, size_t size) { + int result = -1; + X509* certificate = SSL_get_peer_certificate(_ssl); + BIO* bio = BIO_new(BIO_s_mem()); + PEM_write_bio_X509(bio, certificate); + BUF_MEM* mem; + BIO_get_mem_ptr(bio, &mem); + if (mem->length <= size) { + std::memcpy(buffer, mem->data, mem->length); + result = mem->length; + } + BIO_free(bio); + return result; +} + +bool TlsSession_openssl::verifyPeerCertificate() { + bool verified = false; + X509* certificate = SSL_get_peer_certificate(_ssl); + if (certificate) { + + if (SSL_get_verify_result(_ssl) == X509_V_OK) { + if (verifyHostname(certificate, _hostname.c_str())) { + verified = true; + } + } + X509_free(certificate); + } + return verified; +} + +bool TlsSession_openssl::wildcardMatch(const char* pattern, const char* name) { + while (*pattern && *name) { + if (*pattern == '*') { + for (const char* p = name; *p; ++p) { + if (wildcardMatch(pattern + 1, p)) { + return true; + } + } + return false; + } else if (std::tolower(*pattern) == std::tolower(*name)) { + ++pattern; + ++name; + } else { + break; + } + } + return *pattern == 0 && *name == 0; +} + +bool TlsSession_openssl::verifyHostname(X509* certificate, const char* hostname) { + bool verified = false; + void* names = X509_get_ext_d2i(certificate, NID_subject_alt_name, 0, 0); + if (names) { + int count = sk_GENERAL_NAME_num(names); + for (int i = 0; i < count; ++i) { + const GENERAL_NAME* check = sk_GENERAL_NAME_value(names, i); + const char* name = reinterpret_cast(ASN1_STRING_data(check->d.ia5)); + size_t length = ASN1_STRING_length(check->d.ia5); + if (wildcardMatch(std::string(name, length).c_str(), hostname)) { + verified = true; + break; + } + } + } + + if (!verified) { + int index = X509_NAME_get_index_by_NID(X509_get_subject_name(certificate), NID_commonName, -1); + if (index >= 0) { + X509_NAME_ENTRY* entry = X509_NAME_get_entry(X509_get_subject_name(certificate), index); + if (entry) { + ASN1_STRING* asn1 = X509_NAME_ENTRY_get_data(entry); + if (asn1) { + const char* commonName = reinterpret_cast(ASN1_STRING_data(asn1)); + if (static_cast(ASN1_STRING_length(asn1)) == std::strlen(commonName)) { + verified = wildcardMatch(commonName, hostname); + } + } + } + } + } + + return verified; +} + +bool TlsSession_openssl::getError(char* buffer, size_t bytes) { + unsigned long error = ERR_get_error(); + if (error != 0) { + ERR_error_string_n(error, buffer, bytes); + } + return error != 0; +} +#elif defined (__MACH__) +#include +#include +#include +#include +#include +#include + +extern "C" SecIdentityRef SecIdentityCreate(CFAllocatorRef allocator, SecCertificateRef certificate, SecKeyRef privateKey); + +class TlsContext_osx : public TlsContext { +public: + ~TlsContext_osx() override; + TlsSession* createSession() override; + bool setCertificate(const char* certificate) override; + bool setPrivateKey(const char* privateKey) override; + bool 
addTrustedCertificate(const char* certificate) override; + + SecKeyRef& getPrivateKey() { return _privateKey; } + SecCertificateRef& getCertificate() { return _certificate; } + CFArrayRef getTrustedCertificates() { return _trustedCertificates; } + +private: + SecKeyRef _privateKey = 0; + SecCertificateRef _certificate = 0; + CFMutableArrayRef _trustedCertificates = 0; +}; + +class TlsSession_osx : public TlsSession { +public: + TlsSession_osx(TlsContext_osx* context); + ~TlsSession_osx(); + + void startConnect() override; + void startAccept() override; + void shutdown() override; + HandshakeResult handshake() override; + + int readPlain(char* buffer, size_t bytes) override; + int writePlain(const char* buffer, size_t bytes) override; + + int readEncrypted(char* buffer, size_t bytes) override; + int writeEncrypted(const char* buffer, size_t bytes) override; + + void setHostname(const char* hostname) override { _hostname = hostname; } + virtual int getPeerCertificate(char* buffer, size_t bytes) override; + +private: + static OSStatus writeCallback(SSLConnectionRef connection, const void* data, size_t* dataLength); + static OSStatus readCallback(SSLConnectionRef connection, void* data, size_t* dataLength); + + TlsContext_osx* _context = 0; + SSLContextRef _session = 0; + std::vector _inBuffer; + std::vector _outBuffer; + std::string _hostname; + bool _shutdown = false; +}; + +TlsContext_osx::~TlsContext_osx() { + if (_privateKey) { + CFRelease(_privateKey); + _privateKey = 0; + } + if (_certificate) { + CFRelease(_certificate); + _certificate = 0; + } + if (_trustedCertificates) { + CFRelease(_trustedCertificates); + _trustedCertificates = 0; + } +} + +TlsSession* TlsContext_osx::createSession() { + return new TlsSession_osx(this); +} + +bool TlsContext_osx::setCertificate(const char* certificate) { + if (_certificate) { + CFRelease(_certificate); + _certificate = 0; + } + CFDataRef data = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, reinterpret_cast(certificate), std::strlen(certificate), kCFAllocatorDefault); + CFArrayRef items = 0; + SecExternalFormat format = kSecFormatPEMSequence; + SecExternalItemType itemType = kSecItemTypeCertificate; + OSStatus status = SecItemImport(data, 0, &format, &itemType, 0, 0, 0, &items); + if (status == noErr && CFArrayGetCount(items) > 0) { + _certificate = (SecCertificateRef)CFArrayGetValueAtIndex(items, 0); + } + return _certificate != 0; +} + +bool TlsContext_osx::setPrivateKey(const char* privateKey) { + if (_privateKey) { + CFRelease(_privateKey); + _privateKey = 0; + } + + CFDataRef data = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, reinterpret_cast(privateKey), std::strlen(privateKey), kCFAllocatorDefault); + CFArrayRef items = 0; + SecExternalFormat format = kSecFormatPEMSequence; + SecExternalItemType itemType = kSecItemTypePrivateKey; + OSStatus status = SecItemImport(data, 0, &format, &itemType, 0, 0, 0, &items); + if (status == noErr && CFArrayGetCount(items) > 0) { + _privateKey = (SecKeyRef)CFArrayGetValueAtIndex(items, 0); + } + return _privateKey != 0; +} + +bool TlsContext_osx::addTrustedCertificate(const char* certificate) { + if (!_trustedCertificates) { + _trustedCertificates = CFArrayCreateMutable(kCFAllocatorDefault, 0, &kCFTypeArrayCallBacks); + } + SecCertificateRef certificateItem = 0; + CFDataRef data = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, reinterpret_cast(certificate), std::strlen(certificate), kCFAllocatorDefault); + CFArrayRef items = 0; + SecExternalFormat format = kSecFormatPEMSequence; + 
SecExternalItemType itemType = kSecItemTypeCertificate; + OSStatus status = SecItemImport(data, 0, &format, &itemType, 0, 0, 0, &items); + if (status == noErr && CFArrayGetCount(items) > 0) { + certificateItem = (SecCertificateRef)CFArrayGetValueAtIndex(items, 0); + } + if (certificateItem) { + CFArrayAppendValue(_trustedCertificates, certificateItem); + } + return certificateItem != 0; +} + +TlsSession_osx::TlsSession_osx(TlsContext_osx* context) { + _context = context; +} + +TlsSession_osx::~TlsSession_osx() { + if (_session) { + CFRelease(_session); + _session = 0; + } +} + +void TlsSession_osx::startAccept() { + _session = SSLCreateContext(0, kSSLServerSide, kSSLStreamType); + if (_context->getCertificate() && _context->getPrivateKey()) { + SecIdentityRef identity = SecIdentityCreate(kCFAllocatorDefault, _context->getCertificate(), _context->getPrivateKey()); + CFArrayRef array = CFArrayCreate(kCFAllocatorDefault, (const void**)&identity, 1, &kCFTypeArrayCallBacks); + SSLSetCertificate(_session, array); + } + SSLSetIOFuncs(_session, readCallback, writeCallback); + SSLSetConnection(_session, this); + handshake(); +} + +void TlsSession_osx::startConnect() { + _session = SSLCreateContext(0, kSSLClientSide, kSSLStreamType); + if (_context->getTrustedCertificates()) { + // XXX: SSLSetTrustedRoots(_session, _context->getTrustedCertificates(), false); + } + SSLSetIOFuncs(_session, readCallback, writeCallback); + SSLSetConnection(_session, this); + SSLSetPeerDomainName(_session, _hostname.c_str(), _hostname.size()); + handshake(); +} + +void TlsSession_osx::shutdown() { + if (!_outBuffer.size()) { + SSLClose(_session); + _shutdown = false; + } else { + _shutdown = true; + } +} + +TlsSession::HandshakeResult TlsSession_osx::handshake() { + TlsSession::HandshakeResult result = TlsSession::kFailed; + OSStatus status = SSLHandshake(_session); + switch (status) { + case noErr: + result = TlsSession::kDone; + break; + case errSSLWouldBlock: + result = TlsSession::kMore; + break; + default: + result = TlsSession::kFailed; + break; + } + return result; +} + +int TlsSession_osx::readPlain(char* buffer, size_t bytes) { + int result = 0; + size_t processed = bytes; + OSStatus status = SSLRead(_session, buffer, bytes, &processed); + if (status == noErr) { + result = processed; + } else if (status == errSSLWouldBlock) { + result = processed; + } else if (status == errSSLClosedGraceful) { + result = kReadZero; + } else { + result = kReadFailed; + } + return result; +} + +int TlsSession_osx::writePlain(const char* buffer, size_t bytes) { + int result = 0; + size_t processed; + OSStatus status = SSLWrite(_session, buffer, bytes, &processed); + if (status == noErr) { + result = processed; + } else { + result = -1; + } + return result; +} + +OSStatus TlsSession_osx::writeCallback(SSLConnectionRef connection, const void* data, size_t* dataLength) { + TlsSession_osx* tls = reinterpret_cast(const_cast(connection)); + tls->_outBuffer.insert(tls->_outBuffer.end(), reinterpret_cast(data), reinterpret_cast(data) + *dataLength); + if (tls->_shutdown && !tls->_outBuffer.size()) { + SSLClose(tls->_session); + tls->_shutdown = false; + } + return noErr; +} + +OSStatus TlsSession_osx::readCallback(SSLConnectionRef connection, void* data, size_t* dataLength) { + TlsSession_osx* tls = reinterpret_cast(const_cast(connection)); + OSStatus result = noErr; + size_t bytes = std::min(tls->_inBuffer.size(), *dataLength); + if (bytes > 0) { + std::memcpy(data, tls->_inBuffer.data(), bytes); + 
tls->_inBuffer.erase(tls->_inBuffer.begin(), tls->_inBuffer.begin() + bytes); + } + if (bytes < *dataLength) { + result = errSSLWouldBlock; + } + *dataLength = bytes; + return result; +} + +int TlsSession_osx::readEncrypted(char* buffer, size_t bytes) { + size_t size = std::min(bytes, _outBuffer.size()); + if (size > 0) { + std::memcpy(buffer, _outBuffer.data(), size); + _outBuffer.erase(_outBuffer.begin(), _outBuffer.begin() + size); + } + return size; +} + +int TlsSession_osx::writeEncrypted(const char* buffer, size_t bytes) { + _inBuffer.insert(_inBuffer.end(), buffer, buffer + bytes); + return bytes; +} + +int TlsSession_osx::getPeerCertificate(char* buffer, size_t size) { + int result = -1; + SecTrustRef trust = 0; + if (SSLCopyPeerTrust(_session, &trust) == noErr) { + if (SecTrustGetCertificateCount(trust) > 0) { + SecCertificateRef certificate = SecTrustGetCertificateAtIndex(trust, 0); + CFDataRef data = 0; + if (SecItemExport(certificate, kSecFormatX509Cert, kSecItemPemArmour, nil, &data) == noErr) { + size_t actualSize = CFDataGetLength(data); + if (actualSize <= size) { + CFDataGetBytes(data, CFRangeMake(0, actualSize), reinterpret_cast(buffer)); + result = actualSize; + } + } + } + CFRelease(trust); + } + return result; +} + +TlsContext* TlsContext::create() { + return new TlsContext_osx(); +} +#elif defined (_WIN32) +#include +#include +#include +#include +#include + +#define SECURITY_WIN32 +#define NOMINMAX +#include +#include +#include +#undef SECURITY_WIN32 +#undef NOMINMAX + +PSecurityFunctionTable getSecurityLibrary(); + +class TlsContext_sspi : public TlsContext { +public: + TlsContext_sspi(); + ~TlsContext_sspi(); + + TlsSession* createSession() override; + bool setCertificate(const char* certificate) override; + bool setPrivateKey(const char* privateKey) override; + bool addTrustedCertificate(const char* certificate) { return false; } + + PCCERT_CONTEXT& getCertificate() { return _certificate; } + +private: + static const char* kContainerName; + void importKeyAndCertificate(); + + HCRYPTPROV _provider = 0; + HCERTSTORE _store = INVALID_HANDLE_VALUE; + PCCERT_CONTEXT _certificate = 0; + bool _dirty = true; +}; + +const char* TlsContext_sspi::kContainerName = "_tmp0"; + +class TlsSession_sspi : public TlsSession { +public: + TlsSession_sspi(TlsContext_sspi* context); + ~TlsSession_sspi(); + + void startConnect() override; + void startAccept() override; + int getPeerCertificate(char* buffer, size_t size) override; + void shutdown() override; + HandshakeResult handshake() override; + + int readPlain(char* buffer, size_t bytes) override; + int writePlain(const char* buffer, size_t bytes) override; + + int readEncrypted(char* buffer, size_t bytes) override; + int writeEncrypted(const char* buffer, size_t bytes) override; + + void setHostname(const char* hostname) { _hostname = hostname; } + + static void setError(HRESULT error); + bool getError(char* buffer, size_t bytes) override; + +private: + HandshakeResult handshakeInternal(bool initial); + + TlsContext_sspi* _context; + CredHandle _credentialsHandle; + CtxtHandle _securityContext; + SecPkgContext_StreamSizes _sizes; + enum { kUndetermined, kConnect, kAccept } _direction = kUndetermined; + bool _initial = false; + static HRESULT _lastError; + + std::vector _inBuffer; + std::vector _outBuffer; + std::vector _decryptedBuffer; + std::string _hostname; +}; + +HRESULT TlsSession_sspi::_lastError = S_OK; + +PSecurityFunctionTable getSecurityLibrary() { + PSecurityFunctionTable (*table)(); + static PSecurityFunctionTable 
security; + static bool loaded; + if (!loaded) { + HMODULE module = LoadLibrary("security.dll"); + table = (PSecurityFunctionTable(*)())GetProcAddress(module, "InitSecurityInterfaceA"); + assert(table && "failed to load security.dll"); + security = table(); + assert(security && "No function table in security.dll"); + loaded = true; + } + return security; +} + +TlsContext_sspi::TlsContext_sspi() { + if (!CryptAcquireContext(&_provider, kContainerName, MS_DEF_RSA_SCHANNEL_PROV, PROV_RSA_SCHANNEL, CRYPT_NEWKEYSET)) { + if (GetLastError() != NTE_EXISTS || !CryptAcquireContext(&_provider, kContainerName, MS_DEF_RSA_SCHANNEL_PROV, PROV_RSA_SCHANNEL, 0)) { + TlsSession_sspi::setError(GetLastError()); + } + } + + _store = CertOpenStore(CERT_STORE_PROV_SYSTEM, + X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, + _provider, + CERT_SYSTEM_STORE_LOCAL_MACHINE | CERT_STORE_NO_CRYPT_RELEASE_FLAG | CERT_STORE_OPEN_EXISTING_FLAG, + L"MY"); +} + +TlsContext_sspi::~TlsContext_sspi() { + if (_store) { + CertCloseStore(_store, 0); + _store = INVALID_HANDLE_VALUE; + } + if (_provider) { + CryptReleaseContext(_provider, 0); + _provider = 0; + } + if (_certificate) { + CertFreeCertificateContext(_certificate); + _certificate = 0; + } +} + +bool TlsContext_sspi::setCertificate(const char* certificate) { + if (_certificate) { + CertFreeCertificateContext(_certificate); + _certificate = 0; + } + + std::vector certificateBuffer; + DWORD size = 0; + if (CryptStringToBinary(certificate, 0, CRYPT_STRING_BASE64HEADER, 0, &size, 0, 0)) { + certificateBuffer.resize(size); + if (!CryptStringToBinary(certificate, 0, CRYPT_STRING_BASE64HEADER, certificateBuffer.data(), &size, 0, 0)) { + certificateBuffer.resize(0); + } + } + + if (certificateBuffer.size()) { + _certificate = CertCreateCertificateContext(X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, certificateBuffer.data(), certificateBuffer.size()); + } + + if (!CertAddCertificateContextToStore(_store, _certificate, CERT_STORE_ADD_REPLACE_EXISTING, 0)) { + TlsSession_sspi::setError(GetLastError()); + } + + _dirty = true; + + return _certificate != 0; +} + +bool TlsContext_sspi::setPrivateKey(const char* privateKey) { + bool result = false; + std::vector keyBuffer; + std::vector keyBlob; + + DWORD size = 0; + if (CryptStringToBinary(privateKey, 0, CRYPT_STRING_BASE64HEADER, 0, &size, 0, 0)) { + keyBuffer.resize(size); + if (!CryptStringToBinary(privateKey, 0, CRYPT_STRING_BASE64HEADER, keyBuffer.data(), &size, 0, 0)) { + TlsSession_sspi::setError(GetLastError()); + keyBuffer.resize(0); + } + } + + size = 0; + if (CryptDecodeObjectEx(X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, PKCS_RSA_PRIVATE_KEY, keyBuffer.data(), keyBuffer.size(), 0, 0, 0, &size)) { + keyBlob.resize(size); + if (!CryptDecodeObjectEx(X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, PKCS_RSA_PRIVATE_KEY, keyBuffer.data(), keyBuffer.size(), 0, 0, keyBlob.data(), &size)) { + TlsSession_sspi::setError(GetLastError()); + keyBlob.resize(0); + } + } + + HCRYPTKEY cryptKey = 0; + if (CryptImportKey(_provider, keyBlob.data(), keyBlob.size(), 0, 0, &cryptKey)) { + CryptDestroyKey(cryptKey); + cryptKey = 0; + result = true; + } else { + TlsSession_sspi::setError(GetLastError()); + } + + _dirty = true; + + return result; +} + +void TlsContext_sspi::importKeyAndCertificate() { + if (_certificate) { + WCHAR wname[32]; + mbstowcs(wname, kContainerName, sizeof(kContainerName) + 1); + + CRYPT_KEY_PROV_INFO info; + ZeroMemory(&info, sizeof(info)); + info.pwszContainerName = wname; + info.pwszProvName = MS_DEF_RSA_SCHANNEL_PROV_W; + info.dwProvType 
= PROV_RSA_SCHANNEL; + info.dwKeySpec = AT_KEYEXCHANGE; + + if (!CertSetCertificateContextProperty(_certificate, CERT_KEY_PROV_INFO_PROP_ID, 0, reinterpret_cast(&info))) { + TlsSession_sspi::setError(GetLastError()); + } + } + _dirty = false; +} + +TlsSession* TlsContext_sspi::createSession() { + if (_dirty) { + importKeyAndCertificate(); + } + return new TlsSession_sspi(this); +} + +TlsSession_sspi::TlsSession_sspi(TlsContext_sspi* context) { + _context = context; + ZeroMemory(&_credentialsHandle, sizeof(_credentialsHandle)); + ZeroMemory(&_securityContext, sizeof(_securityContext)); + ZeroMemory(&_sizes, sizeof(_sizes)); +} + +TlsSession_sspi::~TlsSession_sspi() { + getSecurityLibrary()->FreeCredentialsHandle(&_credentialsHandle); + getSecurityLibrary()->DeleteSecurityContext(&_securityContext); +} + +void TlsSession_sspi::startAccept() { + _direction = kAccept; + _initial = true; + SCHANNEL_CRED credentials; + ZeroMemory(&credentials, sizeof(credentials)); + credentials.dwVersion = SCHANNEL_CRED_VERSION; + credentials.cCreds = 1; + credentials.paCred = &_context->getCertificate(); + _lastError = getSecurityLibrary()->AcquireCredentialsHandleA(0, UNISP_NAME_A, SECPKG_CRED_INBOUND, 0, &credentials, 0, 0, &_credentialsHandle, 0); + if (_lastError == S_OK) { + handshakeInternal(true); + } +} + +void TlsSession_sspi::startConnect() { + _direction = kConnect; + _initial = true; + SCHANNEL_CRED credentials; + ZeroMemory(&credentials, sizeof(credentials)); + credentials.dwVersion = SCHANNEL_CRED_VERSION; + credentials.dwFlags = SCH_CRED_NO_DEFAULT_CREDS; + _lastError = getSecurityLibrary()->AcquireCredentialsHandleA(0, UNISP_NAME_A, SECPKG_CRED_OUTBOUND, 0, &credentials, 0, 0, &_credentialsHandle, 0); + if (_lastError == S_OK) { + handshakeInternal(true); + } +} + +int TlsSession_sspi::getPeerCertificate(char* buffer, size_t size) { + int result = -1; + PCCERT_CONTEXT certificate; + HRESULT status = getSecurityLibrary()->QueryContextAttributesA(&_securityContext, SECPKG_ATTR_REMOTE_CERT_CONTEXT, &certificate); + if (FAILED(status)) { + _lastError = status; + } else { + DWORD bufferSize = size; + if (CryptBinaryToString(certificate->pbCertEncoded, certificate->cbCertEncoded, CRYPT_STRING_BASE64HEADER, buffer, &bufferSize)) { + return bufferSize; + } + } + return result; +} + +void TlsSession_sspi::shutdown() { + DWORD type = SCHANNEL_SHUTDOWN; + SecBufferDesc bufferDesc; + SecBuffer buffers[1]; + buffers[0].pvBuffer = &type; + buffers[0].BufferType = SECBUFFER_TOKEN; + buffers[0].cbBuffer = sizeof(type); + bufferDesc.cBuffers = 1; + bufferDesc.pBuffers = buffers; + bufferDesc.ulVersion = SECBUFFER_TOKEN; + + SECURITY_STATUS status = getSecurityLibrary()->ApplyControlToken(&_securityContext, &bufferDesc); + if (!FAILED(status)) { + buffers[0].pvBuffer = 0; + buffers[0].BufferType = SECBUFFER_TOKEN; + buffers[0].cbBuffer = 0; + bufferDesc.cBuffers = 1; + bufferDesc.pBuffers = buffers; + bufferDesc.ulVersion = SECBUFFER_VERSION; + + DWORD outFlags = 0; + + status = getSecurityLibrary()->InitializeSecurityContextA( + &_credentialsHandle, + &_securityContext, + 0, + ISC_REQ_SEQUENCE_DETECT | ISC_REQ_REPLAY_DETECT | ISC_REQ_CONFIDENTIALITY | ISC_RET_EXTENDED_ERROR | ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_STREAM, + 0, + 0, + 0, + 0, + &_securityContext, + &bufferDesc, + &outFlags, + 0); + + if (!FAILED(status) && buffers[0].pvBuffer && buffers[0].cbBuffer) { + const char* data = reinterpret_cast(buffers[0].pvBuffer); + _outBuffer.insert(_outBuffer.end(), data, data + buffers[0].cbBuffer); + } + } +} + 
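// TlsSession never touches a socket directly: the owner of the session is
// expected to pump bytes between the session and its transport.
// writeEncrypted() feeds ciphertext received from the network into the
// session, readEncrypted() drains ciphertext that must go back onto the wire,
// and readPlain()/writePlain() carry application data once handshake()
// reports kDone.  An illustrative caller loop (the socket type and the onData
// callback here are hypothetical, not part of this patch):
//
//   void pump(TlsSession* tls, HypotheticalSocket& net) {
//       char raw[8192];
//       int received = net.receive(raw, sizeof(raw));
//       if (received > 0) {
//           tls->writeEncrypted(raw, received);        // ciphertext in
//       }
//       if (tls->handshake() == TlsSession::kFailed) {
//           return;
//       }
//       char plain[8192];
//       int decrypted = tls->readPlain(plain, sizeof(plain));
//       if (decrypted > 0) {
//           onData(plain, decrypted);                  // plaintext out; kReadZero signals a clean close
//       }
//       char out[8192];
//       int pending;
//       while ((pending = tls->readEncrypted(out, sizeof(out))) > 0) {
//           net.send(out, pending);                    // ciphertext back onto the wire
//       }
//   }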
+TlsSession::HandshakeResult TlsSession_sspi::handshake() {
+	return handshakeInternal(_initial);
+}
+
+TlsSession::HandshakeResult TlsSession_sspi::handshakeInternal(bool initial) {
+	SecBufferDesc outBuffer;
+	SecBuffer outBuffers[1];
+	SecBufferDesc inBuffer;
+	SecBuffer inBuffers[2];
+	DWORD outFlags = 0;
+	outBuffers[0].pvBuffer = 0;
+	outBuffers[0].BufferType = SECBUFFER_TOKEN;
+	outBuffers[0].cbBuffer = 0;
+	outBuffer.cBuffers = 1;
+	outBuffer.pBuffers = outBuffers;
+	outBuffer.ulVersion = SECBUFFER_VERSION;
+	std::vector<char> buffer(_inBuffer);
+	inBuffers[0].pvBuffer = buffer.data();
+	inBuffers[0].cbBuffer = buffer.size();
+	inBuffers[0].BufferType = SECBUFFER_TOKEN;
+	inBuffers[1].pvBuffer = 0;
+	inBuffers[1].cbBuffer = 0;
+	inBuffers[1].BufferType = SECBUFFER_EMPTY;
+	inBuffer.cBuffers = 2;
+	inBuffer.pBuffers = inBuffers;
+	inBuffer.ulVersion = SECBUFFER_VERSION;
+
+	SECURITY_STATUS status = SEC_E_OK;
+
+	if (_direction == kConnect) {
+		status = getSecurityLibrary()->InitializeSecurityContextA(
+			&_credentialsHandle,
+			initial ? 0 : &_securityContext,
+			_hostname.size() ? const_cast<char*>(_hostname.c_str()) : 0,
+			ISC_REQ_SEQUENCE_DETECT | ISC_REQ_REPLAY_DETECT | ISC_REQ_CONFIDENTIALITY | ISC_RET_EXTENDED_ERROR | ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_STREAM,
+			0,
+			0,
+			&inBuffer,
+			0,
+			&_securityContext,
+			&outBuffer,
+			&outFlags,
+			0);
+	} else if (_direction == kAccept) {
+		status = getSecurityLibrary()->AcceptSecurityContext(
+			&_credentialsHandle,
+			initial ? 0 : &_securityContext,
+			&inBuffer,
+			ASC_REQ_SEQUENCE_DETECT | ASC_REQ_REPLAY_DETECT | ASC_REQ_CONFIDENTIALITY | ASC_REQ_EXTENDED_ERROR | ASC_REQ_ALLOCATE_MEMORY | ASC_REQ_STREAM,
+			0,
+			&_securityContext,
+			&outBuffer,
+			&outFlags,
+			0);
+	}
+
+	if (!FAILED(status)) {
+		_initial = false;
+	}
+
+	TlsSession::HandshakeResult result = TlsSession::kFailed;
+
+	size_t extra = 0;
+	for (int i = 0; i < inBuffer.cBuffers; ++i) {
+		if (inBuffers[i].BufferType == SECBUFFER_EXTRA && inBuffers[i].cbBuffer) {
+			extra += inBuffers[i].cbBuffer;
+		}
+	}
+	size_t missing = 0;
+	for (int i = 0; i < inBuffer.cBuffers; ++i) {
+		if (inBuffers[i].BufferType == SECBUFFER_MISSING && inBuffers[i].cbBuffer) {
+			missing += inBuffers[i].cbBuffer;
+		}
+	}
+
+	if (outBuffers[0].cbBuffer && outBuffers[0].pvBuffer) {
+		const char* data = reinterpret_cast<const char*>(outBuffers[0].pvBuffer);
+		_outBuffer.insert(_outBuffer.end(), data, data + outBuffers[0].cbBuffer);
+		getSecurityLibrary()->FreeContextBuffer(outBuffers[0].pvBuffer);
+	}
+
+	if (status == SEC_E_OK) {
+		result = TlsSession::kDone;
+	} else if (status == SEC_E_INCOMPLETE_MESSAGE
+		|| status == SEC_I_CONTINUE_NEEDED) {
+		result = TlsSession::kMore;
+	} else if (FAILED(status)) {
+		result = TlsSession::kFailed;
+	}
+
+	_inBuffer.erase(_inBuffer.begin(), _inBuffer.end() - extra);
+
+	if (result == TlsSession::kDone) {
+		status = getSecurityLibrary()->QueryContextAttributesA(&_securityContext, SECPKG_ATTR_STREAM_SIZES, &_sizes);
+		if (FAILED(status)) {
+			result = TlsSession::kFailed;
+		}
+	}
+
+	return result;
+}
+
+int TlsSession_sspi::readPlain(char* buffer, size_t bytes) {
+	int result = TlsSession::kReadFailed;
+	if (bytes <= _decryptedBuffer.size()) {
+		std::memcpy(buffer, _decryptedBuffer.data(), bytes);
+		_decryptedBuffer.erase(_decryptedBuffer.begin(), _decryptedBuffer.begin() + bytes);
+		result = bytes;
+	} else if (_inBuffer.size()) {
+		SecBufferDesc bufferDesc;
+		SecBuffer buffers[4];
+		std::vector<char> data(_inBuffer);
+		buffers[0].pvBuffer = data.data();
+		buffers[0].cbBuffer = data.size();
+
buffers[0].BufferType = SECBUFFER_DATA; + buffers[1].BufferType = SECBUFFER_EMPTY; + buffers[2].BufferType = SECBUFFER_EMPTY; + buffers[3].BufferType = SECBUFFER_EMPTY; + bufferDesc.ulVersion = SECBUFFER_VERSION; + bufferDesc.cBuffers = 4; + bufferDesc.pBuffers = buffers; + SECURITY_STATUS status = getSecurityLibrary()->DecryptMessage(&_securityContext, &bufferDesc, 0, 0); + + if (status == SEC_I_CONTEXT_EXPIRED) { + _inBuffer.clear(); + result = TlsSession::kReadZero; + } else if (status == SEC_E_INCOMPLETE_MESSAGE) { + result = 0; + } else if (status == SEC_E_OK) { + result = 0; + size_t extra = 0; + for (int i = 0; i < bufferDesc.cBuffers; ++i) { + if (buffers[i].BufferType == SECBUFFER_DATA) { + const char* decrypted = reinterpret_cast(buffers[i].pvBuffer); + _decryptedBuffer.insert(_decryptedBuffer.end(), decrypted, decrypted + buffers[i].cbBuffer); + } else if (buffers[i].BufferType == SECBUFFER_EXTRA) { + extra += buffers[i].cbBuffer; + } + } + _inBuffer.erase(_inBuffer.begin(), _inBuffer.end() - extra); + + size_t actual = std::min(_decryptedBuffer.size(), bytes); + if (actual > 0) { + std::memcpy(buffer, _decryptedBuffer.data(), actual); + _decryptedBuffer.erase(_decryptedBuffer.begin(), _decryptedBuffer.begin() + actual); + result = actual; + } + } else { + _inBuffer.clear(); + result = TlsSession::kReadFailed; + } + } else { + size_t actual = std::min(_decryptedBuffer.size(), bytes); + if (actual > 0) { + std::memcpy(buffer, _decryptedBuffer.data(), actual); + _decryptedBuffer.erase(_decryptedBuffer.begin(), _decryptedBuffer.begin() + actual); + result = actual; + } else { + result = 0; + } + } + return result; +} + +int TlsSession_sspi::writePlain(const char* buffer, size_t bytes) { + SecBufferDesc bufferDesc; + SecBuffer buffers[4]; + std::vector data(_sizes.cbHeader + _sizes.cbTrailer + bytes); + std::memcpy(data.data() + _sizes.cbHeader, buffer, bytes); + + buffers[0].pvBuffer = data.data(); + buffers[0].cbBuffer = _sizes.cbHeader; + buffers[0].BufferType = SECBUFFER_STREAM_HEADER; + + buffers[1].pvBuffer = data.data() + _sizes.cbHeader; + buffers[1].cbBuffer = bytes; + buffers[1].BufferType = SECBUFFER_DATA; + + buffers[2].pvBuffer = data.data() + _sizes.cbHeader + bytes; + buffers[2].cbBuffer = _sizes.cbTrailer; + buffers[2].BufferType = SECBUFFER_STREAM_TRAILER; + + buffers[3].BufferType = SECBUFFER_EMPTY; + + bufferDesc.ulVersion = SECBUFFER_VERSION; + bufferDesc.cBuffers = 4; + bufferDesc.pBuffers = buffers; + SECURITY_STATUS status = getSecurityLibrary()->EncryptMessage(&_securityContext, 0, &bufferDesc, 0); + for (int i = 0; i < bufferDesc.cBuffers; ++i) { + if (buffers[i].BufferType != SECBUFFER_EMPTY && buffers[i].pvBuffer && buffers[i].cbBuffer) { + const char* bufferData = reinterpret_cast(buffers[i].pvBuffer); + _outBuffer.insert(_outBuffer.end(), bufferData, bufferData + buffers[i].cbBuffer); + } + } + return 0; +} + +int TlsSession_sspi::readEncrypted(char* buffer, size_t bytes) { + size_t size = std::min(bytes, _outBuffer.size()); + if (size > 0) { + std::memcpy(buffer, _outBuffer.data(), size); + _outBuffer.erase(_outBuffer.begin(), _outBuffer.begin() + size); + } + return size; +} + +int TlsSession_sspi::writeEncrypted(const char* buffer, size_t bytes) { + _inBuffer.insert(_inBuffer.end(), buffer, buffer + bytes); + return bytes; +} + +void TlsSession_sspi::setError(HRESULT error) { + _lastError = error; +} + +bool TlsSession_sspi::getError(char* buffer, size_t bytes) { + bool result = false; + if (_lastError != S_OK) { + DWORD length = 
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, 0, _lastError, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)buffer, bytes, 0); + _lastError = S_OK; + if (length > 0) { + result = true; + } + } + return result; +} + +TlsContext* TlsContext::create() { + return new TlsContext_sspi(); +} +#else +TlsContext* TlsContext::create() { + return 0; +} +#endif diff --git a/src/Tls.h b/src/Tls.h new file mode 100644 index 00000000..b78fd95f --- /dev/null +++ b/src/Tls.h @@ -0,0 +1,50 @@ +#ifndef INCLUDED_Tls +#define INCLUDED_Tls + +#include + +class TlsSession; + +class TlsContext { +public: + static TlsContext* create(); + virtual ~TlsContext() {} + + virtual TlsSession* createSession() { return 0; } + virtual bool setCertificate(const char* certificate) { return false; } + virtual bool setPrivateKey(const char* privateKey) { return false; } + virtual bool addTrustedCertificate(const char* certificate) { return false; } +}; + +class TlsSession { +public: + virtual ~TlsSession() {} + + virtual void setHostname(const char* hostname) {} + virtual void startAccept() = 0; + virtual void startConnect() = 0; + virtual void shutdown() = 0; + + virtual int getPeerCertificate(char* buffer, size_t bytes) { return -1; } + + enum HandshakeResult { + kDone, + kMore, + kFailed, + }; + virtual HandshakeResult handshake() = 0; + + enum ReadResult { + kReadZero = -1, + kReadFailed = -2, + }; + virtual int readPlain(char* buffer, size_t bytes) = 0; + virtual int writePlain(const char* buffer, size_t bytes) = 0; + + virtual int readEncrypted(char* buffer, size_t bytes) = 0; + virtual int writeEncrypted(const char* buffer, size_t bytes) = 0; + + virtual bool getError(char* buffer, size_t bytes) { return false; } +}; + +#endif diff --git a/src/TlsContextWrapper.cpp b/src/TlsContextWrapper.cpp new file mode 100644 index 00000000..162310bb --- /dev/null +++ b/src/TlsContextWrapper.cpp @@ -0,0 +1,115 @@ +#include "TlsContextWrapper.h" + +#include "Task.h" +#include "Tls.h" + +#include + +int TlsContextWrapper::_count = 0; + +void TlsContextWrapper::create(const v8::FunctionCallbackInfo& args) { + v8::HandleScope handleScope(args.GetIsolate()); + if (TlsContextWrapper* wrapper = new TlsContextWrapper(Task::get(args.GetIsolate()))) { + v8::Handle result = v8::Local::New(args.GetIsolate(), wrapper->_object); + args.GetReturnValue().Set(result); + wrapper->release(); + } +} + +TlsContextWrapper::TlsContextWrapper(Task* task) { + ++_count; + v8::HandleScope scope(task->getIsolate()); + v8::Handle identifier = v8::External::New(task->getIsolate(), reinterpret_cast(&TlsContextWrapper::create)); + v8::Handle data = v8::External::New(task->getIsolate(), this); + + v8::Local wrapperTemplate = v8::ObjectTemplate::New(task->getIsolate()); + wrapperTemplate->SetInternalFieldCount(2); + wrapperTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "setCertificate"), v8::FunctionTemplate::New(task->getIsolate(), setCertificate, data)); + wrapperTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "setPrivateKey"), v8::FunctionTemplate::New(task->getIsolate(), setPrivateKey, data)); + wrapperTemplate->Set(v8::String::NewFromUtf8(task->getIsolate(), "addTrustedCertificate"), v8::FunctionTemplate::New(task->getIsolate(), addTrustedCertificate, data)); + + v8::Local wrapperObject = wrapperTemplate->NewInstance(); + wrapperObject->SetInternalField(0, identifier); + wrapperObject->SetInternalField(1, data); + _object.Reset(task->getIsolate(), wrapperObject); + + _context = TlsContext::create(); + _task = 
task; +} + +TlsContextWrapper::~TlsContextWrapper() { + close(); + --_count; +} + +void TlsContextWrapper::close() { + if (_context) { + delete _context; + _context = 0; + } +} + +void TlsContextWrapper::onRelease(const v8::WeakCallbackData& data) { + data.GetParameter()->_object.Reset(); + delete data.GetParameter(); +} + +TlsContextWrapper* TlsContextWrapper::get(v8::Handle value) { + TlsContextWrapper* result = 0; + + if (!value.IsEmpty() + && value->IsObject()) + { + v8::Handle object = v8::Handle::Cast(value); + if (object->InternalFieldCount() == 2 + && v8::Handle::Cast(object->GetInternalField(0))->Value() == &TlsContextWrapper::create) + { + result = reinterpret_cast(v8::Handle::Cast(object->GetInternalField(1))->Value()); + } + } + + return result; +} + +TlsContextWrapper* TlsContextWrapper::get(const v8::FunctionCallbackInfo& args) { + return reinterpret_cast(v8::Handle::Cast(args.Data())->Value()); +} + +void TlsContextWrapper::ref() { + if (++_refCount == 1) { + _object.ClearWeak(); + } +} + +void TlsContextWrapper::release() { + assert(_refCount >= 1); + if (--_refCount == 0) { + _object.SetWeak(this, onRelease); + } +} + +void TlsContextWrapper::setCertificate(const v8::FunctionCallbackInfo& args) { + if (TlsContextWrapper* wrapper = TlsContextWrapper::get(args)) { + v8::String::Utf8Value value(args[0]->ToString(args.GetIsolate())); + wrapper->_context->setCertificate(*value); + } +} + +void TlsContextWrapper::setPrivateKey(const v8::FunctionCallbackInfo& args) { + if (TlsContextWrapper* wrapper = TlsContextWrapper::get(args)) { + v8::String::Utf8Value value(args[0]->ToString(args.GetIsolate())); + wrapper->_context->setPrivateKey(*value); + } +} + +void TlsContextWrapper::addTrustedCertificate(const v8::FunctionCallbackInfo& args) { + if (TlsContextWrapper* wrapper = TlsContextWrapper::get(args)) { + v8::String::Utf8Value value(args[0]->ToString(args.GetIsolate())); + wrapper->_context->addTrustedCertificate(*value); + } +} + +int TlsContextWrapper::getCount() +{ + return _count; +} diff --git a/src/TlsContextWrapper.h b/src/TlsContextWrapper.h new file mode 100644 index 00000000..32db80f5 --- /dev/null +++ b/src/TlsContextWrapper.h @@ -0,0 +1,42 @@ +#ifndef INCLUDED_TlsContextWrapper +#define INCLUDED_TlsContextWrapper + +#include + +class Task; +class TlsContext; + +class TlsContextWrapper { +public: + static void create(const v8::FunctionCallbackInfo& args); + void close(); + + static TlsContextWrapper* get(v8::Handle value); + + static void setCertificate(const v8::FunctionCallbackInfo& args); + static void setPrivateKey(const v8::FunctionCallbackInfo& args); + static void addTrustedCertificate(const v8::FunctionCallbackInfo& args); + + static void onRelease(const v8::WeakCallbackData& data); + + TlsContext* getContext() { return _context; } + + static int getCount(); + +private: + TlsContextWrapper(Task* task); + ~TlsContextWrapper(); + + static TlsContextWrapper* get(const v8::FunctionCallbackInfo& args); + + TlsContext* _context = 0; + Task* _task = 0; + v8::Persistent _object; + int _refCount = 1; + static int _count; + + void ref(); + void release(); +}; + +#endif diff --git a/src/main.cpp b/src/main.cpp new file mode 100644 index 00000000..128223d9 --- /dev/null +++ b/src/main.cpp @@ -0,0 +1,75 @@ +#include "Task.h" +#include "TaskStub.h" +#include "TaskTryCatch.h" + +#include +#include +#include +#include +#include + +#if !defined (_WIN32) && !defined (__MACH__) +#include +#include +#include +#endif + +v8::Platform* gPlatform = 0; + + +int main(int argc, char* 
argv[]) { + uv_setup_args(argc, argv); + TaskStub::initialize(); + v8::V8::InitializeICU(); + gPlatform = v8::platform::CreateDefaultPlatform(); + v8::V8::InitializePlatform(gPlatform); + v8::V8::Initialize(); + v8::V8::SetFlagsFromCommandLine(&argc, argv, true); + + bool isChild = false; + const char* coreTask = "core/core.js"; + + for (int i = 1; i < argc; ++i) { + if (!std::strcmp(argv[i], "--child")) { + isChild = true; + } else { + coreTask = argv[i]; + } + } + +#if !defined (_WIN32) + if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) { + perror("signal"); + } +#endif + + if (isChild) { +#if !defined (_WIN32) && !defined (__MACH__) + prctl(PR_SET_PDEATHSIG, SIGHUP); +#endif + Task task; + task.configureFromStdin(); + task.activate(); + task.run(); + } else { +#if !defined (_WIN32) && !defined (__MACH__) + setpgid(0, 0); +#endif + Task task; + task.setTrusted(true); + task.activate(); + + { + v8::Isolate::Scope isolateScope(task.getIsolate()); + v8::HandleScope handleScope(task.getIsolate()); + v8::Context::Scope contextScope(task.getContext()); + TaskTryCatch tryCatch(&task); + task.execute(coreTask); + } + task.run(); + } + + v8::V8::Dispose(); + + return 0; +} diff --git a/tests/01-nop b/tests/01-nop new file mode 100755 index 00000000..f929cc49 --- /dev/null +++ b/tests/01-nop @@ -0,0 +1,7 @@ +#!/bin/bash + +cat > test.js << EOF +print("hi"); +EOF + +$SANDBOXOS test.js diff --git a/tests/02-valgrind b/tests/02-valgrind new file mode 100755 index 00000000..32744c56 --- /dev/null +++ b/tests/02-valgrind @@ -0,0 +1,7 @@ +#!/bin/bash + +cat > test.js << EOF +print("hi"); +EOF + +valgrind --log-file=$LOGDIR/valgrind.log $SANDBOXOS test.js diff --git a/tests/03-child b/tests/03-child new file mode 100755 index 00000000..49472cbd --- /dev/null +++ b/tests/03-child @@ -0,0 +1,19 @@ +#!/bin/bash + +cat > test.js << EOF +var task = new Task(); +task.onExit = function() { + print("child exited"); +}; +task.activate(); +task.execute("child.js").then(function() { + print("child started"); +}); +EOF + +cat > child.js << EOF +print("I am the child process."); +exit(0); +EOF + +$SANDBOXOS test.js diff --git a/tests/04-promise b/tests/04-promise new file mode 100755 index 00000000..33f55df7 --- /dev/null +++ b/tests/04-promise @@ -0,0 +1,27 @@ +#!/bin/bash + +cat > test.js << EOF +var task = new Task(); +task.activate(); +task.execute("child.js").then(function() { + task.getExports().then(function(exports) { + return exports.add(1, 1); + }).then(function(sum) { + if (sum == 2) { + exit(0); + } else { + exit(1); + } + }); +}); +EOF + +cat > child.js << EOF +exports = { + add: function(left, right) { + return left + right; + } +} +EOF + +$SANDBOXOS test.js diff --git a/tests/05-promise-remote-throw b/tests/05-promise-remote-throw new file mode 100755 index 00000000..ce674f94 --- /dev/null +++ b/tests/05-promise-remote-throw @@ -0,0 +1,26 @@ +#!/bin/bash + +cat > test.js << EOF +var task = new Task(); +task.activate(); +task.execute("child.js").then(function() { + task.getExports().then(function(exports) { + return exports.add(1, 1); + }).then(function(sum) { + exit(1); + }).catch(function(error) { + print("Caught: " + error.message); + exit(0); + }); +}); +EOF + +cat > child.js << EOF +exports = { + add: function(left, right) { + throw new Error("fail"); + } +} +EOF + +$SANDBOXOS test.js diff --git a/tests/06-restartTask b/tests/06-restartTask new file mode 100755 index 00000000..9bb344e0 --- /dev/null +++ b/tests/06-restartTask @@ -0,0 +1,80 @@ +#!/bin/bash + +mkdir -p packages +for i in filesystem packager; 
do + ln -s $ROOT/packages/$i packages/$i +done +cp -R $ROOT/packages/system packages/system + +mkdir -p packages/test + +cat > packages/test/test.js << EOF +print("Hello!"); + +File.writeFile("packages/hello/hello.js", "this will fail to run!$^!U#%^#$%#%"); + +var p = imports.system.restartTask("hello"); +print("here is our promise: " + p.toString()); +print(p); + +p.then(function(r) { + print("restart succeeded when it should not have: " + r); + imports.system.finishTest(1); +}).catch(function(e) { + print("restart failed: " + e); + print(e.toString()); + for (var i in e) { + print(i); + print(e[i]); + } + imports.system.finishTest(0); +}); +EOF + +cat > packages/test/package.json << EOF +{ + "name": "test", + "start": "test.js", + "trusted": true, + "imports": ["packager", "system"] +} +EOF + +cat >> packages/system/system.js << EOF +exports.finishTest = function(result) { + exit(result); +} +EOF + +mkdir -p packages/hello + +cat > packages/hello/hello.js << EOF +print("Hi."); +EOF + +cat > packages/hello/package.json << EOF +{ + "name": "hello", + "start": "hello.js" +} +EOF + + +mkdir -p packages/auth + +cat > packages/auth/auth.js << EOF +exports = { + query: function() { return null; }, + getCredentials: function() { return {user: 'test', token: 'token'}; }, + verifyCredentials: function() { return {permissions: []}; }, +}; +EOF + +cat > packages/auth/package.json << EOF +{ + "name": "auth", + "start": "auth.js" +} +EOF + +$SANDBOXOS packages/system/system.js diff --git a/tests/07-promise-remote-reject b/tests/07-promise-remote-reject new file mode 100755 index 00000000..d958fe5a --- /dev/null +++ b/tests/07-promise-remote-reject @@ -0,0 +1,29 @@ +#!/bin/bash + +cat > test.js << EOF +var task = new Task(); +task.activate(); +task.execute("child.js").then(function() { + task.getExports().then(function(exports) { + return exports.add(1, 1); + }).then(function(sum) { + exit(1); + }).catch(function(error) { + print(error); + print("Caught: " + error.message); + exit(0); + }); +}); +EOF + +cat > child.js << EOF +exports = { + add: function(left, right) { + return new Promise(function(resolve, reject) { + reject(new Error("oops")); + }); + } +} +EOF + +$SANDBOXOS test.js diff --git a/tests/08-database b/tests/08-database new file mode 100755 index 00000000..907e19de --- /dev/null +++ b/tests/08-database @@ -0,0 +1,34 @@ +#!/bin/bash + +mkdir testdb + +cat > test.js << EOF +var db = new Database("testdb"); +if (db.get("a")) { + exit(1); +} +db.set("a", 1); +if (db.get("a") != 1) { + exit(1); +} +db.set("b", 2); +db.set("c", 3); + +var expected = ['a', 'b', 'c']; +var have = db.getAll(); +for (var i = 0; i < have.length; i++) { + var item = have[i]; + if (expected.indexOf(item) == -1) { + print("Did not find " + item + " in db."); + exit(2); + } else { + expected.splice(expected.indexOf(item), 1); + } +} +if (expected.length) { + print("Expected but did not find: " + JSON.stringify(expected)); + exit(3); +} +EOF + +$SANDBOXOS test.js diff --git a/tests/09-this b/tests/09-this new file mode 100755 index 00000000..3d992b9f --- /dev/null +++ b/tests/09-this @@ -0,0 +1,9 @@ +#!/bin/bash + +cat > test.js << EOF +var task = new Task(); +task.activate.bind(null).apply(); +exit(0); +EOF + +$SANDBOXOS test.js diff --git a/tools/run-tests b/tools/run-tests new file mode 100755 index 00000000..40474175 --- /dev/null +++ b/tools/run-tests @@ -0,0 +1,36 @@ +#!/bin/bash + +export ROOT=$(cd $(dirname ${BASH_SOURCE[0]})/..; pwd) +TMP=$ROOT/tmp +LOGS=$ROOT/logs +TESTS=$ROOT/tests +export 
SANDBOXOS=$ROOT/sandboxos + +mkdir $TMP 2> /dev/null +mkdir $LOGS 2> /dev/null + +REQUESTED_TESTS=$* +if [[ -z $REQUESTED_TESTS ]]; then + REQUESTED_TESTS=$(ls $TESTS/* | xargs basename -a) +fi + +for NAME in $REQUESTED_TESTS; do + TEST=$TESTS/$NAME + echo -n "$(basename $TEST) " + rm -rf $TMP/* + pushd $TMP > /dev/null + export LOGDIR=$LOGS/$NAME + mkdir $LOGDIR 2> /dev/null + unbuffer $TEST > $LOGDIR/stdout.log 2> $LOGDIR/stderr.log + RESULT=$? + popd > /dev/null + if [[ $RESULT != 0 ]]; then + echo "FAILED (with exit code $RESULT)" + exit $RESULT + else + echo "SUCCESS" + fi +done + +echo +echo "All tests completed successfully." diff --git a/tools/update-deps b/tools/update-deps new file mode 100755 index 00000000..aa451adc --- /dev/null +++ b/tools/update-deps @@ -0,0 +1,118 @@ +#!/usr/bin/python -u + +import os +import shutil +import subprocess +import sys + +if len(sys.argv) == 1: + kWork = os.path.join('deps', sys.platform) +elif len(sys.argv) == 2: + kWork = sys.argv[1] + +if not os.path.isdir(kWork): + os.makedirs(kWork) +os.chdir(kWork) + +kUvRepository = 'https://github.com/libuv/libuv.git' +kUvBranch = 'v1.0.0' +kUvWork = 'uv' + +kV8Repository = 'https://github.com/v8/v8.git' +kV8Branch = 'branch-heads/4.9' +kV8Work = 'v8' + +def updateUv(): + print 'libuv' + print + clean = False + + if os.path.exists(os.path.join(kUvWork, '.update-deps-branch')): + haveBranch = open(os.path.join(kUvWork, '.update-deps-branch'), 'r').read().strip() + if kUvBranch != haveBranch: + print haveBranch, '=>', kUvBranch + clean = True + else: + clean = True + + if clean: + if os.path.isdir(os.path.join(kUvWork)): + print 'Cleaning', kUvWork + shutil.rmtree(os.path.join(kUvWork)) + if not os.path.isdir(kUvWork): + subprocess.check_call(['git', 'clone', '--branch', kUvBranch, kUvRepository, kUvWork]) + open(os.path.join(kUvWork, '.update-deps-branch'), 'w').write(kUvBranch) + if sys.platform in ('darwin', 'win32'): + if not os.path.isdir(os.path.join(kUvWork, 'build', 'gyp')): + subprocess.check_call(['git', 'clone', 'https://chromium.googlesource.com/external/gyp.git', 'build/gyp'], cwd=kUvWork) + + if sys.platform == 'linux2': + subprocess.check_call(['./gyp_uv.py', '-f', 'make'], cwd=kUvWork) + subprocess.check_call(['make', '-j8', '-C', 'out'], cwd=kUvWork) + elif sys.platform == 'darwin': + subprocess.check_call(['./gyp_uv.py', '-f', 'xcode'], cwd=kUvWork) + subprocess.check_call(['xcodebuild', '-ARCHS="x86_64"', '-project', 'uv.xcodeproj', '-configuration', 'Release', '-target', 'All'], cwd=kUvWork) + elif sys.platform == 'win32': + env = os.environ.copy() + env['VCINSTALLDIR'] = '' + env['WINDOWSSDKDIR'] = '' + subprocess.check_call(['cmd', '/C', 'call', 'vcbuild.bat', 'release', 'x64'], cwd=kUvWork, env=env) + +def updateV8(): + print 'v8' + print + clean = False + + if False: + if os.path.exists(os.path.join(kV8Work, '.update-deps-branch')): + haveBranch = open(os.path.join(kV8Work, '.update-deps-branch'), 'r').read().strip() + if kV8Branch != haveBranch: + print haveBranch, '=>', kV8Branch + clean = True + else: + clean = True + + if clean: + if os.path.isdir(kV8Work): + shutil.rmtree(kV8Work) + + # XXX We modify one .py file which assumes invalid things. 
+ if os.path.isfile(os.path.join(kV8Work, 'tools/swarming_client/third_party/requests/packages/urllib3/contrib/pyopenssl.py')): + print 'resetting tools/swarming_client' + subprocess.check_call(['git', 'reset', '--hard'], cwd=os.path.join(kV8Work, 'tools', 'swarming_client')) + + extension = '' + if sys.platform == 'win32': + extension = '.bat' + if not os.path.isdir(kV8Work): + subprocess.check_call(['fetch' + extension, 'v8']) + + win32Env = os.environ.copy() + win32Env['GYP_MSVS_VERSION'] = '2013' + win32Env['DEPOT_TOOLS_WIN_TOOLCHAIN'] = '0' + + open(os.path.join(kV8Work, '.update-deps-branch'), 'w').write(kV8Branch) + subprocess.check_call(['git', 'fetch'], cwd=kV8Work) + subprocess.check_call(['git', 'checkout', kV8Branch], cwd=kV8Work) + + if sys.platform == 'win32': + subprocess.check_call(['gclient' + extension, 'sync'], cwd=kV8Work, env=win32Env) + else: + subprocess.check_call(['gclient' + extension, 'sync'], cwd=kV8Work) + + contents = open(os.path.join(kV8Work, 'tools/swarming_client/third_party/requests/packages/urllib3/contrib/pyopenssl.py'), 'r').readlines() + contents = [line for line in contents if not 'SSLv3' in line] + open(os.path.join(kV8Work, 'tools/swarming_client/third_party/requests/packages/urllib3/contrib/pyopenssl.py'), 'w').write(''.join(contents)) + + if sys.platform == 'linux2': + subprocess.check_call(['make', '-j4', 'native'], cwd=kV8Work) + elif sys.platform == 'darwin': + subprocess.check_call(['build/gyp_v8', '-Dtarget_arch=x64'], cwd=kV8Work) + subprocess.check_call(['xcodebuild', '-project', 'build/all.xcodeproj', '-configuration', 'Release'], cwd=kV8Work) + elif sys.platform == 'win32': + subprocess.check_call(['python', 'build\\gyp_v8', '-Dtarget_arch=x64'], cwd=kV8Work, env=win32Env) + subprocess.check_call(['devenv.com', '/Build', 'Release', 'build\\All.sln'], cwd=kV8Work) + +if __name__ == '__main__': + updateUv() + updateV8()
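As a closing illustration, here is a minimal sketch of how the TlsContext/TlsSession interface declared in src/Tls.h is meant to be driven. It is not part of the patch: recvFromPeer() and sendToPeer() are hypothetical placeholders for the socket I/O that the patch presumably handles in src/Socket.cpp. The point is only the expected call order: create a context, install the PEM certificate and key, start the handshake, and keep shuttling bytes between writeEncrypted()/readEncrypted() and the transport until handshake() reports kDone.

#include "Tls.h"

#include <cstddef>
#include <string>

// Hypothetical transport helpers, standing in for the real socket plumbing.
extern int recvFromPeer(char* buffer, size_t bytes);
extern void sendToPeer(const char* buffer, size_t bytes);

bool acceptTlsConnection(const std::string& certificatePem, const std::string& privateKeyPem) {
	TlsContext* context = TlsContext::create();
	if (!context) {
		// create() returns 0 when no TLS backend is compiled in.
		return false;
	}
	context->setCertificate(certificatePem.c_str());
	context->setPrivateKey(privateKeyPem.c_str());

	TlsSession* session = context->createSession();
	session->startAccept();

	char buffer[8192];
	TlsSession::HandshakeResult result = TlsSession::kMore;
	while (result == TlsSession::kMore) {
		// Send whatever handshake records the session has queued so far.
		int bytes = 0;
		while ((bytes = session->readEncrypted(buffer, sizeof(buffer))) > 0) {
			sendToPeer(buffer, static_cast<size_t>(bytes));
		}
		// Feed the peer's next records in and advance the handshake.
		bytes = recvFromPeer(buffer, sizeof(buffer));
		if (bytes <= 0) {
			break;
		}
		session->writeEncrypted(buffer, static_cast<size_t>(bytes));
		result = session->handshake();
	}

	// Flush any final handshake records queued by the last handshake() call.
	int bytes = 0;
	while ((bytes = session->readEncrypted(buffer, sizeof(buffer))) > 0) {
		sendToPeer(buffer, static_cast<size_t>(bytes));
	}

	bool ok = result == TlsSession::kDone;
	delete session;
	delete context;
	return ok;
}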