Diffstat (limited to 'lib')
-rw-r--r--  lib/Hashing.Portable/src/IdentityUtility/HashingExtensions.cs  |    8
-rw-r--r--  lib/Hashing.Portable/src/IdentityUtility/JsonWebKey.cs  |    2
-rw-r--r--  lib/Hashing.Portable/src/ManagedHash.cs  |    8
-rw-r--r--  lib/Hashing.Portable/src/RandomHash.cs  |    8
-rw-r--r--  lib/NativeHeapApi/LICENSE.txt  |  346
-rw-r--r--  lib/NativeHeapApi/README.md  |   47
-rw-r--r--  lib/NativeHeapApi/src/NativeHeapApi.h  |  124
-rw-r--r--  lib/Net.Http/src/Helpers/CoreBufferHelpers.cs  |   39
-rw-r--r--  lib/Net.Http/src/Helpers/InitDataBuffer.cs  |    2
-rw-r--r--  lib/Plugins.Essentials/src/Accounts/NonceExtensions.cs  |    4
-rw-r--r--  lib/Plugins.Essentials/src/Accounts/PasswordHashing.cs  |   10
-rw-r--r--  lib/Utils/src/Memory/Diagnostics/TrackedHeapWrapper.cs  |    7
-rw-r--r--  lib/Utils/src/Memory/HeapCreation.cs  |   54
-rw-r--r--  lib/Utils/src/Memory/IUnmangedHeap.cs  |    7
-rw-r--r--  lib/Utils/src/Memory/MemoryHandle.cs  |    7
-rw-r--r--  lib/Utils/src/Memory/MemoryUtil.cs  |  205
-rw-r--r--  lib/Utils/src/Memory/MemoryUtilAlloc.cs  |  291
-rw-r--r--  lib/Utils/src/Memory/NativeHeap.cs  |  212
-rw-r--r--  lib/Utils/src/Memory/ProcessHeap.cs  |   16
-rw-r--r--  lib/Utils/src/Memory/RpMallocPrivateHeap.cs  |  284
-rw-r--r--  lib/Utils/src/Memory/UnmanagedHeapBase.cs  |   89
-rw-r--r--  lib/Utils/src/Memory/VnTable.cs  |   11
-rw-r--r--  lib/Utils/src/Memory/VnTempBuffer.cs  |   37
-rw-r--r--  lib/Utils/src/Memory/Win32PrivateHeap.cs  |   52
-rw-r--r--  lib/Utils/src/VnEncoding.cs  |    6
-rw-r--r--  lib/Utils/tests/Memory/MemoryHandleTest.cs  |    8
-rw-r--r--  lib/Utils/tests/Memory/MemoryUtilTests.cs  |   50
-rw-r--r--  lib/Utils/tests/Memory/NativeHeapTests.cs  |   32
-rw-r--r--  lib/Utils/tests/Memory/VnTableTests.cs  |   14
-rw-r--r--  lib/WinRpMalloc/src/WinRpMalloc.vcxproj  |    1
-rw-r--r--  lib/WinRpMalloc/src/dllmain.c  |  164
-rw-r--r--  lib/WinRpMalloc/src/rpmalloc.c  |  191
-rw-r--r--  lib/WinRpMalloc/src/rpmalloc.h  |   26
33 files changed, 1683 insertions, 679 deletions
diff --git a/lib/Hashing.Portable/src/IdentityUtility/HashingExtensions.cs b/lib/Hashing.Portable/src/IdentityUtility/HashingExtensions.cs
index 71a3cd3..278359c 100644
--- a/lib/Hashing.Portable/src/IdentityUtility/HashingExtensions.cs
+++ b/lib/Hashing.Portable/src/IdentityUtility/HashingExtensions.cs
@@ -60,7 +60,7 @@ namespace VNLib.Hashing.IdentityUtility
int encBufSize = encoding.GetByteCount(data);
//Alloc buffer for encoding data
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(encBufSize + hashBufSize);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(encBufSize + hashBufSize);
Span<byte> encBuffer = buffer.Span[0..encBufSize];
Span<byte> hashBuffer = buffer.Span[encBufSize..];
@@ -113,7 +113,7 @@ namespace VNLib.Hashing.IdentityUtility
int base64BufSize = base64Hmac.Length;
//Alloc buffer for encoding and raw data
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(rawDataBufSize + base64BufSize, true);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(rawDataBufSize + base64BufSize, true);
Span<byte> rawDataBuf = buffer.Span[0..rawDataBufSize];
Span<byte> base64Buf = buffer.Span[rawDataBufSize..];
@@ -160,7 +160,7 @@ namespace VNLib.Hashing.IdentityUtility
int hashBufSize = hmac.HashSize / 8;
//Alloc buffer for hash
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(hashBufSize);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(hashBufSize);
//compute hash
if (!hmac.TryComputeHash(raw, buffer, out int hashBytesWritten))
@@ -196,7 +196,7 @@ namespace VNLib.Hashing.IdentityUtility
int buffSize = enc.GetByteCount(data);
//Alloc buffer
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(buffSize, true);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(buffSize, true);
//Encode data
int converted = enc.GetBytes(data, buffer);
diff --git a/lib/Hashing.Portable/src/IdentityUtility/JsonWebKey.cs b/lib/Hashing.Portable/src/IdentityUtility/JsonWebKey.cs
index 8813e97..ef85e5d 100644
--- a/lib/Hashing.Portable/src/IdentityUtility/JsonWebKey.cs
+++ b/lib/Hashing.Portable/src/IdentityUtility/JsonWebKey.cs
@@ -426,7 +426,7 @@ namespace VNLib.Hashing.IdentityUtility
else
{
//bin buffer for temp decoding
- using UnsafeMemoryHandle<byte> binBuffer = MemoryUtil.UnsafeAlloc<byte>(base64.Length + 16, false);
+ using UnsafeMemoryHandle<byte> binBuffer = MemoryUtil.UnsafeAlloc(base64.Length + 16, false);
//base64url decode
ERRNO count = VnEncoding.Base64UrlDecode(base64, binBuffer.Span);
diff --git a/lib/Hashing.Portable/src/ManagedHash.cs b/lib/Hashing.Portable/src/ManagedHash.cs
index dd0a8af..70cc5c0 100644
--- a/lib/Hashing.Portable/src/ManagedHash.cs
+++ b/lib/Hashing.Portable/src/ManagedHash.cs
@@ -105,7 +105,7 @@ namespace VNLib.Hashing
int byteCount = CharEncoding.GetByteCount(data);
//Alloc buffer
- using UnsafeMemoryHandle<byte> binbuf = MemoryUtil.UnsafeAlloc<byte>(byteCount, true);
+ using UnsafeMemoryHandle<byte> binbuf = MemoryUtil.UnsafeAlloc(byteCount, true);
//Encode data
byteCount = CharEncoding.GetBytes(data, binbuf);
@@ -126,7 +126,7 @@ namespace VNLib.Hashing
{
int byteCount = CharEncoding.GetByteCount(data);
//Alloc buffer
- using UnsafeMemoryHandle<byte> binbuf = MemoryUtil.UnsafeAlloc<byte>(byteCount, true);
+ using UnsafeMemoryHandle<byte> binbuf = MemoryUtil.UnsafeAlloc(byteCount, true);
//Encode data
byteCount = CharEncoding.GetBytes(data, binbuf);
//hash the buffer
@@ -258,7 +258,7 @@ namespace VNLib.Hashing
int byteCount = CharEncoding.GetByteCount(data);
//Alloc buffer
- using UnsafeMemoryHandle<byte> binbuf = MemoryUtil.UnsafeAlloc<byte>(byteCount, true);
+ using UnsafeMemoryHandle<byte> binbuf = MemoryUtil.UnsafeAlloc(byteCount, true);
//Encode data
byteCount = CharEncoding.GetBytes(data, binbuf);
@@ -281,7 +281,7 @@ namespace VNLib.Hashing
int byteCount = CharEncoding.GetByteCount(data);
//Alloc buffer
- using UnsafeMemoryHandle<byte> binbuf = MemoryUtil.UnsafeAlloc<byte>(byteCount, true);
+ using UnsafeMemoryHandle<byte> binbuf = MemoryUtil.UnsafeAlloc(byteCount, true);
//Encode data
byteCount = CharEncoding.GetBytes(data, binbuf);
diff --git a/lib/Hashing.Portable/src/RandomHash.cs b/lib/Hashing.Portable/src/RandomHash.cs
index 67518ad..ebc8845 100644
--- a/lib/Hashing.Portable/src/RandomHash.cs
+++ b/lib/Hashing.Portable/src/RandomHash.cs
@@ -51,7 +51,7 @@ namespace VNLib.Hashing
if(size > MAX_STACK_ALLOC)
{
//Get temporary buffer for storing random keys
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(size);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(size);
//Fill with random non-zero bytes
GetRandomBytes(buffer.Span);
@@ -114,7 +114,7 @@ namespace VNLib.Hashing
if (size > MAX_STACK_ALLOC)
{
//Get temp buffer
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(size);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(size);
//Generate non zero bytes
GetRandomBytes(buffer.Span);
@@ -145,7 +145,7 @@ namespace VNLib.Hashing
if (size > MAX_STACK_ALLOC)
{
//Get temp buffer
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(size);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(size);
//Generate non zero bytes
GetRandomBytes(buffer.Span);
@@ -176,7 +176,7 @@ namespace VNLib.Hashing
if (size > MAX_STACK_ALLOC)
{
//Get temp buffer
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(size);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(size);
//Generate non zero bytes
GetRandomBytes(buffer.Span);
diff --git a/lib/NativeHeapApi/LICENSE.txt b/lib/NativeHeapApi/LICENSE.txt
new file mode 100644
index 0000000..2848520
--- /dev/null
+++ b/lib/NativeHeapApi/LICENSE.txt
@@ -0,0 +1,346 @@
+The software in this repository is licensed under the GNU GPL version 2.0 (or any later version).
+
+SPDX-License-Identifier: GPL-2.0-or-later
+
+License-Text:
+
+GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ VNLib.Hashing.Portable is a compact .NET managed cryptographic operation
+ utilities library.
+ Copyright (C) 2022 Vaughn Nugent
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. \ No newline at end of file
diff --git a/lib/NativeHeapApi/README.md b/lib/NativeHeapApi/README.md
new file mode 100644
index 0000000..5341e63
--- /dev/null
+++ b/lib/NativeHeapApi/README.md
@@ -0,0 +1,47 @@
+# NativeHeapApi
+
+Contains the API header files required to build user-defined heap DLLs: the type definitions, structures, and method signatures you must implement (and export) so your DLL matches the VNLib.Utils.NativeHeap implementation. The managed heap API performs only minimal parameter validation, so you should verify whether your implementation needs stricter validation of its own.
+
+## Getting started
+
+You may copy the [NativeHeapApi.h](src/NativeHeapApi.h) header file into your project and begin implementing the heap methods it defines.
+
+You must define a constant called **HEAP_METHOD_EXPORT** that sets the export/calling convention required to load the DLL correctly on your platform. On Windows, this defaults to `__declspec(dllexport)`.
+
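+On non-Windows platforms the header falls back to an empty export definition, so if your toolchain hides symbols by default you may need to supply one yourself. A minimal sketch, assuming a GCC/Clang toolchain (this define is an assumption, not part of the API):
+
+``` c
+/* Assumed GCC/Clang export attribute; define it before including the header */
+#define HEAP_METHOD_EXPORT __attribute__((visibility("default")))
+#include "NativeHeapApi.h"
+```
+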
+When the `heapCreate` method is called, a mutable structure pointer is passed as an argument, and your create method is expected to update it. The VNLib.Utils library implements two types of heaps: a global/shared heap, and "private" or "first-class" heaps exposed by the Memory namespace. Consumers may create a private heap and use it at will.
+
+### UnmanagedHeapFlags structure
+
+`UnmanagedHeapFlags.HeapPointer` - Set this to your heap pointer; it is passed to all subsequent heap methods.
+
+`UnmanagedHeapFlags.CreationFlags` - Managed creation flags that may be read and written. The managed heap implementation observes the result after the `heapCreate` method returns.
+
+`UnmanagedHeapFlags.Flags` - Generic flags passed by the caller directly to the `heapCreate` method; they are not observed or modified by the managed library in any way.
+
+### Example Create
+``` c
+HEAP_METHOD_EXPORT ERRNO heapCreate(UnmanagedHeapFlags* flags)
+{
+ //Check flags
+ if (flags->CreationFlags & HEAP_CREATION_IS_SHARED)
+ {
+ //Shared heap may not require synchronization, so we can clear that flag
+ flags->CreationFlags &= ~(HEAP_CREATION_SERIALZE_ENABLED);
+
+ //Heap structure pointer is required
+ flags->HeapPointer = yourSharedHeapPointer;
+
+ //Success
+ return 1;
+ }
+ else
+ {
+ flags->HeapPointer = yourPrivateHeap;
+ return 1;
+ }
+}
+```
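+
+The header also declares `heapDestroy`, `heapAlloc`, `heapRealloc`, and `heapFree` exports. The following is a minimal sketch backed by the C standard allocator, not a production implementation: it assumes the Windows-style `LPVOID`/`BOOL` typedefs used by the header are available, and it simply ignores the `heap` pointer.
+
+``` c
+#include <stdlib.h>
+#include "NativeHeapApi.h"
+
+HEAP_METHOD_EXPORT LPVOID heapAlloc(LPVOID heap, size_t elements, size_t alignment, BOOL zero)
+{
+    /* calloc zeros and overflow-checks; the malloc path assumes elements * alignment does not overflow */
+    return zero ? calloc(elements, alignment) : malloc(elements * alignment);
+}
+
+HEAP_METHOD_EXPORT LPVOID heapRealloc(LPVOID heap, LPVOID block, size_t elements, size_t alignment, BOOL zero)
+{
+    /* This sketch ignores the zero flag; returning 0 here would signal that realloc is unsupported */
+    return realloc(block, elements * alignment);
+}
+
+HEAP_METHOD_EXPORT ERRNO heapFree(LPVOID heap, LPVOID block)
+{
+    free(block);
+    return (ERRNO)1;
+}
+
+HEAP_METHOD_EXPORT ERRNO heapDestroy(LPVOID heap)
+{
+    /* Nothing to tear down when the process allocator backs the heap */
+    return (ERRNO)1;
+}
+```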
+
+## License
+The software in this repository is licensed under the GNU GPL version 2.0 (or any later version).
+See the LICENSE files for more information.
diff --git a/lib/NativeHeapApi/src/NativeHeapApi.h b/lib/NativeHeapApi/src/NativeHeapApi.h
new file mode 100644
index 0000000..5e83108
--- /dev/null
+++ b/lib/NativeHeapApi/src/NativeHeapApi.h
@@ -0,0 +1,124 @@
+/*
+* Copyright (c) 2023 Vaughn Nugent
+*
+* Library: VNLib
+* Package: NativeHeapApi
+* File: NativeHeapApi.h
+*
+* NativeHeapApi is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published
+* by the Free Software Foundation, either version 2 of the License,
+* or (at your option) any later version.
+*
+* NativeHeapApi is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with NativeHeapApi. If not, see http://www.gnu.org/licenses/.
+*/
+
+#pragma once
+
+#ifndef NATIVE_HEAP_API
+#define NATIVE_HEAP_API
+
+/*
+* Method calling convention for export
+*/
+#ifndef HEAP_METHOD_EXPORT
+ #ifdef WIN32
+ #define HEAP_METHOD_EXPORT __declspec(dllexport)
+ #else
+        #define HEAP_METHOD_EXPORT
+ #endif
+#endif // HEAP_METHOD_EXPORT!
+
+/// <summary>
+/// Internal heap creation flags passed to the creation method by the library loader
+/// </summary>
+typedef enum HeapCreationFlags
+{
+ /// <summary>
+ /// Default/no flags
+ /// </summary>
+ HEAP_CREATION_NO_FLAGS,
+ /// <summary>
+ /// Specifies that all allocations be zeroed before returning to caller
+ /// </summary>
+ HEAP_CREATION_GLOBAL_ZERO = 0x01,
+ /// <summary>
+    /// Specifies that the heap should use internal locking, i.e. it is not inherently
+    /// thread safe and must be synchronized by the heap implementation
+ /// </summary>
+ HEAP_CREATION_SERIALZE_ENABLED = 0x02,
+ /// <summary>
+ /// Specifies that the requested heap will be a shared heap for the process/library
+ /// </summary>
+ HEAP_CREATION_IS_SHARED = 0x04
+} HeapCreationFlags;
+
+/// <summary>
+/// The vnlib ERRNO type, integer/process dependent,
+/// The vnlib ERRNO type, a platform-dependent (pointer-sized) integer,
+/// </summary>
+typedef void* ERRNO;
+
+/// <summary>
+/// A structure for heap initialization
+/// </summary>
+typedef struct UnmanagedHeapFlags
+{
+ LPVOID HeapPointer;
+ HeapCreationFlags CreationFlags;
+ ERRNO Flags;
+} UnmanagedHeapFlags;
+
+/// <summary>
+/// The heap creation method. You must set flags->HeapPointer to your heap
+/// structure pointer
+/// </summary>
+/// <param name="flags">Creation flags passed by the caller to create the heap. This structure will be initialized, and may be modified</param>
+/// <returns>A boolean value that indicates the result of the operation</returns>
+HEAP_METHOD_EXPORT ERRNO heapCreate(UnmanagedHeapFlags* flags);
+
+/// <summary>
+/// Destroys a previously created heap
+/// </summary>
+/// <param name="heap">The pointer to your custom heap structure from heap creation</param>
+HEAP_METHOD_EXPORT ERRNO heapDestroy(LPVOID heap);
+
+/// <summary>
+/// Allocates a block from the desired heap and returns a pointer
+/// to the block. Optionally zeros the block before returning
+/// </summary>
+/// <param name="heap">A pointer to your heap structure</param>
+/// <param name="elements">The number of elements to allocate</param>
+/// <param name="alignment">The alignment (or size) of each element in bytes</param>
+/// <param name="zero">A flag to zero the block before returning the block</param>
+/// <returns>A pointer to the allocated block</returns>
+HEAP_METHOD_EXPORT LPVOID heapAlloc(LPVOID heap, size_t elements, size_t alignment, BOOL zero);
+
+/// <summary>
+/// Reallocates a block on the desired heap and returns a pointer to the new block. If reallocation
+/// is not supported, you should only return 0 and leave the block unmodified. The data in the valid
+/// size of the block MUST remain unmodified.
+/// </summary>
+/// <param name="heap">A pointer to your heap structure</param>
+/// <param name="block">A pointer to the block to reallocate</param>
+/// <param name="elements">The new size of the block, in elements</param>
+/// <param name="alignment">The element size or block alignment</param>
+/// <param name="zero">A flag to zero the block (or the new size) before returning.</param>
+/// <returns>A pointer to the reallocated block, or zero if the operation failed or is not supported</returns>
+HEAP_METHOD_EXPORT LPVOID heapRealloc(LPVOID heap, LPVOID block, size_t elements, size_t alignment, BOOL zero);
+
+/// <summary>
+/// Frees a previously allocated block on the desired heap.
+/// </summary>
+/// <param name="heap">A pointer to your heap structure</param>
+/// <param name="block">A pointer to the block to free</param>
+/// <returns>A value that indicates the result of the operation, nonzero if success, 0 if a failure occurred </returns>
+HEAP_METHOD_EXPORT ERRNO heapFree(LPVOID heap, LPVOID block);
+
+#endif // !NATIVE_HEAP_API \ No newline at end of file
diff --git a/lib/Net.Http/src/Helpers/CoreBufferHelpers.cs b/lib/Net.Http/src/Helpers/CoreBufferHelpers.cs
index 719f4f8..cdfeac7 100644
--- a/lib/Net.Http/src/Helpers/CoreBufferHelpers.cs
+++ b/lib/Net.Http/src/Helpers/CoreBufferHelpers.cs
@@ -73,18 +73,20 @@ namespace VNLib.Net.Http.Core
//Calc buffer size to the nearest page size
size = (int)MemoryUtil.NearestPage(size);
- //If rpmalloc lib is loaded, use it
- if (MemoryUtil.IsRpMallocLoaded)
+ /*
+             * Heap synchronization may be enabled for our private heap, so we may want
+             * to avoid it, favoring performance over private heap segmentation.
+ *
+ * If synchronization is enabled, use the system heap
+ */
+
+ if ((HttpPrivateHeap.CreationFlags & HeapCreation.UseSynchronization) > 0)
{
- return MemoryUtil.Shared.UnsafeAlloc<byte>(size, zero);
- }
- else if (size > MemoryUtil.MAX_UNSAFE_POOL_SIZE)
- {
- return HttpPrivateHeap.UnsafeAlloc<byte>(size, zero);
+ return MemoryUtil.UnsafeAlloc(size, zero);
}
else
{
- return new(HttpBinBufferPool, size, zero);
+ return HttpPrivateHeap.UnsafeAlloc<byte>(size, zero);
}
}
@@ -93,23 +95,26 @@ namespace VNLib.Net.Http.Core
//Calc buffer size to the nearest page size
size = (int)MemoryUtil.NearestPage(size);
- //If rpmalloc lib is loaded, use it
- if (MemoryUtil.IsRpMallocLoaded)
+ /*
+             * Heap synchronization may be enabled for our private heap, so we may want
+             * to avoid it, favoring performance over private heap segmentation.
+ *
+ * If synchronization is enabled, use the system heap
+ */
+
+ if ((HttpPrivateHeap.CreationFlags & HeapCreation.UseSynchronization) > 0)
{
return MemoryUtil.Shared.DirectAlloc<byte>(size, zero);
}
- //Avoid locking in heap unless the buffer is too large to alloc array
- else if (size > MemoryUtil.MAX_UNSAFE_POOL_SIZE)
+        //If the block is larger than a safe array size, avoid LOH pressure
+ else if(size > MemoryUtil.MAX_UNSAFE_POOL_SIZE)
{
return HttpPrivateHeap.DirectAlloc<byte>(size, zero);
}
+ //Use the array pool to get a memory handle
else
{
- //Convert temp buffer to memory owner
-
-#pragma warning disable CA2000 // Dispose objects before losing scope
- return new VnTempBuffer<byte>(HttpBinBufferPool, size, zero).ToMemoryManager();
-#pragma warning restore CA2000 // Dispose objects before losing scope
+ return new VnTempBuffer<byte>(HttpBinBufferPool, size, zero);
}
}
diff --git a/lib/Net.Http/src/Helpers/InitDataBuffer.cs b/lib/Net.Http/src/Helpers/InitDataBuffer.cs
index 8dab633..c191bb0 100644
--- a/lib/Net.Http/src/Helpers/InitDataBuffer.cs
+++ b/lib/Net.Http/src/Helpers/InitDataBuffer.cs
@@ -36,7 +36,7 @@ namespace VNLib.Net.Http.Core
/// A structure that buffers data remaining from an initial transport read. Stored
/// data will be read by copying.
/// </summary>
- internal readonly struct InitDataBuffer
+ internal readonly record struct InitDataBuffer
{
const int POSITION_SEG_SIZE = sizeof(int);
diff --git a/lib/Plugins.Essentials/src/Accounts/NonceExtensions.cs b/lib/Plugins.Essentials/src/Accounts/NonceExtensions.cs
index 5a40d29..0d9ca10 100644
--- a/lib/Plugins.Essentials/src/Accounts/NonceExtensions.cs
+++ b/lib/Plugins.Essentials/src/Accounts/NonceExtensions.cs
@@ -44,7 +44,7 @@ namespace VNLib.Plugins.Essentials.Accounts
public static string ComputeNonce<T>(this T nonce, int size) where T: INonce
{
//Alloc bin buffer
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(size);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(size);
//Compute nonce
nonce.ComputeNonce(buffer.Span);
@@ -63,7 +63,7 @@ namespace VNLib.Plugins.Essentials.Accounts
public static bool VerifyNonce<T>(this T nonce, ReadOnlySpan<char> base32Nonce) where T : INonce
{
//Alloc bin buffer
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(base32Nonce.Length);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(base32Nonce.Length);
//Decode base32 nonce
ERRNO count = VnEncoding.TryFromBase32Chars(base32Nonce, buffer.Span);
diff --git a/lib/Plugins.Essentials/src/Accounts/PasswordHashing.cs b/lib/Plugins.Essentials/src/Accounts/PasswordHashing.cs
index db5b309..32e04e3 100644
--- a/lib/Plugins.Essentials/src/Accounts/PasswordHashing.cs
+++ b/lib/Plugins.Essentials/src/Accounts/PasswordHashing.cs
@@ -96,7 +96,7 @@ namespace VNLib.Plugins.Essentials.Accounts
else
{
//Alloc heap buffer
- using UnsafeMemoryHandle<byte> secretBuffer = MemoryUtil.UnsafeAlloc<byte>(_secret.BufferSize, true);
+ using UnsafeMemoryHandle<byte> secretBuffer = MemoryUtil.UnsafeAlloc(_secret.BufferSize, true);
return VerifyInternal(passHash, password, secretBuffer);
}
@@ -130,7 +130,7 @@ namespace VNLib.Plugins.Essentials.Accounts
public bool Verify(ReadOnlySpan<byte> hash, ReadOnlySpan<byte> salt, ReadOnlySpan<byte> password)
{
//Alloc a buffer with the same size as the hash
- using UnsafeMemoryHandle<byte> hashBuf = MemoryUtil.UnsafeAlloc<byte>(hash.Length, true);
+ using UnsafeMemoryHandle<byte> hashBuf = MemoryUtil.UnsafeAlloc(hash.Length, true);
//Hash the password with the current config
Hash(password, salt, hashBuf.Span);
//Compare the hashed password to the specified hash and return results
@@ -143,7 +143,7 @@ namespace VNLib.Plugins.Essentials.Accounts
public PrivateString Hash(ReadOnlySpan<char> password)
{
//Alloc shared buffer for the salt and secret buffer
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(SaltLen + _secret.BufferSize, true);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(SaltLen + _secret.BufferSize, true);
try
{
//Split buffers
@@ -170,7 +170,7 @@ namespace VNLib.Plugins.Essentials.Accounts
/// <returns>A <see cref="PrivateString"/> of the hashed and encoded password</returns>
public PrivateString Hash(ReadOnlySpan<byte> password)
{
- using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc<byte>(SaltLen + _secret.BufferSize, true);
+ using UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(SaltLen + _secret.BufferSize, true);
try
{
//Split buffers
@@ -203,7 +203,7 @@ namespace VNLib.Plugins.Essentials.Accounts
public void Hash(ReadOnlySpan<byte> password, ReadOnlySpan<byte> salt, Span<byte> hashOutput)
{
//alloc secret buffer
- using UnsafeMemoryHandle<byte> secretBuffer = MemoryUtil.UnsafeAlloc<byte>(_secret.BufferSize, true);
+ using UnsafeMemoryHandle<byte> secretBuffer = MemoryUtil.UnsafeAlloc(_secret.BufferSize, true);
try
{
//Get the secret from the callback
diff --git a/lib/Utils/src/Memory/Diagnostics/TrackedHeapWrapper.cs b/lib/Utils/src/Memory/Diagnostics/TrackedHeapWrapper.cs
index 2069d08..41b08c1 100644
--- a/lib/Utils/src/Memory/Diagnostics/TrackedHeapWrapper.cs
+++ b/lib/Utils/src/Memory/Diagnostics/TrackedHeapWrapper.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2022 Vaughn Nugent
+* Copyright (c) 2023 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Utils
@@ -23,10 +23,8 @@
*/
using System;
-using System.Collections;
using System.Collections.Concurrent;
-
namespace VNLib.Utils.Memory.Diagnostics
{
/// <summary>
@@ -39,6 +37,9 @@ namespace VNLib.Utils.Memory.Diagnostics
private readonly object _statsLock;
private readonly ConcurrentDictionary<IntPtr, ulong> _table;
+ ///<inheritdoc/>
+ public HeapCreation CreationFlags => _heap.CreationFlags;
+
/// <summary>
/// Gets the underlying heap
/// </summary>
diff --git a/lib/Utils/src/Memory/HeapCreation.cs b/lib/Utils/src/Memory/HeapCreation.cs
new file mode 100644
index 0000000..2d30c29
--- /dev/null
+++ b/lib/Utils/src/Memory/HeapCreation.cs
@@ -0,0 +1,54 @@
+/*
+* Copyright (c) 2023 Vaughn Nugent
+*
+* Library: VNLib
+* Package: VNLib.Utils
+* File: HeapCreation.cs
+*
+* HeapCreation.cs is part of VNLib.Utils which is part of the larger
+* VNLib collection of libraries and utilities.
+*
+* VNLib.Utils is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published
+* by the Free Software Foundation, either version 2 of the License,
+* or (at your option) any later version.
+*
+* VNLib.Utils is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with VNLib.Utils. If not, see http://www.gnu.org/licenses/.
+*/
+
+using System;
+
+namespace VNLib.Utils.Memory
+{
+ /// <summary>
+ /// Internal heap creation flags passed to the heap creation method
+ /// on initialization
+ /// </summary>
+ [Flags]
+ public enum HeapCreation : int
+ {
+ /// <summary>
+ /// Default/no flags
+ /// </summary>
+ None,
+ /// <summary>
+ /// Specifies that all allocations be zeroed before returning to caller
+ /// </summary>
+ GlobalZero = 0x01,
+ /// <summary>
+        /// Specifies that the heap should use internal locking, i.e. it is not inherently
+        /// thread safe and must be synchronized by the heap implementation
+ /// </summary>
+ UseSynchronization = 0x02,
+ /// <summary>
+ /// Specifies that the requested heap will be a shared heap for the process/library
+ /// </summary>
+ IsSharedHeap = 0x04
+ }
+} \ No newline at end of file
diff --git a/lib/Utils/src/Memory/IUnmangedHeap.cs b/lib/Utils/src/Memory/IUnmangedHeap.cs
index 94f34c8..cb4b6ba 100644
--- a/lib/Utils/src/Memory/IUnmangedHeap.cs
+++ b/lib/Utils/src/Memory/IUnmangedHeap.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2022 Vaughn Nugent
+* Copyright (c) 2023 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Utils
@@ -32,6 +32,11 @@ namespace VNLib.Utils.Memory
public interface IUnmangedHeap : IDisposable
{
/// <summary>
+ /// The creation flags the heap was initialized with
+ /// </summary>
+ HeapCreation CreationFlags { get; }
+
+ /// <summary>
/// Allocates a block of memory from the heap and returns a pointer to the new memory block
/// </summary>
/// <param name="size">The size (in bytes) of the element</param>
diff --git a/lib/Utils/src/Memory/MemoryHandle.cs b/lib/Utils/src/Memory/MemoryHandle.cs
index 7a7cb6a..067f6c0 100644
--- a/lib/Utils/src/Memory/MemoryHandle.cs
+++ b/lib/Utils/src/Memory/MemoryHandle.cs
@@ -179,7 +179,9 @@ namespace VNLib.Utils.Memory
{
throw new ArgumentOutOfRangeException(nameof(elements), "Element offset cannot be larger than allocated size");
}
+
this.ThrowIfClosed();
+
//Get ptr and offset it
T* bs = ((T*)handle) + elements;
return bs;
@@ -194,6 +196,11 @@ namespace VNLib.Utils.Memory
///</remarks>
public unsafe MemoryHandle Pin(int elementIndex)
{
+ if(elementIndex < 0)
+ {
+ throw new ArgumentOutOfRangeException(nameof(elementIndex));
+ }
+
//Get ptr and guard checks before adding the referrence
T* ptr = GetOffset((nuint)elementIndex);
diff --git a/lib/Utils/src/Memory/MemoryUtil.cs b/lib/Utils/src/Memory/MemoryUtil.cs
index 5fa381a..7f96c4a 100644
--- a/lib/Utils/src/Memory/MemoryUtil.cs
+++ b/lib/Utils/src/Memory/MemoryUtil.cs
@@ -35,23 +35,26 @@ using VNLib.Utils.Memory.Diagnostics;
namespace VNLib.Utils.Memory
{
+
/// <summary>
     /// Provides optimized cross-platform managed/unmanaged safe/unsafe memory operations
/// </summary>
[SecurityCritical]
[ComVisible(false)]
- public unsafe static class MemoryUtil
+ public static unsafe partial class MemoryUtil
{
/// <summary>
         /// The environment variable name used to specify the path of a user-defined
         /// shared heap library to load
/// </summary>
- public const string SHARED_HEAP_TYPE_ENV= "VNLIB_SHARED_HEAP_TYPE";
+ public const string SHARED_HEAP_FILE_PATH = "VNLIB_SHARED_HEAP_FILE_PATH";
+
/// <summary>
/// When creating a heap that accepts an initial size, this value is passed
/// to it, otherwise no initial heap size is set.
/// </summary>
public const string SHARED_HEAP_INTIAL_SIZE_ENV = "VNLIB_SHARED_HEAP_SIZE";
+
/// <summary>
         /// The environment variable name used to enable shared heap diagnostics
/// </summary>
@@ -70,7 +73,7 @@ namespace VNLib.Utils.Memory
         /// that will use the array pool before falling back to the <see cref="Shared"/>
         /// heap.
/// </summary>
- public const int MAX_UNSAFE_POOL_SIZE = 500 * 1024;
+ public const int MAX_UNSAFE_POOL_SIZE = 80 * 1024;
/// <summary>
/// Provides a shared heap instance for the process to allocate memory from.
@@ -140,38 +143,59 @@ namespace VNLib.Utils.Memory
bool IsWindows = OperatingSystem.IsWindows();
//Get environment varable
- string? heapType = Environment.GetEnvironmentVariable(SHARED_HEAP_TYPE_ENV);
-
- //Get inital size
- string? sharedSize = Environment.GetEnvironmentVariable(SHARED_HEAP_INTIAL_SIZE_ENV);
-
- //Try to parse the shared size from the env
- if (!nuint.TryParse(sharedSize, out nuint defaultSize))
+ string? heapDllPath = Environment.GetEnvironmentVariable(SHARED_HEAP_FILE_PATH);
+
+ //Default flags
+ HeapCreation cFlags = HeapCreation.UseSynchronization;
+
+ /*
+             * We need to set the shared flag and the synchronization flag.
+             *
+             * The heap implementation may clear the synchronization flag if it does
+             * not require serialization
+ */
+ cFlags |= isShared ? HeapCreation.IsSharedHeap : HeapCreation.None;
+
+ IUnmangedHeap heap;
+
+ //Check for heap api dll
+ if (!string.IsNullOrWhiteSpace(heapDllPath))
{
- defaultSize = SHARED_HEAP_INIT_SIZE;
+ //Attempt to load the heap
+ heap = NativeHeap.LoadHeap(heapDllPath, DllImportSearchPath.SafeDirectories, cFlags, 0);
}
+ //No user heap was specified, use fallback
+ else if (IsWindows)
+ {
+ //We can use win32 heaps
+
+ //Get inital size
+ string? sharedSize = Environment.GetEnvironmentVariable(SHARED_HEAP_INTIAL_SIZE_ENV);
- //convert to upper
- heapType = heapType?.ToUpperInvariant();
-
- //Create the heap
- IUnmangedHeap heap = heapType switch
+ //Try to parse the shared size from the env
+ if (!nuint.TryParse(sharedSize, out nuint defaultSize))
+ {
+ defaultSize = SHARED_HEAP_INIT_SIZE;
+ }
+
+ //Create win32 private heap
+ heap = Win32PrivateHeap.Create(defaultSize, cFlags);
+ }
+ else
{
- "WIN32" => IsWindows ? Win32PrivateHeap.Create(defaultSize) : throw new PlatformNotSupportedException("Win32 private heaps are not supported on non-windows platforms"),
- //If the shared heap is being allocated, then return a lock free global heap
- "RPMALLOC" => isShared ? RpMallocPrivateHeap.GlobalHeap : new RpMallocPrivateHeap(false),
- //Get the process heap if the heap is shared, otherwise create a new win32 private heap
- _ => IsWindows && !isShared ? Win32PrivateHeap.Create(defaultSize) : new ProcessHeap(),
- };
-
- //If diagnosticts is enabled, wrap the heap in a stats heap
+ //Finally fallback to .NET native mem impl
+ heap = new ProcessHeap();
+ }
+
+ //Enable heap statistics
return enableStats ? new TrackedHeapWrapper(heap) : heap;
}
/// <summary>
- /// Gets a value that indicates if the Rpmalloc native library is loaded
+        /// Gets a value that indicates if the user defined a custom heap
+ /// implementation
/// </summary>
- public static bool IsRpMallocLoaded { get; } = Environment.GetEnvironmentVariable(SHARED_HEAP_TYPE_ENV)?.ToUpperInvariant() == "RPMALLOC";
+ public static bool IsUserDefinedHeap { get; } = !string.IsNullOrWhiteSpace(Environment.GetEnvironmentVariable(SHARED_HEAP_FILE_PATH));
#region Zero
@@ -579,7 +603,7 @@ namespace VNLib.Utils.Memory
{
if (((nuint)block.LongLength - offset) <= count)
{
- throw new ArgumentException("The offset or count is outside of the range of the block of memory");
+                throw new ArgumentOutOfRangeException(nameof(count), "The offset or count is outside of the range of the block of memory");
}
}
@@ -596,8 +620,13 @@ namespace VNLib.Utils.Memory
/// <exception cref="IndexOutOfRangeException"></exception>
public static MemoryHandle PinArrayAndGetHandle<T>(T[] array, int elementOffset)
{
- //Quick verify index exists
- _ = array[elementOffset];
+ if(elementOffset < 0)
+ {
+ throw new ArgumentOutOfRangeException(nameof(elementOffset));
+ }
+
+ //Quick verify index exists, may be the very last index
+ CheckBounds(array, (nuint)elementOffset, 1);
//Pin the array
GCHandle arrHandle = GCHandle.Alloc(array, GCHandleType.Pinned);
@@ -609,8 +638,6 @@ namespace VNLib.Utils.Memory
return new(indexOffet, arrHandle);
}
- #region alloc
-
/// <summary>
/// Gets a <see cref="Span{T}"/> from the supplied address
/// </summary>
@@ -652,121 +679,5 @@ namespace VNLib.Utils.Memory
//Multiply back to page sizes
return pages * Environment.SystemPageSize;
}
-
- /// <summary>
- /// Allocates a block of unmanaged, or pooled manaaged memory depending on
- /// compilation flags and runtime unamanged allocators.
- /// </summary>
- /// <typeparam name="T">The unamanged type to allocate</typeparam>
- /// <param name="elements">The number of elements of the type within the block</param>
- /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
- /// <returns>A handle to the block of memory</returns>
- /// <exception cref="ArgumentException"></exception>
- /// <exception cref="OutOfMemoryException"></exception>
- public static UnsafeMemoryHandle<T> UnsafeAlloc<T>(int elements, bool zero = false) where T : unmanaged
- {
- if (elements < 0)
- {
- throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
- }
-
- if(elements > MAX_UNSAFE_POOL_SIZE || IsRpMallocLoaded)
- {
- // Alloc from heap
- IntPtr block = Shared.Alloc((uint)elements, (uint)sizeof(T), zero);
- //Init new handle
- return new(Shared, block, elements);
- }
- else
- {
- return new(ArrayPool<T>.Shared, elements, zero);
- }
- }
-
- /// <summary>
- /// Allocates a block of unmanaged, or pooled manaaged memory depending on
- /// compilation flags and runtime unamanged allocators, rounded up to the
- /// neareset memory page.
- /// </summary>
- /// <typeparam name="T">The unamanged type to allocate</typeparam>
- /// <param name="elements">The number of elements of the type within the block</param>
- /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
- /// <returns>A handle to the block of memory</returns>
- /// <exception cref="ArgumentException"></exception>
- /// <exception cref="OutOfMemoryException"></exception>
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- public static UnsafeMemoryHandle<T> UnsafeAllocNearestPage<T>(int elements, bool zero = false) where T : unmanaged
- {
- if (elements < 0)
- {
- throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
- }
- //Round to nearest page (in bytes)
- nint np = NearestPage(elements * sizeof(T));
-
- //Resize to element size
- np /= sizeof(T);
-
- return UnsafeAlloc<T>((int)np, zero);
- }
-
- /// <summary>
- /// Allocates a block of unmanaged, or pooled manaaged memory depending on
- /// compilation flags and runtime unamanged allocators.
- /// </summary>
- /// <typeparam name="T">The unamanged type to allocate</typeparam>
- /// <param name="elements">The number of elements of the type within the block</param>
- /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
- /// <returns>A handle to the block of memory</returns>
- /// <exception cref="ArgumentException"></exception>
- /// <exception cref="OutOfMemoryException"></exception>
- public static IMemoryHandle<T> SafeAlloc<T>(int elements, bool zero = false) where T: unmanaged
- {
- if (elements < 0)
- {
- throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
- }
-
- //If the element count is larger than max pool size, alloc from shared heap
- if (elements > MAX_UNSAFE_POOL_SIZE)
- {
- //Alloc from shared heap
- return Shared.Alloc<T>(elements, zero);
- }
- else
- {
- //Get temp buffer from shared buffer pool
- return new VnTempBuffer<T>(elements, zero);
- }
- }
-
- /// <summary>
- /// Allocates a block of unmanaged, or pooled manaaged memory depending on
- /// compilation flags and runtime unamanged allocators, rounded up to the
- /// neareset memory page.
- /// </summary>
- /// <typeparam name="T">The unamanged type to allocate</typeparam>
- /// <param name="elements">The number of elements of the type within the block</param>
- /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
- /// <returns>A handle to the block of memory</returns>
- /// <exception cref="ArgumentException"></exception>
- /// <exception cref="OutOfMemoryException"></exception>
- public static IMemoryHandle<T> SafeAllocNearestPage<T>(int elements, bool zero = false) where T : unmanaged
- {
- if (elements < 0)
- {
- throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
- }
-
- //Round to nearest page (in bytes)
- nint np = NearestPage(elements * sizeof(T));
-
- //Resize to element size
- np /= sizeof(T);
-
- return SafeAlloc<T>((int)np, zero);
- }
-
- #endregion
}
} \ No newline at end of file
diff --git a/lib/Utils/src/Memory/MemoryUtilAlloc.cs b/lib/Utils/src/Memory/MemoryUtilAlloc.cs
new file mode 100644
index 0000000..e4210e7
--- /dev/null
+++ b/lib/Utils/src/Memory/MemoryUtilAlloc.cs
@@ -0,0 +1,291 @@
+/*
+* Copyright (c) 2023 Vaughn Nugent
+*
+* Library: VNLib
+* Package: VNLib.Utils
+* File: MemoryUtilAlloc.cs
+*
+* MemoryUtilAlloc.cs is part of VNLib.Utils which is part of
+* the larger VNLib collection of libraries and utilities.
+*
+* VNLib.Utils is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published
+* by the Free Software Foundation, either version 2 of the License,
+* or (at your option) any later version.
+*
+* VNLib.Utils is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with VNLib.Utils. If not, see http://www.gnu.org/licenses/.
+*/
+
+using System;
+using System.Buffers;
+using System.Runtime.CompilerServices;
+
+using VNLib.Utils.Extensions;
+
+namespace VNLib.Utils.Memory
+{
+ public static unsafe partial class MemoryUtil
+ {
+ #region alloc
+
+ /// <summary>
+        /// Allocates a block of unmanaged, or pooled managed memory depending on
+        /// compilation flags and runtime unmanaged allocators.
+        /// </summary>
+        /// <typeparam name="T">The unmanaged type to allocate</typeparam>
+ /// <param name="elements">The number of elements of the type within the block</param>
+ /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
+ /// <returns>A handle to the block of memory</returns>
+ /// <exception cref="ArgumentException"></exception>
+ /// <exception cref="OutOfMemoryException"></exception>
+ public static UnsafeMemoryHandle<T> UnsafeAlloc<T>(int elements, bool zero = false) where T : unmanaged
+ {
+ if (elements < 0)
+ {
+ throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
+ }
+
+ /*
+             * We may allocate from the shared heap only if the heap is not using locks,
+             * or if the element size could cause performance issues because it is too large
+             * to use a managed array.
+             *
+             * We want to avoid allocations that may end up in the LOH if we can
+ */
+
+ if ((Shared.CreationFlags & HeapCreation.UseSynchronization) == 0 || ByteCount<T>((uint)elements) > MAX_UNSAFE_POOL_SIZE)
+ {
+ // Alloc from heap
+ IntPtr block = Shared.Alloc((uint)elements, (uint)sizeof(T), zero);
+ //Init new handle
+ return new(Shared, block, elements);
+ }
+ else
+ {
+ return new(ArrayPool<T>.Shared, elements, zero);
+ }
+ }
+
+ /// <summary>
+ /// Allocates a block of unmanaged, or pooled managed memory depending on
+ /// compilation flags and runtime unmanaged allocators, rounded up to the
+ /// nearest memory page.
+ /// </summary>
+ /// <typeparam name="T">The unmanaged type to allocate</typeparam>
+ /// <param name="elements">The number of elements of the type within the block</param>
+ /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
+ /// <returns>A handle to the block of memory</returns>
+ /// <exception cref="ArgumentException"></exception>
+ /// <exception cref="OutOfMemoryException"></exception>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public static UnsafeMemoryHandle<T> UnsafeAllocNearestPage<T>(int elements, bool zero = false) where T : unmanaged
+ {
+ if (elements < 0)
+ {
+ throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
+ }
+
+ //Round to nearest page (in bytes)
+ nint np = NearestPage(elements * sizeof(T));
+
+ //Resize to element size
+ np /= sizeof(T);
+
+ return UnsafeAlloc<T>((int)np, zero);
+ }
+
+ /// <summary>
+ /// Allocates a block of unmanaged, or pooled managed memory depending on
+ /// compilation flags and runtime unmanaged allocators.
+ /// </summary>
+ /// <typeparam name="T">The unmanaged type to allocate</typeparam>
+ /// <param name="elements">The number of elements of the type within the block</param>
+ /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
+ /// <returns>A handle to the block of memory</returns>
+ /// <exception cref="ArgumentException"></exception>
+ /// <exception cref="OutOfMemoryException"></exception>
+ public static IMemoryHandle<T> SafeAlloc<T>(int elements, bool zero = false) where T : unmanaged
+ {
+ if (elements < 0)
+ {
+ throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
+ }
+
+ /*
+ * We may allocate from the shared heap only if the heap is not using locks,
+ * or if the element size could cause performance issues because it is too large
+ * to use a managed array.
+ *
+ * We want to avoid allocations that may end up in the LOH if we can
+ */
+
+ if ((Shared.CreationFlags & HeapCreation.UseSynchronization) == 0 || ByteCount<T>((uint)elements) > MAX_UNSAFE_POOL_SIZE)
+ {
+ return Shared.Alloc<T>(elements, zero);
+ }
+ else
+ {
+ return new VnTempBuffer<T>(ArrayPool<T>.Shared, elements, zero);
+ }
+ }
+
+ /// <summary>
+ /// Allocates a block of unmanaged, or pooled managed memory depending on
+ /// compilation flags and runtime unmanaged allocators, rounded up to the
+ /// nearest memory page.
+ /// </summary>
+ /// <typeparam name="T">The unmanaged type to allocate</typeparam>
+ /// <param name="elements">The number of elements of the type within the block</param>
+ /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
+ /// <returns>A handle to the block of memory</returns>
+ /// <exception cref="ArgumentException"></exception>
+ /// <exception cref="OutOfMemoryException"></exception>
+ public static IMemoryHandle<T> SafeAllocNearestPage<T>(int elements, bool zero = false) where T : unmanaged
+ {
+ if (elements < 0)
+ {
+ throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
+ }
+
+ //Round to nearest page (in bytes)
+ nint np = NearestPage(elements * sizeof(T));
+
+ //Resize to element size
+ np /= sizeof(T);
+
+ return SafeAlloc<T>((int)np, zero);
+ }
+
+ #endregion
+
+ #region ByteOptimizations
+
+
+ /// <summary>
+ /// Allocates a block of unmanaged, or pooled managed memory depending on
+ /// compilation flags and runtime unmanaged allocators.
+ /// </summary>
+ /// <param name="elements">The number of elements of the type within the block</param>
+ /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
+ /// <returns>A handle to the block of memory</returns>
+ /// <exception cref="ArgumentException"></exception>
+ /// <exception cref="OutOfMemoryException"></exception>
+ public static UnsafeMemoryHandle<byte> UnsafeAlloc(int elements, bool zero = false)
+ {
+ if (elements < 0)
+ {
+ throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
+ }
+
+ /*
+ * We may allocate from the shared heap only if the heap is not using locks,
+ * or if the element size could cause performance issues because it is too large
+ * to use a managed array.
+ *
+ * We want to avoid allocations that may end up in the LOH if we can
+ */
+
+ if ((Shared.CreationFlags & HeapCreation.UseSynchronization) == 0 || elements > MAX_UNSAFE_POOL_SIZE)
+ {
+ // Alloc from heap
+ IntPtr block = Shared.Alloc((uint)elements, 1, zero);
+ //Init new handle
+ return new(Shared, block, elements);
+ }
+ else
+ {
+ return new(ArrayPool<byte>.Shared, elements, zero);
+ }
+ }
+
+ /// <summary>
+ /// Allocates a block of unmanaged, or pooled managed memory depending on
+ /// compilation flags and runtime unmanaged allocators, rounded up to the
+ /// nearest memory page.
+ /// </summary>
+ /// <param name="elements">The number of elements of the type within the block</param>
+ /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
+ /// <returns>A handle to the block of memory</returns>
+ /// <exception cref="ArgumentException"></exception>
+ /// <exception cref="OutOfMemoryException"></exception>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public static UnsafeMemoryHandle<byte> UnsafeAllocNearestPage(int elements, bool zero = false)
+ {
+ if (elements < 0)
+ {
+ throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
+ }
+
+ //Round to nearest page (in bytes)
+ nint np = NearestPage(elements);
+
+ return UnsafeAlloc((int)np, zero);
+ }
+
+ /// <summary>
+ /// Allocates a block of unmanaged, or pooled managed memory depending on
+ /// compilation flags and runtime unmanaged allocators.
+ /// </summary>
+ /// <param name="elements">The number of elements of the type within the block</param>
+ /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
+ /// <returns>A handle to the block of memory</returns>
+ /// <exception cref="ArgumentException"></exception>
+ /// <exception cref="OutOfMemoryException"></exception>
+ public static IMemoryHandle<byte> SafeAlloc(int elements, bool zero = false)
+ {
+ if (elements < 0)
+ {
+ throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
+ }
+
+ /*
+ * We may allocate from the shared heap only if the heap is not using locks,
+ * or if the element size could cause performance issues because it is too large
+ * to use a managed array.
+ *
+ * We want to avoid allocations that may end up in the LOH if we can
+ */
+
+ if ((Shared.CreationFlags & HeapCreation.UseSynchronization) == 0 || elements > MAX_UNSAFE_POOL_SIZE)
+ {
+ return Shared.Alloc<byte>(elements, zero);
+ }
+ else
+ {
+ return new VnTempBuffer<byte>(ArrayPool<byte>.Shared, elements, zero);
+ }
+ }
+
+ /// <summary>
+ /// Allocates a block of unmanaged, or pooled managed memory depending on
+ /// compilation flags and runtime unmanaged allocators, rounded up to the
+ /// nearest memory page.
+ /// </summary>
+ /// <param name="elements">The number of elements of the type within the block</param>
+ /// <param name="zero">Flag to zero elements during allocation before the method returns</param>
+ /// <returns>A handle to the block of memory</returns>
+ /// <exception cref="ArgumentException"></exception>
+ /// <exception cref="OutOfMemoryException"></exception>
+ public static IMemoryHandle<byte> SafeAllocNearestPage(int elements, bool zero = false)
+ {
+ if (elements < 0)
+ {
+ throw new ArgumentException("Number of elements must be a positive integer", nameof(elements));
+ }
+
+ //Round to nearest page (in bytes)
+ nint np = NearestPage(elements);
+
+ return SafeAlloc((int)np, zero);
+ }
+
+ #endregion
+ }
+
+}
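As a quick orientation for the new allocation helpers above, here is a minimal usage sketch in C#; the wrapper class, buffer sizes, and values are illustrative assumptions, and the byte overloads avoid the generic type parameter used at the older call sites.

using System;
using VNLib.Utils.Memory;

static class AllocUsageSketch
{
    static void Demo()
    {
        //Byte-specific overload, zeroed before the handle is returned
        using (UnsafeMemoryHandle<byte> buffer = MemoryUtil.UnsafeAlloc(1024, true))
        {
            buffer.Span[0] = 0xFF;
        }

        //Safe handle rounded up to the nearest page; Length reflects the rounded size
        using (IMemoryHandle<byte> page = MemoryUtil.SafeAllocNearestPage(1, false))
        {
            Console.WriteLine(page.Length);
        }
    }
}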
diff --git a/lib/Utils/src/Memory/NativeHeap.cs b/lib/Utils/src/Memory/NativeHeap.cs
new file mode 100644
index 0000000..30a65ae
--- /dev/null
+++ b/lib/Utils/src/Memory/NativeHeap.cs
@@ -0,0 +1,212 @@
+/*
+* Copyright (c) 2023 Vaughn Nugent
+*
+* Library: VNLib
+* Package: VNLib.Utils
+* File: NativeHeap.cs
+*
+* NativeHeap.cs is part of VNLib.Utils which is part of the larger
+* VNLib collection of libraries and utilities.
+*
+* VNLib.Utils is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published
+* by the Free Software Foundation, either version 2 of the License,
+* or (at your option) any later version.
+*
+* VNLib.Utils is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with VNLib.Utils. If not, see http://www.gnu.org/licenses/.
+*/
+
+using System;
+using System.Runtime.InteropServices;
+using System.Runtime.CompilerServices;
+
+using VNLib.Utils.Native;
+
+namespace VNLib.Utils.Memory
+{
+ /// <summary>
+ /// <para>
+ /// Allows for exposing a dynamically loaded native heap implementation.
+ /// </para>
+ /// </summary>
+ public class NativeHeap : UnmanagedHeapBase
+ {
+ public const string CREATE_METHOD_NAME = "heapCreate";
+ public const string ALLOCATE_METHOD_NAME = "heapAlloc";
+ public const string REALLOC_METHOD_NAME = "heapRealloc";
+ public const string FREE_METHOD_NAME = "heapFree";
+ public const string DESTROY_METHOD_NAME = "heapDestroy";
+
+ /// <summary>
+ /// <para>
+ /// Loads an unmanaged heap into the current process at runtime from the given path. The dll must conform
+ /// to the unmanaged heap format. After the method table is loaded, the heapCreate method is called to
+ /// initialize the heap.
+ /// </para>
+ /// </summary>
+ /// <param name="dllPath">The path to the heap's dll file to load into the process.</param>
+ /// <param name="searchPath">The native library search path</param>
+ /// <param name="creationFlags">Specifies the creation flags to pass to the heap creation method</param>
+ /// <param name="flags">Generic flags passed directly to the heap creation method</param>
+ /// <returns>The newly initialized <see cref="NativeHeap"/></returns>
+ public unsafe static NativeHeap LoadHeap(string dllPath, DllImportSearchPath searchPath, HeapCreation creationFlags, ERRNO flags)
+ {
+ //Create a flags structure
+ UnmanagedHeapFlags hf;
+ UnmanagedHeapFlags* hFlags = &hf;
+
+ //Set defaults
+ hFlags->Flags = flags;
+ hFlags->InternalFlags = creationFlags;
+ hFlags->HeapPointer = IntPtr.Zero;
+
+ //Create the heap
+ return LoadHeapCore(dllPath, searchPath, hFlags);
+ }
+
+ private unsafe static NativeHeap LoadHeapCore(string path, DllImportSearchPath searchPath, UnmanagedHeapFlags* flags)
+ {
+ //Try to load the library
+ SafeLibraryHandle library = SafeLibraryHandle.LoadLibrary(path, searchPath);
+ try
+ {
+ //Open method table
+ HeapMethods table = new()
+ {
+ //Get method delegates
+ Alloc = library.DangerousGetMethod<AllocDelegate>(ALLOCATE_METHOD_NAME),
+
+ Destroy = library.DangerousGetMethod<DestroyHeapDelegate>(DESTROY_METHOD_NAME),
+
+ Free = library.DangerousGetMethod<FreeDelegate>(FREE_METHOD_NAME),
+
+ Realloc = library.DangerousGetMethod<ReallocDelegate>(REALLOC_METHOD_NAME),
+
+ Library = library
+ };
+
+ //Get the create method
+ CreateHeapDelegate create = library.DangerousGetMethod<CreateHeapDelegate>(CREATE_METHOD_NAME);
+
+ //Create the new heap
+ bool success = create(flags);
+
+ if (!success)
+ {
+ throw new NativeMemoryException("Failed to create the new heap, the heap create method returned a null pointer");
+ }
+
+ //Return the new heap
+ return new(flags, table);
+ }
+ catch
+ {
+ //Cleanup
+ library.Dispose();
+ throw;
+ }
+ }
+
+
+ private readonly SafeLibraryHandle LibHandle;
+ private AllocDelegate AllocMethod;
+ private ReallocDelegate ReallocMethod;
+ private FreeDelegate FreeMethod;
+ private DestroyHeapDelegate Destroy;
+
+ private unsafe NativeHeap(UnmanagedHeapFlags* flags, HeapMethods methodTable) :base(flags->InternalFlags, true)
+ {
+ //Store heap pointer
+ handle = flags->HeapPointer;
+
+ //Store the method table
+ AllocMethod = methodTable.Alloc;
+ ReallocMethod = methodTable.Realloc;
+ FreeMethod = methodTable.Free;
+ Destroy = methodTable.Destroy;
+
+ //Store library
+ LibHandle = methodTable.Library;
+ }
+
+ ///<inheritdoc/>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ protected override IntPtr AllocBlock(nuint elements, nuint size, bool zero) => AllocMethod(handle, elements, size, zero);
+
+ ///<inheritdoc/>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ protected override IntPtr ReAllocBlock(IntPtr block, nuint elements, nuint size, bool zero) => ReallocMethod(handle, block, elements, size, zero);
+
+ ///<inheritdoc/>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ protected override bool FreeBlock(IntPtr block) => FreeMethod(handle, block);
+
+ ///<inheritdoc/>
+ protected override bool ReleaseHandle()
+ {
+ //Destroy the heap
+ bool ret = Destroy(handle);
+
+ //Cleanup the method table
+ Cleanup();
+
+ //Free the library
+ LibHandle.Dispose();
+
+ return ret;
+ }
+
+#nullable disable
+ private void Cleanup()
+ {
+ AllocMethod = null;
+ ReallocMethod = null;
+ FreeMethod = null;
+ Destroy = null;
+ }
+#nullable enable
+
+ /*
+ * Delegate methods match the native header impl for unmanaged heaps
+ */
+
+ unsafe delegate ERRNO CreateHeapDelegate(UnmanagedHeapFlags* createFlags);
+
+ delegate IntPtr AllocDelegate(IntPtr handle, nuint elements, nuint alignment, [MarshalAs(UnmanagedType.Bool)] bool zero);
+
+ delegate IntPtr ReallocDelegate(IntPtr heap, IntPtr block, nuint elements, nuint alignment, [MarshalAs(UnmanagedType.Bool)] bool zero);
+
+ delegate ERRNO FreeDelegate(IntPtr heap, IntPtr block);
+
+ delegate ERRNO DestroyHeapDelegate(IntPtr heap);
+
+ [StructLayout(LayoutKind.Sequential)]
+ record struct UnmanagedHeapFlags
+ {
+ public IntPtr HeapPointer;
+
+ public HeapCreation InternalFlags;
+
+ public ERRNO Flags;
+ }
+
+ readonly record struct HeapMethods
+ {
+ public readonly SafeLibraryHandle Library { get; init; }
+
+ public readonly AllocDelegate Alloc { get; init; }
+
+ public readonly ReallocDelegate Realloc { get; init; }
+
+ public readonly FreeDelegate Free { get; init; }
+
+ public readonly DestroyHeapDelegate Destroy { get; init; }
+ }
+ }
+} \ No newline at end of file
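A hedged sketch of loading a heap library through the new NativeHeap API follows; the library name mirrors the unit test added later in this diff, while the flag choice and wrapper class are assumptions for illustration.

using System;
using System.Runtime.InteropServices;
using VNLib.Utils.Memory;

static class NativeHeapSketch
{
    static void Demo()
    {
        //Load a dll exporting heapCreate/heapAlloc/heapRealloc/heapFree/heapDestroy
        using NativeHeap heap = NativeHeap.LoadHeap(
            "rpmalloc.dll",
            DllImportSearchPath.SafeDirectories,
            HeapCreation.UseSynchronization,
            flags: 0
        );

        //Allocate 100 single-byte elements, then free (Free zeroes the caller's pointer)
        IntPtr block = heap.Alloc(100, sizeof(byte), false);
        heap.Free(ref block);
    }
}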
diff --git a/lib/Utils/src/Memory/ProcessHeap.cs b/lib/Utils/src/Memory/ProcessHeap.cs
index 7afe4b1..2792af9 100644
--- a/lib/Utils/src/Memory/ProcessHeap.cs
+++ b/lib/Utils/src/Memory/ProcessHeap.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2022 Vaughn Nugent
+* Copyright (c) 2023 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Utils
@@ -36,6 +36,20 @@ namespace VNLib.Utils.Memory
public unsafe class ProcessHeap : VnDisposeable, IUnmangedHeap
{
/// <summary>
+ /// Gets the shared process heap instance
+ /// </summary>
+ public static ProcessHeap Shared { get; } = new();
+
+ /// <summary>
+ /// <inheritdoc/>
+ /// <para>
+ /// Is always <see cref="HeapCreation.IsSharedHeap"/> as this heap is the default
+ /// process heap, meaning memory will be shared across the process
+ /// </para>
+ /// </summary>
+ public HeapCreation CreationFlags { get; } = HeapCreation.IsSharedHeap;
+
+ /// <summary>
/// Initalizes a new global (cross platform) process heap
/// </summary>
public ProcessHeap()
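Since ProcessHeap now exposes a shared singleton, a minimal sketch of allocating through it might look like the following; the element count is arbitrary and the heap is accessed through the IUnmangedHeap interface.

using System;
using VNLib.Utils.Memory;

static class ProcessHeapSketch
{
    static void Demo()
    {
        //The shared instance must not be disposed by callers
        IUnmangedHeap heap = ProcessHeap.Shared;

        IntPtr block = heap.Alloc(32, 1, true);
        heap.Free(ref block);
    }
}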
diff --git a/lib/Utils/src/Memory/RpMallocPrivateHeap.cs b/lib/Utils/src/Memory/RpMallocPrivateHeap.cs
deleted file mode 100644
index 323f228..0000000
--- a/lib/Utils/src/Memory/RpMallocPrivateHeap.cs
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
-* Copyright (c) 2023 Vaughn Nugent
-*
-* Library: VNLib
-* Package: VNLib.Utils
-* File: RpMallocPrivateHeap.cs
-*
-* RpMallocPrivateHeap.cs is part of VNLib.Utils which is part of the larger
-* VNLib collection of libraries and utilities.
-*
-* VNLib.Utils is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published
-* by the Free Software Foundation, either version 2 of the License,
-* or (at your option) any later version.
-*
-* VNLib.Utils is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with VNLib.Utils. If not, see http://www.gnu.org/licenses/.
-*/
-
-using System;
-using System.Buffers;
-using System.Diagnostics;
-using System.Runtime.InteropServices;
-using System.Runtime.CompilerServices;
-
-using LPVOID = System.IntPtr;
-using LPHEAPHANDLE = System.IntPtr;
-
-namespace VNLib.Utils.Memory
-{
- /// <summary>
- /// A wrapper class for cross platform RpMalloc implementation.
- /// </summary>
- [ComVisible(false)]
- public sealed class RpMallocPrivateHeap : UnmanagedHeapBase
- {
- const string DLL_NAME = "rpmalloc";
-
- #region statics
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern int rpmalloc_initialize();
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern void rpmalloc_finalize();
-
- //Heap api
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern LPHEAPHANDLE rpmalloc_heap_acquire();
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern void rpmalloc_heap_release(LPHEAPHANDLE heap);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern LPVOID rpmalloc_heap_alloc(LPHEAPHANDLE heap, nuint size);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern LPVOID rpmalloc_heap_aligned_alloc(LPHEAPHANDLE heap, nuint alignment, nuint size);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern LPVOID rpmalloc_heap_calloc(LPHEAPHANDLE heap, nuint num, nuint size);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern LPVOID rpmalloc_heap_aligned_calloc(LPHEAPHANDLE heap, nuint alignment, nuint num, nuint size);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern LPVOID rpmalloc_heap_realloc(LPHEAPHANDLE heap, LPVOID ptr, nuint size, nuint flags);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern LPVOID rpmalloc_heap_aligned_realloc(LPHEAPHANDLE heap, LPVOID ptr, nuint alignment, nuint size, nuint flags);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern void rpmalloc_heap_free(LPHEAPHANDLE heap, LPVOID ptr);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern void rpmalloc_heap_free_all(LPHEAPHANDLE heap);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern void rpmalloc_heap_thread_set_current(LPHEAPHANDLE heap);
-
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern void rpmalloc_thread_initialize();
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern int rpmalloc_is_thread_initialized();
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern void rpmalloc_thread_finalize(int release_caches);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern LPVOID rpmalloc(nuint size);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern LPVOID rpcalloc(nuint num, nuint size);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern LPVOID rprealloc(LPVOID ptr, nuint size);
- [DllImport(DLL_NAME, ExactSpelling = true)]
- [DefaultDllImportSearchPaths(DllImportSearchPath.SafeDirectories)]
- static extern void rpfree(LPVOID ptr);
-
- #endregion
-
- private sealed class RpMallocGlobalHeap : IUnmangedHeap
- {
- IntPtr IUnmangedHeap.Alloc(nuint elements, nuint size, bool zero)
- {
- return RpMalloc(elements, size, zero);
- }
-
- //Global heap does not need to be disposed
- void IDisposable.Dispose()
- { }
-
- bool IUnmangedHeap.Free(ref IntPtr block)
- {
- //Free the block
- RpFree(ref block);
- return true;
- }
-
- void IUnmangedHeap.Resize(ref IntPtr block, nuint elements, nuint size, bool zero)
- {
- //Try to resize the block
- IntPtr resize = RpRealloc(block, elements, size);
-
- //assign ptr
- block = resize != IntPtr.Zero ? resize : throw new NativeMemoryOutOfMemoryException("Failed to resize the block");
- }
- }
-
- /// <summary>
- /// <para>
- /// A <see cref="IUnmangedHeap"/> API for the RPMalloc library if loaded.
- /// </para>
- /// <para>
- /// This heap is thread safe and may be converted to a <see cref="MemoryManager{T}"/>
- /// infinitley and disposed safely.
- /// </para>
- /// <para>
- /// If the native library is not loaded, calls to this API will throw a <see cref="DllNotFoundException"/>.
- /// </para>
- /// </summary>
- public static IUnmangedHeap GlobalHeap { get; } = new RpMallocGlobalHeap();
-
- /// <summary>
- /// <para>
- /// Initializes RpMalloc for the current thread and alloctes a block of memory
- /// </para>
- /// </summary>
- /// <param name="elements">The number of elements to allocate</param>
- /// <param name="size">The number of bytes per element type (aligment)</param>
- /// <param name="zero">Zero the block of memory before returning</param>
- /// <returns>A pointer to the block, (zero if failed)</returns>
- public static LPVOID RpMalloc(nuint elements, nuint size, bool zero)
- {
- //See if the current thread has been initialized
- if (rpmalloc_is_thread_initialized() == 0)
- {
- //Initialize the current thread
- rpmalloc_thread_initialize();
- }
-
- //Alloc block
- LPVOID block;
-
- if (zero)
- {
- block = rpcalloc(elements, size);
- }
- else
- {
- //Calculate the block size
- nuint blockSize = checked(elements * size);
-
- block = rpmalloc(blockSize);
- }
- return block;
- }
-
- /// <summary>
- /// Frees a block of memory allocated by RpMalloc
- /// </summary>
- /// <param name="block">A ref to the pointer of the block to free</param>
- public static void RpFree(ref LPVOID block)
- {
- if (block != IntPtr.Zero)
- {
- rpfree(block);
- block = IntPtr.Zero;
- }
- }
-
- /// <summary>
- /// Attempts to re-allocate the specified block on the global heap
- /// </summary>
- /// <param name="block">A pointer to a previously allocated block of memory</param>
- /// <param name="elements">The number of elements in the block</param>
- /// <param name="size">The number of bytes in the element</param>
- /// <returns>A pointer to the new block if the reallocation succeeded, null if the resize failed</returns>
- /// <exception cref="ArgumentException"></exception>
- /// <exception cref="OverflowException"></exception>
- public static LPVOID RpRealloc(LPVOID block, nuint elements, nuint size)
- {
- if(block == IntPtr.Zero)
- {
- throw new ArgumentException("The supplied block is not valid", nameof(block));
- }
-
- //Calc new block size
- nuint blockSize = checked(elements * size);
-
- return rprealloc(block, blockSize);
- }
-
- #region instance
-
- /// <summary>
- /// Initializes a new RpMalloc first class heap to allocate memory blocks from
- /// </summary>
- /// <param name="zeroAll">A global flag to zero all blocks of memory allocated</param>
- /// <exception cref="NativeMemoryException"></exception>
- public RpMallocPrivateHeap(bool zeroAll):base(zeroAll, true)
- {
- //Alloc the heap
- handle = rpmalloc_heap_acquire();
- if(IsInvalid)
- {
- throw new NativeMemoryException("Failed to aquire a new heap");
- }
-#if TRACE
- Trace.WriteLine($"RPMalloc heap {handle:x} created");
-#endif
- }
-
- ///<inheritdoc/>
- protected override bool ReleaseHandle()
- {
-#if TRACE
- Trace.WriteLine($"RPMalloc heap {handle:x} destroyed");
-#endif
- //Release all heap memory
- rpmalloc_heap_free_all(handle);
- //Destroy the heap
- rpmalloc_heap_release(handle);
- //Release base
- return true;
- }
-
- ///<inheritdoc/>
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- protected sealed override LPVOID AllocBlock(nuint elements, nuint size, bool zero)
- {
- //Alloc or calloc and initalize
- return zero ? rpmalloc_heap_calloc(handle, elements, size) : rpmalloc_heap_alloc(handle, checked(size * elements));
- }
-
- ///<inheritdoc/>
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- protected sealed override bool FreeBlock(LPVOID block)
- {
- //Free block
- rpmalloc_heap_free(handle, block);
- return true;
- }
-
- ///<inheritdoc/>
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- protected sealed override LPVOID ReAllocBlock(LPVOID block, nuint elements, nuint size, bool zero)
- {
- //Realloc
- return rpmalloc_heap_realloc(handle, block, checked(elements * size), 0);
- }
-
- #endregion
- }
-}
diff --git a/lib/Utils/src/Memory/UnmanagedHeapBase.cs b/lib/Utils/src/Memory/UnmanagedHeapBase.cs
index 4f5084a..eea84c8 100644
--- a/lib/Utils/src/Memory/UnmanagedHeapBase.cs
+++ b/lib/Utils/src/Memory/UnmanagedHeapBase.cs
@@ -48,22 +48,37 @@ namespace VNLib.Utils.Memory
protected readonly bool GlobalZero;
/// <summary>
+ /// A value that indicates that locking will
+ /// be used when invoking heap operations
+ /// </summary>
+ protected readonly bool UseSynchronization;
+
+ /// <summary>
/// Initalizes the unmanaged heap base class (init synchronization handle)
/// </summary>
- /// <param name="globalZero">A global flag to zero all blocks of memory during allocation</param>
+ /// <param name="flags">Creation flags to obey</param>
/// <param name="ownsHandle">A flag that indicates if the handle is owned by the instance</param>
- protected UnmanagedHeapBase(bool globalZero, bool ownsHandle) : base(ownsHandle)
+ protected UnmanagedHeapBase(HeapCreation flags, bool ownsHandle) : base(ownsHandle)
{
HeapLock = new();
- GlobalZero = globalZero;
+ GlobalZero = flags.HasFlag(HeapCreation.GlobalZero);
+ UseSynchronization = flags.HasFlag(HeapCreation.UseSynchronization);
+ CreationFlags = flags;
}
///<inheritdoc/>
- ///<remarks>Increments the handle count</remarks>
+ public HeapCreation CreationFlags { get; }
+
+ ///<inheritdoc/>
+ ///<remarks>Increments the handle count, free must be called to decrement the handle count</remarks>
+ ///<exception cref="OverflowException"></exception>
///<exception cref="OutOfMemoryException"></exception>
///<exception cref="ObjectDisposedException"></exception>
public LPVOID Alloc(nuint elements, nuint size, bool zero)
{
+ //Check for multiplication overflow of the requested size
+ _ = checked(elements * size);
+
//Force zero if global flag is set
zero |= GlobalZero;
bool handleCountIncremented = false;
@@ -80,10 +95,20 @@ namespace VNLib.Utils.Memory
try
{
LPVOID block;
- //Enter lock
- lock(HeapLock)
+
+ //Check if lock should be used
+ if (UseSynchronization)
{
- //Alloc block
+ //Enter lock
+ lock(HeapLock)
+ {
+ //Alloc block
+ block = AllocBlock(elements, size, zero);
+ }
+ }
+ else
+ {
+ //Alloc block without lock
block = AllocBlock(elements, size, zero);
}
//Check if block was allocated
@@ -96,8 +121,9 @@ namespace VNLib.Utils.Memory
throw;
}
}
-
+
///<inheritdoc/>
+ ///<exception cref="OverflowException"></exception>
///<remarks>Decrements the handle count</remarks>
public bool Free(ref LPVOID block)
{
@@ -110,12 +136,20 @@ namespace VNLib.Utils.Memory
return true;
}
- //wait for lock
- lock (HeapLock)
+ if (UseSynchronization)
{
- //Free block
+ //wait for lock
+ lock (HeapLock)
+ {
+ //Free block
+ result = FreeBlock(block);
+ //Release lock before releasing handle
+ }
+ }
+ else
+ {
+ //No lock
result = FreeBlock(block);
- //Release lock before releasing handle
}
//Decrement handle count
@@ -126,20 +160,35 @@ namespace VNLib.Utils.Memory
}
///<inheritdoc/>
+ ///<exception cref="OverflowException"></exception>
///<exception cref="OutOfMemoryException"></exception>
///<exception cref="ObjectDisposedException"></exception>
public void Resize(ref LPVOID block, nuint elements, nuint size, bool zero)
{
+ //Check for multiplication overflow of the requested size
+ _ = checked(elements * size);
+
LPVOID newBlock;
- lock (HeapLock)
+ //Global zero flag forces the block to be zeroed
+ zero |= GlobalZero;
+
+ /*
+ * Realloc may return a null pointer if allocation fails
+ * so check the results and only assign the block pointer
+ * if the result is valid. Otherwise the block pointer should
+ * be left untouched
+ */
+
+ if (UseSynchronization)
+ {
+ lock (HeapLock)
+ {
+ newBlock = ReAllocBlock(block, elements, size, zero);
+ }
+ }
+ else
{
- /*
- * Realloc may return a null pointer if allocation fails
- * so check the results and only assign the block pointer
- * if the result is valid. Otherwise pointer block should
- * be left untouched
- */
newBlock = ReAllocBlock(block, elements, size, zero);
}
@@ -167,7 +216,7 @@ namespace VNLib.Utils.Memory
/// </summary>
/// <param name="block">The block to free</param>
protected abstract bool FreeBlock(LPVOID block);
-
+
/// <summary>
/// Resizes the previously allocated block of memory on the current heap
/// </summary>
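Because Alloc and Resize now validate elements * size with a checked multiply, oversized requests surface as OverflowException before the native allocator is touched. A small sketch of that observable behavior; the heap parameter and try/catch framing are illustrative assumptions.

using System;
using VNLib.Utils.Memory;

static class OverflowCheckSketch
{
    static void Demo(UnmanagedHeapBase heap)
    {
        try
        {
            //nuint.MaxValue * sizeof(short) cannot be represented in a nuint,
            //so the checked multiplication throws before any native call
            IntPtr block = heap.Alloc(nuint.MaxValue, sizeof(short), false);
            heap.Free(ref block);
        }
        catch (OverflowException)
        {
            //Expected for sizes that overflow the platform word
        }
    }
}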
diff --git a/lib/Utils/src/Memory/VnTable.cs b/lib/Utils/src/Memory/VnTable.cs
index 2c6ce74..7769c23 100644
--- a/lib/Utils/src/Memory/VnTable.cs
+++ b/lib/Utils/src/Memory/VnTable.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2022 Vaughn Nugent
+* Copyright (c) 2023 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Utils
@@ -23,6 +23,7 @@
*/
using System;
+using System.Runtime.CompilerServices;
using VNLib.Utils.Extensions;
@@ -84,13 +85,11 @@ namespace VNLib.Utils.Memory
this.Rows = rows;
this.Cols = cols;
- ulong tableSize = checked((ulong) rows * (ulong) cols);
+ ulong tableSize = checked((ulong)rows * (ulong)cols);
- if (tableSize > nuint.MaxValue)
+ if ((tableSize * (uint)Unsafe.SizeOf<T>()) > nuint.MaxValue)
{
-#pragma warning disable CA2201 // Do not raise reserved exception types
- throw new OutOfMemoryException("Table size is too large");
-#pragma warning restore CA2201 // Do not raise reserved exception types
+ throw new ArgumentOutOfRangeException("Rows and cols","Table size is too large");
}
//Alloc a buffer with zero memory enabled, with Rows * Cols number of elements
diff --git a/lib/Utils/src/Memory/VnTempBuffer.cs b/lib/Utils/src/Memory/VnTempBuffer.cs
index 1d8e42f..5f5f831 100644
--- a/lib/Utils/src/Memory/VnTempBuffer.cs
+++ b/lib/Utils/src/Memory/VnTempBuffer.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2022 Vaughn Nugent
+* Copyright (c) 2023 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Utils
@@ -24,8 +24,6 @@
using System;
using System.Buffers;
-using System.Runtime.InteropServices;
-using System.Runtime.CompilerServices;
using VNLib.Utils.Extensions;
@@ -35,7 +33,7 @@ namespace VNLib.Utils.Memory
/// A disposable temporary buffer from shared ArrayPool
/// </summary>
/// <typeparam name="T">Type of buffer to create</typeparam>
- public sealed class VnTempBuffer<T> : VnDisposeable, IIndexable<int, T>, IMemoryHandle<T>
+ public sealed class VnTempBuffer<T> : VnDisposeable, IIndexable<int, T>, IMemoryHandle<T>, IMemoryOwner<T>
{
private readonly ArrayPool<T> Pool;
@@ -43,6 +41,7 @@ namespace VNLib.Utils.Memory
/// Referrence to internal buffer
/// </summary>
public T[] Buffer { get; private set; }
+
/// <summary>
/// Inital/desired size of internal buffer
/// </summary>
@@ -64,6 +63,9 @@ namespace VNLib.Utils.Memory
}
}
+ ///<inheritdoc/>
+ Memory<T> IMemoryOwner<T>.Memory => AsMemory();
+
/// <summary>
/// Allocates a new <see cref="VnTempBuffer{BufType}"/> with a new buffer from shared array-pool
/// </summary>
@@ -177,33 +179,16 @@ namespace VNLib.Utils.Memory
#pragma warning restore CS8625 // Cannot convert null literal to non-nullable reference type.
}
- unsafe MemoryHandle IPinnable.Pin(int elementIndex)
- {
- //Guard
- if (elementIndex < 0 || elementIndex >= Buffer.Length)
- {
- throw new ArgumentOutOfRangeException(nameof(elementIndex));
- }
-
- //Pin the array
- GCHandle arrHandle = GCHandle.Alloc(Buffer, GCHandleType.Pinned);
-
- //Get array base address
- void* basePtr = (void*)arrHandle.AddrOfPinnedObject();
-
- //Get element offset
- void* indexOffet = Unsafe.Add<T>(basePtr, elementIndex);
-
- return new(indexOffet, arrHandle, this);
- }
+ //Pin, will also check bounds
+ ///<inheritdoc/>
+ public MemoryHandle Pin(int elementIndex) => MemoryUtil.PinArrayAndGetHandle(Buffer, elementIndex);
void IPinnable.Unpin()
{
//Gchandle will manage the unpin
}
- ~VnTempBuffer() => Free();
-
-
+ ///<inheritdoc/>
+ ~VnTempBuffer() => Free();
}
} \ No newline at end of file
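With VnTempBuffer&lt;T&gt; now implementing IMemoryOwner&lt;T&gt;, it can be handed to APIs that expect a pooled memory owner. A brief sketch, assuming the (pool, size, zero) constructor used by SafeAlloc is publicly accessible; the length and fill value are arbitrary.

using System;
using System.Buffers;
using VNLib.Utils.Memory;

static class TempBufferSketch
{
    static void Demo()
    {
        //The rented array is returned to the shared pool on dispose
        using IMemoryOwner<byte> owner = new VnTempBuffer<byte>(ArrayPool<byte>.Shared, 256, true);

        //Memory is exposed through the explicit IMemoryOwner<T> implementation
        owner.Memory.Span.Fill(0xAA);
    }
}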
diff --git a/lib/Utils/src/Memory/Win32PrivateHeap.cs b/lib/Utils/src/Memory/Win32PrivateHeap.cs
index 9911195..d5f08fc 100644
--- a/lib/Utils/src/Memory/Win32PrivateHeap.cs
+++ b/lib/Utils/src/Memory/Win32PrivateHeap.cs
@@ -31,7 +31,6 @@ using System.Runtime.CompilerServices;
using DWORD = System.Int64;
using LPVOID = System.IntPtr;
-
namespace VNLib.Utils.Memory
{
///<summary>
@@ -58,6 +57,11 @@ namespace VNLib.Utils.Memory
public const DWORD HEAP_REALLOC_IN_PLACE_ONLY = 0x10;
public const DWORD HEAP_ZERO_MEMORY = 0x08;
+
+ [DllImport(KERNEL_DLL, SetLastError = true, ExactSpelling = true)]
+ [DefaultDllImportSearchPaths(DllImportSearchPath.System32)]
+ private static extern LPVOID GetProcessHeap();
+
[DllImport(KERNEL_DLL, SetLastError = true, ExactSpelling = true)]
[DefaultDllImportSearchPaths(DllImportSearchPath.System32)]
private static extern LPVOID HeapAlloc(IntPtr hHeap, DWORD flags, nuint dwBytes);
@@ -88,7 +92,7 @@ namespace VNLib.Utils.Memory
[DllImport(KERNEL_DLL, SetLastError = true, ExactSpelling = true)]
[DefaultDllImportSearchPaths(DllImportSearchPath.System32)]
private static extern nuint HeapSize(IntPtr hHeap, DWORD flags, LPVOID lpMem);
-
+
#endregion
/// <summary>
@@ -97,12 +101,39 @@ namespace VNLib.Utils.Memory
/// <param name="initialSize">Intial size of the heap</param>
/// <param name="maxHeapSize">Maximum size allowed for the heap (disabled = 0, default)</param>
/// <param name="flags">Defalt heap flags to set globally for all blocks allocated by the heap (default = 0)</param>
- public static Win32PrivateHeap Create(nuint initialSize, nuint maxHeapSize = 0, DWORD flags = HEAP_NO_FLAGS)
+ /// <param name="cFlags">Flags to configure heap creation</param>
+ /// <remarks>
+ /// Win32 heaps are not thread safe, so synchronization is required; you may disable internal locking if you provide
+ /// your own synchronization.
+ /// </remarks>
+ public static Win32PrivateHeap Create(nuint initialSize, HeapCreation cFlags, nuint maxHeapSize = 0, DWORD flags = HEAP_NO_FLAGS)
{
+ if (cFlags.HasFlag(HeapCreation.IsSharedHeap))
+ {
+ //Clear the synchronization flag because we don't need it for a process heap
+ cFlags &= ~HeapCreation.UseSynchronization;
+
+ //Get the process heap
+ LPVOID handle = GetProcessHeap();
+
+ //The heap does not own the handle
+ return new Win32PrivateHeap(handle, cFlags, false);
+ }
+
+ if (cFlags.HasFlag(HeapCreation.UseSynchronization))
+ {
+ /*
+ * When the synchronization flag is set, we don't need to use
+ * the win32 serialization method
+ */
+
+ flags |= HEAP_NO_SERIALIZE;
+ }
+
//Call create, throw exception if the heap falled to allocate
- IntPtr heapHandle = HeapCreate(flags, initialSize, maxHeapSize);
+ ERRNO heapHandle = HeapCreate(flags, initialSize, maxHeapSize);
- if (heapHandle == IntPtr.Zero)
+ if (!heapHandle)
{
throw new NativeMemoryException("Heap could not be created");
}
@@ -110,17 +141,19 @@ namespace VNLib.Utils.Memory
Trace.WriteLine($"Win32 private heap {heapHandle:x} created");
#endif
//Heap has been created so we can wrap it
- return new(heapHandle);
+ return new(heapHandle, cFlags, true);
}
+
/// <summary>
/// LIFETIME WARNING. Consumes a valid win32 handle and will manage it's lifetime once constructed.
/// Locking and memory blocks will attempt to be allocated from this heap handle.
/// </summary>
/// <param name="win32HeapHandle">An open and valid handle to a win32 private heap</param>
+ /// <param name="flags">The heap creation flags to obey</param>
/// <returns>A wrapper around the specified heap</returns>
- public static Win32PrivateHeap ConsumeExisting(IntPtr win32HeapHandle) => new (win32HeapHandle);
+ public static Win32PrivateHeap ConsumeExisting(IntPtr win32HeapHandle, HeapCreation flags) => new (win32HeapHandle, flags, true);
- private Win32PrivateHeap(IntPtr heapPtr) : base(false, true) => handle = heapPtr;
+ private Win32PrivateHeap(IntPtr heapPtr, HeapCreation flags, bool ownsHandle) : base(flags, ownsHandle) => handle = heapPtr;
/// <summary>
/// Retrieves the size of a memory block allocated from the current heap.
@@ -146,6 +179,7 @@ namespace VNLib.Utils.Memory
return result;
}
+
/// <summary>
/// Validates the current heap instance. The function scans all the memory blocks in the heap and verifies that the heap control structures maintained by
/// the heap manager are in a consistent state.
@@ -173,6 +207,7 @@ namespace VNLib.Utils.Memory
#endif
return HeapDestroy(handle);
}
+
///<inheritdoc/>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
protected override sealed LPVOID AllocBlock(nuint elements, nuint size, bool zero)
@@ -181,6 +216,7 @@ namespace VNLib.Utils.Memory
return HeapAlloc(handle, zero ? HEAP_ZERO_MEMORY : HEAP_NO_FLAGS, bytes);
}
+
///<inheritdoc/>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
protected override sealed bool FreeBlock(LPVOID block) => HeapFree(handle, HEAP_NO_FLAGS, block);
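A Windows-only sketch of the updated Create signature; the heap size and flag choice are illustrative assumptions.

using System;
using VNLib.Utils.Memory;

static class Win32HeapSketch
{
    static void Demo()
    {
        /*
         * UseSynchronization lets the managed wrapper serialize access,
         * so HEAP_NO_SERIALIZE is passed to the underlying HeapCreate call
         */
        using Win32PrivateHeap heap = Win32PrivateHeap.Create(
            initialSize: 1024 * 1024,
            cFlags: HeapCreation.UseSynchronization
        );

        IntPtr block = heap.Alloc(64, 1, true);
        heap.Free(ref block);
    }
}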
diff --git a/lib/Utils/src/VnEncoding.cs b/lib/Utils/src/VnEncoding.cs
index c9cdbb0..4a95405 100644
--- a/lib/Utils/src/VnEncoding.cs
+++ b/lib/Utils/src/VnEncoding.cs
@@ -485,7 +485,7 @@ namespace VNLib.Utils
//calc size of bin buffer
int size = base32.Length;
//Rent a bin buffer
- using UnsafeMemoryHandle<byte> binBuffer = Memory.MemoryUtil.UnsafeAlloc<byte>(size);
+ using UnsafeMemoryHandle<byte> binBuffer = Memory.MemoryUtil.UnsafeAlloc(size);
//Try to decode the data
ERRNO decoded = TryFromBase32Chars(base32, binBuffer.Span);
//Marshal back to a struct
@@ -505,7 +505,7 @@ namespace VNLib.Utils
return null;
}
//Buffer size of the base32 string will always be enough buffer space
- using UnsafeMemoryHandle<byte> tempBuffer = Memory.MemoryUtil.UnsafeAlloc<byte>(base32.Length);
+ using UnsafeMemoryHandle<byte> tempBuffer = Memory.MemoryUtil.UnsafeAlloc(base32.Length);
//Try to decode the data
ERRNO decoded = TryFromBase32Chars(base32, tempBuffer.Span);
@@ -884,7 +884,7 @@ namespace VNLib.Utils
int decodedSize = encoding.GetByteCount(chars);
//alloc buffer
- using UnsafeMemoryHandle<byte> decodeHandle = MemoryUtil.UnsafeAlloc<byte>(decodedSize);
+ using UnsafeMemoryHandle<byte> decodeHandle = MemoryUtil.UnsafeAlloc(decodedSize);
//Get the utf8 binary data
int count = encoding.GetBytes(chars, decodeHandle);
return Base64UrlDecode(decodeHandle.Span[..count], output);
diff --git a/lib/Utils/tests/Memory/MemoryHandleTest.cs b/lib/Utils/tests/Memory/MemoryHandleTest.cs
index f7ab8d4..f8d9b79 100644
--- a/lib/Utils/tests/Memory/MemoryHandleTest.cs
+++ b/lib/Utils/tests/Memory/MemoryHandleTest.cs
@@ -36,13 +36,15 @@ namespace VNLib.Utils.Memory.Tests
{
[TestMethod]
- public void MemoryHandleAllocLongExtensionTest()
+ public unsafe void MemoryHandleAllocLongExtensionTest()
{
+ Assert.IsTrue(sizeof(nuint) == 8);
+
//Check for negatives
- Assert.ThrowsException<ArgumentOutOfRangeException>(() => Shared.Alloc<byte>(-1));
+ Assert.ThrowsException<ArgumentOutOfRangeException>(() => Shared.Alloc<byte>(-1).Dispose());
//Make sure over-alloc throws
- Assert.ThrowsException<OutOfMemoryException>(() => Shared.Alloc<byte>(nuint.MaxValue, false));
+ Assert.ThrowsException<OverflowException>(() => Shared.Alloc<short>(nuint.MaxValue, false).Dispose());
}
#if TARGET_64_BIT
[TestMethod]
diff --git a/lib/Utils/tests/Memory/MemoryUtilTests.cs b/lib/Utils/tests/Memory/MemoryUtilTests.cs
index 10e5d31..473281f 100644
--- a/lib/Utils/tests/Memory/MemoryUtilTests.cs
+++ b/lib/Utils/tests/Memory/MemoryUtilTests.cs
@@ -20,20 +20,17 @@ namespace VNLib.Utils.Memory.Tests
[TestMethod()]
public void InitializeNewHeapForProcessTest()
{
- //Check if rpmalloc is loaded
- if (MemoryUtil.IsRpMallocLoaded)
- {
- //Initialize the heap
- using IUnmangedHeap heap = MemoryUtil.InitializeNewHeapForProcess();
- //Confirm that the heap is actually a rpmalloc heap
- Assert.IsInstanceOfType(heap, typeof(RpMallocPrivateHeap));
- }
- else
- {
- //Confirm that Rpmalloc will throw DLLNotFound if the lib is not loaded
- Assert.ThrowsException<DllNotFoundException>(() => _ = RpMallocPrivateHeap.GlobalHeap.Alloc(1, 1, false));
- }
+ //Initialize the heap
+ using IUnmangedHeap heap = MemoryUtil.InitializeNewHeapForProcess();
+
+ //Test alloc
+ IntPtr block = heap.Alloc(1, 1, false);
+
+ //Free block
+ heap.Free(ref block);
+
+ //TODO verify the heap type by loading a dynamic heap dll
}
[TestMethod()]
@@ -337,7 +334,7 @@ namespace VNLib.Utils.Memory.Tests
public void GetSharedHeapStatsTest()
{
//Confirm heap diagnostics are enabled
- Assert.AreEqual<string?>(Environment.GetEnvironmentVariable(MemoryUtil.SHARED_HEAP_ENABLE_DIAGNOISTICS_ENV), "1");
+ Assert.AreEqual<string?>("1", Environment.GetEnvironmentVariable(MemoryUtil.SHARED_HEAP_ENABLE_DIAGNOISTICS_ENV));
//Get current stats
HeapStatistics preTest = MemoryUtil.GetSharedHeapStats();
@@ -441,6 +438,29 @@ namespace VNLib.Utils.Memory.Tests
const int TEST_1 = 1;
//Unsafe byte test
+ using (UnsafeMemoryHandle<byte> byteBuffer = MemoryUtil.UnsafeAllocNearestPage(TEST_1, false))
+ {
+ nuint byteSize = MemoryUtil.ByteSize(byteBuffer);
+
+ //Confirm byte size is working also
+ Assert.IsTrue(byteSize == byteBuffer.Length);
+
+ //Should be the same as the page size
+ Assert.IsTrue(byteSize == (nuint)Environment.SystemPageSize);
+ }
+
+ using(IMemoryHandle<byte> safeByteBuffer = MemoryUtil.SafeAllocNearestPage(TEST_1, false))
+ {
+ nuint byteSize = MemoryUtil.ByteSize(safeByteBuffer);
+
+ //Confirm byte size is working also
+ Assert.IsTrue(byteSize == safeByteBuffer.Length);
+
+ //Should be the same as the page size
+ Assert.IsTrue(byteSize == (nuint)Environment.SystemPageSize);
+ }
+
+ //Unsafe byte test with generics
using (UnsafeMemoryHandle<byte> byteBuffer = MemoryUtil.UnsafeAllocNearestPage<byte>(TEST_1, false))
{
nuint byteSize = MemoryUtil.ByteSize(byteBuffer);
@@ -452,7 +472,7 @@ namespace VNLib.Utils.Memory.Tests
Assert.IsTrue(byteSize == (nuint)Environment.SystemPageSize);
}
- using(IMemoryHandle<byte> safeByteBuffer = MemoryUtil.SafeAllocNearestPage<byte>(TEST_1, false))
+ using (IMemoryHandle<byte> safeByteBuffer = MemoryUtil.SafeAllocNearestPage<byte>(TEST_1, false))
{
nuint byteSize = MemoryUtil.ByteSize(safeByteBuffer);
diff --git a/lib/Utils/tests/Memory/NativeHeapTests.cs b/lib/Utils/tests/Memory/NativeHeapTests.cs
new file mode 100644
index 0000000..d27d5fd
--- /dev/null
+++ b/lib/Utils/tests/Memory/NativeHeapTests.cs
@@ -0,0 +1,32 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+
+using System;
+
+namespace VNLib.Utils.Memory.Tests
+{
+ [TestClass()]
+ public class NativeHeapTests
+ {
+ [TestMethod()]
+ public void LoadHeapTest()
+ {
+ const string TEST_HEAP_FILENAME = @"rpmalloc.dll";
+
+ //Try to load the global heap
+ using NativeHeap heap = NativeHeap.LoadHeap(TEST_HEAP_FILENAME, System.Runtime.InteropServices.DllImportSearchPath.SafeDirectories, HeapCreation.None, 0);
+
+ Assert.IsFalse(heap.IsInvalid);
+
+ IntPtr block = heap.Alloc(100, sizeof(byte), false);
+
+ Assert.IsTrue(block != IntPtr.Zero);
+
+ //Free the block
+ Assert.IsTrue(heap.Free(ref block));
+
+ //confirm the pointer is zeroed
+ Assert.IsTrue(block == IntPtr.Zero);
+
+ }
+ }
+} \ No newline at end of file
diff --git a/lib/Utils/tests/Memory/VnTableTests.cs b/lib/Utils/tests/Memory/VnTableTests.cs
index 474a201..cb8ea91 100644
--- a/lib/Utils/tests/Memory/VnTableTests.cs
+++ b/lib/Utils/tests/Memory/VnTableTests.cs
@@ -52,12 +52,18 @@ namespace VNLib.Utils.Memory.Tests
Assert.IsTrue(10000 == table.Cols);
}
-
- //Test oom, should be native
- Assert.ThrowsException<OutOfMemoryException>(() =>
+ try
{
using VnTable<int> table = new(uint.MaxValue, 20);
- });
+
+ Assert.Fail("The table allocation did not fail as expected");
+ }
+ catch (OutOfMemoryException)
+ {}
+ catch(Exception ex)
+ {
+ Assert.Fail("Table overflow creation test failed because another exception type was raised, {0}", ex.GetType().Name);
+ }
}
[TestMethod()]
diff --git a/lib/WinRpMalloc/src/WinRpMalloc.vcxproj b/lib/WinRpMalloc/src/WinRpMalloc.vcxproj
index 69a1bdf..7322319 100644
--- a/lib/WinRpMalloc/src/WinRpMalloc.vcxproj
+++ b/lib/WinRpMalloc/src/WinRpMalloc.vcxproj
@@ -156,6 +156,7 @@
</Link>
</ItemDefinitionGroup>
<ItemGroup>
+ <ClInclude Include="..\..\NativeHeapApi\src\NativeHeapApi.h" />
<ClInclude Include="framework.h" />
<ClInclude Include="pch.h" />
<ClInclude Include="rpmalloc.h" />
diff --git a/lib/WinRpMalloc/src/dllmain.c b/lib/WinRpMalloc/src/dllmain.c
index 10ea3f5..1c1378e 100644
--- a/lib/WinRpMalloc/src/dllmain.c
+++ b/lib/WinRpMalloc/src/dllmain.c
@@ -1,6 +1,27 @@
-// dllmain.cpp : Defines the entry point for the DLL application.
+/*
+* Copyright (c) 2023 Vaughn Nugent
+*
+* Library: VNLib
+* Package: WinRpMalloc
+* File: dllmain.c
+*
+* WinRpMalloc is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published
+* by the Free Software Foundation, either version 2 of the License,
+* or (at your option) any later version.
+*
+* WinRpMalloc is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with WinRpMalloc. If not, see http://www.gnu.org/licenses/.
+*/
#include "pch.h"
+//Include the native heap header directly from its repo location
+#include "../../NativeHeapApi/src/NativeHeapApi.h"
BOOL APIENTRY DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved)
{
@@ -24,4 +45,145 @@ BOOL APIENTRY DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserv
break;
}
return TRUE;
+}
+
+#define GLOBAL_HEAP_HANDLE_VALUE -10
+#define GLOBAL_HEAP_INIT_CHECK if (!rpmalloc_is_thread_initialized()) { rpmalloc_thread_initialize(); }
+
+//Define the heap methods
+
+HEAP_METHOD_EXPORT ERRNO heapCreate(UnmanagedHeapFlags* flags)
+{
+ //Check flags
+ if (flags->CreationFlags & HEAP_CREATION_IS_SHARED)
+ {
+ //User requested the global heap, synchronization is not required, so we can clear the sync flag
+ flags->CreationFlags &= ~(HEAP_CREATION_SERIALZE_ENABLED);
+
+ //Set the heap pointer as the global heap value
+ flags->HeapPointer = (LPVOID)GLOBAL_HEAP_HANDLE_VALUE;
+
+ //Success
+ return TRUE;
+ }
+
+ //Allocate a first class heap
+ flags->HeapPointer = rpmalloc_heap_acquire();
+
+ //Ignore remaining flags, zero/sync behavior is left to the caller
+
+ //Return value greater than 0
+ return flags->HeapPointer;
+}
+
+
+HEAP_METHOD_EXPORT ERRNO heapDestroy(LPVOID heap)
+{
+ //Check for the global heap handle
+ if ((int)heap == GLOBAL_HEAP_HANDLE_VALUE)
+ {
+ //Global heap, do nothing, and allow the entrypoint cleanup
+ return TRUE;
+ }
+
+ //Free all before destroy
+ rpmalloc_heap_free_all(heap);
+
+ //Destroy the heap
+ rpmalloc_heap_release(heap);
+
+ return TRUE;
+}
+
+
+HEAP_METHOD_EXPORT LPVOID heapAlloc(LPVOID heap, size_t elements, size_t alignment, BOOL zero)
+{
+ //Multiply for element size
+ size_t size = elements * alignment;
+
+ //Check for global heap
+ if ((int)heap == GLOBAL_HEAP_HANDLE_VALUE)
+ {
+ /*
+ * When called from the dotnet CLR the thread may not call the DLL
+ * thread attach method, so we need to check and initialize the heap
+ * for the current thread
+ */
+ GLOBAL_HEAP_INIT_CHECK
+
+ //Allocate the block
+ if (zero)
+ {
+ //Calloc
+ return rpcalloc(elements, alignment);
+ }
+ else
+ {
+ //Alloc without zero
+ return rpmalloc(size);
+ }
+ }
+ else
+ {
+ //First class heap, lock is held by caller, optionally zero the block
+ if (zero)
+ {
+ return rpmalloc_heap_calloc(heap, alignment, elements);
+ }
+ else
+ {
+ return rpmalloc_heap_alloc(heap, size);
+ }
+ }
+}
+
+
+HEAP_METHOD_EXPORT LPVOID heapRealloc(LPVOID heap, LPVOID block, size_t elements, size_t alignment, BOOL zero)
+{
+ //Multiply for element size
+ size_t size = elements * alignment;
+
+ //Check for global heap
+ if ((int)heap == GLOBAL_HEAP_HANDLE_VALUE)
+ {
+ /*
+ * When called from the dotnet CLR the thread may not call the DLL
+ * thread attach method, so we need to check and initialize the heap
+ * for the current thread
+ */
+ GLOBAL_HEAP_INIT_CHECK
+
+ //Realloc on the global heap
+ return rprealloc(block, size);
+ }
+ else
+ {
+ //First class heap, lock is held by caller
+ return rpmalloc_heap_realloc(heap, block, size, 0);
+ }
+}
+
+
+HEAP_METHOD_EXPORT ERRNO heapFree(LPVOID heap, LPVOID block)
+{
+ //Check for global heap
+ if ((int)heap == GLOBAL_HEAP_HANDLE_VALUE)
+ {
+ /*
+ * If free happens on a different thread, we must make sure rpmalloc is
+ * initialized for the current thread; it's cheap to check
+ */
+
+ GLOBAL_HEAP_INIT_CHECK
+
+ //free block
+ rpfree(block);
+ }
+ else
+ {
+ //First class heap, lock is held by caller
+ rpmalloc_heap_free(heap, block);
+ }
+
+ return TRUE;
} \ No newline at end of file
diff --git a/lib/WinRpMalloc/src/rpmalloc.c b/lib/WinRpMalloc/src/rpmalloc.c
index 249d008..65f6ee5 100644
--- a/lib/WinRpMalloc/src/rpmalloc.c
+++ b/lib/WinRpMalloc/src/rpmalloc.c
@@ -68,7 +68,7 @@
#elif defined(__GNUC__)
#define rpmalloc_assume(cond) \
do { \
- if (!__builtin_expect(cond, false)) \
+ if (!__builtin_expect(cond, 0)) \
__builtin_unreachable(); \
} while (0)
#elif defined(_MSC_VER)
@@ -268,7 +268,7 @@ extern int madvise(caddr_t, size_t, int);
typedef volatile long atomic32_t;
typedef volatile long long atomic64_t;
-typedef volatile void* atomicptr_t;
+typedef volatile void* atomicptr_t;
static FORCEINLINE int32_t atomic_load32(atomic32_t* src) { return *src; }
static FORCEINLINE void atomic_store32(atomic32_t* dst, int32_t val) { *dst = val; }
@@ -279,10 +279,10 @@ static FORCEINLINE int atomic_cas32_acquire(atomic32_t* dst, int32_t val, in
static FORCEINLINE void atomic_store32_release(atomic32_t* dst, int32_t val) { *dst = val; }
static FORCEINLINE int64_t atomic_load64(atomic64_t* src) { return *src; }
static FORCEINLINE int64_t atomic_add64(atomic64_t* val, int64_t add) { return (int64_t)InterlockedExchangeAdd64(val, add) + add; }
-static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return (void*)*src; }
+static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return (void*)*src; }
static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { *dst = val; }
static FORCEINLINE void atomic_store_ptr_release(atomicptr_t* dst, void* val) { *dst = val; }
-static FORCEINLINE void* atomic_exchange_ptr_acquire(atomicptr_t* dst, void* val) { return (void*)InterlockedExchangePointer((void* volatile*)dst, val); }
+static FORCEINLINE void* atomic_exchange_ptr_acquire(atomicptr_t* dst, void* val) { return (void*)InterlockedExchangePointer((void* volatile*)dst, val); }
static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return (InterlockedCompareExchangePointer((void* volatile*)dst, val, ref) == ref) ? 1 : 0; }
#define EXPECTED(x) (x)
@@ -292,9 +292,9 @@ static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref
#include <stdatomic.h>
-typedef volatile _Atomic(int32_t) atomic32_t;
-typedef volatile _Atomic(int64_t) atomic64_t;
-typedef volatile _Atomic(void*) atomicptr_t;
+typedef volatile _Atomic(int32_t)atomic32_t;
+typedef volatile _Atomic(int64_t)atomic64_t;
+typedef volatile _Atomic(void*)atomicptr_t;
static FORCEINLINE int32_t atomic_load32(atomic32_t* src) { return atomic_load_explicit(src, memory_order_relaxed); }
static FORCEINLINE void atomic_store32(atomic32_t* dst, int32_t val) { atomic_store_explicit(dst, val, memory_order_relaxed); }
@@ -305,10 +305,10 @@ static FORCEINLINE int atomic_cas32_acquire(atomic32_t* dst, int32_t val, in
static FORCEINLINE void atomic_store32_release(atomic32_t* dst, int32_t val) { atomic_store_explicit(dst, val, memory_order_release); }
static FORCEINLINE int64_t atomic_load64(atomic64_t* val) { return atomic_load_explicit(val, memory_order_relaxed); }
static FORCEINLINE int64_t atomic_add64(atomic64_t* val, int64_t add) { return atomic_fetch_add_explicit(val, add, memory_order_relaxed) + add; }
-static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return atomic_load_explicit(src, memory_order_relaxed); }
+static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return atomic_load_explicit(src, memory_order_relaxed); }
static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { atomic_store_explicit(dst, val, memory_order_relaxed); }
static FORCEINLINE void atomic_store_ptr_release(atomicptr_t* dst, void* val) { atomic_store_explicit(dst, val, memory_order_release); }
-static FORCEINLINE void* atomic_exchange_ptr_acquire(atomicptr_t* dst, void* val) { return atomic_exchange_explicit(dst, val, memory_order_acquire); }
+static FORCEINLINE void* atomic_exchange_ptr_acquire(atomicptr_t* dst, void* val) { return atomic_exchange_explicit(dst, val, memory_order_acquire); }
static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return atomic_compare_exchange_weak_explicit(dst, &ref, val, memory_order_relaxed, memory_order_relaxed); }
#define EXPECTED(x) __builtin_expect((x), 1)
@@ -388,8 +388,8 @@ static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref
//! Number of spans to transfer between thread and global cache for large spans
#define THREAD_SPAN_LARGE_CACHE_TRANSFER 6
-_Static_assert((SMALL_GRANULARITY & (SMALL_GRANULARITY - 1)) == 0, "Small granularity must be power of two");
-_Static_assert((SPAN_HEADER_SIZE & (SPAN_HEADER_SIZE - 1)) == 0, "Span header size must be power of two");
+_Static_assert((SMALL_GRANULARITY& (SMALL_GRANULARITY - 1)) == 0, "Small granularity must be power of two");
+_Static_assert((SPAN_HEADER_SIZE& (SPAN_HEADER_SIZE - 1)) == 0, "Span header size must be power of two");
#if ENABLE_VALIDATE_ARGS
//! Maximum allocation size to avoid integer overflow
@@ -498,7 +498,7 @@ typedef struct size_class_use_t size_class_use_t;
// to reduce physical memory use).
struct span_t {
//! Free list
- void* free_list;
+ void* free_list;
//! Total block count of size class
uint32_t block_count;
//! Size class
@@ -526,34 +526,34 @@ struct span_t {
//! Alignment offset
uint32_t align_offset;
//! Owning heap
- heap_t* heap;
+ heap_t* heap;
//! Next span
- span_t* next;
+ span_t* next;
//! Previous span
- span_t* prev;
+ span_t* prev;
};
_Static_assert(sizeof(span_t) <= SPAN_HEADER_SIZE, "span size mismatch");
struct span_cache_t {
size_t count;
- span_t* span[MAX_THREAD_SPAN_CACHE];
+ span_t* span[MAX_THREAD_SPAN_CACHE];
};
typedef struct span_cache_t span_cache_t;
struct span_large_cache_t {
size_t count;
- span_t* span[MAX_THREAD_SPAN_LARGE_CACHE];
+ span_t* span[MAX_THREAD_SPAN_LARGE_CACHE];
};
typedef struct span_large_cache_t span_large_cache_t;
struct heap_size_class_t {
//! Free list of active span
- void* free_list;
+ void* free_list;
//! Double linked list of partially used spans with free blocks.
// Previous span pointer in head points to tail span of list.
- span_t* partial_span;
+ span_t* partial_span;
//! Early level cache of fully free spans
- span_t* cache;
+ span_t* cache;
};
typedef struct heap_size_class_t heap_size_class_t;
@@ -572,23 +572,23 @@ struct heap_t {
//! Number of full spans
size_t full_span_count;
//! Mapped but unused spans
- span_t* span_reserve;
+ span_t* span_reserve;
//! Master span for mapped but unused spans
- span_t* span_reserve_master;
+ span_t* span_reserve_master;
//! Number of mapped but unused spans
uint32_t spans_reserved;
//! Child count
atomic32_t child_count;
//! Next heap in id list
- heap_t* next_heap;
+ heap_t* next_heap;
//! Next heap in orphan list
- heap_t* next_orphan;
+ heap_t* next_orphan;
//! Heap ID
int32_t id;
//! Finalization state flag
int finalize;
//! Master heap owning the memory pages
- heap_t* master_heap;
+ heap_t* master_heap;
#if ENABLE_THREAD_CACHE
//! Arrays of fully freed spans, large spans with > 1 span count
span_large_cache_t span_large_cache[LARGE_CLASS_COUNT - 1];
@@ -596,9 +596,9 @@ struct heap_t {
#if RPMALLOC_FIRST_CLASS_HEAPS
//! Double linked list of fully utilized spans with free blocks for each size class.
// Previous span pointer in head points to tail span of list.
- span_t* full_span[SIZE_CLASS_COUNT];
+ span_t* full_span[SIZE_CLASS_COUNT];
//! Double linked list of large and huge spans allocated by this heap
- span_t* large_huge_span;
+ span_t* large_huge_span;
#endif
#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
//! Current and high water mark of spans used per span count
@@ -1053,7 +1053,7 @@ _rpmalloc_unmap_os(void* address, size_t size, size_t offset, size_t release) {
}
static void
-_rpmalloc_span_mark_as_subspan_unless_master(span_t* master, span_t* subspan, size_t span_count);
+_rpmalloc_span_mark_as_subspan_unless_master(span_t * master, span_t * subspan, size_t span_count);
//! Use global reserved spans to fulfill a memory map request (reserve size must be checked by caller)
static span_t*
@@ -1070,7 +1070,7 @@ _rpmalloc_global_get_reserved_spans(size_t span_count) {
//! Store the given spans as global reserve (must only be called from within new heap allocation, not thread safe)
static void
-_rpmalloc_global_set_reserved_spans(span_t* master, span_t* reserve, size_t reserve_span_count) {
+_rpmalloc_global_set_reserved_spans(span_t * master, span_t * reserve, size_t reserve_span_count) {
_memory_global_reserve_master = master;
_memory_global_reserve_count = reserve_span_count;
_memory_global_reserve = reserve;
@@ -1085,7 +1085,7 @@ _rpmalloc_global_set_reserved_spans(span_t* master, span_t* reserve, size_t rese
//! Add a span to double linked list at the head
static void
-_rpmalloc_span_double_link_list_add(span_t** head, span_t* span) {
+_rpmalloc_span_double_link_list_add(span_t * *head, span_t * span) {
if (*head)
(*head)->prev = span;
span->next = *head;
@@ -1094,7 +1094,7 @@ _rpmalloc_span_double_link_list_add(span_t** head, span_t* span) {
//! Pop head span from double linked list
static void
-_rpmalloc_span_double_link_list_pop_head(span_t** head, span_t* span) {
+_rpmalloc_span_double_link_list_pop_head(span_t * *head, span_t * span) {
rpmalloc_assert(*head == span, "Linked list corrupted");
span = *head;
*head = span->next;
@@ -1102,11 +1102,12 @@ _rpmalloc_span_double_link_list_pop_head(span_t** head, span_t* span) {
//! Remove a span from double linked list
static void
-_rpmalloc_span_double_link_list_remove(span_t** head, span_t* span) {
+_rpmalloc_span_double_link_list_remove(span_t * *head, span_t * span) {
rpmalloc_assert(*head, "Linked list corrupted");
if (*head == span) {
*head = span->next;
- } else {
+ }
+ else {
span_t* next_span = span->next;
span_t* prev_span = span->prev;
prev_span->next = next_span;
@@ -1123,17 +1124,17 @@ _rpmalloc_span_double_link_list_remove(span_t** head, span_t* span) {
//////
static void
-_rpmalloc_heap_cache_insert(heap_t* heap, span_t* span);
+_rpmalloc_heap_cache_insert(heap_t * heap, span_t * span);
static void
-_rpmalloc_heap_finalize(heap_t* heap);
+_rpmalloc_heap_finalize(heap_t * heap);
static void
-_rpmalloc_heap_set_reserved_spans(heap_t* heap, span_t* master, span_t* reserve, size_t reserve_span_count);
+_rpmalloc_heap_set_reserved_spans(heap_t * heap, span_t * master, span_t * reserve, size_t reserve_span_count);
//! Declare the span to be a subspan and store distance from master span and span count
static void
-_rpmalloc_span_mark_as_subspan_unless_master(span_t* master, span_t* subspan, size_t span_count) {
+_rpmalloc_span_mark_as_subspan_unless_master(span_t * master, span_t * subspan, size_t span_count) {
rpmalloc_assert((subspan != master) || (subspan->flags & SPAN_FLAG_MASTER), "Span master pointer and/or flag mismatch");
if (subspan != master) {
subspan->flags = SPAN_FLAG_SUBSPAN;
@@ -1145,7 +1146,7 @@ _rpmalloc_span_mark_as_subspan_unless_master(span_t* master, span_t* subspan, si
//! Use reserved spans to fulfill a memory map request (reserve size must be checked by caller)
static span_t*
-_rpmalloc_span_map_from_reserve(heap_t* heap, size_t span_count) {
+_rpmalloc_span_map_from_reserve(heap_t * heap, size_t span_count) {
//Update the heap span reserve
span_t* span = heap->span_reserve;
heap->span_reserve = (span_t*)pointer_offset(span, span_count * _memory_span_size);
@@ -1169,7 +1170,7 @@ _rpmalloc_span_align_count(size_t span_count) {
//! Setup a newly mapped span
static void
-_rpmalloc_span_initialize(span_t* span, size_t total_span_count, size_t span_count, size_t align_offset) {
+_rpmalloc_span_initialize(span_t * span, size_t total_span_count, size_t span_count, size_t align_offset) {
span->total_spans = (uint32_t)total_span_count;
span->span_count = (uint32_t)span_count;
span->align_offset = (uint32_t)align_offset;
@@ -1178,11 +1179,11 @@ _rpmalloc_span_initialize(span_t* span, size_t total_span_count, size_t span_cou
}
static void
-_rpmalloc_span_unmap(span_t* span);
+_rpmalloc_span_unmap(span_t * span);
//! Map an aligned set of spans, taking configured mapping granularity and the page size into account
static span_t*
-_rpmalloc_span_map_aligned_count(heap_t* heap, size_t span_count) {
+_rpmalloc_span_map_aligned_count(heap_t * heap, size_t span_count) {
//If we already have some, but not enough, reserved spans, release those to heap cache and map a new
//full set of spans. Otherwise we would waste memory if page size > span size (huge pages)
size_t aligned_span_count = _rpmalloc_span_align_count(span_count);
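To make the trade-off described above concrete (assuming, purely for illustration, a 64 KiB span size and 2 MiB huge pages): each mapping must cover 2 MiB / 64 KiB = 32 spans, so if a request for 5 spans arrives while only 3 are reserved, keeping the 3 and mapping another full set would strand memory; instead the 3 reserved spans are released to the heap cache and the fresh 32-span mapping serves the request.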
@@ -1220,7 +1221,7 @@ _rpmalloc_span_map_aligned_count(heap_t* heap, size_t span_count) {
//! Map in memory pages for the given number of spans (or use previously reserved pages)
static span_t*
-_rpmalloc_span_map(heap_t* heap, size_t span_count) {
+_rpmalloc_span_map(heap_t * heap, size_t span_count) {
if (span_count <= heap->spans_reserved)
return _rpmalloc_span_map_from_reserve(heap, span_count);
span_t* span = 0;
@@ -1253,7 +1254,7 @@ _rpmalloc_span_map(heap_t* heap, size_t span_count) {
//! Unmap memory pages for the given number of spans (or mark as unused if no partial unmappings)
static void
-_rpmalloc_span_unmap(span_t* span) {
+_rpmalloc_span_unmap(span_t * span) {
rpmalloc_assert((span->flags & SPAN_FLAG_MASTER) || (span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
rpmalloc_assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
@@ -1268,7 +1269,8 @@ _rpmalloc_span_unmap(span_t* span) {
rpmalloc_assert(span->align_offset == 0, "Span align offset corrupted");
if (_memory_span_size >= _memory_page_size)
_rpmalloc_unmap(span, span_count * _memory_span_size, 0, 0);
- } else {
+ }
+ else {
//Special double flag to denote an unmapped master
//It must be kept in memory since span header must be used
span->flags |= SPAN_FLAG_MASTER | SPAN_FLAG_SUBSPAN | SPAN_FLAG_UNMAPPED_MASTER;
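A small, hypothetical check illustrating the double flag described above (the helper name is made up; this is not code from this file):

    /* A master span normally never carries SPAN_FLAG_SUBSPAN; both flags set
     * together is the sentinel for "master already unmapped, header kept
     * alive only for bookkeeping". */
    static int span_is_unmapped_master(const span_t* span) {
        const uint32_t both = SPAN_FLAG_MASTER | SPAN_FLAG_SUBSPAN;
        return (span->flags & both) == both;
    }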
@@ -1289,7 +1291,7 @@ _rpmalloc_span_unmap(span_t* span) {
//! Move the span (used for small or medium allocations) to the heap thread cache
static void
-_rpmalloc_span_release_to_cache(heap_t* heap, span_t* span) {
+_rpmalloc_span_release_to_cache(heap_t * heap, span_t * span) {
rpmalloc_assert(heap == span->heap, "Span heap pointer corrupted");
rpmalloc_assert(span->size_class < SIZE_CLASS_COUNT, "Invalid span size class");
rpmalloc_assert(span->span_count == 1, "Invalid span count");
@@ -1303,7 +1305,8 @@ _rpmalloc_span_release_to_cache(heap_t* heap, span_t* span) {
if (heap->size_class[span->size_class].cache)
_rpmalloc_heap_cache_insert(heap, heap->size_class[span->size_class].cache);
heap->size_class[span->size_class].cache = span;
- } else {
+ }
+ else {
_rpmalloc_span_unmap(span);
}
}
@@ -1374,7 +1377,7 @@ _rpmalloc_span_initialize_new(heap_t * heap, heap_size_class_t * heap_size_class
}
static void
-_rpmalloc_span_extract_free_list_deferred(span_t* span) {
+_rpmalloc_span_extract_free_list_deferred(span_t * span) {
// We need acquire semantics on the CAS operation since we are interested in the list size
// Refer to _rpmalloc_deallocate_defer_small_or_medium for further comments on this dependency
do {
@@ -1386,13 +1389,13 @@ _rpmalloc_span_extract_free_list_deferred(span_t* span) {
}
static int
-_rpmalloc_span_is_fully_utilized(span_t* span) {
+_rpmalloc_span_is_fully_utilized(span_t * span) {
rpmalloc_assert(span->free_list_limit <= span->block_count, "Span free list corrupted");
return !span->free_list && (span->free_list_limit >= span->block_count);
}
static int
-_rpmalloc_span_finalize(heap_t* heap, size_t iclass, span_t* span, span_t** list_head) {
+_rpmalloc_span_finalize(heap_t * heap, size_t iclass, span_t * span, span_t * *list_head) {
void* free_list = heap->size_class[iclass].free_list;
span_t* class_span = (span_t*)((uintptr_t)free_list & _memory_span_mask);
if (span == class_span) {
@@ -1443,7 +1446,7 @@ _rpmalloc_span_finalize(heap_t* heap, size_t iclass, span_t* span, span_t** list
//! Finalize a global cache
static void
-_rpmalloc_global_cache_finalize(global_cache_t* cache) {
+_rpmalloc_global_cache_finalize(global_cache_t * cache) {
while (!atomic_cas32_acquire(&cache->lock, 1, 0))
_rpmalloc_spin();
@@ -1461,7 +1464,7 @@ _rpmalloc_global_cache_finalize(global_cache_t* cache) {
}
static void
-_rpmalloc_global_cache_insert_spans(span_t** span, size_t span_count, size_t count) {
+_rpmalloc_global_cache_insert_spans(span_t * *span, size_t span_count, size_t count) {
const size_t cache_limit = (span_count == 1) ?
GLOBAL_CACHE_MULTIPLIER * MAX_THREAD_SPAN_CACHE :
GLOBAL_CACHE_MULTIPLIER * (MAX_THREAD_SPAN_LARGE_CACHE - (span_count >> 1));
@@ -1541,7 +1544,7 @@ _rpmalloc_global_cache_insert_spans(span_t** span, size_t span_count, size_t cou
}
static size_t
-_rpmalloc_global_cache_extract_spans(span_t** span, size_t span_count, size_t count) {
+_rpmalloc_global_cache_extract_spans(span_t * *span, size_t span_count, size_t count) {
global_cache_t* cache = &_memory_span_cache[span_count - 1];
size_t extract_count = 0;
@@ -1588,7 +1591,7 @@ static void _rpmalloc_deallocate_huge(span_t*);
//! Store the given spans as reserve in the given heap
static void
-_rpmalloc_heap_set_reserved_spans(heap_t* heap, span_t* master, span_t* reserve, size_t reserve_span_count) {
+_rpmalloc_heap_set_reserved_spans(heap_t * heap, span_t * master, span_t * reserve, size_t reserve_span_count) {
heap->span_reserve_master = master;
heap->span_reserve = reserve;
heap->spans_reserved = (uint32_t)reserve_span_count;
@@ -1640,7 +1643,7 @@ _rpmalloc_heap_cache_adopt_deferred(heap_t * heap, span_t * *single_span) {
}
static void
-_rpmalloc_heap_unmap(heap_t* heap) {
+_rpmalloc_heap_unmap(heap_t * heap) {
if (!heap->master_heap) {
if ((heap->finalize > 1) && !atomic_load32(&heap->child_count)) {
span_t* span = (span_t*)((uintptr_t)heap & _memory_span_mask);
@@ -1655,7 +1658,7 @@ _rpmalloc_heap_unmap(heap_t* heap) {
}
static void
-_rpmalloc_heap_global_finalize(heap_t* heap) {
+_rpmalloc_heap_global_finalize(heap_t * heap) {
if (heap->finalize++ > 1) {
--heap->finalize;
return;
@@ -1704,7 +1707,7 @@ _rpmalloc_heap_global_finalize(heap_t* heap) {
//! Insert a single span into thread heap cache, releasing to global cache if overflow
static void
-_rpmalloc_heap_cache_insert(heap_t* heap, span_t* span) {
+_rpmalloc_heap_cache_insert(heap_t * heap, span_t * span) {
if (UNEXPECTED(heap->finalize != 0)) {
_rpmalloc_span_unmap(span);
_rpmalloc_heap_global_finalize(heap);
@@ -1757,7 +1760,7 @@ _rpmalloc_heap_cache_insert(heap_t* heap, span_t* span) {
//! Extract the given number of spans from the different cache levels
static span_t*
-_rpmalloc_heap_thread_cache_extract(heap_t* heap, size_t span_count) {
+_rpmalloc_heap_thread_cache_extract(heap_t * heap, size_t span_count) {
span_t* span = 0;
#if ENABLE_THREAD_CACHE
span_cache_t* span_cache;
@@ -1774,7 +1777,7 @@ _rpmalloc_heap_thread_cache_extract(heap_t* heap, size_t span_count) {
}
static span_t*
-_rpmalloc_heap_thread_cache_deferred_extract(heap_t* heap, size_t span_count) {
+_rpmalloc_heap_thread_cache_deferred_extract(heap_t * heap, size_t span_count) {
span_t* span = 0;
if (span_count == 1) {
_rpmalloc_heap_cache_adopt_deferred(heap, &span);
@@ -1787,7 +1790,7 @@ _rpmalloc_heap_thread_cache_deferred_extract(heap_t* heap, size_t span_count) {
}
static span_t*
-_rpmalloc_heap_reserved_extract(heap_t* heap, size_t span_count) {
+_rpmalloc_heap_reserved_extract(heap_t * heap, size_t span_count) {
if (heap->spans_reserved >= span_count)
return _rpmalloc_span_map(heap, span_count);
return 0;
@@ -1795,7 +1798,7 @@ _rpmalloc_heap_reserved_extract(heap_t* heap, size_t span_count) {
//! Extract a span from the global cache
static span_t*
-_rpmalloc_heap_global_cache_extract(heap_t* heap, size_t span_count) {
+_rpmalloc_heap_global_cache_extract(heap_t * heap, size_t span_count) {
#if ENABLE_GLOBAL_CACHE
#if ENABLE_THREAD_CACHE
span_cache_t* span_cache;
@@ -1830,7 +1833,7 @@ _rpmalloc_heap_global_cache_extract(heap_t* heap, size_t span_count) {
}
static void
-_rpmalloc_inc_span_statistics(heap_t* heap, size_t span_count, uint32_t class_idx) {
+_rpmalloc_inc_span_statistics(heap_t * heap, size_t span_count, uint32_t class_idx) {
(void)sizeof(heap);
(void)sizeof(span_count);
(void)sizeof(class_idx);
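The (void)sizeof(...) casts above are the usual trick for referencing parameters that are only needed when statistics are compiled in: sizeof "uses" the name at compile time without evaluating it or generating code, so no unused-parameter warning is emitted. A stand-alone illustration with hypothetical names (record_event, update_counters):

    static void record_event(int event_id) {
        (void)sizeof(event_id);     /* compile-time reference only, no code */
    #if ENABLE_STATISTICS
        update_counters(event_id);  /* real use when statistics are enabled */
    #endif
    }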
@@ -1845,7 +1848,7 @@ _rpmalloc_inc_span_statistics(heap_t* heap, size_t span_count, uint32_t class_id
//! Get a span from one of the cache levels (thread cache, reserved, global cache) or fallback to mapping more memory
static span_t*
-_rpmalloc_heap_extract_new_span(heap_t* heap, heap_size_class_t* heap_size_class, size_t span_count, uint32_t class_idx) {
+_rpmalloc_heap_extract_new_span(heap_t * heap, heap_size_class_t * heap_size_class, size_t span_count, uint32_t class_idx) {
span_t* span;
#if ENABLE_THREAD_CACHE
if (heap_size_class && heap_size_class->cache) {
@@ -1896,7 +1899,7 @@ _rpmalloc_heap_extract_new_span(heap_t* heap, heap_size_class_t* heap_size_class
}
static void
-_rpmalloc_heap_initialize(heap_t* heap) {
+_rpmalloc_heap_initialize(heap_t * heap) {
_rpmalloc_memset_const(heap, 0, sizeof(heap_t));
//Get a new heap ID
heap->id = 1 + atomic_incr32(&_memory_heap_id);
@@ -1908,7 +1911,7 @@ _rpmalloc_heap_initialize(heap_t* heap) {
}
static void
-_rpmalloc_heap_orphan(heap_t* heap, int first_class) {
+_rpmalloc_heap_orphan(heap_t * heap, int first_class) {
heap->owner_thread = (uintptr_t)-1;
#if RPMALLOC_FIRST_CLASS_HEAPS
heap_t** heap_list = (first_class ? &_memory_first_class_orphan_heaps : &_memory_orphan_heaps);
@@ -1997,7 +2000,7 @@ _rpmalloc_heap_allocate_new(void) {
}
static heap_t*
-_rpmalloc_heap_extract_orphan(heap_t** heap_list) {
+_rpmalloc_heap_extract_orphan(heap_t * *heap_list) {
heap_t* heap = *heap_list;
*heap_list = (heap ? heap->next_orphan : 0);
return heap;
@@ -2088,7 +2091,7 @@ _rpmalloc_heap_release_raw_fc(void* heapptr) {
}
static void
-_rpmalloc_heap_finalize(heap_t* heap) {
+_rpmalloc_heap_finalize(heap_t * heap) {
if (heap->spans_reserved) {
span_t* span = _rpmalloc_span_map(heap, heap->spans_reserved);
_rpmalloc_span_unmap(span);
@@ -2207,7 +2210,7 @@ _rpmalloc_allocate_from_heap_fallback(heap_t * heap, heap_size_class_t * heap_si
//! Allocate a small sized memory block from the given heap
static void*
-_rpmalloc_allocate_small(heap_t* heap, size_t size) {
+_rpmalloc_allocate_small(heap_t * heap, size_t size) {
rpmalloc_assert(heap, "No thread heap");
//Small sizes have unique size classes
const uint32_t class_idx = (uint32_t)((size + (SMALL_GRANULARITY - 1)) >> SMALL_GRANULARITY_SHIFT);
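For a concrete example of the rounding above (assuming a 16-byte small granularity, i.e. a shift of 4): a 24-byte request yields class index (24 + 15) >> 4 = 2, the 32-byte size class, while a 16-byte request lands exactly on class 1.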
@@ -2220,7 +2223,7 @@ _rpmalloc_allocate_small(heap_t* heap, size_t size) {
//! Allocate a medium sized memory block from the given heap
static void*
-_rpmalloc_allocate_medium(heap_t* heap, size_t size) {
+_rpmalloc_allocate_medium(heap_t * heap, size_t size) {
rpmalloc_assert(heap, "No thread heap");
//Calculate the size class index and do a dependent lookup of the final class index (in case of merged classes)
const uint32_t base_idx = (uint32_t)(SMALL_CLASS_COUNT + ((size - (SMALL_SIZE_LIMIT + 1)) >> MEDIUM_GRANULARITY_SHIFT));
@@ -2234,7 +2237,7 @@ _rpmalloc_allocate_medium(heap_t* heap, size_t size) {
//! Allocate a large sized memory block from the given heap
static void*
-_rpmalloc_allocate_large(heap_t* heap, size_t size) {
+_rpmalloc_allocate_large(heap_t * heap, size_t size) {
rpmalloc_assert(heap, "No thread heap");
//Calculate number of needed max sized spans (including header)
//Since this function is never called if size > LARGE_SIZE_LIMIT
@@ -2264,7 +2267,7 @@ _rpmalloc_allocate_large(heap_t* heap, size_t size) {
//! Allocate a huge block by mapping memory pages directly
static void*
-_rpmalloc_allocate_huge(heap_t* heap, size_t size) {
+_rpmalloc_allocate_huge(heap_t * heap, size_t size) {
rpmalloc_assert(heap, "No thread heap");
_rpmalloc_heap_cache_adopt_deferred(heap, 0);
size += SPAN_HEADER_SIZE;
@@ -2293,7 +2296,7 @@ _rpmalloc_allocate_huge(heap_t* heap, size_t size) {
//! Allocate a block of the given size
static void*
-_rpmalloc_allocate(heap_t* heap, size_t size) {
+_rpmalloc_allocate(heap_t * heap, size_t size) {
_rpmalloc_stat_add64(&_allocation_counter, 1);
if (EXPECTED(size <= SMALL_SIZE_LIMIT))
return _rpmalloc_allocate_small(heap, size);
@@ -2305,7 +2308,7 @@ _rpmalloc_allocate(heap_t* heap, size_t size) {
}
static void*
-_rpmalloc_aligned_allocate(heap_t* heap, size_t alignment, size_t size) {
+_rpmalloc_aligned_allocate(heap_t * heap, size_t alignment, size_t size) {
if (alignment <= SMALL_GRANULARITY)
return _rpmalloc_allocate(heap, size);
@@ -2431,7 +2434,7 @@ retry:
//! Deallocate the given small/medium memory block in the current thread local heap
static void
-_rpmalloc_deallocate_direct_small_or_medium(span_t* span, void* block) {
+_rpmalloc_deallocate_direct_small_or_medium(span_t * span, void* block) {
heap_t* heap = span->heap;
rpmalloc_assert(heap->owner_thread == get_thread_id() || !heap->owner_thread || heap->finalize, "Internal failure");
//Add block to free list
@@ -2463,7 +2466,7 @@ _rpmalloc_deallocate_direct_small_or_medium(span_t* span, void* block) {
}
static void
-_rpmalloc_deallocate_defer_free_span(heap_t* heap, span_t* span) {
+_rpmalloc_deallocate_defer_free_span(heap_t * heap, span_t * span) {
if (span->size_class != SIZE_CLASS_HUGE)
_rpmalloc_stat_inc(&heap->span_use[span->span_count - 1].spans_deferred);
//This list does not need ABA protection, no mutable side state
@@ -2474,7 +2477,7 @@ _rpmalloc_deallocate_defer_free_span(heap_t* heap, span_t* span) {
//! Put the block in the deferred free list of the owning span
static void
-_rpmalloc_deallocate_defer_small_or_medium(span_t* span, void* block) {
+_rpmalloc_deallocate_defer_small_or_medium(span_t * span, void* block) {
// The memory ordering here is a bit tricky, to avoid having to ABA protect
// the deferred free list to avoid desynchronization of list and list size
// we need to have acquire semantics on successful CAS of the pointer to
@@ -2496,7 +2499,7 @@ _rpmalloc_deallocate_defer_small_or_medium(span_t* span, void* block) {
}
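The memory-ordering comment above is easier to follow next to a bare-bones version of the pattern. The sketch below uses hypothetical names and is not this file's implementation; it only shows the shape of a lock-free push where the successful compare-and-swap carries acquire semantics, so state published by the last thread to touch the list is visible here, while the failure case stays relaxed because the loop simply retries with the refreshed head:

    #include <stdatomic.h>

    typedef struct block { struct block* next; } block_t;

    static void deferred_push(_Atomic(void*)* list, block_t* block) {
        void* head = atomic_load_explicit(list, memory_order_relaxed);
        do {
            block->next = (block_t*)head;   /* link onto the current head */
        } while (!atomic_compare_exchange_weak_explicit(
            list, &head, (void*)block,
            memory_order_acquire,    /* success: observe prior list state */
            memory_order_relaxed));  /* failure: retry with updated head */
    }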
static void
-_rpmalloc_deallocate_small_or_medium(span_t* span, void* p) {
+_rpmalloc_deallocate_small_or_medium(span_t * span, void* p) {
_rpmalloc_stat_inc_free(span->heap, span->size_class);
if (span->flags & SPAN_FLAG_ALIGNED_BLOCKS) {
//Realign pointer to block start
@@ -2518,7 +2521,7 @@ _rpmalloc_deallocate_small_or_medium(span_t* span, void* p) {
//! Deallocate the given large memory block to the current heap
static void
-_rpmalloc_deallocate_large(span_t* span) {
+_rpmalloc_deallocate_large(span_t * span) {
rpmalloc_assert(span->size_class == SIZE_CLASS_LARGE, "Bad span size class");
rpmalloc_assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
rpmalloc_assert((span->flags & SPAN_FLAG_MASTER) || (span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
@@ -2571,7 +2574,7 @@ _rpmalloc_deallocate_large(span_t* span) {
//! Deallocate the given huge span
static void
-_rpmalloc_deallocate_huge(span_t* span) {
+_rpmalloc_deallocate_huge(span_t * span) {
rpmalloc_assert(span->heap, "No span heap");
#if RPMALLOC_FIRST_CLASS_HEAPS
int defer = (span->heap->owner_thread && (span->heap->owner_thread != get_thread_id()) && !span->heap->finalize);
@@ -2621,7 +2624,7 @@ _rpmalloc_usable_size(void* p);
//! Reallocate the given block to the given size
static void*
-_rpmalloc_reallocate(heap_t* heap, void* p, size_t size, size_t oldsize, unsigned int flags) {
+_rpmalloc_reallocate(heap_t * heap, void* p, size_t size, size_t oldsize, unsigned int flags) {
if (p) {
//Grab the span using guaranteed span alignment
span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
@@ -2699,7 +2702,7 @@ _rpmalloc_reallocate(heap_t* heap, void* p, size_t size, size_t oldsize, unsigne
}
static void*
-_rpmalloc_aligned_reallocate(heap_t* heap, void* ptr, size_t alignment, size_t size, size_t oldsize,
+_rpmalloc_aligned_reallocate(heap_t * heap, void* ptr, size_t alignment, size_t size, size_t oldsize,
unsigned int flags) {
if (alignment <= SMALL_GRANULARITY)
return _rpmalloc_reallocate(heap, ptr, size, oldsize, flags);
@@ -2784,7 +2787,7 @@ rpmalloc_initialize(void) {
}
int
-rpmalloc_initialize_config(const rpmalloc_config_t* config) {
+rpmalloc_initialize_config(const rpmalloc_config_t * config) {
if (_rpmalloc_initialized) {
rpmalloc_thread_initialize();
return 0;
@@ -3209,7 +3212,7 @@ rpmalloc_thread_collect(void) {
}
void
-rpmalloc_thread_statistics(rpmalloc_thread_statistics_t* stats) {
+rpmalloc_thread_statistics(rpmalloc_thread_statistics_t * stats) {
memset(stats, 0, sizeof(rpmalloc_thread_statistics_t));
heap_t* heap = get_thread_heap_raw();
if (!heap)
@@ -3276,7 +3279,7 @@ rpmalloc_thread_statistics(rpmalloc_thread_statistics_t* stats) {
}
void
-rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats) {
+rpmalloc_global_statistics(rpmalloc_global_statistics_t * stats) {
memset(stats, 0, sizeof(rpmalloc_global_statistics_t));
#if ENABLE_STATISTICS
stats->mapped = (size_t)atomic_load32(&_mapped_pages) * _memory_page_size;
@@ -3295,7 +3298,7 @@ rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats) {
#if ENABLE_STATISTICS
static void
-_memory_heap_dump_statistics(heap_t* heap, void* file) {
+_memory_heap_dump_statistics(heap_t * heap, void* file) {
fprintf(file, "Heap %d stats:\n", heap->id);
fprintf(file, "Class CurAlloc PeakAlloc TotAlloc TotFree BlkSize BlkCount SpansCur SpansPeak PeakAllocMiB ToCacheMiB FromCacheMiB FromReserveMiB MmapCalls\n");
for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
@@ -3434,13 +3437,13 @@ rpmalloc_heap_acquire(void) {
}
extern inline void
-rpmalloc_heap_release(rpmalloc_heap_t* heap) {
+rpmalloc_heap_release(rpmalloc_heap_t * heap) {
if (heap)
_rpmalloc_heap_release(heap, 1, 1);
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_alloc(rpmalloc_heap_t* heap, size_t size) {
+rpmalloc_heap_alloc(rpmalloc_heap_t * heap, size_t size) {
#if ENABLE_VALIDATE_ARGS
if (size >= MAX_ALLOC_SIZE) {
errno = EINVAL;
@@ -3451,7 +3454,7 @@ rpmalloc_heap_alloc(rpmalloc_heap_t* heap, size_t size) {
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_aligned_alloc(rpmalloc_heap_t* heap, size_t alignment, size_t size) {
+rpmalloc_heap_aligned_alloc(rpmalloc_heap_t * heap, size_t alignment, size_t size) {
#if ENABLE_VALIDATE_ARGS
if (size >= MAX_ALLOC_SIZE) {
errno = EINVAL;
@@ -3462,12 +3465,12 @@ rpmalloc_heap_aligned_alloc(rpmalloc_heap_t* heap, size_t alignment, size_t size
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_calloc(rpmalloc_heap_t* heap, size_t num, size_t size) {
+rpmalloc_heap_calloc(rpmalloc_heap_t * heap, size_t num, size_t size) {
return rpmalloc_heap_aligned_calloc(heap, 0, num, size);
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_aligned_calloc(rpmalloc_heap_t* heap, size_t alignment, size_t num, size_t size) {
+rpmalloc_heap_aligned_calloc(rpmalloc_heap_t * heap, size_t alignment, size_t num, size_t size) {
size_t total;
#if ENABLE_VALIDATE_ARGS
#if PLATFORM_WINDOWS
@@ -3493,7 +3496,7 @@ rpmalloc_heap_aligned_calloc(rpmalloc_heap_t* heap, size_t alignment, size_t num
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_realloc(rpmalloc_heap_t* heap, void* ptr, size_t size, unsigned int flags) {
+rpmalloc_heap_realloc(rpmalloc_heap_t * heap, void* ptr, size_t size, unsigned int flags) {
#if ENABLE_VALIDATE_ARGS
if (size >= MAX_ALLOC_SIZE) {
errno = EINVAL;
@@ -3504,7 +3507,7 @@ rpmalloc_heap_realloc(rpmalloc_heap_t* heap, void* ptr, size_t size, unsigned in
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_aligned_realloc(rpmalloc_heap_t* heap, void* ptr, size_t alignment, size_t size, unsigned int flags) {
+rpmalloc_heap_aligned_realloc(rpmalloc_heap_t * heap, void* ptr, size_t alignment, size_t size, unsigned int flags) {
#if ENABLE_VALIDATE_ARGS
if ((size + alignment < size) || (alignment > _memory_page_size)) {
errno = EINVAL;
@@ -3515,13 +3518,13 @@ rpmalloc_heap_aligned_realloc(rpmalloc_heap_t* heap, void* ptr, size_t alignment
}
extern inline void
-rpmalloc_heap_free(rpmalloc_heap_t* heap, void* ptr) {
+rpmalloc_heap_free(rpmalloc_heap_t * heap, void* ptr) {
(void)sizeof(heap);
_rpmalloc_deallocate(ptr);
}
extern inline void
-rpmalloc_heap_free_all(rpmalloc_heap_t* heap) {
+rpmalloc_heap_free_all(rpmalloc_heap_t * heap) {
span_t* span;
span_t* next_span;
@@ -3590,7 +3593,7 @@ rpmalloc_heap_free_all(rpmalloc_heap_t* heap) {
}
extern inline void
-rpmalloc_heap_thread_set_current(rpmalloc_heap_t* heap) {
+rpmalloc_heap_thread_set_current(rpmalloc_heap_t * heap) {
heap_t* prev_heap = get_thread_heap_raw();
if (prev_heap != heap) {
set_thread_heap(heap);
diff --git a/lib/WinRpMalloc/src/rpmalloc.h b/lib/WinRpMalloc/src/rpmalloc.h
index 111ff27..8e62b80 100644
--- a/lib/WinRpMalloc/src/rpmalloc.h
+++ b/lib/WinRpMalloc/src/rpmalloc.h
@@ -1,27 +1,3 @@
-/*
-* Copyright (c) 2023 Vaughn Nugent
-*
-* Library: VNLib
-* Package: WinRpMalloc
-* File: rpmalloc.h
-*
-* rpmalloc.h is part of WinRpMalloc which is part of the larger
-* VNLib collection of libraries and utilities.
-*
-* WinRpMalloc is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published
-* by the Free Software Foundation, either version 2 of the License,
-* or (at your option) any later version.
-*
-* WinRpMalloc is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with WinRpMalloc. If not, see http://www.gnu.org/licenses/.
-*/
-
/* rpmalloc.h - Memory allocator - Public Domain - 2016 Mattias Jansson
*
* This library provides a cross-platform lock free thread caching malloc implementation in C11.
@@ -55,7 +31,7 @@ extern "C" {
# endif
# define RPMALLOC_CDECL
#elif defined(_MSC_VER)
-# define RPMALLOC_EXPORT __declspec(dllexport)
+# define RPMALLOC_EXPORT
# define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict)
# define RPMALLOC_ATTRIB_MALLOC
# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)