-rw-r--r--  .gitignore                                                   |   3
-rw-r--r--  lib/Net.Compression/vnlib_compress/Taskfile.yaml             |  14
-rw-r--r--  lib/Net.Http/src/Core/HttpContext.cs                         |   6
-rw-r--r--  lib/Net.Http/src/Core/HttpEvent.cs                           |  15
-rw-r--r--  lib/Net.Http/src/Core/HttpServerProcessing.cs                |  47
-rw-r--r--  lib/Net.Http/src/Core/IHttpLifeCycle.cs                      |   7
-rw-r--r--  lib/Net.Http/src/Core/InitDataBuffer.cs                      |  36
-rw-r--r--  lib/Net.Http/src/Core/Request/HttpInputStream.cs             |  81
-rw-r--r--  lib/Net.Http/src/Core/Request/HttpRequest.cs                 |   3
-rw-r--r--  lib/Net.Http/src/Core/Request/HttpRequestBody.cs             |  70
-rw-r--r--  lib/Net.Http/src/Core/Request/HttpRequestExtensions.cs       |  71
-rw-r--r--  lib/Net.Http/src/Core/RequestParse/Http11ParseExtensions.cs  |  12
-rw-r--r--  lib/Net.Http/src/Core/Response/HttpResponse.cs               |  18
-rw-r--r--  lib/Net.Http/src/Core/Response/HttpStreamResponse.cs         |   7
-rw-r--r--  lib/Net.Http/src/Core/Response/HttpstreamResponse.cs         |   7
-rw-r--r--  lib/Net.Http/src/Helpers/HttpRange.cs                        |   9
-rw-r--r--  lib/Net.Http/src/HttpBufferConfig.cs                         |   8
-rw-r--r--  lib/Plugins.Essentials/src/Sessions/SessionInfo.cs           |  12
-rw-r--r--  lib/Utils.Memory/vnlib_rpmalloc/vendor/rpmalloc.c            | 317
-rw-r--r--  lib/Utils.Memory/vnlib_rpmalloc/vnlib_rpmalloc.vcxitems      |   4
-rw-r--r--  lib/Utils/src/AdvancedTrace.cs                               |  52
-rw-r--r--  lib/Utils/src/Async/AsyncAccessSerializer.cs                 |  43
-rw-r--r--  lib/Utils/src/Async/AsyncQueue.cs                            |  12
-rw-r--r--  lib/Utils/src/Extensions/MemoryExtensions.cs                 |  52
-rw-r--r--  lib/Utils/src/Memory/IMemoryHandle.cs                        |   4
-rw-r--r--  lib/Utils/src/Memory/MemoryHandle.cs                         |  15
-rw-r--r--  lib/Utils/src/Memory/MemoryUtil.cs                           |  45
-rw-r--r--  lib/Utils/src/Memory/UnsafeMemoryHandle.cs                   |  34
-rw-r--r--  lib/Utils/src/Native/SafeLibraryHandle.cs                    |  11
-rw-r--r--  lib/Utils/src/Resources/CallbackOpenHandle.cs                |  44
-rw-r--r--  lib/Utils/src/Resources/ManagedLibrary.cs                    |  22
-rw-r--r--  lib/Utils/src/VNLib.Utils.csproj                             |   4

32 files changed, 589 insertions, 496 deletions
diff --git a/.gitignore b/.gitignore
index ff7e8e8..9e7b190 100644
--- a/.gitignore
+++ b/.gitignore
@@ -367,4 +367,5 @@ MigrationBackup/
/.editorconfig
#allow mimalloc included win binaries
-!lib/Utils.Memory/mimalloc/bin/mimalloc-* \ No newline at end of file
+!lib/Utils.Memory/mimalloc/bin/mimalloc-*
+lib/Net.Compression/vnlib_compress/third-party/* \ No newline at end of file
diff --git a/lib/Net.Compression/vnlib_compress/Taskfile.yaml b/lib/Net.Compression/vnlib_compress/Taskfile.yaml
index fd22c22..017c9d8 100644
--- a/lib/Net.Compression/vnlib_compress/Taskfile.yaml
+++ b/lib/Net.Compression/vnlib_compress/Taskfile.yaml
@@ -52,6 +52,9 @@ tasks:
- task: zlib
- task: brotli
+
+ #The CI pipeline may have issues reading modules if the third-party dir is not cleaned after every build run (only an issue once a build has completed)
+ - defer: { task: clean-third-party }
#invoke cmake for build (notify that we are precompiling for ci pipeline and rpmalloc lib should be local)
- cmake -B./build -DCI_PRECOMPILE=ON -DENABLE_RPMALLOC=ON
@@ -100,4 +103,13 @@ tasks:
- cd {{.THIRD_PARTY_DIR}} && git clone {{.BROTLI_GIT_REPO}}
cmds:
- cd {{.THIRD_PARTY_DIR}}/brotli && git pull
- \ No newline at end of file
+
+ clean-third-party:
+ internal: false
+ ignore_error: true
+ cmds:
+ - cmd: powershell rm -Recurse -Force '{{.THIRD_PARTY_DIR}}'
+ platforms: [windows]
+ - cmd: rm -rf {{.THIRD_PARTY_DIR}}
+ platforms: [linux, darwin]
+ \ No newline at end of file
diff --git a/lib/Net.Http/src/Core/HttpContext.cs b/lib/Net.Http/src/Core/HttpContext.cs
index 8cecf30..f742e97 100644
--- a/lib/Net.Http/src/Core/HttpContext.cs
+++ b/lib/Net.Http/src/Core/HttpContext.cs
@@ -96,7 +96,7 @@ namespace VNLib.Net.Http.Core
*/
if (supportedMethods != CompressionMethod.None)
{
- Debug.Assert(server.Config.CompressorManager != null, "Expected non-null provider");
+ Debug.Assert(server.Config.CompressorManager != null, "Expected non-null compressor manager");
_compressor = new ManagedHttpCompressor(server.Config.CompressorManager);
}
else
@@ -153,7 +153,7 @@ namespace VNLib.Net.Http.Core
Buffers.AllocateBuffer(ParentServer.Config.MemoryPool);
//Init new connection
- Response.OnNewConnection();
+ Response.OnNewConnection(ctx.ConnectionStream);
}
///<inheritdoc/>
@@ -199,7 +199,7 @@ namespace VNLib.Net.Http.Core
Response.OnRelease();
//Free buffers
- Buffers.FreeAll(true);
+ Buffers.FreeAll(ParentServer.Config.BufferConfig.ZeroBuffersOnDisconnect);
return true;
}
diff --git a/lib/Net.Http/src/Core/HttpEvent.cs b/lib/Net.Http/src/Core/HttpEvent.cs
index 8867a12..37c5ab5 100644
--- a/lib/Net.Http/src/Core/HttpEvent.cs
+++ b/lib/Net.Http/src/Core/HttpEvent.cs
@@ -33,18 +33,11 @@ using VNLib.Net.Http.Core.Response;
namespace VNLib.Net.Http
{
- internal sealed class HttpEvent : MarshalByRefObject, IHttpEvent
+ internal sealed class HttpEvent(HttpContext ctx) : MarshalByRefObject, IHttpEvent
{
- private HttpContext Context;
- private ConnectionInfo _ci;
- private FileUpload[] _uploads;
-
- internal HttpEvent(HttpContext ctx)
- {
- Context = ctx;
- _ci = new ConnectionInfo(ctx);
- _uploads = ctx.Request.CopyUploads();
- }
+ private HttpContext Context = ctx;
+ private ConnectionInfo _ci = new(ctx);
+ private FileUpload[] _uploads = ctx.Request.CopyUploads();
///<inheritdoc/>
IConnectionInfo IHttpEvent.Server => _ci;
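
The rewrite above converts HttpEvent to a C# 12 primary constructor: the explicit constructor body disappears and each field initializer consumes the constructor parameter directly, which is behaviorally identical to the old code. A minimal sketch of the pattern with hypothetical types:

    // Primary constructor: 'ctx' is in scope for all field initializers
    internal sealed class Wrapper(Context ctx)
    {
        private readonly Context _ctx = ctx;     // stores the parameter
        private readonly Info _info = new(ctx);  // runs once at construction
    }

    internal sealed class Context { }
    internal sealed class Info(Context ctx) { }
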
diff --git a/lib/Net.Http/src/Core/HttpServerProcessing.cs b/lib/Net.Http/src/Core/HttpServerProcessing.cs
index 8a9ca07..7770ad7 100644
--- a/lib/Net.Http/src/Core/HttpServerProcessing.cs
+++ b/lib/Net.Http/src/Core/HttpServerProcessing.cs
@@ -181,8 +181,16 @@ namespace VNLib.Net.Http
return false;
}
+ bool keepalive = true;
+
+ //Handle an error parsing the request
+ if(!PreProcessRequest(context, (HttpStatusCode)status, ref keepalive))
+ {
+ return false;
+ }
+
//process the request
- bool keepalive = await ProcessRequestAsync(context, (HttpStatusCode)status);
+ bool processSuccess = await ProcessRequestAsync(context);
#if DEBUG
static void WriteConnectionDebugLog(HttpServer server, HttpContext context)
@@ -210,18 +218,17 @@ namespace VNLib.Net.Http
WriteConnectionDebugLog(this, context);
}
#endif
-
- //Close the response
+
await context.WriteResponseAsync();
-
- //Flush the stream before returning
+
await context.FlushTransportAsync();
/*
* If an alternate protocol was specified, we need to break the keepalive loop
+ * the handler will manage the alternate protocol
*/
- return keepalive & context.AlternateProtocol == null;
+ return processSuccess & keepalive & context.AlternateProtocol == null;
}
finally
{
@@ -259,8 +266,7 @@ namespace VNLib.Net.Http
//Get the parse buffer
IHttpHeaderParseBuffer parseBuffer = ctx.Buffers.RequestHeaderParseBuffer;
-
- //Init parser
+
TransportReader reader = new (ctx.GetTransport(), parseBuffer, _config.HttpEncoding, HeaderLineTermination);
HttpStatusCode code;
@@ -278,13 +284,13 @@ namespace VNLib.Net.Http
}
//Parse the headers
- if ((code = ctx.Request.Http1ParseHeaders(ref parseState, ref reader, Config, lineBuf)) > 0)
+ if ((code = ctx.Request.Http1ParseHeaders(ref parseState, ref reader, in _config, lineBuf)) > 0)
{
return code;
}
//Prepare entity body for request
- if ((code = ctx.Request.Http1PrepareEntityBody(ref parseState, ref reader, Config)) > 0)
+ if ((code = ctx.Request.Http1PrepareEntityBody(ref parseState, ref reader, in _config)) > 0)
{
return code;
}
@@ -303,8 +309,7 @@ namespace VNLib.Net.Http
}
}
- [MethodImpl(MethodImplOptions.AggressiveOptimization)]
- private async Task<bool> ProcessRequestAsync(HttpContext context, HttpStatusCode status)
+ private bool PreProcessRequest(HttpContext context, HttpStatusCode status, ref bool keepalive)
{
//Check status
if (status != 0)
@@ -340,12 +345,12 @@ namespace VNLib.Net.Http
context.Respond(HttpStatusCode.ServiceUnavailable);
return false;
}
-
+
//Store keepalive value from request, and check if keepalives are enabled by the configuration
- bool keepalive = context.Request.State.KeepAlive & _config.ConnectionKeepAlive > TimeSpan.Zero;
-
+ keepalive = context.Request.State.KeepAlive & _config.ConnectionKeepAlive > TimeSpan.Zero;
+
//Set connection header (only for http1.1)
-
+
if (keepalive)
{
/*
@@ -363,6 +368,12 @@ namespace VNLib.Net.Http
context.Response.Headers.Set(HttpResponseHeader.Connection, "closed");
}
+ return true;
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveOptimization)]
+ private async Task<bool> ProcessRequestAsync(HttpContext context)
+ {
//Get the server root for the specified location or fallback to a wildcard host if one is selected
IWebRoot? root = ServerRoots!.GetValueOrDefault(context.Request.State.Location.DnsSafeHost, _wildcardRoot);
@@ -370,7 +381,7 @@ namespace VNLib.Net.Http
{
context.Respond(HttpStatusCode.NotFound);
//make sure control leaves
- return keepalive;
+ return true;
}
//Check the expect header and return an early status code
@@ -449,7 +460,7 @@ namespace VNLib.Net.Http
*
* For now I will allow it.
*/
- return keepalive;
+ return true;
}
}
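
Taken together, these hunks split the old ProcessRequestAsync into a synchronous pre-processing step (parse status, server load, and keepalive negotiation) and an async handler phase, so the keepalive decision is made before the handler runs. A simplified sketch of the resulting flow, condensed from the hunks above rather than the full method:

    bool keepalive = true;

    // Synchronous phase: bail out early on parse errors or server overload
    if (!PreProcessRequest(context, (HttpStatusCode)status, ref keepalive))
        return false;

    // Async phase: route the request and run the application handler
    bool processSuccess = await ProcessRequestAsync(context);

    await context.WriteResponseAsync();
    await context.FlushTransportAsync();

    // Keep the connection only if every phase agrees and no protocol upgrade occurred
    return processSuccess & keepalive & context.AlternateProtocol == null;
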
diff --git a/lib/Net.Http/src/Core/IHttpLifeCycle.cs b/lib/Net.Http/src/Core/IHttpLifeCycle.cs
index 9ba5ff1..12a1f3f 100644
--- a/lib/Net.Http/src/Core/IHttpLifeCycle.cs
+++ b/lib/Net.Http/src/Core/IHttpLifeCycle.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2023 Vaughn Nugent
+* Copyright (c) 2024 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Net.Http
@@ -58,10 +58,5 @@ namespace VNLib.Net.Http.Core
/// method should not throw exceptions
/// </remarks>
void OnComplete();
-
- /// <summary>
- /// Raised when a new connection is established on the current context
- /// </summary>
- void OnNewConnection();
}
} \ No newline at end of file
diff --git a/lib/Net.Http/src/Core/InitDataBuffer.cs b/lib/Net.Http/src/Core/InitDataBuffer.cs
index 6a400bb..6d559cd 100644
--- a/lib/Net.Http/src/Core/InitDataBuffer.cs
+++ b/lib/Net.Http/src/Core/InitDataBuffer.cs
@@ -24,11 +24,13 @@
using System;
using System.Buffers;
+using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using VNLib.Utils;
using VNLib.Utils.Extensions;
+using VNLib.Utils.Memory;
namespace VNLib.Net.Http.Core
{
@@ -84,6 +86,14 @@ namespace VNLib.Net.Http.Core
set => MemoryMarshal.Write(_positionSegment, in value);
}
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private readonly int GetDataPosition()
+ {
+ Debug.Assert(Position >= 0 && Position <= _dataSize, "Invalid position value");
+ //Points to the first byte of the data segment to read from
+ return POSITION_SEG_SIZE + Position;
+ }
+
/// <summary>
/// Get the amount of data remaining in the data buffer
/// </summary>
@@ -94,6 +104,19 @@ namespace VNLib.Net.Http.Core
}
/// <summary>
+ /// Performs a discard in a single operation by setting the
+ /// position to the end of the data buffer
+ /// </summary>
+ /// <returns>The number of bytes that were remaining in the buffer before the discard</returns>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ internal readonly int DiscardRemaining()
+ {
+ int remaining = Remaining;
+ Position = _dataSize;
+ return remaining;
+ }
+
+ /// <summary>
/// Reads data from the internal buffer into the supplied buffer
/// </summary>
/// <param name="buffer">The buffer to write data to</param>
@@ -103,11 +126,14 @@ namespace VNLib.Net.Http.Core
{
//Calc how many bytes can be read into the output buffer
int bytesToRead = Math.Min(Remaining, buffer.Length);
-
- Span<byte> btr = DataSegment.Slice(Position, bytesToRead);
-
- //Write data to output buffer
- btr.CopyTo(buffer);
+
+ MemoryUtil.Memmove(
+ ref MemoryMarshal.GetArrayDataReference(_buffer),
+ (nuint)GetDataPosition(),
+ ref MemoryMarshal.GetReference(buffer),
+ 0,
+ (nuint)bytesToRead
+ );
//Update position pointer
Position += bytesToRead;
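
The Memmove call replaces the removed Slice/CopyTo pair; it performs the same byte copy but addresses the backing array directly via GetArrayDataReference, skipping the span bounds checks on this hot path. For reference, the removed span form was equivalent to:

    // Same copy, expressed with spans (the code this hunk removed):
    DataSegment.Slice(Position, bytesToRead).CopyTo(buffer);
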
diff --git a/lib/Net.Http/src/Core/Request/HttpInputStream.cs b/lib/Net.Http/src/Core/Request/HttpInputStream.cs
index e36d1e4..ccaa336 100644
--- a/lib/Net.Http/src/Core/Request/HttpInputStream.cs
+++ b/lib/Net.Http/src/Core/Request/HttpInputStream.cs
@@ -40,14 +40,18 @@ namespace VNLib.Net.Http.Core
/// </summary>
internal sealed class HttpInputStream(IHttpContextInformation ContextInfo) : Stream
{
-
- private long ContentLength;
- private Stream? InputStream;
- private long _position;
-
+ private StreamState _state;
private InitDataBuffer? _initalData;
- private long Remaining => Math.Max(ContentLength - _position, 0);
+ private long Remaining
+ {
+ get
+ {
+ long remaining = _state.ContentLength - _state.Position;
+ Debug.Assert(remaining >= 0, "Input stream overrun. Read more data than was available for the connection");
+ return remaining;
+ }
+ }
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal void OnComplete()
@@ -59,12 +63,7 @@ namespace VNLib.Net.Http.Core
_initalData = null;
}
- //Remove stream cache copy
- InputStream = null;
- //Reset position
- _position = 0;
- //reset content length
- ContentLength = 0;
+ _state = default;
}
/// <summary>
@@ -74,9 +73,8 @@ namespace VNLib.Net.Http.Core
/// <param name="contentLength">The number of bytes to allow being read from the transport or initial buffer</param>
internal ref InitDataBuffer? Prepare(long contentLength)
{
- ContentLength = contentLength;
- //Cache transport stream
- InputStream = ContextInfo.GetTransport();
+ _state.ContentLength = contentLength;
+ _state.InputStream = ContextInfo.GetTransport();
return ref _initalData;
}
@@ -105,13 +103,13 @@ namespace VNLib.Net.Http.Core
/// <summary>
/// Gets the total size of the entity body (aka Content-Length)
/// </summary>
- public override long Length => ContentLength;
+ public override long Length => _state.ContentLength;
/// <summary>
/// Gets the number of bytes currently read from the entity body, setting the
/// position is a NOOP
/// </summary>
- public override long Position { get => _position; set { } }
+ public override long Position { get => _state.Position; set { } }
/// <summary>
/// NOOP
@@ -149,19 +147,19 @@ namespace VNLib.Net.Http.Core
writer.Advance(read);
//Update position
- _position += read;
+ _state.Position += read;
}
//See if data is still remaining to be read from transport (remaining size is also the amount of data that can be read)
if (writer.RemainingSize > 0)
{
//Read from transport
- ERRNO read = InputStream!.Read(writer.Remaining);
+ ERRNO read = _state.InputStream!.Read(writer.Remaining);
//Update writer position
writer.Advance(read);
- _position += read;
+ _state.Position += read;
}
//Return number of bytes written to the buffer
@@ -196,19 +194,19 @@ namespace VNLib.Net.Http.Core
writer.Advance(read);
//Update position
- _position += read;
+ _state.Position += read;
}
//See if data is still remaining to be read from transport (remaining size is also the amount of data that can be read)
if (writer.RemainingSize > 0)
{
//Read from transport
- int read = await InputStream!.ReadAsync(writer.Remaining, cancellationToken).ConfigureAwait(true);
+ int read = await _state.InputStream!.ReadAsync(writer.Remaining, cancellationToken).ConfigureAwait(true);
//Update writer position
writer.Advance(read);
- _position += read;
+ _state.Position += read;
}
//Return number of bytes written to the buffer
@@ -232,7 +230,7 @@ namespace VNLib.Net.Http.Core
if(_initalData.HasValue && remaining <= _initalData.Value.Remaining)
{
//All data has been buffered, so just clear the buffer
- _position = Length;
+ _state.Position = Length;
return ValueTask.CompletedTask;
}
//We must actually discard data from the stream
@@ -244,14 +242,31 @@ namespace VNLib.Net.Http.Core
private async ValueTask DiscardStreamDataAsync()
{
- int read;
- do
+ DiscardInternalBuffer();
+
+ int read, bytesToRead = (int)Math.Min(HttpServer.WriteOnlyScratchBuffer.Length, Remaining);
+
+ while (bytesToRead > 0)
{
//Read into the discard buffer until the remaining body data is consumed
- read = await ReadAsync(HttpServer.WriteOnlyScratchBuffer, CancellationToken.None)
+ read = await _state.InputStream!.ReadAsync(HttpServer.WriteOnlyScratchBuffer.Slice(0, bytesToRead), CancellationToken.None)
.ConfigureAwait(true);
- } while (read > 0);
+ //Update position
+ _state.Position += read;
+
+ //Recalculate the number of bytes to read
+ bytesToRead = (int)Math.Min(HttpServer.WriteOnlyScratchBuffer.Length, Remaining);
+ }
+ }
+
+ private void DiscardInternalBuffer()
+ {
+ if (_initalData.HasValue)
+ {
+ //Update the stream position with remaining data
+ _state.Position += _initalData.Value.DiscardRemaining();
+ }
}
/// <summary>
@@ -260,12 +275,20 @@ namespace VNLib.Net.Http.Core
/// <param name="offset"></param>
/// <param name="origin"></param>
/// <returns></returns>
- public override long Seek(long offset, SeekOrigin origin) => _position;
+ public override long Seek(long offset, SeekOrigin origin) => _state.Position;
///<inheritdoc/>
public override void SetLength(long value) => throw new NotSupportedException();
///<inheritdoc/>
public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
+
+
+ private struct StreamState
+ {
+ public Stream? InputStream;
+ public long Position;
+ public long ContentLength;
+ }
}
} \ No newline at end of file
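
Note the clamp on bytesToRead in DiscardStreamDataAsync: on a keepalive connection, reading past the declared content length would consume bytes belonging to the next pipelined request, so each scratch read is sliced to the smaller of the scratch buffer size and Remaining. Condensed, the discard loop is (scratch stands in for HttpServer.WriteOnlyScratchBuffer):

    DiscardInternalBuffer();  // drain any buffered prelude first
    int bytesToRead = (int)Math.Min(scratch.Length, Remaining);
    while (bytesToRead > 0)
    {
        // Never read more than the current request body still owns on the transport
        int read = await _state.InputStream!.ReadAsync(scratch.Slice(0, bytesToRead));
        _state.Position += read;
        bytesToRead = (int)Math.Min(scratch.Length, Remaining);
    }
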
diff --git a/lib/Net.Http/src/Core/Request/HttpRequest.cs b/lib/Net.Http/src/Core/Request/HttpRequest.cs
index ce8257f..3ebf0d4 100644
--- a/lib/Net.Http/src/Core/Request/HttpRequest.cs
+++ b/lib/Net.Http/src/Core/Request/HttpRequest.cs
@@ -81,9 +81,6 @@ namespace VNLib.Net.Http.Core
void IHttpLifeCycle.OnRelease()
{ }
-
- void IHttpLifeCycle.OnNewConnection()
- { }
void IHttpLifeCycle.OnNewRequest()
{ }
diff --git a/lib/Net.Http/src/Core/Request/HttpRequestBody.cs b/lib/Net.Http/src/Core/Request/HttpRequestBody.cs
deleted file mode 100644
index e39a35c..0000000
--- a/lib/Net.Http/src/Core/Request/HttpRequestBody.cs
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-* Copyright (c) 2023 Vaughn Nugent
-*
-* Library: VNLib
-* Package: VNLib.Net.Http
-* File: HttpRequestBody.cs
-*
-* HttpRequestBody.cs is part of VNLib.Net.Http which is part of the larger
-* VNLib collection of libraries and utilities.
-*
-* VNLib.Net.Http is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License as
-* published by the Free Software Foundation, either version 3 of the
-* License, or (at your option) any later version.
-*
-* VNLib.Net.Http is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see https://www.gnu.org/licenses/.
-*/
-
-using System;
-using System.Collections.Generic;
-
-namespace VNLib.Net.Http.Core
-{
- /// <summary>
- /// Represents a higher-level request entity body (query arguments, request body etc)
- /// that has been parsed and captured
- /// </summary>
- internal class HttpRequestBody
- {
- public readonly List<FileUpload> Uploads;
- public readonly Dictionary<string, string> RequestArgs;
- public readonly Dictionary<string, string> QueryArgs;
-
- public HttpRequestBody()
- {
- Uploads = new(1);
-
- //Request/query args should not be case sensitive
- RequestArgs = new(StringComparer.OrdinalIgnoreCase);
- QueryArgs = new(StringComparer.OrdinalIgnoreCase);
- }
-
- /// <summary>
- /// Releases all resources used by the current instance
- /// </summary>
- public void OnComplete()
- {
- //Only enumerate/clear if file uplaods are present
- if (Uploads.Count > 0)
- {
- //Dispose all initialized files
- for (int i = 0; i < Uploads.Count; i++)
- {
- Uploads[i].Free();
- }
- //Emtpy list
- Uploads.Clear();
- }
- //Clear request args and file uplaods
- RequestArgs.Clear();
- QueryArgs.Clear();
- }
- }
-} \ No newline at end of file
diff --git a/lib/Net.Http/src/Core/Request/HttpRequestExtensions.cs b/lib/Net.Http/src/Core/Request/HttpRequestExtensions.cs
index 69bd2af..878622e 100644
--- a/lib/Net.Http/src/Core/Request/HttpRequestExtensions.cs
+++ b/lib/Net.Http/src/Core/Request/HttpRequestExtensions.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2023 Vaughn Nugent
+* Copyright (c) 2024 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Net.Http
@@ -91,9 +91,17 @@ namespace VNLib.Net.Http.Core
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static bool IsCrossOrigin(this HttpRequest Request)
{
- return Request.State.Origin != null
- && (!Request.State.Origin.Authority.Equals(Request.State.Location.Authority, StringComparison.Ordinal)
- || !Request.State.Origin.Scheme.Equals(Request.State.Location.Scheme, StringComparison.Ordinal));
+ if(Request.State.Origin is null)
+ {
+ return false;
+ }
+
+ //Get the origin string components for comparison (allocs new strings :( )
+ string locOrigin = Request.State.Location.GetComponents(UriComponents.SchemeAndServer, UriFormat.SafeUnescaped);
+ string reqOrigin = Request.State.Origin.GetComponents(UriComponents.SchemeAndServer, UriFormat.SafeUnescaped);
+
+ //If origin components are not equal, this is a cross origin request
+ return !string.Equals(locOrigin, reqOrigin, StringComparison.OrdinalIgnoreCase);
}
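
The rewritten check compares whole normalized origins rather than comparing Authority and Scheme separately. Uri.GetComponents with UriComponents.SchemeAndServer lower-cases the scheme and host and omits a default port, so equivalent origins compare equal regardless of spelling. A small illustration with hypothetical values:

    Uri location = new("https://Example.COM/path");
    Uri origin   = new("https://example.com:443/other");

    // Both normalize to "https://example.com", so this is a same-origin request
    string a = location.GetComponents(UriComponents.SchemeAndServer, UriFormat.SafeUnescaped);
    string b = origin.GetComponents(UriComponents.SchemeAndServer, UriFormat.SafeUnescaped);
    bool crossOrigin = !string.Equals(a, b, StringComparison.OrdinalIgnoreCase); // false
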
/// <summary>
@@ -136,18 +144,6 @@ namespace VNLib.Net.Http.Core
{
HttpRequest request = context.Request;
IHttpContextInformation info = context;
- IHttpMemoryPool pool = context.ParentServer.Config.MemoryPool;
-
- //Gets the max form data buffer size to help calculate the initial char buffer size
- int maxBufferSize = context.ParentServer.Config.BufferConfig.FormDataBufferSize;
-
- //Calculate a largest available buffer to read the entire stream or up to the maximum buffer size
- int bufferSize = (int)Math.Min(request.InputStream.Length, maxBufferSize);
-
- //Get the form data buffer (should be cost free)
- Memory<byte> formBuffer = context.Buffers.GetFormDataBuffer();
-
- Debug.Assert(!formBuffer.IsEmpty, "GetFormDataBuffer() returned an empty memory buffer");
switch (request.State.ContentType)
{
@@ -157,10 +153,9 @@ namespace VNLib.Net.Http.Core
case ContentType.UrlEncoded:
{
//Alloc the form data character buffer, this will need to grow if the form data is larger than the buffer
- using IResizeableMemoryHandle<char> urlbody = pool.AllocFormDataBuffer<char>(bufferSize);
-
- //Load char buffer from stream
- int chars = await BufferInputStream(request.InputStream, urlbody, formBuffer, info.Encoding);
+ using IResizeableMemoryHandle<char> urlbody = AllocFdBuffer(context);
+
+ int chars = await BufferInputStreamAsChars(request.InputStream, urlbody, GetFdBuffer(context), info.Encoding);
//Get the body as a span, and split the 'string' at the & character
((ReadOnlySpan<char>)urlbody.AsSpan(0, chars))
@@ -175,12 +170,10 @@ namespace VNLib.Net.Http.Core
{
break;
}
-
- //Alloc the form data buffer
- using IResizeableMemoryHandle<char> formBody = pool.AllocFormDataBuffer<char>(bufferSize);
-
- //Load char buffer from stream
- int chars = await BufferInputStream(request.InputStream, formBody, formBuffer, info.Encoding);
+
+ using IResizeableMemoryHandle<char> formBody = AllocFdBuffer(context);
+
+ int chars = await BufferInputStreamAsChars(request.InputStream, formBody, GetFdBuffer(context), info.Encoding);
//Split the body as a span at the boundaries
((ReadOnlySpan<char>)formBody.AsSpan(0, chars))
@@ -194,6 +187,25 @@ namespace VNLib.Net.Http.Core
request.AddFileUpload(new(request.InputStream, false, request.State.ContentType, null));
break;
}
+
+
+ static IResizeableMemoryHandle<char> AllocFdBuffer(HttpContext context)
+ {
+ //Gets the max form data buffer size to help calculate the initial char buffer size
+ int maxBufferSize = context.ParentServer.Config.BufferConfig.FormDataBufferSize;
+
+ //Calculate a largest available buffer to read the entire stream or up to the maximum buffer size
+ int buffersize = (int)Math.Min(context.Request.InputStream.Length, maxBufferSize);
+
+ return context.ParentServer.Config.MemoryPool.AllocFormDataBuffer<char>(buffersize);
+ }
+
+ static Memory<byte> GetFdBuffer(HttpContext context)
+ {
+ Memory<byte> formBuffer = context.Buffers.GetFormDataBuffer();
+ Debug.Assert(!formBuffer.IsEmpty, "GetFormDataBuffer() returned an empty memory buffer");
+ return formBuffer;
+ }
}
/*
@@ -203,7 +215,12 @@ namespace VNLib.Net.Http.Core
* We assume the parsing method checked the size of the input stream, so it is safe to read
* all of it into memory.
*/
- private static async ValueTask<int> BufferInputStream(Stream stream, IResizeableMemoryHandle<char> charBuffer, Memory<byte> binBuffer, Encoding encoding)
+ private static async ValueTask<int> BufferInputStreamAsChars(
+ Stream stream,
+ IResizeableMemoryHandle<char> charBuffer,
+ Memory<byte> binBuffer,
+ Encoding encoding
+ )
{
int length = 0;
do
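
The hunk cuts off at the top of the decode loop, but the shape of such a buffered decode is: read a block of bytes from the stream, decode it into the growable char buffer with the request encoding, and repeat until the stream is drained. A rough sketch under those assumptions (ResizeIfSmaller is a hypothetical grow helper, not necessarily the project's actual API):

    int length = 0;
    do
    {
        // Pull the next block of raw entity bytes
        int read = await stream.ReadAsync(binBuffer);
        if (read <= 0)
            break;

        // Grow the char buffer if the decoded block may not fit, then decode
        int maxChars = encoding.GetMaxCharCount(read);
        charBuffer.ResizeIfSmaller(length + maxChars);
        length += encoding.GetChars(binBuffer.Span[..read], charBuffer.AsSpan(length, maxChars));
    } while (true);
    return length;
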
diff --git a/lib/Net.Http/src/Core/RequestParse/Http11ParseExtensions.cs b/lib/Net.Http/src/Core/RequestParse/Http11ParseExtensions.cs
index 86535c3..cabb723 100644
--- a/lib/Net.Http/src/Core/RequestParse/Http11ParseExtensions.cs
+++ b/lib/Net.Http/src/Core/RequestParse/Http11ParseExtensions.cs
@@ -117,7 +117,6 @@ namespace VNLib.Net.Http.Core
//Try to parse the requested http version, only supported versions
if ((reqState.HttpVersion = HttpHelpers.ParseHttpVersion(requestLine[endloc..])) == HttpVersion.None)
{
- //Return not supported
return HttpStatusCode.HttpVersionNotSupported;
}
@@ -450,18 +449,18 @@ namespace VNLib.Net.Http.Core
}
//Set full http range
- reqState.Range = new(startRangeValue, endRangeValue, HttpRangeType.FullRange);
+ reqState.Range = HttpRange.FullRange(startRangeValue, endRangeValue);
}
else
{
//From-end range
- reqState.Range = new(0, endRangeValue, HttpRangeType.FromEnd);
+ reqState.Range = HttpRange.FromEnd(endRangeValue);
}
}
else if(hasStartRange)
{
//Valid start range only, so from start range
- reqState.Range = new(startRangeValue, 0, HttpRangeType.FromStart);
+ reqState.Range = HttpRange.FromStart(startRangeValue);
}
//No valid range values
}
@@ -554,7 +553,10 @@ namespace VNLib.Net.Http.Core
//Bad format to include a message body with a GET, HEAD, or TRACE request
if (parseState.ContentLength > 0)
{
- Config.ServerLog.Debug("Message body received from {ip} with GET, HEAD, or TRACE request, was considered an error and the request was dropped", reqState.RemoteEndPoint);
+ Config.ServerLog.Debug(
+ "Message body received from {ip} with GET, HEAD, or TRACE request, was considered an error and the request was dropped",
+ reqState.RemoteEndPoint
+ );
return HttpStatusCode.BadRequest;
}
else
diff --git a/lib/Net.Http/src/Core/Response/HttpResponse.cs b/lib/Net.Http/src/Core/Response/HttpResponse.cs
index ec9879b..06f114c 100644
--- a/lib/Net.Http/src/Core/Response/HttpResponse.cs
+++ b/lib/Net.Http/src/Core/Response/HttpResponse.cs
@@ -119,10 +119,11 @@ namespace VNLib.Net.Http.Core.Response
//Write headers
for (int i = 0; i < Headers.Count; i++)
{
- writer.Append(Headers.Keys[i]); //Write header key
- writer.Append(": "); //Write separator
- writer.Append(Headers[i]); //Write the header value
- writer.Append(HttpHelpers.CRLF); //Crlf
+ //<name>: <value>\r\n
+ writer.Append(Headers.Keys[i]);
+ writer.Append(": ");
+ writer.Append(Headers[i]);
+ writer.Append(HttpHelpers.CRLF);
}
//Remove written headers
@@ -131,7 +132,6 @@ namespace VNLib.Net.Http.Core.Response
//Write cookies if any are set
if (Cookies.Count > 0)
{
- //Enumerate and write
foreach (HttpCookie cookie in Cookies)
{
writer.Append("Set-Cookie: ");
@@ -141,8 +141,7 @@ namespace VNLib.Net.Http.Core.Response
writer.Append(HttpHelpers.CRLF);
}
-
- //Clear all current cookies
+
Cookies.Clear();
}
@@ -302,10 +301,9 @@ namespace VNLib.Net.Http.Core.Response
}
///<inheritdoc/>
- public void OnNewConnection()
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public void OnNewConnection(Stream transport)
{
- //Get the transport stream and init streams
- Stream transport = ContextInfo.GetTransport();
ReusableChunkedStream.OnNewConnection(transport);
ReusableDirectStream.OnNewConnection(transport);
}
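
For reference, the header loop above serializes each stored pair in standard HTTP/1.1 wire form, one CRLF-terminated name/value line at a time:

    // Headers["Content-Type"] = "text/html" is emitted as the bytes:
    // Content-Type: text/html\r\n
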
diff --git a/lib/Net.Http/src/Core/Response/HttpStreamResponse.cs b/lib/Net.Http/src/Core/Response/HttpStreamResponse.cs
index b08d2ab..679d8fe 100644
--- a/lib/Net.Http/src/Core/Response/HttpStreamResponse.cs
+++ b/lib/Net.Http/src/Core/Response/HttpStreamResponse.cs
@@ -22,13 +22,6 @@
* along with this program. If not, see https://www.gnu.org/licenses/.
*/
-/*
- * This file handles response entity processing. It handles in-memory response
- * processing, as well as stream response processing. It handles constraints
- * such as content-range limits. I tried to eliminate or reduce the amount of
- * memory copying required to process the response entity.
- */
-
using System;
using System.IO;
using System.Threading;
diff --git a/lib/Net.Http/src/Core/Response/HttpstreamResponse.cs b/lib/Net.Http/src/Core/Response/HttpstreamResponse.cs
index b08d2ab..679d8fe 100644
--- a/lib/Net.Http/src/Core/Response/HttpstreamResponse.cs
+++ b/lib/Net.Http/src/Core/Response/HttpstreamResponse.cs
@@ -22,13 +22,6 @@
* along with this program. If not, see https://www.gnu.org/licenses/.
*/
-/*
- * This file handles response entity processing. It handles in-memory response
- * processing, as well as stream response processing. It handles constraints
- * such as content-range limits. I tried to eliminate or reduce the amount of
- * memory copying required to process the response entity.
- */
-
using System;
using System.IO;
using System.Threading;
diff --git a/lib/Net.Http/src/Helpers/HttpRange.cs b/lib/Net.Http/src/Helpers/HttpRange.cs
index cedcc40..af8c8a8 100644
--- a/lib/Net.Http/src/Helpers/HttpRange.cs
+++ b/lib/Net.Http/src/Helpers/HttpRange.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2023 Vaughn Nugent
+* Copyright (c) 2024 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Net.Http
@@ -40,5 +40,12 @@ namespace VNLib.Net.Http
/// <param name="end">The ending range value</param>
/// <returns>True if the range values are valid, false otherwise</returns>
public static bool IsValidRangeValue(ulong start, ulong end) => start <= end;
+
+
+ internal static HttpRange FromStart(ulong start) => new(start, 0, HttpRangeType.FromStart);
+
+ internal static HttpRange FromEnd(ulong end) => new(0, end, HttpRangeType.FromEnd);
+
+ internal static HttpRange FullRange(ulong start, ulong end) => new(start, end, HttpRangeType.FullRange);
}
} \ No newline at end of file
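
The three factory methods map one-to-one onto the Range header forms parsed in Http11ParseExtensions above:

    // bytes=100-200 -> HttpRange.FullRange(100, 200)  (explicit start and end)
    // bytes=100-    -> HttpRange.FromStart(100)       (from offset to end of entity)
    // bytes=-50     -> HttpRange.FromEnd(50)          (final 50 bytes of the entity)
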
diff --git a/lib/Net.Http/src/HttpBufferConfig.cs b/lib/Net.Http/src/HttpBufferConfig.cs
index fa3ad21..81fea12 100644
--- a/lib/Net.Http/src/HttpBufferConfig.cs
+++ b/lib/Net.Http/src/HttpBufferConfig.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2023 Vaughn Nugent
+* Copyright (c) 2024 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Net.Http
@@ -74,5 +74,11 @@ namespace VNLib.Net.Http
/// May be set to 0 when <see cref="HttpConfig.CompressorManager"/> is set to null (compression is disabled).
/// </summary>
public readonly int ChunkedResponseAccumulatorSize { get; init; } = 64 * 1024;
+
+ /// <summary>
+ /// When a transport connection is closed, dedicated buffers are released back to the
+ /// heap. This setting controls whether the buffers are all zeroed before being released.
+ /// </summary>
+ public readonly bool ZeroBuffersOnDisconnect { get; init; } = true;
}
} \ No newline at end of file
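
A sketch of opting out of the new zeroing pass when building a buffer config (property name from the diff; the surrounding server setup is omitted):

    HttpBufferConfig buffers = new()
    {
        // Trade the zeroing pass on disconnect for slightly cheaper teardown
        ZeroBuffersOnDisconnect = false,
    };
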
diff --git a/lib/Plugins.Essentials/src/Sessions/SessionInfo.cs b/lib/Plugins.Essentials/src/Sessions/SessionInfo.cs
index 7cb2783..2edb30c 100644
--- a/lib/Plugins.Essentials/src/Sessions/SessionInfo.cs
+++ b/lib/Plugins.Essentials/src/Sessions/SessionInfo.cs
@@ -212,17 +212,17 @@ namespace VNLib.Plugins.Essentials.Sessions
/// Flags the session as invalid. IMPORTANT: the user's session data is no longer valid, no data
/// will be saved to the session store when the session closes
/// </summary>
- public void Invalidate(bool all = false) => UserSession.Invalidate(all);
+ public readonly void Invalidate(bool all = false) => UserSession.Invalidate(all);
/// <summary>
/// Marks the session ID to be regenerated during closing event
/// </summary>
- public void RegenID() => UserSession.RegenID();
+ public readonly void RegenID() => UserSession.RegenID();
/// <summary>
/// Marks the session to be detached from the current connection.
/// </summary>
- public void Detach() => UserSession.Detach();
+ public readonly void Detach() => UserSession.Detach();
#nullable disable
@@ -287,13 +287,13 @@ namespace VNLib.Plugins.Essentials.Sessions
}
///<inheritdoc/>
- public bool Equals(SessionInfo other) => SessionID.Equals(other.SessionID, StringComparison.Ordinal);
+ public readonly bool Equals(SessionInfo other) => SessionID.Equals(other.SessionID, StringComparison.Ordinal);
///<inheritdoc/>
- public override bool Equals(object? obj) => obj is SessionInfo si && Equals(si);
+ public readonly override bool Equals(object? obj) => obj is SessionInfo si && Equals(si);
///<inheritdoc/>
- public override int GetHashCode() => SessionID.GetHashCode(StringComparison.Ordinal);
+ public readonly override int GetHashCode() => SessionID.GetHashCode(StringComparison.Ordinal);
///<inheritdoc/>
public static bool operator ==(SessionInfo left, SessionInfo right) => left.Equals(right);
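
Marking these members readonly matters because SessionInfo is a struct: a readonly member guarantees it does not mutate instance state, so invocations through 'in' parameters or readonly fields no longer force the compiler to emit a defensive copy. A minimal illustration with a hypothetical type:

    public struct Counter
    {
        private int _n;
        public readonly int Peek() => _n; // callable on a readonly instance without a defensive copy
        public void Add() => _n++;        // mutating member must remain non-readonly
    }
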
diff --git a/lib/Utils.Memory/vnlib_rpmalloc/vendor/rpmalloc.c b/lib/Utils.Memory/vnlib_rpmalloc/vendor/rpmalloc.c
index ab7703b..4ce3aba 100644
--- a/lib/Utils.Memory/vnlib_rpmalloc/vendor/rpmalloc.c
+++ b/lib/Utils.Memory/vnlib_rpmalloc/vendor/rpmalloc.c
@@ -11,11 +11,11 @@
#include "rpmalloc.h"
-////////////
-///
-/// Build time configurable limits
-///
-//////
+ ////////////
+ ///
+ /// Build time configurable limits
+ ///
+ //////
#if defined(__clang__)
#pragma clang diagnostic ignored "-Wunused-macros"
@@ -266,7 +266,7 @@ extern int madvise(caddr_t, size_t, int);
typedef volatile long atomic32_t;
typedef volatile long long atomic64_t;
-typedef volatile void* atomicptr_t;
+typedef volatile void* atomicptr_t;
static FORCEINLINE int32_t atomic_load32(atomic32_t* src) { return *src; }
static FORCEINLINE void atomic_store32(atomic32_t* dst, int32_t val) { *dst = val; }
@@ -277,10 +277,10 @@ static FORCEINLINE int atomic_cas32_acquire(atomic32_t* dst, int32_t val, in
static FORCEINLINE void atomic_store32_release(atomic32_t* dst, int32_t val) { *dst = val; }
static FORCEINLINE int64_t atomic_load64(atomic64_t* src) { return *src; }
static FORCEINLINE int64_t atomic_add64(atomic64_t* val, int64_t add) { return (int64_t)InterlockedExchangeAdd64(val, add) + add; }
-static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return (void*)*src; }
+static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return (void*)*src; }
static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { *dst = val; }
static FORCEINLINE void atomic_store_ptr_release(atomicptr_t* dst, void* val) { *dst = val; }
-static FORCEINLINE void* atomic_exchange_ptr_acquire(atomicptr_t* dst, void* val) { return (void*)InterlockedExchangePointer((void* volatile*)dst, val); }
+static FORCEINLINE void* atomic_exchange_ptr_acquire(atomicptr_t* dst, void* val) { return (void*)InterlockedExchangePointer((void* volatile*)dst, val); }
static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return (InterlockedCompareExchangePointer((void* volatile*)dst, val, ref) == ref) ? 1 : 0; }
#define EXPECTED(x) (x)
@@ -290,9 +290,9 @@ static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref
#include <stdatomic.h>
-typedef volatile _Atomic(int32_t) atomic32_t;
-typedef volatile _Atomic(int64_t) atomic64_t;
-typedef volatile _Atomic(void*) atomicptr_t;
+typedef volatile _Atomic(int32_t)atomic32_t;
+typedef volatile _Atomic(int64_t)atomic64_t;
+typedef volatile _Atomic(void*)atomicptr_t;
static FORCEINLINE int32_t atomic_load32(atomic32_t* src) { return atomic_load_explicit(src, memory_order_relaxed); }
static FORCEINLINE void atomic_store32(atomic32_t* dst, int32_t val) { atomic_store_explicit(dst, val, memory_order_relaxed); }
@@ -303,10 +303,10 @@ static FORCEINLINE int atomic_cas32_acquire(atomic32_t* dst, int32_t val, in
static FORCEINLINE void atomic_store32_release(atomic32_t* dst, int32_t val) { atomic_store_explicit(dst, val, memory_order_release); }
static FORCEINLINE int64_t atomic_load64(atomic64_t* val) { return atomic_load_explicit(val, memory_order_relaxed); }
static FORCEINLINE int64_t atomic_add64(atomic64_t* val, int64_t add) { return atomic_fetch_add_explicit(val, add, memory_order_relaxed) + add; }
-static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return atomic_load_explicit(src, memory_order_relaxed); }
+static FORCEINLINE void* atomic_load_ptr(atomicptr_t* src) { return atomic_load_explicit(src, memory_order_relaxed); }
static FORCEINLINE void atomic_store_ptr(atomicptr_t* dst, void* val) { atomic_store_explicit(dst, val, memory_order_relaxed); }
static FORCEINLINE void atomic_store_ptr_release(atomicptr_t* dst, void* val) { atomic_store_explicit(dst, val, memory_order_release); }
-static FORCEINLINE void* atomic_exchange_ptr_acquire(atomicptr_t* dst, void* val) { return atomic_exchange_explicit(dst, val, memory_order_acquire); }
+static FORCEINLINE void* atomic_exchange_ptr_acquire(atomicptr_t* dst, void* val) { return atomic_exchange_explicit(dst, val, memory_order_acquire); }
static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref) { return atomic_compare_exchange_weak_explicit(dst, &ref, val, memory_order_relaxed, memory_order_relaxed); }
#define EXPECTED(x) __builtin_expect((x), 1)
@@ -386,8 +386,8 @@ static FORCEINLINE int atomic_cas_ptr(atomicptr_t* dst, void* val, void* ref
//! Number of spans to transfer between thread and global cache for large spans
#define THREAD_SPAN_LARGE_CACHE_TRANSFER 6
-_Static_assert((SMALL_GRANULARITY & (SMALL_GRANULARITY - 1)) == 0, "Small granularity must be power of two");
-_Static_assert((SPAN_HEADER_SIZE & (SPAN_HEADER_SIZE - 1)) == 0, "Span header size must be power of two");
+_Static_assert((SMALL_GRANULARITY& (SMALL_GRANULARITY - 1)) == 0, "Small granularity must be power of two");
+_Static_assert((SPAN_HEADER_SIZE& (SPAN_HEADER_SIZE - 1)) == 0, "Span header size must be power of two");
#if ENABLE_VALIDATE_ARGS
//! Maximum allocation size to avoid integer overflow
@@ -496,7 +496,7 @@ typedef struct size_class_use_t size_class_use_t;
// to reduce physical memory use).
struct span_t {
//! Free list
- void* free_list;
+ void* free_list;
//! Total block count of size class
uint32_t block_count;
//! Size class
@@ -524,34 +524,34 @@ struct span_t {
//! Alignment offset
uint32_t align_offset;
//! Owning heap
- heap_t* heap;
+ heap_t* heap;
//! Next span
- span_t* next;
+ span_t* next;
//! Previous span
- span_t* prev;
+ span_t* prev;
};
_Static_assert(sizeof(span_t) <= SPAN_HEADER_SIZE, "span size mismatch");
struct span_cache_t {
size_t count;
- span_t* span[MAX_THREAD_SPAN_CACHE];
+ span_t* span[MAX_THREAD_SPAN_CACHE];
};
typedef struct span_cache_t span_cache_t;
struct span_large_cache_t {
size_t count;
- span_t* span[MAX_THREAD_SPAN_LARGE_CACHE];
+ span_t* span[MAX_THREAD_SPAN_LARGE_CACHE];
};
typedef struct span_large_cache_t span_large_cache_t;
struct heap_size_class_t {
//! Free list of active span
- void* free_list;
+ void* free_list;
//! Double linked list of partially used spans with free blocks.
// Previous span pointer in head points to tail span of list.
- span_t* partial_span;
+ span_t* partial_span;
//! Early level cache of fully free spans
- span_t* cache;
+ span_t* cache;
};
typedef struct heap_size_class_t heap_size_class_t;
@@ -570,23 +570,23 @@ struct heap_t {
//! Number of full spans
size_t full_span_count;
//! Mapped but unused spans
- span_t* span_reserve;
+ span_t* span_reserve;
//! Master span for mapped but unused spans
- span_t* span_reserve_master;
+ span_t* span_reserve_master;
//! Number of mapped but unused spans
uint32_t spans_reserved;
//! Child count
atomic32_t child_count;
//! Next heap in id list
- heap_t* next_heap;
+ heap_t* next_heap;
//! Next heap in orphan list
- heap_t* next_orphan;
+ heap_t* next_orphan;
//! Heap ID
int32_t id;
//! Finalization state flag
int finalize;
//! Master heap owning the memory pages
- heap_t* master_heap;
+ heap_t* master_heap;
#if ENABLE_THREAD_CACHE
//! Arrays of fully freed spans, large spans with > 1 span count
span_large_cache_t span_large_cache[LARGE_CLASS_COUNT - 1];
@@ -594,9 +594,9 @@ struct heap_t {
#if RPMALLOC_FIRST_CLASS_HEAPS
//! Double linked list of fully utilized spans with free blocks for each size class.
// Previous span pointer in head points to tail span of list.
- span_t* full_span[SIZE_CLASS_COUNT];
+ span_t* full_span[SIZE_CLASS_COUNT];
//! Double linked list of large and huge spans allocated by this heap
- span_t* large_huge_span;
+ span_t* large_huge_span;
#endif
#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
//! Current and high water mark of spans used per span count
@@ -847,12 +847,12 @@ _rpmalloc_spin(void) {
#elif defined(__aarch64__) || (defined(__arm__) && __ARM_ARCH >= 7)
__asm__ volatile("yield" ::: "memory");
#elif defined(__powerpc__) || defined(__powerpc64__)
- // No idea if ever been compiled in such archs but ... as precaution
+ // No idea if ever been compiled in such archs but ... as precaution
__asm__ volatile("or 27,27,27");
#elif defined(__sparc__)
__asm__ volatile("rd %ccr, %g0 \n\trd %ccr, %g0 \n\trd %ccr, %g0");
#else
- struct timespec ts = {0};
+ struct timespec ts = { 0 };
nanosleep(&ts, 0);
#endif
}
@@ -881,7 +881,7 @@ _rpmalloc_thread_destructor(void* value) {
static void
_rpmalloc_set_name(void* address, size_t size) {
#if defined(__linux__) || defined(__ANDROID__)
- const char *name = _memory_huge_pages ? _memory_config.huge_page_name : _memory_config.page_name;
+ const char* name = _memory_huge_pages ? _memory_config.huge_page_name : _memory_config.page_name;
if (address == MAP_FAILED || !name)
return;
// If the kernel does not support CONFIG_ANON_VMA_NAME or if the call fails
@@ -940,7 +940,8 @@ _rpmalloc_mmap_os(size_t size, size_t* offset) {
if (_memory_config.map_fail_callback) {
if (_memory_config.map_fail_callback(size + padding))
return _rpmalloc_mmap_os(size, offset);
- } else {
+ }
+ else {
rpmalloc_assert(ptr, "Failed to map virtual memory block");
}
return 0;
@@ -980,7 +981,8 @@ _rpmalloc_mmap_os(size_t size, size_t* offset) {
if (_memory_config.map_fail_callback) {
if (_memory_config.map_fail_callback(size + padding))
return _rpmalloc_mmap_os(size, offset);
- } else if (errno != ENOMEM) {
+ }
+ else if (errno != ENOMEM) {
rpmalloc_assert((ptr != MAP_FAILED) && ptr, "Failed to map virtual memory block");
}
return 0;
@@ -1023,7 +1025,8 @@ _rpmalloc_unmap_os(void* address, size_t size, size_t offset, size_t release) {
if (munmap(address, release)) {
rpmalloc_assert(0, "Failed to unmap virtual memory block");
}
- } else {
+ }
+ else {
#if defined(MADV_FREE_REUSABLE)
int ret;
while ((ret = madvise(address, size, MADV_FREE_REUSABLE)) == -1 && (errno == EAGAIN))
@@ -1040,15 +1043,15 @@ _rpmalloc_unmap_os(void* address, size_t size, size_t offset, size_t release) {
#endif
rpmalloc_assert(0, "Failed to madvise virtual memory block as free");
}
- }
+ }
#endif
#endif
if (release)
_rpmalloc_stat_sub(&_mapped_pages_os, release >> _memory_page_size_shift);
-}
+ }
static void
-_rpmalloc_span_mark_as_subspan_unless_master(span_t* master, span_t* subspan, size_t span_count);
+_rpmalloc_span_mark_as_subspan_unless_master(span_t * master, span_t * subspan, size_t span_count);
//! Use global reserved spans to fulfill a memory map request (reserve size must be checked by caller)
static span_t*
@@ -1065,7 +1068,7 @@ _rpmalloc_global_get_reserved_spans(size_t span_count) {
//! Store the given spans as global reserve (must only be called from within new heap allocation, not thread safe)
static void
-_rpmalloc_global_set_reserved_spans(span_t* master, span_t* reserve, size_t reserve_span_count) {
+_rpmalloc_global_set_reserved_spans(span_t * master, span_t * reserve, size_t reserve_span_count) {
_memory_global_reserve_master = master;
_memory_global_reserve_count = reserve_span_count;
_memory_global_reserve = reserve;
@@ -1080,7 +1083,7 @@ _rpmalloc_global_set_reserved_spans(span_t* master, span_t* reserve, size_t rese
//! Add a span to double linked list at the head
static void
-_rpmalloc_span_double_link_list_add(span_t** head, span_t* span) {
+_rpmalloc_span_double_link_list_add(span_t * *head, span_t * span) {
if (*head)
(*head)->prev = span;
span->next = *head;
@@ -1089,7 +1092,7 @@ _rpmalloc_span_double_link_list_add(span_t** head, span_t* span) {
//! Pop head span from double linked list
static void
-_rpmalloc_span_double_link_list_pop_head(span_t** head, span_t* span) {
+_rpmalloc_span_double_link_list_pop_head(span_t * *head, span_t * span) {
rpmalloc_assert(*head == span, "Linked list corrupted");
span = *head;
*head = span->next;
@@ -1097,11 +1100,12 @@ _rpmalloc_span_double_link_list_pop_head(span_t** head, span_t* span) {
//! Remove a span from double linked list
static void
-_rpmalloc_span_double_link_list_remove(span_t** head, span_t* span) {
+_rpmalloc_span_double_link_list_remove(span_t * *head, span_t * span) {
rpmalloc_assert(*head, "Linked list corrupted");
if (*head == span) {
*head = span->next;
- } else {
+ }
+ else {
span_t* next_span = span->next;
span_t* prev_span = span->prev;
prev_span->next = next_span;
@@ -1118,17 +1122,17 @@ _rpmalloc_span_double_link_list_remove(span_t** head, span_t* span) {
//////
static void
-_rpmalloc_heap_cache_insert(heap_t* heap, span_t* span);
+_rpmalloc_heap_cache_insert(heap_t * heap, span_t * span);
static void
-_rpmalloc_heap_finalize(heap_t* heap);
+_rpmalloc_heap_finalize(heap_t * heap);
static void
-_rpmalloc_heap_set_reserved_spans(heap_t* heap, span_t* master, span_t* reserve, size_t reserve_span_count);
+_rpmalloc_heap_set_reserved_spans(heap_t * heap, span_t * master, span_t * reserve, size_t reserve_span_count);
//! Declare the span to be a subspan and store distance from master span and span count
static void
-_rpmalloc_span_mark_as_subspan_unless_master(span_t* master, span_t* subspan, size_t span_count) {
+_rpmalloc_span_mark_as_subspan_unless_master(span_t * master, span_t * subspan, size_t span_count) {
rpmalloc_assert((subspan != master) || (subspan->flags & SPAN_FLAG_MASTER), "Span master pointer and/or flag mismatch");
if (subspan != master) {
subspan->flags = SPAN_FLAG_SUBSPAN;
@@ -1140,7 +1144,7 @@ _rpmalloc_span_mark_as_subspan_unless_master(span_t* master, span_t* subspan, si
//! Use reserved spans to fulfill a memory map request (reserve size must be checked by caller)
static span_t*
-_rpmalloc_span_map_from_reserve(heap_t* heap, size_t span_count) {
+_rpmalloc_span_map_from_reserve(heap_t * heap, size_t span_count) {
//Update the heap span reserve
span_t* span = heap->span_reserve;
heap->span_reserve = (span_t*)pointer_offset(span, span_count * _memory_span_size);
@@ -1164,7 +1168,7 @@ _rpmalloc_span_align_count(size_t span_count) {
//! Setup a newly mapped span
static void
-_rpmalloc_span_initialize(span_t* span, size_t total_span_count, size_t span_count, size_t align_offset) {
+_rpmalloc_span_initialize(span_t * span, size_t total_span_count, size_t span_count, size_t align_offset) {
span->total_spans = (uint32_t)total_span_count;
span->span_count = (uint32_t)span_count;
span->align_offset = (uint32_t)align_offset;
@@ -1173,11 +1177,11 @@ _rpmalloc_span_initialize(span_t* span, size_t total_span_count, size_t span_cou
}
static void
-_rpmalloc_span_unmap(span_t* span);
+_rpmalloc_span_unmap(span_t * span);
//! Map an aligned set of spans, taking configured mapping granularity and the page size into account
static span_t*
-_rpmalloc_span_map_aligned_count(heap_t* heap, size_t span_count) {
+_rpmalloc_span_map_aligned_count(heap_t * heap, size_t span_count) {
//If we already have some, but not enough, reserved spans, release those to heap cache and map a new
//full set of spans. Otherwise we would waste memory if page size > span size (huge pages)
size_t aligned_span_count = _rpmalloc_span_align_count(span_count);
@@ -1215,7 +1219,7 @@ _rpmalloc_span_map_aligned_count(heap_t* heap, size_t span_count) {
//! Map in memory pages for the given number of spans (or use previously reserved pages)
static span_t*
-_rpmalloc_span_map(heap_t* heap, size_t span_count) {
+_rpmalloc_span_map(heap_t * heap, size_t span_count) {
if (span_count <= heap->spans_reserved)
return _rpmalloc_span_map_from_reserve(heap, span_count);
span_t* span = 0;
@@ -1248,7 +1252,7 @@ _rpmalloc_span_map(heap_t* heap, size_t span_count) {
//! Unmap memory pages for the given number of spans (or mark as unused if no partial unmappings)
static void
-_rpmalloc_span_unmap(span_t* span) {
+_rpmalloc_span_unmap(span_t * span) {
rpmalloc_assert((span->flags & SPAN_FLAG_MASTER) || (span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
rpmalloc_assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
@@ -1263,7 +1267,8 @@ _rpmalloc_span_unmap(span_t* span) {
rpmalloc_assert(span->align_offset == 0, "Span align offset corrupted");
if (_memory_span_size >= _memory_page_size)
_rpmalloc_unmap(span, span_count * _memory_span_size, 0, 0);
- } else {
+ }
+ else {
//Special double flag to denote an unmapped master
//It must be kept in memory since span header must be used
span->flags |= SPAN_FLAG_MASTER | SPAN_FLAG_SUBSPAN | SPAN_FLAG_UNMAPPED_MASTER;
@@ -1284,7 +1289,7 @@ _rpmalloc_span_unmap(span_t* span) {
//! Move the span (used for small or medium allocations) to the heap thread cache
static void
-_rpmalloc_span_release_to_cache(heap_t* heap, span_t* span) {
+_rpmalloc_span_release_to_cache(heap_t * heap, span_t * span) {
rpmalloc_assert(heap == span->heap, "Span heap pointer corrupted");
rpmalloc_assert(span->size_class < SIZE_CLASS_COUNT, "Invalid span size class");
rpmalloc_assert(span->span_count == 1, "Invalid span count");
@@ -1298,7 +1303,8 @@ _rpmalloc_span_release_to_cache(heap_t* heap, span_t* span) {
if (heap->size_class[span->size_class].cache)
_rpmalloc_heap_cache_insert(heap, heap->size_class[span->size_class].cache);
heap->size_class[span->size_class].cache = span;
- } else {
+ }
+ else {
_rpmalloc_span_unmap(span);
}
}
@@ -1328,7 +1334,8 @@ free_list_partial_init(void** list, void** first_block, void* page_start, void*
next_block = pointer_offset(next_block, block_size);
}
*((void**)free_block) = 0;
- } else {
+ }
+ else {
*list = 0;
}
return block_count;
@@ -1336,7 +1343,7 @@ free_list_partial_init(void** list, void** first_block, void* page_start, void*
//! Initialize an unused span (from cache or mapped) to be new active span, putting the initial free list in heap class free list
static void*
-_rpmalloc_span_initialize_new(heap_t* heap, heap_size_class_t* heap_size_class, span_t* span, uint32_t class_idx) {
+_rpmalloc_span_initialize_new(heap_t * heap, heap_size_class_t * heap_size_class, span_t * span, uint32_t class_idx) {
rpmalloc_assert(span->span_count == 1, "Internal failure");
size_class_t* size_class = _memory_size_class + class_idx;
span->size_class = class_idx;
@@ -1356,7 +1363,8 @@ _rpmalloc_span_initialize_new(heap_t* heap, heap_size_class_t* heap_size_class,
if (span->free_list_limit < span->block_count) {
_rpmalloc_span_double_link_list_add(&heap_size_class->partial_span, span);
span->used_count = span->free_list_limit;
- } else {
+ }
+ else {
#if RPMALLOC_FIRST_CLASS_HEAPS
_rpmalloc_span_double_link_list_add(&heap->full_span[class_idx], span);
#endif
@@ -1367,7 +1375,7 @@ _rpmalloc_span_initialize_new(heap_t* heap, heap_size_class_t* heap_size_class,
}
static void
-_rpmalloc_span_extract_free_list_deferred(span_t* span) {
+_rpmalloc_span_extract_free_list_deferred(span_t * span) {
// We need acquire semantics on the CAS operation since we are interested in the list size
// Refer to _rpmalloc_deallocate_defer_small_or_medium for further comments on this dependency
do {
@@ -1379,13 +1387,13 @@ _rpmalloc_span_extract_free_list_deferred(span_t* span) {
}
static int
-_rpmalloc_span_is_fully_utilized(span_t* span) {
+_rpmalloc_span_is_fully_utilized(span_t * span) {
rpmalloc_assert(span->free_list_limit <= span->block_count, "Span free list corrupted");
return !span->free_list && (span->free_list_limit >= span->block_count);
}
static int
-_rpmalloc_span_finalize(heap_t* heap, size_t iclass, span_t* span, span_t** list_head) {
+_rpmalloc_span_finalize(heap_t * heap, size_t iclass, span_t * span, span_t * *list_head) {
void* free_list = heap->size_class[iclass].free_list;
span_t* class_span = (span_t*)((uintptr_t)free_list & _memory_span_mask);
if (span == class_span) {
@@ -1404,7 +1412,8 @@ _rpmalloc_span_finalize(heap_t* heap, size_t iclass, span_t* span, span_t** list
}
if (last_block) {
*((void**)last_block) = free_list;
- } else {
+ }
+ else {
span->free_list = free_list;
}
heap->size_class[iclass].free_list = 0;
@@ -1435,7 +1444,7 @@ _rpmalloc_span_finalize(heap_t* heap, size_t iclass, span_t* span, span_t** list
//! Finalize a global cache
static void
-_rpmalloc_global_cache_finalize(global_cache_t* cache) {
+_rpmalloc_global_cache_finalize(global_cache_t * cache) {
while (!atomic_cas32_acquire(&cache->lock, 1, 0))
_rpmalloc_spin();
@@ -1453,7 +1462,7 @@ _rpmalloc_global_cache_finalize(global_cache_t* cache) {
}
static void
-_rpmalloc_global_cache_insert_spans(span_t** span, size_t span_count, size_t count) {
+_rpmalloc_global_cache_insert_spans(span_t * *span, size_t span_count, size_t count) {
const size_t cache_limit = (span_count == 1) ?
GLOBAL_CACHE_MULTIPLIER * MAX_THREAD_SPAN_CACHE :
GLOBAL_CACHE_MULTIPLIER * (MAX_THREAD_SPAN_LARGE_CACHE - (span_count >> 1));
@@ -1491,10 +1500,11 @@ _rpmalloc_global_cache_insert_spans(span_t** span, size_t span_count, size_t cou
span_t* current_span = span[ispan];
// Keep master spans that has remaining subspans to avoid dangling them
if ((current_span->flags & SPAN_FLAG_MASTER) &&
- (atomic_load32(&current_span->remaining_spans) > (int32_t)current_span->span_count)) {
+ (atomic_load32(&current_span->remaining_spans) > (int32_t)current_span->span_count)) {
current_span->next = keep;
keep = current_span;
- } else {
+ }
+ else {
_rpmalloc_span_unmap(current_span);
}
}
@@ -1508,7 +1518,7 @@ _rpmalloc_global_cache_insert_spans(span_t** span, size_t span_count, size_t cou
for (; islot < cache->count; ++islot) {
span_t* current_span = cache->span[islot];
if (!(current_span->flags & SPAN_FLAG_MASTER) || ((current_span->flags & SPAN_FLAG_MASTER) &&
- (atomic_load32(&current_span->remaining_spans) <= (int32_t)current_span->span_count))) {
+ (atomic_load32(&current_span->remaining_spans) <= (int32_t)current_span->span_count))) {
_rpmalloc_span_unmap(current_span);
cache->span[islot] = keep;
break;
@@ -1529,10 +1539,10 @@ _rpmalloc_global_cache_insert_spans(span_t** span, size_t span_count, size_t cou
atomic_store32_release(&cache->lock, 0);
}
-}
+ }
static size_t
-_rpmalloc_global_cache_extract_spans(span_t** span, size_t span_count, size_t count) {
+_rpmalloc_global_cache_extract_spans(span_t * *span, size_t span_count, size_t count) {
global_cache_t* cache = &_memory_span_cache[span_count - 1];
size_t extract_count = 0;
@@ -1579,7 +1589,7 @@ static void _rpmalloc_deallocate_huge(span_t*);
//! Store the given spans as reserve in the given heap
static void
-_rpmalloc_heap_set_reserved_spans(heap_t* heap, span_t* master, span_t* reserve, size_t reserve_span_count) {
+_rpmalloc_heap_set_reserved_spans(heap_t * heap, span_t * master, span_t * reserve, size_t reserve_span_count) {
heap->span_reserve_master = master;
heap->span_reserve = reserve;
heap->spans_reserved = (uint32_t)reserve_span_count;
@@ -1587,7 +1597,7 @@ _rpmalloc_heap_set_reserved_spans(heap_t* heap, span_t* master, span_t* reserve,
//! Adopt the deferred span cache list, optionally extracting the first single span for immediate re-use
static void
-_rpmalloc_heap_cache_adopt_deferred(heap_t* heap, span_t** single_span) {
+_rpmalloc_heap_cache_adopt_deferred(heap_t * heap, span_t * *single_span) {
span_t* span = (span_t*)((void*)atomic_exchange_ptr_acquire(&heap->span_free_deferred, 0));
while (span) {
span_t* next_span = (span_t*)span->free_list;
@@ -1605,10 +1615,12 @@ _rpmalloc_heap_cache_adopt_deferred(heap_t* heap, span_t** single_span) {
*single_span = span;
else
_rpmalloc_heap_cache_insert(heap, span);
- } else {
+ }
+ else {
if (span->size_class == SIZE_CLASS_HUGE) {
_rpmalloc_deallocate_huge(span);
- } else {
+ }
+ else {
rpmalloc_assert(span->size_class == SIZE_CLASS_LARGE, "Span size class invalid");
rpmalloc_assert(heap->full_span_count, "Heap span counter corrupted");
--heap->full_span_count;
@@ -1629,13 +1641,14 @@ _rpmalloc_heap_cache_adopt_deferred(heap_t* heap, span_t** single_span) {
}
static void
-_rpmalloc_heap_unmap(heap_t* heap) {
+_rpmalloc_heap_unmap(heap_t * heap) {
if (!heap->master_heap) {
if ((heap->finalize > 1) && !atomic_load32(&heap->child_count)) {
span_t* span = (span_t*)((uintptr_t)heap & _memory_span_mask);
_rpmalloc_span_unmap(span);
}
- } else {
+ }
+ else {
if (atomic_decr32(&heap->master_heap->child_count) == 0) {
_rpmalloc_heap_unmap(heap->master_heap);
}
@@ -1643,7 +1656,7 @@ _rpmalloc_heap_unmap(heap_t* heap) {
}
static void
-_rpmalloc_heap_global_finalize(heap_t* heap) {
+_rpmalloc_heap_global_finalize(heap_t * heap) {
if (heap->finalize++ > 1) {
--heap->finalize;
return;
@@ -1680,7 +1693,8 @@ _rpmalloc_heap_global_finalize(heap_t* heap) {
heap_t* list_heap = _memory_heaps[list_idx];
if (list_heap == heap) {
_memory_heaps[list_idx] = heap->next_heap;
- } else {
+ }
+ else {
while (list_heap->next_heap != heap)
list_heap = list_heap->next_heap;
list_heap->next_heap = heap->next_heap;
@@ -1691,7 +1705,7 @@ _rpmalloc_heap_global_finalize(heap_t* heap) {
//! Insert a single span into thread heap cache, releasing to global cache if overflow
static void
-_rpmalloc_heap_cache_insert(heap_t* heap, span_t* span) {
+_rpmalloc_heap_cache_insert(heap_t * heap, span_t * span) {
if (UNEXPECTED(heap->finalize != 0)) {
_rpmalloc_span_unmap(span);
_rpmalloc_heap_global_finalize(heap);
@@ -1715,7 +1729,8 @@ _rpmalloc_heap_cache_insert(heap_t* heap, span_t* span) {
#endif
span_cache->count = remain_count;
}
- } else {
+ }
+ else {
size_t cache_idx = span_count - 2;
span_large_cache_t* span_cache = heap->span_large_cache + cache_idx;
span_cache->span[span_cache->count++] = span;
@@ -1743,7 +1758,7 @@ _rpmalloc_heap_cache_insert(heap_t* heap, span_t* span) {
//! Extract the given number of spans from the different cache levels
static span_t*
-_rpmalloc_heap_thread_cache_extract(heap_t* heap, size_t span_count) {
+_rpmalloc_heap_thread_cache_extract(heap_t * heap, size_t span_count) {
span_t* span = 0;
#if ENABLE_THREAD_CACHE
span_cache_t* span_cache;
@@ -1760,11 +1775,12 @@ _rpmalloc_heap_thread_cache_extract(heap_t* heap, size_t span_count) {
}
static span_t*
-_rpmalloc_heap_thread_cache_deferred_extract(heap_t* heap, size_t span_count) {
+_rpmalloc_heap_thread_cache_deferred_extract(heap_t * heap, size_t span_count) {
span_t* span = 0;
if (span_count == 1) {
_rpmalloc_heap_cache_adopt_deferred(heap, &span);
- } else {
+ }
+ else {
_rpmalloc_heap_cache_adopt_deferred(heap, 0);
span = _rpmalloc_heap_thread_cache_extract(heap, span_count);
}
@@ -1772,7 +1788,7 @@ _rpmalloc_heap_thread_cache_deferred_extract(heap_t* heap, size_t span_count) {
}
static span_t*
-_rpmalloc_heap_reserved_extract(heap_t* heap, size_t span_count) {
+_rpmalloc_heap_reserved_extract(heap_t * heap, size_t span_count) {
if (heap->spans_reserved >= span_count)
return _rpmalloc_span_map(heap, span_count);
return 0;
@@ -1780,7 +1796,7 @@ _rpmalloc_heap_reserved_extract(heap_t* heap, size_t span_count) {
//! Extract a span from the global cache
static span_t*
-_rpmalloc_heap_global_cache_extract(heap_t* heap, size_t span_count) {
+_rpmalloc_heap_global_cache_extract(heap_t * heap, size_t span_count) {
#if ENABLE_GLOBAL_CACHE
#if ENABLE_THREAD_CACHE
span_cache_t* span_cache;
@@ -1788,7 +1804,8 @@ _rpmalloc_heap_global_cache_extract(heap_t* heap, size_t span_count) {
if (span_count == 1) {
span_cache = &heap->span_cache;
wanted_count = THREAD_SPAN_CACHE_TRANSFER;
- } else {
+ }
+ else {
span_cache = (span_cache_t*)(heap->span_large_cache + (span_count - 2));
wanted_count = THREAD_SPAN_LARGE_CACHE_TRANSFER;
}
@@ -1814,7 +1831,7 @@ _rpmalloc_heap_global_cache_extract(heap_t* heap, size_t span_count) {
}
static void
-_rpmalloc_inc_span_statistics(heap_t* heap, size_t span_count, uint32_t class_idx) {
+_rpmalloc_inc_span_statistics(heap_t * heap, size_t span_count, uint32_t class_idx) {
(void)sizeof(heap);
(void)sizeof(span_count);
(void)sizeof(class_idx);
@@ -1829,7 +1846,7 @@ _rpmalloc_inc_span_statistics(heap_t* heap, size_t span_count, uint32_t class_id
//! Get a span from one of the cache levels (thread cache, reserved, global cache) or fallback to mapping more memory
static span_t*
-_rpmalloc_heap_extract_new_span(heap_t* heap, heap_size_class_t* heap_size_class, size_t span_count, uint32_t class_idx) {
+_rpmalloc_heap_extract_new_span(heap_t * heap, heap_size_class_t * heap_size_class, size_t span_count, uint32_t class_idx) {
span_t* span;
#if ENABLE_THREAD_CACHE
if (heap_size_class && heap_size_class->cache) {
@@ -1880,7 +1897,7 @@ _rpmalloc_heap_extract_new_span(heap_t* heap, heap_size_class_t* heap_size_class
}
static void
-_rpmalloc_heap_initialize(heap_t* heap) {
+_rpmalloc_heap_initialize(heap_t * heap) {
_rpmalloc_memset_const(heap, 0, sizeof(heap_t));
//Get a new heap ID
heap->id = 1 + atomic_incr32(&_memory_heap_id);
@@ -1892,7 +1909,7 @@ _rpmalloc_heap_initialize(heap_t* heap) {
}
static void
-_rpmalloc_heap_orphan(heap_t* heap, int first_class) {
+_rpmalloc_heap_orphan(heap_t * heap, int first_class) {
heap->owner_thread = (uintptr_t)-1;
#if RPMALLOC_FIRST_CLASS_HEAPS
heap_t** heap_list = (first_class ? &_memory_first_class_orphan_heaps : &_memory_orphan_heaps);
@@ -1981,7 +1998,7 @@ _rpmalloc_heap_allocate_new(void) {
}
static heap_t*
-_rpmalloc_heap_extract_orphan(heap_t** heap_list) {
+_rpmalloc_heap_extract_orphan(heap_t** heap_list) {
heap_t* heap = *heap_list;
*heap_list = (heap ? heap->next_orphan : 0);
return heap;
@@ -2014,7 +2031,7 @@ _rpmalloc_heap_release(void* heapptr, int first_class, int release_cache) {
return;
//Release thread cache spans back to global cache
_rpmalloc_heap_cache_adopt_deferred(heap, 0);
- if (release_cache || heap->finalize) {
+ if (release_cache || heap->finalize) {
#if ENABLE_THREAD_CACHE
for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
span_cache_t* span_cache;
@@ -2028,7 +2045,8 @@ _rpmalloc_heap_release(void* heapptr, int first_class, int release_cache) {
if (heap->finalize) {
for (size_t ispan = 0; ispan < span_cache->count; ++ispan)
_rpmalloc_span_unmap(span_cache->span[ispan]);
- } else {
+ }
+ else {
_rpmalloc_stat_add64(&heap->thread_to_global, span_cache->count * (iclass + 1) * _memory_span_size);
_rpmalloc_stat_add(&heap->span_use[iclass].spans_to_global, span_cache->count);
_rpmalloc_global_cache_insert_spans(span_cache->span, iclass + 1, span_cache->count);
@@ -2071,7 +2089,7 @@ _rpmalloc_heap_release_raw_fc(void* heapptr) {
}
static void
-_rpmalloc_heap_finalize(heap_t* heap) {
+_rpmalloc_heap_finalize(heap_t * heap) {
if (heap->spans_reserved) {
span_t* span = _rpmalloc_span_map(heap, heap->spans_reserved);
_rpmalloc_span_unmap(span);
@@ -2138,7 +2156,7 @@ free_list_pop(void** list) {
//! Allocate a small/medium sized memory block from the given heap
static void*
-_rpmalloc_allocate_from_heap_fallback(heap_t* heap, heap_size_class_t* heap_size_class, uint32_t class_idx) {
+_rpmalloc_allocate_from_heap_fallback(heap_t * heap, heap_size_class_t * heap_size_class, uint32_t class_idx) {
span_t* span = heap_size_class->partial_span;
rpmalloc_assume(heap != 0);
if (EXPECTED(span != 0)) {
@@ -2150,7 +2168,8 @@ _rpmalloc_allocate_from_heap_fallback(heap_t* heap, heap_size_class_t* heap_size
block = free_list_pop(&span->free_list);
heap_size_class->free_list = span->free_list;
span->free_list = 0;
- } else {
+ }
+ else {
//If the span did not fully initialize free list, link up another page worth of blocks
void* block_start = pointer_offset(span, SPAN_HEADER_SIZE + ((size_t)span->free_list_limit * span->block_size));
span->free_list_limit += free_list_partial_init(&heap_size_class->free_list, &block,
@@ -2189,7 +2208,7 @@ _rpmalloc_allocate_from_heap_fallback(heap_t* heap, heap_size_class_t* heap_size
//! Allocate a small sized memory block from the given heap
static void*
-_rpmalloc_allocate_small(heap_t* heap, size_t size) {
+_rpmalloc_allocate_small(heap_t * heap, size_t size) {
rpmalloc_assert(heap, "No thread heap");
//Small sizes have unique size classes
const uint32_t class_idx = (uint32_t)((size + (SMALL_GRANULARITY - 1)) >> SMALL_GRANULARITY_SHIFT);
@@ -2202,7 +2221,7 @@ _rpmalloc_allocate_small(heap_t* heap, size_t size) {
//! Allocate a medium sized memory block from the given heap
static void*
-_rpmalloc_allocate_medium(heap_t* heap, size_t size) {
+_rpmalloc_allocate_medium(heap_t * heap, size_t size) {
rpmalloc_assert(heap, "No thread heap");
//Calculate the size class index and do a dependent lookup of the final class index (in case of merged classes)
const uint32_t base_idx = (uint32_t)(SMALL_CLASS_COUNT + ((size - (SMALL_SIZE_LIMIT + 1)) >> MEDIUM_GRANULARITY_SHIFT));
@@ -2216,7 +2235,7 @@ _rpmalloc_allocate_medium(heap_t* heap, size_t size) {
//! Allocate a large sized memory block from the given heap
static void*
-_rpmalloc_allocate_large(heap_t* heap, size_t size) {
+_rpmalloc_allocate_large(heap_t * heap, size_t size) {
rpmalloc_assert(heap, "No thread heap");
//Calculate number of needed max sized spans (including header)
//Since this function is never called if size > LARGE_SIZE_LIMIT
@@ -2246,7 +2265,7 @@ _rpmalloc_allocate_large(heap_t* heap, size_t size) {
//! Allocate a huge block by mapping memory pages directly
static void*
-_rpmalloc_allocate_huge(heap_t* heap, size_t size) {
+_rpmalloc_allocate_huge(heap_t * heap, size_t size) {
rpmalloc_assert(heap, "No thread heap");
_rpmalloc_heap_cache_adopt_deferred(heap, 0);
size += SPAN_HEADER_SIZE;
@@ -2275,7 +2294,7 @@ _rpmalloc_allocate_huge(heap_t* heap, size_t size) {
//! Allocate a block of the given size
static void*
-_rpmalloc_allocate(heap_t* heap, size_t size) {
+_rpmalloc_allocate(heap_t * heap, size_t size) {
_rpmalloc_stat_add64(&_allocation_counter, 1);
if (EXPECTED(size <= SMALL_SIZE_LIMIT))
return _rpmalloc_allocate_small(heap, size);
@@ -2287,7 +2306,7 @@ _rpmalloc_allocate(heap_t* heap, size_t size) {
}
static void*
-_rpmalloc_aligned_allocate(heap_t* heap, size_t alignment, size_t size) {
+_rpmalloc_aligned_allocate(heap_t * heap, size_t alignment, size_t size) {
if (alignment <= SMALL_GRANULARITY)
return _rpmalloc_allocate(heap, size);
@@ -2376,8 +2395,8 @@ retry:
ptr = (void*)(((uintptr_t)ptr & ~(uintptr_t)align_mask) + alignment);
if (((size_t)pointer_diff(ptr, span) >= _memory_span_size) ||
- (pointer_offset(ptr, size) > pointer_offset(span, mapped_size)) ||
- (((uintptr_t)ptr & _memory_span_mask) != (uintptr_t)span)) {
+ (pointer_offset(ptr, size) > pointer_offset(span, mapped_size)) ||
+ (((uintptr_t)ptr & _memory_span_mask) != (uintptr_t)span)) {
_rpmalloc_unmap(span, mapped_size, align_offset, mapped_size);
++num_pages;
if (num_pages > limit_pages) {
@@ -2413,7 +2432,7 @@ retry:
//! Deallocate the given small/medium memory block in the current thread local heap
static void
-_rpmalloc_deallocate_direct_small_or_medium(span_t* span, void* block) {
+_rpmalloc_deallocate_direct_small_or_medium(span_t * span, void* block) {
heap_t* heap = span->heap;
rpmalloc_assert(heap->owner_thread == get_thread_id() || !heap->owner_thread || heap->finalize, "Internal failure");
//Add block to free list
@@ -2445,7 +2464,7 @@ _rpmalloc_deallocate_direct_small_or_medium(span_t* span, void* block) {
}
static void
-_rpmalloc_deallocate_defer_free_span(heap_t* heap, span_t* span) {
+_rpmalloc_deallocate_defer_free_span(heap_t * heap, span_t * span) {
if (span->size_class != SIZE_CLASS_HUGE)
_rpmalloc_stat_inc(&heap->span_use[span->span_count - 1].spans_deferred);
//This list does not need ABA protection, no mutable side state
@@ -2456,7 +2475,7 @@ _rpmalloc_deallocate_defer_free_span(heap_t* heap, span_t* span) {
//! Put the block in the deferred free list of the owning span
static void
-_rpmalloc_deallocate_defer_small_or_medium(span_t* span, void* block) {
+_rpmalloc_deallocate_defer_small_or_medium(span_t * span, void* block) {
// The memory ordering here is a bit tricky, to avoid having to ABA protect
// the deferred free list to avoid desynchronization of list and list size
// we need to have acquire semantics on successful CAS of the pointer to
@@ -2478,7 +2497,7 @@ _rpmalloc_deallocate_defer_small_or_medium(span_t* span, void* block) {
}
static void
-_rpmalloc_deallocate_small_or_medium(span_t* span, void* p) {
+_rpmalloc_deallocate_small_or_medium(span_t * span, void* p) {
_rpmalloc_stat_inc_free(span->heap, span->size_class);
if (span->flags & SPAN_FLAG_ALIGNED_BLOCKS) {
//Realign pointer to block start
@@ -2500,7 +2519,7 @@ _rpmalloc_deallocate_small_or_medium(span_t* span, void* p) {
//! Deallocate the given large memory block to the current heap
static void
-_rpmalloc_deallocate_large(span_t* span) {
+_rpmalloc_deallocate_large(span_t * span) {
rpmalloc_assert(span->size_class == SIZE_CLASS_LARGE, "Bad span size class");
rpmalloc_assert(!(span->flags & SPAN_FLAG_MASTER) || !(span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
rpmalloc_assert((span->flags & SPAN_FLAG_MASTER) || (span->flags & SPAN_FLAG_SUBSPAN), "Span flag corrupted");
@@ -2536,14 +2555,16 @@ _rpmalloc_deallocate_large(span_t* span) {
heap->spans_reserved = span->span_count;
if (span->flags & SPAN_FLAG_MASTER) {
heap->span_reserve_master = span;
- } else { //SPAN_FLAG_SUBSPAN
+ }
+ else { //SPAN_FLAG_SUBSPAN
span_t* master = (span_t*)pointer_offset(span, -(intptr_t)((size_t)span->offset_from_master * _memory_span_size));
heap->span_reserve_master = master;
rpmalloc_assert(master->flags & SPAN_FLAG_MASTER, "Span flag corrupted");
rpmalloc_assert(atomic_load32(&master->remaining_spans) >= (int32_t)span->span_count, "Master span count corrupted");
}
_rpmalloc_stat_inc(&heap->span_use[idx].spans_to_reserved);
- } else {
+ }
+ else {
//Insert into cache list
_rpmalloc_heap_cache_insert(heap, span);
}
@@ -2551,7 +2572,7 @@ _rpmalloc_deallocate_large(span_t* span) {
//! Deallocate the given huge span
static void
-_rpmalloc_deallocate_huge(span_t* span) {
+_rpmalloc_deallocate_huge(span_t * span) {
rpmalloc_assert(span->heap, "No span heap");
#if RPMALLOC_FIRST_CLASS_HEAPS
int defer = (span->heap->owner_thread && (span->heap->owner_thread != get_thread_id()) && !span->heap->finalize);
@@ -2601,7 +2622,7 @@ _rpmalloc_usable_size(void* p);
//! Reallocate the given block to the given size
static void*
-_rpmalloc_reallocate(heap_t* heap, void* p, size_t size, size_t oldsize, unsigned int flags) {
+_rpmalloc_reallocate(heap_t * heap, void* p, size_t size, size_t oldsize, unsigned int flags) {
if (p) {
//Grab the span using guaranteed span alignment
span_t* span = (span_t*)((uintptr_t)p & _memory_span_mask);
@@ -2620,7 +2641,8 @@ _rpmalloc_reallocate(heap_t* heap, void* p, size_t size, size_t oldsize, unsigne
memmove(block, p, oldsize);
return block;
}
- } else if (span->size_class == SIZE_CLASS_LARGE) {
+ }
+ else if (span->size_class == SIZE_CLASS_LARGE) {
//Large block
size_t total_size = size + SPAN_HEADER_SIZE;
size_t num_spans = total_size >> _memory_span_size_shift;
@@ -2636,7 +2658,8 @@ _rpmalloc_reallocate(heap_t* heap, void* p, size_t size, size_t oldsize, unsigne
memmove(block, p, oldsize);
return block;
}
- } else {
+ }
+ else {
//Oversized block
size_t total_size = size + SPAN_HEADER_SIZE;
size_t num_pages = total_size >> _memory_page_size_shift;
@@ -2654,7 +2677,8 @@ _rpmalloc_reallocate(heap_t* heap, void* p, size_t size, size_t oldsize, unsigne
return block;
}
}
- } else {
+ }
+ else {
oldsize = 0;
}
@@ -2676,8 +2700,8 @@ _rpmalloc_reallocate(heap_t* heap, void* p, size_t size, size_t oldsize, unsigne
}
static void*
-_rpmalloc_aligned_reallocate(heap_t* heap, void* ptr, size_t alignment, size_t size, size_t oldsize,
- unsigned int flags) {
+_rpmalloc_aligned_reallocate(heap_t * heap, void* ptr, size_t alignment, size_t size, size_t oldsize,
+ unsigned int flags) {
if (alignment <= SMALL_GRANULARITY)
return _rpmalloc_reallocate(heap, ptr, size, oldsize, flags);
@@ -2761,7 +2785,7 @@ rpmalloc_initialize(void) {
}
int
-rpmalloc_initialize_config(const rpmalloc_config_t* config) {
+rpmalloc_initialize_config(const rpmalloc_config_t * config) {
if (_rpmalloc_initialized) {
rpmalloc_thread_initialize();
return 0;
@@ -2823,14 +2847,14 @@ rpmalloc_initialize_config(const rpmalloc_config_t* config) {
if (sysctlbyname("vm.pmap.pg_ps_enabled", &rc, &sz, NULL, 0) == 0 && rc == 1) {
static size_t defsize = 2 * 1024 * 1024;
int nsize = 0;
- size_t sizes[4] = {0};
+ size_t sizes[4] = { 0 };
_memory_huge_pages = 1;
_memory_page_size = defsize;
if ((nsize = getpagesizes(sizes, 4)) >= 2) {
- nsize --;
+ nsize--;
for (size_t csize = sizes[nsize]; nsize >= 0 && csize; --nsize, csize = sizes[nsize]) {
//! Unlikely, but as a precaution..
- rpmalloc_assert(!(csize & (csize -1)) && !(csize % 1024), "Invalid page size");
+ rpmalloc_assert(!(csize & (csize - 1)) && !(csize % 1024), "Invalid page size");
if (defsize < csize) {
_memory_page_size = csize;
break;
@@ -2846,7 +2870,8 @@ rpmalloc_initialize_config(const rpmalloc_config_t* config) {
#endif
}
#endif
- } else {
+ }
+ else {
if (_memory_config.enable_huge_pages)
_memory_huge_pages = 1;
}
@@ -2905,7 +2930,8 @@ rpmalloc_initialize_config(const rpmalloc_config_t* config) {
_memory_span_size = _memory_default_span_size;
_memory_span_size_shift = _memory_default_span_size_shift;
_memory_span_mask = _memory_default_span_mask;
- } else {
+ }
+ else {
size_t span_size = _memory_config.span_size;
if (span_size > (256 * 1024))
span_size = (256 * 1024);
@@ -2919,7 +2945,7 @@ rpmalloc_initialize_config(const rpmalloc_config_t* config) {
}
#endif
- _memory_span_map_count = ( _memory_config.span_map_count ? _memory_config.span_map_count : DEFAULT_SPAN_MAP_COUNT);
+ _memory_span_map_count = (_memory_config.span_map_count ? _memory_config.span_map_count : DEFAULT_SPAN_MAP_COUNT);
if ((_memory_span_size * _memory_span_map_count) < _memory_page_size)
_memory_span_map_count = (_memory_page_size / _memory_span_size);
if ((_memory_page_size >= _memory_span_size) && ((_memory_span_map_count * _memory_span_size) % _memory_page_size))
@@ -3131,7 +3157,7 @@ rprealloc(void* ptr, size_t size) {
extern RPMALLOC_ALLOCATOR void*
rpaligned_realloc(void* ptr, size_t alignment, size_t size, size_t oldsize,
- unsigned int flags) {
+ unsigned int flags) {
#if ENABLE_VALIDATE_ARGS
if ((size + alignment < size) || (alignment > _memory_page_size)) {
errno = EINVAL;
@@ -3180,7 +3206,7 @@ rpmemalign(size_t alignment, size_t size) {
}
extern inline int
-rpposix_memalign(void **memptr, size_t alignment, size_t size) {
+rpposix_memalign(void** memptr, size_t alignment, size_t size) {
if (memptr)
*memptr = rpaligned_alloc(alignment, size);
else
@@ -3198,7 +3224,7 @@ rpmalloc_thread_collect(void) {
}
void
-rpmalloc_thread_statistics(rpmalloc_thread_statistics_t* stats) {
+rpmalloc_thread_statistics(rpmalloc_thread_statistics_t * stats) {
memset(stats, 0, sizeof(rpmalloc_thread_statistics_t));
heap_t* heap = get_thread_heap_raw();
if (!heap)
@@ -3265,7 +3291,7 @@ rpmalloc_thread_statistics(rpmalloc_thread_statistics_t* stats) {
}
void
-rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats) {
+rpmalloc_global_statistics(rpmalloc_global_statistics_t * stats) {
memset(stats, 0, sizeof(rpmalloc_global_statistics_t));
#if ENABLE_STATISTICS
stats->mapped = (size_t)atomic_load32(&_mapped_pages) * _memory_page_size;
@@ -3297,7 +3323,7 @@ rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats) {
#if ENABLE_STATISTICS
static void
-_memory_heap_dump_statistics(heap_t* heap, void* file) {
+_memory_heap_dump_statistics(heap_t * heap, void* file) {
fprintf(file, "Heap %d stats:\n", heap->id);
fprintf(file, "Class CurAlloc PeakAlloc TotAlloc TotFree BlkSize BlkCount SpansCur SpansPeak PeakAllocMiB ToCacheMiB FromCacheMiB FromReserveMiB MmapCalls\n");
for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
@@ -3438,13 +3464,13 @@ rpmalloc_heap_acquire(void) {
}
extern inline void
-rpmalloc_heap_release(rpmalloc_heap_t* heap) {
+rpmalloc_heap_release(rpmalloc_heap_t * heap) {
if (heap)
_rpmalloc_heap_release(heap, 1, 1);
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_alloc(rpmalloc_heap_t* heap, size_t size) {
+rpmalloc_heap_alloc(rpmalloc_heap_t * heap, size_t size) {
#if ENABLE_VALIDATE_ARGS
if (size >= MAX_ALLOC_SIZE) {
errno = EINVAL;
@@ -3455,7 +3481,7 @@ rpmalloc_heap_alloc(rpmalloc_heap_t* heap, size_t size) {
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_aligned_alloc(rpmalloc_heap_t* heap, size_t alignment, size_t size) {
+rpmalloc_heap_aligned_alloc(rpmalloc_heap_t * heap, size_t alignment, size_t size) {
#if ENABLE_VALIDATE_ARGS
if (size >= MAX_ALLOC_SIZE) {
errno = EINVAL;
@@ -3466,12 +3492,12 @@ rpmalloc_heap_aligned_alloc(rpmalloc_heap_t* heap, size_t alignment, size_t size
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_calloc(rpmalloc_heap_t* heap, size_t num, size_t size) {
+rpmalloc_heap_calloc(rpmalloc_heap_t * heap, size_t num, size_t size) {
return rpmalloc_heap_aligned_calloc(heap, 0, num, size);
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_aligned_calloc(rpmalloc_heap_t* heap, size_t alignment, size_t num, size_t size) {
+rpmalloc_heap_aligned_calloc(rpmalloc_heap_t * heap, size_t alignment, size_t num, size_t size) {
size_t total;
#if ENABLE_VALIDATE_ARGS
#if PLATFORM_WINDOWS
@@ -3497,7 +3523,7 @@ rpmalloc_heap_aligned_calloc(rpmalloc_heap_t* heap, size_t alignment, size_t num
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_realloc(rpmalloc_heap_t* heap, void* ptr, size_t size, unsigned int flags) {
+rpmalloc_heap_realloc(rpmalloc_heap_t * heap, void* ptr, size_t size, unsigned int flags) {
#if ENABLE_VALIDATE_ARGS
if (size >= MAX_ALLOC_SIZE) {
errno = EINVAL;
@@ -3508,7 +3534,7 @@ rpmalloc_heap_realloc(rpmalloc_heap_t* heap, void* ptr, size_t size, unsigned in
}
extern inline RPMALLOC_ALLOCATOR void*
-rpmalloc_heap_aligned_realloc(rpmalloc_heap_t* heap, void* ptr, size_t alignment, size_t size, unsigned int flags) {
+rpmalloc_heap_aligned_realloc(rpmalloc_heap_t * heap, void* ptr, size_t alignment, size_t size, unsigned int flags) {
#if ENABLE_VALIDATE_ARGS
if ((size + alignment < size) || (alignment > _memory_page_size)) {
errno = EINVAL;
@@ -3519,13 +3545,13 @@ rpmalloc_heap_aligned_realloc(rpmalloc_heap_t* heap, void* ptr, size_t alignment
}
extern inline void
-rpmalloc_heap_free(rpmalloc_heap_t* heap, void* ptr) {
+rpmalloc_heap_free(rpmalloc_heap_t * heap, void* ptr) {
(void)sizeof(heap);
_rpmalloc_deallocate(ptr);
}
extern inline void
-rpmalloc_heap_free_all(rpmalloc_heap_t* heap) {
+rpmalloc_heap_free_all(rpmalloc_heap_t * heap) {
span_t* span;
span_t* next_span;
@@ -3545,6 +3571,11 @@ rpmalloc_heap_free_all(rpmalloc_heap_t* heap) {
_rpmalloc_heap_cache_insert(heap, span);
span = next_span;
}
+
+ span = heap->size_class[iclass].cache;
+ if (span)
+ _rpmalloc_heap_cache_insert(heap, span);
+ heap->size_class[iclass].cache = 0;
}
memset(heap->size_class, 0, sizeof(heap->size_class));
memset(heap->full_span, 0, sizeof(heap->full_span));
@@ -3594,7 +3625,7 @@ rpmalloc_heap_free_all(rpmalloc_heap_t* heap) {
}
extern inline void
-rpmalloc_heap_thread_set_current(rpmalloc_heap_t* heap) {
+rpmalloc_heap_thread_set_current(rpmalloc_heap_t * heap) {
heap_t* prev_heap = get_thread_heap_raw();
if (prev_heap != heap) {
set_thread_heap(heap);
@@ -3626,4 +3657,4 @@ rpmalloc_get_heap_for_ptr(void* ptr)
void
rpmalloc_linker_reference(void) {
(void)sizeof(_rpmalloc_initialized);
-}
+} \ No newline at end of file
diff --git a/lib/Utils.Memory/vnlib_rpmalloc/vnlib_rpmalloc.vcxitems b/lib/Utils.Memory/vnlib_rpmalloc/vnlib_rpmalloc.vcxitems
index ff70e80..eeda4c8 100644
--- a/lib/Utils.Memory/vnlib_rpmalloc/vnlib_rpmalloc.vcxitems
+++ b/lib/Utils.Memory/vnlib_rpmalloc/vnlib_rpmalloc.vcxitems
@@ -19,10 +19,14 @@
<Text Include="$(MSBuildThisFileDirectory)license" />
</ItemGroup>
<ItemGroup>
+ <ClCompile Include="$(MSBuildThisFileDirectory)vendor\rpmalloc.c" />
<ClCompile Include="$(MSBuildThisFileDirectory)vnlib_rpmalloc.c" />
</ItemGroup>
<ItemGroup>
<None Include="$(MSBuildThisFileDirectory)package.json" />
<None Include="$(MSBuildThisFileDirectory)Taskfile.yaml" />
</ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="$(MSBuildThisFileDirectory)vendor\rpmalloc.h" />
+ </ItemGroup>
</Project> \ No newline at end of file
diff --git a/lib/Utils/src/AdvancedTrace.cs b/lib/Utils/src/AdvancedTrace.cs
new file mode 100644
index 0000000..0507089
--- /dev/null
+++ b/lib/Utils/src/AdvancedTrace.cs
@@ -0,0 +1,52 @@
+/*
+* Copyright (c) 2024 Vaughn Nugent
+*
+* Library: VNLib
+* Package: VNLib.Utils
+* File: AdvancedTrace.cs
+*
+* AdvancedTrace.cs is part of VNLib.Utils which is part of the larger
+* VNLib collection of libraries and utilities.
+*
+* VNLib.Utils is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published
+* by the Free Software Foundation, either version 2 of the License,
+* or (at your option) any later version.
+*
+* VNLib.Utils is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with VNLib.Utils. If not, see http://www.gnu.org/licenses/.
+*/
+
+using System.Diagnostics;
+
+namespace VNLib.Utils
+{
+ /// <summary>
+ /// Provides methods for advanced tracing that are only optionally compiled
+ /// with the VNLIB_ADVANCED_TRACING symbol defined
+ /// </summary>
+ internal static class AdvancedTrace
+ {
+ const string AdvancedTraceSymbol = "VNLIB_ADVANCED_TRACING";
+
+ [Conditional(AdvancedTraceSymbol)]
+ public static void WriteLine(string? message) => Trace.WriteLine(message);
+
+ [Conditional(AdvancedTraceSymbol)]
+ public static void WriteLine(string? message, string? category) => Trace.WriteLine(message, category);
+
+ [Conditional(AdvancedTraceSymbol)]
+ public static void WriteLineIf(bool condition, string? message) => Trace.WriteLineIf(condition, message);
+
+ [Conditional(AdvancedTraceSymbol)]
+ public static void WriteLineIf(bool condition, string? message, string? category) => Trace.WriteLineIf(condition, message, category);
+
+ [Conditional(AdvancedTraceSymbol)]
+ public static void WriteLine(object? value) => Trace.WriteLine(value);
+ }
+} \ No newline at end of file
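AdvancedTrace works through [Conditional]: when the calling code is compiled without VNLIB_ADVANCED_TRACING defined, the compiler removes the call sites and their argument expressions entirely, so the tracing costs nothing in Release builds. A minimal sketch of the pattern as it applies inside VNLib.Utils (the demo type and message text are illustrative only):

namespace VNLib.Utils
{
    internal static class ConditionalDemo
    {
        public static void Demo(string path)
        {
            // With VNLIB_ADVANCED_TRACING undefined, the compiler strips
            // these calls and their argument expressions entirely.
            AdvancedTrace.WriteLine($"Loading managed library {path}");
            AdvancedTrace.WriteLineIf(path.Length == 0, "Path was empty");
        }
    }
}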
diff --git a/lib/Utils/src/Async/AsyncAccessSerializer.cs b/lib/Utils/src/Async/AsyncAccessSerializer.cs
index 76532bc..1beb4dc 100644
--- a/lib/Utils/src/Async/AsyncAccessSerializer.cs
+++ b/lib/Utils/src/Async/AsyncAccessSerializer.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2023 Vaughn Nugent
+* Copyright (c) 2024 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Utils
@@ -37,42 +37,35 @@ namespace VNLib.Utils.Async
/// Creates a base concrete implementation of an <see cref="IAsyncAccessSerializer{TMoniker}"/>
/// </summary>
/// <typeparam name="TMoniker">The moniker (key) type</typeparam>
- public class AsyncAccessSerializer<TMoniker> : IAsyncAccessSerializer<TMoniker>, ICacheHolder where TMoniker : notnull
+ /// <remarks>
+ /// Initializes a new <see cref="AsyncAccessSerializer{TMoniker}"/> with the desired
+ /// caching pool size and initial capacity
+ /// </remarks>
+ /// <param name="maxPoolSize">The maxium number of cached wait entry objects</param>
+ /// <param name="initialCapacity">The initial capacity of the wait table</param>
+ /// <param name="keyComparer">The moniker key comparer</param>
+ public class AsyncAccessSerializer<TMoniker>(int maxPoolSize, int initialCapacity, IEqualityComparer<TMoniker>? keyComparer)
+ : IAsyncAccessSerializer<TMoniker>, ICacheHolder where TMoniker : notnull
{
/// <summary>
/// The mutual exclusion monitor locking object
/// </summary>
- protected object StoreLock { get; }
+ protected object StoreLock { get; } = new();
/// <summary>
/// A cache pool for <see cref="WaitEntry"/>
/// </summary>
- protected Stack<WaitEntry> EntryPool { get; }
+ protected Stack<WaitEntry> EntryPool { get; } = new(maxPoolSize);
/// <summary>
/// The table containing all active waiters
/// </summary>
- protected Dictionary<TMoniker, WaitEntry> WaitTable { get; }
+ protected Dictionary<TMoniker, WaitEntry> WaitTable { get; } = new(initialCapacity, keyComparer);
/// <summary>
/// The maximum number of elements allowed in the internal entry cache pool
/// </summary>
- protected int MaxPoolSize { get; }
-
- /// <summary>
- /// Initializes a new <see cref="AsyncAccessSerializer{TMoniker}"/> with the desired
- /// caching pool size and initial capacity
- /// </summary>
- /// <param name="maxPoolSize">The maxium number of cached wait entry objects</param>
- /// <param name="initialCapacity">The initial capacity of the wait table</param>
- /// <param name="keyComparer">The moniker key comparer</param>
- public AsyncAccessSerializer(int maxPoolSize, int initialCapacity, IEqualityComparer<TMoniker>? keyComparer)
- {
- MaxPoolSize = maxPoolSize;
- StoreLock = new();
- EntryPool = new(maxPoolSize);
- WaitTable = new(initialCapacity, keyComparer);
- }
+ protected int MaxPoolSize { get; } = maxPoolSize;
///<inheritdoc/>
public virtual Task WaitAsync(TMoniker moniker, CancellationToken cancellation = default)
@@ -464,11 +457,9 @@ namespace VNLib.Utils.Async
* next task in the queue and be awaitable as a task
*/
- private sealed class TaskNode : Task
+ private sealed class TaskNode(Action<object?> callback, object item, CancellationToken cancellation)
+ : Task(callback, item, cancellation)
{
- public TaskNode(Action<object?> callback, object item, CancellationToken cancellation) : base(callback, item, cancellation)
- { }
-
public TaskNode? Next { get; set; }
}
@@ -500,7 +491,7 @@ namespace VNLib.Utils.Async
/// another waiter is not selected.
/// </para>
/// </summary>
- /// <returns>A value that indicates if the task was transition successfully</returns>
+ /// <returns>A value that indicates if the task was transitioned successfully</returns>
public readonly bool Release()
{
//return success if no next waiter
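The refactor above leans on C# 12 primary constructors: the constructor parameters are captured once and consumed by the property initializers, replacing the hand-written constructor body. A minimal sketch of the same pattern under illustrative names:

using System.Collections.Generic;

public class PooledTable<TKey>(int maxPoolSize, IEqualityComparer<TKey>? comparer) where TKey : notnull
{
    // Each initializer runs once at construction, reading the captured parameters
    protected object StoreLock { get; } = new();
    protected Stack<object> EntryPool { get; } = new(maxPoolSize);
    protected Dictionary<TKey, object> WaitTable { get; } = new(comparer);
    protected int MaxPoolSize { get; } = maxPoolSize;
}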
diff --git a/lib/Utils/src/Async/AsyncQueue.cs b/lib/Utils/src/Async/AsyncQueue.cs
index 45f1219..e94d08e 100644
--- a/lib/Utils/src/Async/AsyncQueue.cs
+++ b/lib/Utils/src/Async/AsyncQueue.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2023 Vaughn Nugent
+* Copyright (c) 2024 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Utils
@@ -93,19 +93,13 @@ namespace VNLib.Utils.Async
/// Initializes a new unbounded channel-based queue
/// </summary>
/// <param name="ubOptions">Channel options</param>
- public AsyncQueue(UnboundedChannelOptions ubOptions)
- {
- _channel = Channel.CreateUnbounded<T>(ubOptions);
- }
+ public AsyncQueue(UnboundedChannelOptions ubOptions) => _channel = Channel.CreateUnbounded<T>(ubOptions);
/// <summary>
/// Initializes a new bounded channel-based queue
/// </summary>
/// <param name="options">Channel options</param>
- public AsyncQueue(BoundedChannelOptions options)
- {
- _channel = Channel.CreateBounded<T>(options);
- }
+ public AsyncQueue(BoundedChannelOptions options) => _channel = Channel.CreateBounded<T>(options);
/// <inheritdoc/>
public bool TryEnque(T item) => _channel.Writer.TryWrite(item);
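Both constructors are now expression-bodied wrappers over System.Threading.Channels. A minimal usage sketch of the bounded form (only the constructor and TryEnque are confirmed by this diff; the option values are illustrative):

using System.Threading.Channels;
using VNLib.Utils.Async;

var queue = new AsyncQueue<int>(new BoundedChannelOptions(64)
{
    FullMode = BoundedChannelFullMode.Wait,  // writers wait when the queue is full
    SingleReader = true,
    SingleWriter = false,
});

bool queued = queue.TryEnque(42);   // forwards to Channel.Writer.TryWrite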
diff --git a/lib/Utils/src/Extensions/MemoryExtensions.cs b/lib/Utils/src/Extensions/MemoryExtensions.cs
index 8f90525..65d90a0 100644
--- a/lib/Utils/src/Extensions/MemoryExtensions.cs
+++ b/lib/Utils/src/Extensions/MemoryExtensions.cs
@@ -50,6 +50,8 @@ namespace VNLib.Utils.Extensions
/// <returns>A new <see cref="OpenResourceHandle{T}"/> encapsulating the rented array</returns>
public static UnsafeMemoryHandle<T> UnsafeAlloc<T>(this ArrayPool<T> pool, int size, bool zero = false) where T : unmanaged
{
+ ArgumentNullException.ThrowIfNull(pool);
+
T[] array = pool.Rent(size);
if (zero)
@@ -268,7 +270,7 @@ namespace VNLib.Utils.Extensions
/// <summary>
/// Gets a reference to the element at the specified offset from the base
- /// address of the <see cref="MemoryHandle{T}"/>
+ /// address of the <see cref="IMemoryHandle{T}"/>
/// </summary>
/// <param name="block"></param>
/// <param name="offset">The element offset from the base address to add to the returned reference</param>
@@ -766,52 +768,6 @@ namespace VNLib.Utils.Extensions
#endregion
/// <summary>
- /// Slices the current array by the specified starting offset to the end
- /// of the array
- /// </summary>
- /// <typeparam name="T">The array type</typeparam>
- /// <param name="arr"></param>
- /// <param name="start">The start offset of the new array slice</param>
- /// <returns>The sliced array</returns>
- /// <exception cref="ArgumentOutOfRangeException"></exception>
- public static T[] Slice<T>(this T[] arr, int start)
- {
- ArgumentNullException.ThrowIfNull(arr);
- ArgumentOutOfRangeException.ThrowIfNegative(start);
- ArgumentOutOfRangeException.ThrowIfGreaterThanOrEqual(start, arr.Length);
-
- Range sliceRange = new(start, arr.Length - start);
- return RuntimeHelpers.GetSubArray(arr, sliceRange);
- }
-
- /// <summary>
- /// Slices the current array by the specified starting offset to including the
- /// speciifed number of items
- /// </summary>
- /// <typeparam name="T">The array type</typeparam>
- /// <param name="arr"></param>
- /// <param name="start">The start offset of the new array slice</param>
- /// <param name="count">The size of the new array</param>
- /// <returns>The sliced array</returns>
- /// <exception cref="ArgumentOutOfRangeException"></exception>
- public static T[] Slice<T>(this T[] arr, int start, int count)
- {
- ArgumentNullException.ThrowIfNull(arr);
- ArgumentOutOfRangeException.ThrowIfNegative(start);
- ArgumentOutOfRangeException.ThrowIfNegative(count);
- ArgumentOutOfRangeException.ThrowIfGreaterThanOrEqual(start + count, arr.Length);
-
- if(count == 0)
- {
- return [];
- }
-
- //Calc the slice range
- Range sliceRange = new(start, start + count);
- return RuntimeHelpers.GetSubArray(arr, sliceRange);
- }
-
- /// <summary>
/// Creates a new sub-sequence over the target handle (allows for a convenient sub-span)
/// </summary>
/// <typeparam name="T"></typeparam>
@@ -877,6 +833,7 @@ namespace VNLib.Utils.Extensions
/// <returns>The sub-sequence of the current handle</returns>
/// <exception cref="ArgumentOutOfRangeException"></exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
+ [Obsolete("Functions are included directly on the type now")]
public static Span<T> AsSpan<T>(this in UnsafeMemoryHandle<T> handle, int start) where T: unmanaged => handle.Span[start..];
/// <summary>
@@ -889,6 +846,7 @@ namespace VNLib.Utils.Extensions
/// <returns>The sub-sequence of the current handle</returns>
/// <exception cref="ArgumentOutOfRangeException"></exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
+ [Obsolete("Functions are included directly on the type now")]
public static Span<T> AsSpan<T>(this in UnsafeMemoryHandle<T> handle, int start, int count) where T : unmanaged => handle.Span.Slice(start, count);
/// <summary>
diff --git a/lib/Utils/src/Memory/IMemoryHandle.cs b/lib/Utils/src/Memory/IMemoryHandle.cs
index f4e1a36..b538f87 100644
--- a/lib/Utils/src/Memory/IMemoryHandle.cs
+++ b/lib/Utils/src/Memory/IMemoryHandle.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2023 Vaughn Nugent
+* Copyright (c) 2024 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Utils
@@ -46,7 +46,7 @@ namespace VNLib.Utils.Memory
/// <summary>
/// Gets a reference to the first element in the block
/// </summary>
- /// <returns>The reference</returns>
+ /// <returns>The reference to the first element in the block</returns>
ref T GetReference();
}
diff --git a/lib/Utils/src/Memory/MemoryHandle.cs b/lib/Utils/src/Memory/MemoryHandle.cs
index c5cc295..16fc555 100644
--- a/lib/Utils/src/Memory/MemoryHandle.cs
+++ b/lib/Utils/src/Memory/MemoryHandle.cs
@@ -188,8 +188,7 @@ namespace VNLib.Utils.Memory
this.ThrowIfClosed();
//Get ptr and offset it
- T* bs = ((T*)handle) + elements;
- return bs;
+ return ((T*)handle) + elements;
}
///<inheritdoc/>
@@ -199,6 +198,16 @@ namespace VNLib.Utils.Memory
return ref MemoryUtil.GetRef<T>(handle);
}
+ /// <summary>
+ /// Gets a reference to the element at the specified offset from the base
+ /// address of the <see cref="MemoryHandle{T}"/>
+ /// </summary>
+ /// <param name="offset">The element offset from the base address to add to the returned reference</param>
+ /// <returns>The reference to the item at the desired offset</returns>
+ /// <exception cref="ArgumentOutOfRangeException"></exception>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public unsafe ref T GetOffsetRef(nuint offset) => ref Unsafe.AsRef<T>(GetOffset(offset));
+
///<inheritdoc/>
///<exception cref="ObjectDisposedException"></exception>
///<exception cref="ArgumentOutOfRangeException"></exception>
@@ -248,6 +257,6 @@ namespace VNLib.Utils.Memory
public override bool Equals(object? obj) => obj is MemoryHandle<T> oHandle && Equals(oHandle);
///<inheritdoc/>
- public override int GetHashCode() => base.GetHashCode();
+ public override int GetHashCode() => HashCode.Combine(base.GetHashCode(), handle.GetHashCode(), _length);
}
} \ No newline at end of file
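The new GetOffsetRef pairs the existing GetOffset pointer arithmetic with Unsafe.AsRef, so callers get a managed ref instead of a raw pointer. A minimal sketch, assuming handle is a valid MemoryHandle<byte> obtained from one of the library's unmanaged heaps:

// hypothetical handle from an unmanaged heap allocation
ref byte third = ref handle.GetOffsetRef(2);   // ref to the element at offset 2
third = 0xFF;                                  // writes straight into native memory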
diff --git a/lib/Utils/src/Memory/MemoryUtil.cs b/lib/Utils/src/Memory/MemoryUtil.cs
index b6eadbc..7ce7c81 100644
--- a/lib/Utils/src/Memory/MemoryUtil.cs
+++ b/lib/Utils/src/Memory/MemoryUtil.cs
@@ -1349,6 +1349,51 @@ namespace VNLib.Utils.Memory
public static MemoryHandle GetMemoryHandleFromPointer(IntPtr value, GCHandle handle = default, IPinnable? pinnable = null)
=> new (value.ToPointer(), handle, pinnable);
+
+ /// <summary>
+ /// Slices the current array by the specified starting offset to the end
+ /// of the array
+ /// </summary>
+ /// <typeparam name="T">The array type</typeparam>
+ /// <param name="arr"></param>
+ /// <param name="start">The start offset of the new array slice</param>
+ /// <returns>The sliced array</returns>
+ /// <exception cref="ArgumentNullException"></exception>
+ /// <exception cref="ArgumentOutOfRangeException"></exception>
+ public static T[] SliceArray<T>(T[] arr, int start)
+ {
+ ArgumentNullException.ThrowIfNull(arr);
+ return SliceArray(arr, start, arr.Length - start);
+ }
+
+ /// <summary>
+ /// Slices the current array from the specified starting offset, including the
+ /// specified number of items
+ /// </summary>
+ /// <typeparam name="T">The array type</typeparam>
+ /// <param name="arr"></param>
+ /// <param name="start">The start offset of the new array slice</param>
+ /// <param name="count">The size of the new array</param>
+ /// <returns>The sliced array</returns>
+ /// <exception cref="ArgumentNullException"></exception>
+ /// <exception cref="ArgumentOutOfRangeException"></exception>
+ public static T[] SliceArray<T>(T[] arr, int start, int count)
+ {
+ ArgumentNullException.ThrowIfNull(arr);
+ ArgumentOutOfRangeException.ThrowIfNegative(start);
+ ArgumentOutOfRangeException.ThrowIfNegative(count);
+ ArgumentOutOfRangeException.ThrowIfGreaterThan(start + count, arr.Length);
+
+ if (count == 0)
+ {
+ return [];
+ }
+
+ //Calc the slice range
+ Range sliceRange = new(start, start + count);
+ return RuntimeHelpers.GetSubArray(arr, sliceRange);
+ }
+
/// <summary>
/// Gets a <see cref="Span{T}"/> from the supplied address
/// </summary>
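SliceArray replaces the Slice extension methods removed from MemoryExtensions, moving them onto MemoryUtil so they no longer shadow instance members on arrays. A quick usage sketch (values illustrative):

using VNLib.Utils.Memory;

int[] src  = [1, 2, 3, 4, 5];
int[] tail = MemoryUtil.SliceArray(src, start: 2);            // [3, 4, 5]
int[] mid  = MemoryUtil.SliceArray(src, start: 1, count: 3);  // [2, 3, 4]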
diff --git a/lib/Utils/src/Memory/UnsafeMemoryHandle.cs b/lib/Utils/src/Memory/UnsafeMemoryHandle.cs
index fbf96eb..d93739d 100644
--- a/lib/Utils/src/Memory/UnsafeMemoryHandle.cs
+++ b/lib/Utils/src/Memory/UnsafeMemoryHandle.cs
@@ -49,8 +49,9 @@ namespace VNLib.Utils.Memory
PrivateHeap
}
- private readonly IntPtr _memoryPtr;
private readonly int _length;
+
+ private readonly IntPtr _memoryPtr;
private readonly HandleType _handleType;
private readonly T[]? _poolArr;
@@ -115,6 +116,11 @@ namespace VNLib.Utils.Memory
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal UnsafeMemoryHandle(IUnmangedHeap heap, IntPtr initial, int elements)
{
+ //Never allow non-empty handles
+ Debug.Assert(heap != null);
+ Debug.Assert(initial != IntPtr.Zero);
+ Debug.Assert(elements > 0);
+
_pool = null;
_poolArr = null;
_heap = heap;
@@ -196,6 +202,32 @@ namespace VNLib.Utils.Memory
}
}
+ /// <summary>
+ /// Returns a <see cref="Span{T}"/> that represents the memory block pointed to by this handle
+ /// </summary>
+ /// <returns>The memory block that is held by the internl handle</returns>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public readonly Span<T> AsSpan() => Span;
+
+ /// <summary>
+ /// Returns a <see cref="Span{T}"/> that represents the memory block pointed to by this handle
+ /// </summary>
+ /// <param name="start">A starting element offset to return the span at</param>
+ /// <returns>The desired memory block at the desired element offset</returns>
+ /// <exception cref="ArgumentOutOfRangeException"></exception>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public readonly Span<T> AsSpan(int start) => Span[start..];
+
+ /// <summary>
+ /// Returns a <see cref="Span{T}"/> that represents the memory block pointed to by this handle
+ /// </summary>
+ /// <param name="start">The starting element offset</param>
+ /// <param name="length">The number of elements included in the returned span</param>
+ /// <returns>The desired memory block at the desired element offset and length</returns>
+ /// <exception cref="ArgumentOutOfRangeException"></exception>"
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public readonly Span<T> AsSpan(int start, int length) => Span.Slice(start, length);
+
///<inheritdoc/>
public readonly override int GetHashCode()
{
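With AsSpan now defined on the struct itself, the MemoryExtensions overloads are obsolete and callers can use the handle directly. A minimal sketch using the UnsafeAlloc extension touched in this same commit (disposal via using assumes the handle's usual release semantics):

using System;
using System.Buffers;
using VNLib.Utils.Extensions;
using VNLib.Utils.Memory;

using UnsafeMemoryHandle<byte> buffer = ArrayPool<byte>.Shared.UnsafeAlloc(128, zero: true);
Span<byte> all  = buffer.AsSpan();        // the whole block
Span<byte> head = buffer.AsSpan(0, 16);   // first 16 elements
head.Fill(0xAA);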
diff --git a/lib/Utils/src/Native/SafeLibraryHandle.cs b/lib/Utils/src/Native/SafeLibraryHandle.cs
index 5fb2283..4b4ead4 100644
--- a/lib/Utils/src/Native/SafeLibraryHandle.cs
+++ b/lib/Utils/src/Native/SafeLibraryHandle.cs
@@ -63,9 +63,8 @@ namespace VNLib.Utils.Native
{
//Get the method pointer
IntPtr nativeMethod = NativeLibrary.GetExport(handle, functionName);
- //Get the delegate for the function pointer
- T method = Marshal.GetDelegateForFunctionPointer<T>(nativeMethod);
- return new(this, method);
+ AdvancedTrace.WriteLine($"Loaded function '{functionName}' with address: 0x'{nativeMethod:x}'");
+ return new(this, Marshal.GetDelegateForFunctionPointer<T>(nativeMethod));
}
catch
{
@@ -90,6 +89,7 @@ namespace VNLib.Utils.Native
this.ThrowIfClosed();
//Get the method pointer
IntPtr nativeMethod = NativeLibrary.GetExport(handle, functionName);
+ AdvancedTrace.WriteLine($"Loaded function '{functionName}' with address: 0x'{nativeMethod:x}'");
//Get the delegate for the function pointer
return Marshal.GetDelegateForFunctionPointer<T>(nativeMethod);
}
@@ -97,6 +97,7 @@ namespace VNLib.Utils.Native
///<inheritdoc/>
protected override bool ReleaseHandle()
{
+ AdvancedTrace.WriteLine($"Releasing library handle: 0x'{handle:x}'");
//Free the library and set the handle as invalid
NativeLibrary.Free(handle);
SetHandleAsInvalid();
@@ -211,7 +212,9 @@ namespace VNLib.Utils.Native
NatveLibraryResolver resolver = new(libPath, assembly, searchPath);
- return resolver.ResolveAndLoadLibrary(out library);
+ bool success = resolver.ResolveAndLoadLibrary(out library);
+ AdvancedTrace.WriteLineIf(success, $"Loaded library '{libPath}' with address: 0x'{library?.DangerousGetHandle():x}'");
+ return success;
}
}
}
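The trace lines added here annotate each step of the native loading path. For orientation, the raw sequence the handle wraps looks roughly like this sketch (library name, export name, and delegate signature are all assumptions):

using System;
using System.Runtime.InteropServices;

internal static class NativeLoadSketch
{
    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
    private delegate int AddFn(int a, int b);   // assumed export signature

    public static int Demo()
    {
        IntPtr lib = NativeLibrary.Load("mymath");            // hypothetical library
        try
        {
            IntPtr export = NativeLibrary.GetExport(lib, "add");
            AddFn add = Marshal.GetDelegateForFunctionPointer<AddFn>(export);
            return add(1, 2);
        }
        finally
        {
            NativeLibrary.Free(lib);   // mirrors SafeLibraryHandle.ReleaseHandle
        }
    }
}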
diff --git a/lib/Utils/src/Resources/CallbackOpenHandle.cs b/lib/Utils/src/Resources/CallbackOpenHandle.cs
deleted file mode 100644
index 625bd45..0000000
--- a/lib/Utils/src/Resources/CallbackOpenHandle.cs
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-* Copyright (c) 2022 Vaughn Nugent
-*
-* Library: VNLib
-* Package: VNLib.Utils
-* File: CallbackOpenHandle.cs
-*
-* CallbackOpenHandle.cs is part of VNLib.Utils which is part of the larger
-* VNLib collection of libraries and utilities.
-*
-* VNLib.Utils is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published
-* by the Free Software Foundation, either version 2 of the License,
-* or (at your option) any later version.
-*
-* VNLib.Utils is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with VNLib.Utils. If not, see http://www.gnu.org/licenses/.
-*/
-
-using System;
-
-namespace VNLib.Utils.Resources
-{
- /// <summary>
- /// A concrete <see cref="OpenHandle"/> for a defered operation or a resource that should be released or unwound
- /// when the instance lifetime has ended.
- /// </summary>
- public sealed class CallbackOpenHandle : OpenHandle
- {
- private readonly Action ReleaseFunc;
- /// <summary>
- /// Creates a new generic <see cref="OpenHandle"/> with the specified release callback method
- /// </summary>
- /// <param name="release">The callback function to invoke when the <see cref="OpenHandle"/> is disposed</param>
- public CallbackOpenHandle(Action release) => ReleaseFunc = release;
- ///<inheritdoc/>
- protected override void Free() => ReleaseFunc();
- }
-} \ No newline at end of file
diff --git a/lib/Utils/src/Resources/ManagedLibrary.cs b/lib/Utils/src/Resources/ManagedLibrary.cs
index 786a22d..c899156 100644
--- a/lib/Utils/src/Resources/ManagedLibrary.cs
+++ b/lib/Utils/src/Resources/ManagedLibrary.cs
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2023 Vaughn Nugent
+* Copyright (c) 2024 Vaughn Nugent
*
* Library: VNLib
* Package: VNLib.Utils
@@ -25,7 +25,6 @@
using System;
using System.IO;
using System.Linq;
-using System.Threading;
using System.Reflection;
using System.Runtime.Loader;
using System.Collections.Generic;
@@ -43,7 +42,7 @@ namespace VNLib.Utils.Resources
{
private readonly AssemblyLoadContext _loadContext;
private readonly AssemblyDependencyResolver _resolver;
- private readonly Lazy<Assembly> _lazyAssembly;
+ private readonly LazyInitializer<Assembly> _lazyAssembly;
/// <summary>
/// The absolute path to the assembly file
@@ -53,7 +52,7 @@ namespace VNLib.Utils.Resources
/// <summary>
/// The assembly that is maintained by this loader
/// </summary>
- public Assembly Assembly => _lazyAssembly.Value;
+ public Assembly Assembly => _lazyAssembly.Instance;
/// <summary>
/// Initializes a new <see cref="ManagedLibrary"/> and skips
@@ -74,11 +73,15 @@ namespace VNLib.Utils.Resources
context.ResolvingUnmanagedDll += OnNativeLibraryResolving;
//Lazy load the assembly
- _lazyAssembly = new(LoadAssembly, LazyThreadSafetyMode.PublicationOnly);
+ _lazyAssembly = new(LoadAssembly);
}
//Load the assembly into the parent context
- private Assembly LoadAssembly() => _loadContext.LoadFromAssemblyPath(AssemblyPath);
+ private Assembly LoadAssembly()
+ {
+ AdvancedTrace.WriteLine($"Loading managed library {AssemblyPath} into context {_loadContext.Name}");
+ return _loadContext.LoadFromAssemblyPath(AssemblyPath);
+ }
/// <summary>
/// Raised when the load context that owns this assembly
@@ -91,6 +94,7 @@ namespace VNLib.Utils.Resources
/// </remarks>
protected virtual void OnUnload(AssemblyLoadContext? ctx = null)
{
+ AdvancedTrace.WriteLine($"Unloading managed library {AssemblyPath}");
//Remove resolving event handlers
_loadContext.Unloading -= OnUnload;
_loadContext.Resolving -= OnDependencyResolving;
@@ -111,6 +115,8 @@ namespace VNLib.Utils.Resources
//Resolve the desired asm dependency for the current context
string? requestedDll = _resolver.ResolveUnmanagedDllToPath(libname);
+ AdvancedTrace.WriteLineIf(requestedDll != null,$"Resolving native library {libname} to path {requestedDll} for library {AssemblyPath}");
+
//if the dep is resolved, search in the assembly directory for the managed dll only
return requestedDll == null ?
IntPtr.Zero :
@@ -122,6 +128,8 @@ namespace VNLib.Utils.Resources
//Resolve the desired asm dependency for the current context
string? desiredAsm = _resolver.ResolveAssemblyToPath(asmName);
+ AdvancedTrace.WriteLineIf(desiredAsm != null, $"Resolving managed assembly {asmName.Name} to path {desiredAsm} for library {AssemblyPath}");
+
//If the asm exists in the dir, load it
return desiredAsm == null ? null : _loadContext.LoadFromAssemblyPath(desiredAsm);
}
@@ -137,6 +145,8 @@ namespace VNLib.Utils.Resources
//See if the type is exported
Type exp = TryGetExportedType<T>() ?? throw new EntryPointNotFoundException($"Imported assembly does not export desired type {typeof(T).FullName}");
+ AdvancedTrace.WriteLine($"Creating instance of type {exp.FullName} from assembly {AssemblyPath}");
+
//Create instance
return (T)Activator.CreateInstance(exp)!;
}
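ManagedLibrary now depends on a LazyInitializer<Assembly> type in place of Lazy<Assembly>; only its factory constructor and Instance property are visible in this diff. A minimal sketch of a type with that surface, assuming publication-only semantics similar to the Lazy mode it replaces (illustrative, not VNLib's implementation):

using System;
using System.Threading;

public sealed class LazyInitializer<T>(Func<T> factory) where T : class
{
    private T? _value;

    public T Instance
    {
        get
        {
            // Publication-only: racing threads may each invoke the factory,
            // but only the first published result is kept.
            T? local = _value;
            if (local is null)
            {
                T created = factory();
                local = Interlocked.CompareExchange(ref _value, created, null) ?? created;
            }
            return local;
        }
    }
}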
diff --git a/lib/Utils/src/VNLib.Utils.csproj b/lib/Utils/src/VNLib.Utils.csproj
index bda2164..7941a7b 100644
--- a/lib/Utils/src/VNLib.Utils.csproj
+++ b/lib/Utils/src/VNLib.Utils.csproj
@@ -26,6 +26,10 @@
<PackageRequireLicenseAcceptance>True</PackageRequireLicenseAcceptance>
</PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'">
+ <DefineConstants>$(DefineConstants);VNLIB_ADVANCED_TRACING</DefineConstants>
+ </PropertyGroup>
+
<ItemGroup>
<None Include="..\README.md">
<Pack>True</Pack>
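Since the new VNLIB_ADVANCED_TRACING constant is only defined for Debug|AnyCPU, the AdvancedTrace calls survive in Debug builds and are stripped from Release. To actually observe the output, the hosting process still needs a trace listener, for example:

using System;
using System.Diagnostics;

Trace.Listeners.Add(new TextWriterTraceListener(Console.Out));
Trace.AutoFlush = true;   // flush each write so messages appear immediately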