Thursday, 31 December 2020

Simple property grid in Blazor

This article presents a simple property grid component for Blazor that I have made. The component relies on standard libraries such as Twitter Bootstrap, jQuery and Font Awesome. The repo URL shown here links to my GitHub repo, which can easily be forked if you want to add features (such as editing capabilities). The component already supports nested levels, so if the object you inspect has a hierarchical structure, this is shown in the Blazor component. Having a component to inspect objects in Blazor is great, since Blazor lacks inspection tools: because the app is compiled into a WebAssembly, we cannot easily inspect the state of objects in the app other than the DOM and JavaScript objects. With this component we get basic support for inspecting the state of whatever object in the app you want to look at. The GitHub repo also contains a bundled application that uses the component and shows a sample use case (also shown in the GIF below). I have tested the component with three levels of depth for a sample object (included in the repo). The component is available here in my GitHub repo:
 
 git clone https://github.com/toreaurstadboss/BlazorPropertyGrid.git
 
https://github.com/toreaurstadboss/BlazorPropertyGrid/tree/main/BlazorPropertyGrid
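Before diving into the implementation, here is a rough sketch of how the component is meant to be consumed from a page. The component tag name and the sample model below are assumptions of mine (see the bundled sample app in the repo for the actual usage):

@page "/inspect"

<PropertyGridComponent DataContext="@car" />

@code {
    // Any POCO works as DataContext - class-typed properties become expandable, nested rows.
    private readonly Car car = new Car
    {
        Make = "Volvo",
        Engine = new Engine { Cylinders = 4, DisplacementLitres = 2.3 }
    };

    public class Car
    {
        public string Make { get; set; }
        public Engine Engine { get; set; }
    }

    public class Engine
    {
        public int Cylinders { get; set; }
        public double DisplacementLitres { get; set; }
    }
}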
The component consists of two components, where one of them is used recursively to support nested object structures. The top-level component has this code-behind class.
PropertyGridComponentBase.cs
using System.Collections.Generic; using System.Reflection; using Microsoft.AspNetCore.Components; using Microsoft.AspNetCore.Components.Web; using Microsoft.JSInterop; namespace BlazorPropertyGridComponents.Components { public class PropertyGridComponentBase : ComponentBase { [Inject] public IJSRuntime JsRuntime { get; set; } [Parameter] public object DataContext { get; set; } public Dictionary<string, PropertyInfoAtLevelNodeComponent> Props { get; set; } public PropertyGridComponentBase() { Props = new Dictionary<string, PropertyInfoAtLevelNodeComponent>(); } protected override void OnParametersSet() { Props.Clear(); if (DataContext == null) return; Props["ROOT"] = MapPropertiesOfDataContext(string.Empty, DataContext, null); StateHasChanged(); } private bool IsNestedProperty(PropertyInfo pi) => pi.PropertyType.IsClass && pi.PropertyType.Namespace != "System"; private PropertyInfoAtLevelNodeComponent MapPropertiesOfDataContext(string propertyPath, object parentObject, PropertyInfo currentProp) { if (parentObject == null) return null; var publicProperties = parentObject.GetType() .GetProperties(BindingFlags.Public | BindingFlags.Instance); var propertyNode = new PropertyInfoAtLevelNodeComponent { PropertyName = currentProp?.Name ?? "ROOT", PropertyValue = parentObject, PropertyType = parentObject.GetType(), FullPropertyPath = TrimFullPropertyPath($"{propertyPath}.{currentProp?.Name}") ?? "ROOT", IsClass = parentObject.GetType().IsClass && parentObject.GetType().Namespace != "System" }; foreach (var p in publicProperties) { var propertyValue = p.GetValue(parentObject, null); if (!IsNestedProperty(p)) { propertyNode.SubProperties.Add(p.Name, new PropertyInfoAtLevelNodeComponent { IsClass = false, FullPropertyPath = TrimFullPropertyPath($"{propertyPath}.{p.Name}"), PropertyName = p.Name, PropertyValue = propertyValue, PropertyType = p.PropertyType //note - SubProperties are default empty if not nested property of course. } ); } else { //we need to add the sub property but recurse also call to fetch the nested properties propertyNode.SubProperties.Add(p.Name, new PropertyInfoAtLevelNodeComponent { IsClass = true, FullPropertyPath = propertyPath + p.Name, PropertyName = p.Name, PropertyValue = MapPropertiesOfDataContext(TrimFullPropertyPath($"{propertyPath}.{p.Name}"), propertyValue, p), PropertyType = p.PropertyType //note - SubProperties are default empty if not nested property of course. } ); } } return propertyNode; } protected void toggleExpandButton(MouseEventArgs e, string buttonId) { JsRuntime.InvokeVoidAsync("toggleExpandButton", buttonId); } private string TrimFullPropertyPath(string fullpropertypath) { if (string.IsNullOrEmpty(fullpropertypath)) return fullpropertypath; return fullpropertypath.TrimStart('.').TrimEnd('.'); } } }
And its razor file looks like this:
PropertyGridComponentBase.razor
@inherits PropertyGridComponentBase @using BlazorPropertyGridComponents.Components <table class="table table-striped col-md-4 col-lg-3 col-sm-6"> <thead> <tr> <th scope="col">Property</th> <th scope="col">Value</th> </tr> </thead> <tbody> @foreach (KeyValuePair<string, PropertyInfoAtLevelNodeComponent> prop in Props) { @if (!prop.Value.IsClass) { @* <tr> <td>@prop.Key</td> <td>@prop.Value</td> </tr>*@ } else { var currentNestedDiv = "currentDiv_" + prop.Key; var currentProp = prop.Value.PropertyValue; //must be a nested class property <tr> <td colspan="2"> <button type="button" id="@prop.Key" class="btn btn-info fas fa-minus" @onclick="(e) => toggleExpandButton(e,prop.Key)" data-toggle="collapse" data-target="#@currentNestedDiv"> </button> <div id="@currentNestedDiv" class="collapse show"> <PropertyRowComponent Depth="1" PropertyInfoAtLevel="@prop.Value" /> </div> </td> </tr> } } </tbody> </table> @code { }
We also have this helper class to model each property in the nested structure:
PropertyInfoAtLevelNodeComponent.cs
using System; using System.Collections.Generic; namespace BlazorPropertyGridComponents.Components { /// <summary> /// Node class for hierarchical structure of property info for an object of given object graph structure. /// </summary> public class PropertyInfoAtLevelNodeComponent { public PropertyInfoAtLevelNodeComponent() { SubProperties = new Dictionary<string, PropertyInfoAtLevelNodeComponent>(); } public string PropertyName { get; set; } public object PropertyValue { get; set; } public Type PropertyType { get; set; } public Dictionary<string, PropertyInfoAtLevelNodeComponent> SubProperties { get; private set; } public string FullPropertyPath { get; set; } public bool IsClass { get; set; } } }
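To make the mapping concrete, here is a hand-built example of the node structure that MapPropertiesOfDataContext produces for a small nested object (the hypothetical Car/Engine model from the usage sketch earlier; purely illustrative):

var root = new PropertyInfoAtLevelNodeComponent { PropertyName = "ROOT", FullPropertyPath = "ROOT", PropertyType = typeof(Car), IsClass = true };

// Simple (non-nested) property: the value is stored directly in PropertyValue.
root.SubProperties.Add("Make", new PropertyInfoAtLevelNodeComponent
{
    PropertyName = "Make", FullPropertyPath = "Make", PropertyType = typeof(string), PropertyValue = "Volvo", IsClass = false
});

// Nested class property: PropertyValue holds the recursively mapped node, mirroring MapPropertiesOfDataContext.
var engineNode = new PropertyInfoAtLevelNodeComponent { PropertyName = "Engine", FullPropertyPath = "Engine", PropertyType = typeof(Engine), IsClass = true };
engineNode.SubProperties.Add("Cylinders", new PropertyInfoAtLevelNodeComponent
{
    PropertyName = "Cylinders", FullPropertyPath = "Engine.Cylinders", PropertyType = typeof(int), PropertyValue = 4, IsClass = false
});
root.SubProperties.Add("Engine", new PropertyInfoAtLevelNodeComponent
{
    PropertyName = "Engine", FullPropertyPath = "Engine", PropertyType = typeof(Engine), PropertyValue = engineNode, IsClass = true
});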
The lower-level component used by the top-level component has this code-behind class:
PropertyRowComponentBase.cs
using System.Collections.Generic; using Microsoft.AspNetCore.Components; using Microsoft.AspNetCore.Components.Web; using Microsoft.JSInterop; namespace BlazorPropertyGridComponents.Components { public class PropertyRowComponentBase : ComponentBase { public PropertyRowComponentBase() { DisplayedFullPropertyPaths = new List<string>(); } [Parameter] public PropertyInfoAtLevelNodeComponent PropertyInfoAtLevel { get; set; } [Parameter] public int Depth { get; set; } [Parameter] public List<string> DisplayedFullPropertyPaths { get; set; } [Inject] protected IJSRuntime JsRunTime { get; set; } protected void toggleExpandButton(MouseEventArgs e, string buttonId) { JsRunTime.InvokeVoidAsync("toggleExpandButton", buttonId); } } }
The razor file looks like this:
PropertyRowComponent.razor
@using BlazorPropertyGridComponents.Components @inherits PropertyRowComponentBase @foreach (var item in PropertyInfoAtLevel.SubProperties.Keys) { var propertyInfoAtLevel = PropertyInfoAtLevel.SubProperties[item]; if (propertyInfoAtLevel != null) { @* if (DisplayedFullPropertyPaths.Contains(propertyInfoAtLevel.FullPropertyPath)){ continue; //the property is already displayed. }*@ DisplayedFullPropertyPaths.Add(propertyInfoAtLevel.FullPropertyPath); @* <span class="text-white bg-dark">@propertyInfoAtLevel.FullPropertyPath</span>*@ @* <em> @propertyInfoAtLevel </em>*@ } if (!propertyInfoAtLevel.PropertyType.IsClass || propertyInfoAtLevel.PropertyType.Namespace.StartsWith("System")) { <tr> <td> <span title="@propertyInfoAtLevel.FullPropertyPath" class="font-weight-bold">@propertyInfoAtLevel.PropertyName</span> </td> <td> <span>@propertyInfoAtLevel.PropertyValue</span> </td> </tr> } else if (propertyInfoAtLevel.PropertyValue != null && propertyInfoAtLevel.PropertyValue is PropertyInfoAtLevelNodeComponent) { var nestedLevel = (PropertyInfoAtLevelNodeComponent)propertyInfoAtLevel.PropertyValue; var collapseOrNotCssClass = Depth == 0 ? "collapse show" : "collapse"; var curDepth = Depth + 1; collapseOrNotCssClass += " depth" + Depth; var currentNestedDiv = "collapsingdiv_" + propertyInfoAtLevel.PropertyName; //must be a nested class property <tr> <td colspan="2"> <span>@propertyInfoAtLevel.PropertyName</span> <button id="@propertyInfoAtLevel.FullPropertyPath" type="button" @onclick="(e) => toggleExpandButton(e,propertyInfoAtLevel.FullPropertyPath)" class="fas btn btn-info fa-plus" data-toggle="collapse" data-target="#@currentNestedDiv"></button> <div id="@currentNestedDiv" class="@collapseOrNotCssClass"> <PropertyRowComponent PropertyInfoAtLevel="@nestedLevel" Depth="@curDepth" /> </div> </td> </tr> } } @code { }

Monday, 28 December 2020

More fun with Blazor - creating a folder viewer in 10 minutes

 
   git clone https://github.com/toreaurstadboss/BlazorLiveReloadSample.git 
 
Blazor is very easy to use! I spent 10 minutes creating this simple folder viewer.
First off, the Blazor razor component looks like this:
FileView.razor
@inject IJSRuntime jsRuntime;
@using System.IO
@using System.Linq

<h3>FileView</h3>

@foreach (var folder in folders)
{
    var depthOfFolder = folder.Split('\\').Count();
    <p style="cursor:pointer" @onclick="() => openFolder(folder)">
        @for (int i = 0; i < depthOfFolder; i++)
        {
            <span style="margin-left:10px"></span>
        }
        <i style="color:orange;cursor:pointer" class="fa fa-folder" /> @folder
    </p>
}

@code {

    List<string> folders = Directory.GetDirectories("/").ToList();

    private void openFolder(string folder)
    {
        jsRuntime.InvokeVoidAsync("log", folder);
        string[] subfolders = Directory.GetDirectories(folder);
        Console.WriteLine(folder);
        int folderIndex = folders.IndexOf(folder);
        //insert the subfolders right after the clicked folder in the list
        folders.InsertRange(folderIndex + 1, subfolders);
    }
}
We also add Font Awesome to the solution: right-click the project in Solution Explorer and choose Add => Client-Side Library, search for 'font-awesome', choose Font Awesome and let all files be added to the lib/font-awesome folder of wwwroot. Then, at the bottom of _Host.cshtml, we add:
 
_Host.cshtml
<link rel="stylesheet" href="~/lib/font-awesome/css/all.css" />
Now we have access to the Font Awesome icons.

Live reloads for Blazor - and a simple clock component

This article will test out Blazor. I had some difficulties getting live reload to work, but I got it working in Visual Studio 2019 with the Blazor ASP.NET Core project template. We will also create a very simple component (a clock) that calls a JavaScript function from C#. You can clone my simple sample app from GitHub like this:
 
 
 git clone https://github.com/toreaurstadboss/BlazorLiveReloadSample.git
 
 
First off, we add the following to _Host.cshtml:
 
_Host.cshtml
<script src="js/script.js"></script> <script src="_framework/blazor.server.js"></script> <script> Blazor.defaultReconnectionHandler._reconnectCallback = function (d) { document.location.reload(); } </script>
The Blazor.defaultReconnectionHandler._reconnectCallback is set to reload the document location. This makes the page reload when you edit the razor files of the Blazor app. You will see this as a temporary recompile step - give it some 5 seconds in a simple app.
Let's for fun add a clock component also. Add to the Shared folder the file Clock.razor.
 
Clock.razor
@inject IJSRuntime JsRunTime
@implements IDisposable

<p>The time is now:</p>
<div @ref="timeDiv">00:00:00</div>

@code {

    ElementReference timeDiv;

    protected override async Task OnAfterRenderAsync(bool firstRender)
    {
        if (firstRender)
        {
            await JsRunTime.InvokeVoidAsync("startTime", timeDiv);
        }
    }

    public void Dispose()
    {
        JsRunTime.InvokeVoidAsync("stopTime");
    }
}
We also have the script.js file in wwwroot to hold the JavaScript (Blazor razor files don't like JS in the component itself; just make sure to add the JS somewhere in wwwroot instead and load it up). As you can see, we inject the IJSRuntime with the @inject directive in the razor file (rhymes a bit). This allows us to call client-side code from the C# code. We start the clock with a setTimeout and stop it with a clearTimeout.
 
script.js
var clock;

function startTime(element) {
    let timeString = new Date().toLocaleTimeString('nb-No', {
        hour: 'numeric',
        hour12: false,
        minute: 'numeric',
        second: 'numeric'
    });
    element.innerHTML = timeString;
    clock = setTimeout(startTime.bind(null, element), 1000);
}

function stopTime() {
    clearTimeout(clock);
}

Sunday, 27 December 2020

GraphQL in Asp.Net Core - Creating a flexible API

More and more .NET developers have heard about GraphQL. It started as an in-house project at Facebook in 2012 to provide a flexible way of sending customized data to mobile clients. Giving the clients the possibility to query for tailored data meant sending less data over the wire to the mobiles and using less bandwidth. As cell phones move over to 5G networks this matters less and less (at least in urban areas with good base station coverage), but we should of course always seek to optimize our data transfer, as bandwidth usage is always worth optimizing. An added dimension is the reduced cost of creating APIs, since we can tailor our data needs. Instead of creating methods that first return lookup ids and then querying for the entire data objects, we can project only the data we need to present data on the mobile clients in a meaningful way. Whatever floats your boat when it comes to showing interest in GraphQL, this article will discuss how you can get started with GraphQL in ASP.NET Core. I have prepared a demo here:
 
  https://github.com/toreaurstadboss/GenericMemoryCacheAspNetCore.git
 
The demo repository shows a list of the tallest mountains in the municipalities of Norway. Norway is a land of mountains, and it is always good to know which mountain is the very tallest in the municipality you are visiting! (I enjoy mountain climbing and hiking now and then in my spare time.) The demo page shows a text area where you can customize the data to load. Of course we can only load the data provided for us. We can also use the GraphQL UI playground that is added for us here too:
First off, we need to grab some NuGet packages for GraphQL. We will be using ASP.NET Core 3.1 in this article.
 
        <PackageReference Include="GraphQL" Version="2.4.0" />
	<PackageReference Include="GraphQL.Server.Transports.AspNetCore" Version="3.4.0" />
	<PackageReference Include="GraphQL.Server.Transports.WebSockets" Version="3.4.0" />
	<PackageReference Include="GraphQL.Server.Ui.Playground" Version="3.5.0-alpha0046" />  
 
Then we need to specify in our Startup class the needed setup.
 
Startup.cs
using AspNetCore_GraphQLDemo.GraphQL; using AspNetCore_GraphQLDemo.GraphQL.Messaging; using Data; using Data.Repositories; using GraphQL; using GraphQL.Server; using GraphQL.Server.Ui.Playground; using Microsoft.AspNetCore.Builder; using Microsoft.AspNetCore.Diagnostics; using Microsoft.AspNetCore.Hosting; using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.WebSockets; using Microsoft.EntityFrameworkCore; using Microsoft.Extensions.Configuration; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; using Newtonsoft.Json; namespace AspNetCore_GraphQLDemo { public class Startup { private readonly IWebHostEnvironment _env; public Startup(IConfiguration configuration, IWebHostEnvironment env) { _env = env; Configuration = configuration; } public IConfiguration Configuration { get; } // This method gets called by the runtime. Use this method to add services to the container. public void ConfigureServices(IServiceCollection services) { // If using IIS: services.Configure<IISServerOptions>(options => { options.AllowSynchronousIO = true; }); services.AddControllersWithViews(); services.AddHttpContextAccessor(); services.AddRazorPages().AddRazorRuntimeCompilation(); services.AddDbContext<MountainDbContext>(options => { options.UseSqlServer(Configuration.GetConnectionString("DefaultConnection")); }); services.AddScoped<IMountainRepository, MountainRepository>(); services.AddScoped<IDependencyResolver>(s => new FuncDependencyResolver(s.GetRequiredService)); services.AddScoped<MountainSchema>(); services.AddSingleton<MountainMessageService>(); services.AddSingleton<MountainDetailsDisplayedMessageService>(); services.AddGraphQL(x => { x.EnableMetrics = true; x.ExposeExceptions = _env.IsDevelopment(); x.SetFieldMiddleware = true; }).AddGraphTypes(ServiceLifetime.Scoped) .AddUserContextBuilder(httpContext => httpContext.User) .AddDataLoader() .AddWebSockets(); services.AddCors(options => { options.AddPolicy(name: "MyAllowSpecificOrigins", builder => { builder.AllowAnyOrigin().AllowAnyMethod(); }); }); } //static IEnumerable<Type> GetGraphQlTypes() //{ // return typeof(Startup).Assembly // .GetTypes() // .Where(x => !x.IsAbstract && // (typeof(IObjectGraphType).IsAssignableFrom(x) || // typeof(IInputObjectGraphType).IsAssignableFrom(x))); //} // This method gets called by the runtime. Use this method to configure the HTTP request pipeline. public void Configure(IApplicationBuilder app, IWebHostEnvironment env) { if (env.IsDevelopment()) { app.UseDeveloperExceptionPage(); app.UseBrowserLink(); } app.UseExceptionHandler(errorApp => { errorApp.Run(async context => { context.Response.Redirect("/Error"); context.Response.StatusCode = 500; var exceptionHandlerPathFeature = context.Features.Get<IExceptionHandlerPathFeature>(); var exception = exceptionHandlerPathFeature.Error; var result = JsonConvert.SerializeObject(new { error = exception.Message }); context.Response.ContentType = "application/json"; await context.Response.WriteAsync(result); }); }); app.UseStaticFiles(); app.UseRouting(); app.UseCors("MyAllowSpecificOrigins"); app.UseWebSockets(); app.UseGraphQLWebSockets<MountainSchema>("/graphql"); //app.UseAuthorization(); app.UseEndpoints(endpoints => { endpoints.MapDefaultControllerRoute(); }); app.UseGraphQL<MountainSchema>(); if (env.IsDevelopment()) { app.UseGraphQLPlayground(new GraphQLPlaygroundOptions { }); } } } }
In the ConfigureServices method above we register the schema for our GraphQL API.
 
 
    services.AddScoped<MountainSchema>(); 
 
 
We also add GraphQL itself and set up WebSockets (which are needed for GraphQL subscriptions).
 
       services.AddGraphQL(x =>
                {
                    x.EnableMetrics = true; x.ExposeExceptions = _env.IsDevelopment(); x.SetFieldMiddleware = true; }).AddGraphTypes(ServiceLifetime.Scoped)
            .AddUserContextBuilder(httpContext => httpContext.User)
            .AddDataLoader()
            .AddWebSockets(); 
 
Just as a side note, you will want to add CORS too:
 
     services.AddCors(options =>
            {
                options.AddPolicy(name: "MyAllowSpecificOrigins",
                    builder =>
                    {
                        builder.AllowAnyOrigin().AllowAnyMethod();
                    });
            });
 
Inside the Configure method we also add the following to enable GraphQL:
 
           app.UseCors("MyAllowSpecificOrigins");

            app.UseWebSockets();

            app.UseGraphQLWebSockets<MountainSchema>("/graphql");

            //app.UseAuthorization();

            app.UseEndpoints(endpoints =>
            {
                endpoints.MapDefaultControllerRoute();
            });

            app.UseGraphQL<MountainSchema>();
            if (env.IsDevelopment())
            {
                app.UseGraphQLPlayground(new GraphQLPlaygroundOptions
                {
                    
                });
            }
        }  
 
Our MountainSchema looks like this:
MountainSchema.cs
using AspNetCore_GraphQLDemo.GraphQL.Types; using AspNetCore_GraphQLDemo.GraphQL.Types.Directives; using GraphQL; using GraphQL.Instrumentation; using GraphQL.Types; namespace AspNetCore_GraphQLDemo.GraphQL { public class MountainSchema : Schema { public MountainSchema(IDependencyResolver resolver) : base(resolver) { Query = resolver.Resolve<MountainQuery>(); Mutation = resolver.Resolve<MountainMutation>(); Subscription = resolver.Resolve<MountainSubscription>(); RegisterDirective(new LowercaseDirective()); RegisterDirective(new OrderbyDirective()); var builder = new FieldMiddlewareBuilder(); builder.Use<LowercaseFieldsMiddleware>(); builder.ApplyTo(this); builder.Use(next => { return context => { return next(context).ContinueWith(x => { var c = context; var result = x.Result; result = OrderbyQuery.OrderIfNecessary(context, result); return result; }); }; }); builder.ApplyTo(this); //builder.Use<CustomGraphQlExecutor<MountainSchema>>(); //builder.ApplyTo(this); } } }
We pass an IDependencyResolver (dependency injection!) into the constructor and resolve the classes we need (we inherit from the Schema class). We wire up our schema here to the Query, Mutation and Subscription we want, and register directives. Here is how the Query property is set:
 
MountainQuery.cs
using AspNetCore_GraphQLDemo.GraphQL.Types; using Data; using Data.Repositories; using GraphQL.Types; namespace AspNetCore_GraphQLDemo.GraphQL { public class MountainQuery : ObjectGraphType { public MountainQuery(IMountainRepository mountainRepository) { Field<ListGraphType<MountainType>>("mountains", resolve: context => mountainRepository.GetAll() ); FieldAsync<MountainType>("mountain", arguments: new QueryArguments(new QueryArgument<NonNullGraphType<MountainIdInputType>> {Name = "id"}), resolve: async context => { var mountain = context.GetArgument<MountainInfo>("id"); var mountainFromDb = await mountainRepository.GetById(mountain.Id); return mountainFromDb; }); //FieldAsync<MountainType>("selectmountain", // arguments: new QueryArguments(new QueryArgument(typeof(int)) { Name = "id" }), // resolve: async context => // { // var mountain = context.GetArgument<MountainInfo>("id"); // var mountainFromDb = await mountainRepository.GetById(mountain.Id); // return mountainFromDb; // }); //sadly, we need to inherit from IGraphType and cannot just have simple scalar arguments in GraphQL.Net.. } } }
As you can see, we can define multiple queries. We inherit from ObjectGraphType and pass in an IMountainRepository. This is an interface for your repository, which fetches data via Entity Framework Core; you can then load data into GraphQL from the local database (the demo uses SQL Server (SQLEXPRESS)) in a simple manner just by providing the repo via dependency injection. We define our queries via the Field and FieldAsync methods (note the field names given as strings - these are the names we use in GraphQL queries against the schema), and the resolve lambda tells how the data is to be fetched. We can also specify arguments. The "mountain" FieldAsync method accepts arguments via the arguments parameter, and this gives us parameterized access to our data.
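The MountainType (and the MountainIdInputType/MountainInputType input types) referenced above are part of the repo but not shown here. A rough sketch of what MountainType could look like in GraphQL.NET 2.4 - the property names are an assumption of mine, inferred from the client query used later in this article:

using GraphQL.Types;
using Data;

namespace AspNetCore_GraphQLDemo.GraphQL.Types
{
    //Sketch only - the actual type in the repo may expose more or other fields.
    public class MountainType : ObjectGraphType<MountainInfo>
    {
        public MountainType()
        {
            Field(x => x.Id);
            Field(x => x.County);
            Field(x => x.Muncipiality);
            Field(x => x.OfficialName);
            Field(x => x.ReferencePoint);
        }
    }
}

Over to the Subscription property. It looks like this: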
 
using AspNetCore_GraphQLDemo.GraphQL.Messaging;
using AspNetCore_GraphQLDemo.GraphQL.Types;
using GraphQL.Resolvers;
using GraphQL.Types;

namespace AspNetCore_GraphQLDemo.GraphQL
{
    public class MountainSubscription : ObjectGraphType
    {
        public MountainSubscription(MountainDetailsDisplayedMessageService mountainDetailsDisplayedMessageService)
        {
            Name = "Subscription";
            AddField(new EventStreamFieldType
            {
                Name = "detailsDisplayed",
                Type = typeof(MountainDetailsMessageType),
                Resolver = new FuncFieldResolver<MountainDetailsMessage>(c => c.Source as MountainDetailsMessage),
                Subscriber = new EventStreamResolver<MountainDetailsMessage>(c => mountainDetailsDisplayedMessageService.GetMessages())
            });
        }
    }
}
 
 
Here we inherit from ObjectGraphType (as we did for the Query) and we use the MountainDetailsDisplayedMessageService, which was added as a (concrete class) singleton in Startup.cs. The message service uses Reactive Extensions (Rx.NET) server-side to handle the pub-sub pattern for the subscribers - we are using System.Reactive.Subjects here.
 
MountainDetailsDisplayedMessageService.cs
using System;
using System.Reactive.Linq;
using System.Reactive.Subjects;

namespace AspNetCore_GraphQLDemo.GraphQL.Messaging
{
    public class MountainDetailsDisplayedMessageService
    {
        private readonly ISubject<MountainDetailsMessage> _messageStream = new ReplaySubject<MountainDetailsMessage>(1);

        public MountainDetailsMessage AddMountainDetailsMessage(int id)
        {
            var message = new MountainDetailsMessage
            {
                Id = id
            };
            _messageStream.OnNext(message);
            return message;
        }

        public IObservable<MountainDetailsMessage> GetMessages()
        {
            return _messageStream.AsObservable();
        }
    }
}
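For the subscription to emit anything, something must push messages onto the stream. In the demo this is assumed to happen when mountain details are displayed; a hypothetical controller action illustrating the idea (the action name matches the /home/mountaindetails link used by the client-side code later, but treat the exact signature as an assumption):

using Microsoft.AspNetCore.Mvc;
using AspNetCore_GraphQLDemo.GraphQL.Messaging;

public class HomeController : Controller
{
    private readonly MountainDetailsDisplayedMessageService _detailsDisplayedMessageService;

    public HomeController(MountainDetailsDisplayedMessageService detailsDisplayedMessageService)
    {
        _detailsDisplayedMessageService = detailsDisplayedMessageService;
    }

    public IActionResult MountainDetails(int id)
    {
        //push a message onto the ReplaySubject - the "detailsDisplayed" subscription streams it to connected GraphQL clients
        _detailsDisplayedMessageService.AddMountainDetailsMessage(id);
        return View(id);
    }
}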
The mutation looks like this:
MountainMutation.cs
using AspNetCore_GraphQLDemo.GraphQL.Messaging; using AspNetCore_GraphQLDemo.GraphQL.Types; using Data; using Data.Repositories; using GraphQL.Types; namespace AspNetCore_GraphQLDemo.GraphQL { public class MountainMutation : ObjectGraphType { public MountainMutation(IMountainRepository mountainRepository, MountainMessageService mountainMessageService) { FieldAsync<MountainType>("createMountain", arguments: new QueryArguments( new QueryArgument<NonNullGraphType<MountainInputType>> {Name = "mountain"}), resolve: async context => { var mountain = context.GetArgument<MountainInfo>("mountain"); await mountainRepository.AddMountain(mountain); mountainMessageService.AddMountainAddedMessage(mountain); return mountain; }); FieldAsync<MountainType>("removeMountain", arguments: new QueryArguments( new QueryArgument<NonNullGraphType<MountainIdInputType>> { Name = "id" }), resolve: async context => { var mountain = context.GetArgument<MountainInfo>("id"); await mountainRepository.RemoveMountain(mountain.Id); return mountain; }); } } }
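The MountainMessageService injected above is not shown in this article; it is assumed to follow the same Rx pattern as MountainDetailsDisplayedMessageService, publishing a message whenever a mountain is added. A rough sketch (the payload type used for the stream is an assumption):

using System;
using System.Reactive.Linq;
using System.Reactive.Subjects;
using Data;

namespace AspNetCore_GraphQLDemo.GraphQL.Messaging
{
    //Sketch only - mirrors MountainDetailsDisplayedMessageService above.
    public class MountainMessageService
    {
        private readonly ISubject<MountainInfo> _messageStream = new ReplaySubject<MountainInfo>(1);

        public MountainInfo AddMountainAddedMessage(MountainInfo mountain)
        {
            _messageStream.OnNext(mountain);
            return mountain;
        }

        public IObservable<MountainInfo> GetMessages()
        {
            return _messageStream.AsObservable();
        }
    }
}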
We can create a mountain like this in GraphQL Query:
 
 mutation {
  createMountain(mountain: {
   county: "Svalbard"
  muncipiality: "Svalbard"
  officialName: "Newtontoppen"
  referencePoint: "Isbjønn på toppen"
  comments: "Husk rask snøskuter",
  metresAboveSeaLevel: "1713",
  primaryFactor: "1713"
  }) {    
    id
  }
} 
  
 
And we can remove a mountain (don't we all?) like this:
 

# Write your query or mutation here
mutation {
  removeMountain(id: {
    id: 370
  }) { id }
}
  
 
If you clone the repo you will find more source code concerning directives such as lowercase and sorting. As you saw in MountainSchema, I use the FieldMiddlewareBuilder to do the sorting, as this needs to tap deeper into the GraphQL.NET pipeline. We also need some more code - for the client side, of course. The client-side code relies on the Apollo Client library like this:
 
index.cshtml
<script src="https://unpkg.com/apollo-client-browser@1.7.0"></script>
The libman.json file of the demo solution (the counterpart to package.json when it comes to specifying client-side libraries in .NET Core MVC solutions) looks like this:
 
libman.json
{ "version": "1.0", "defaultProvider": "cdnjs", "libraries": [ { "library": "twitter-bootstrap@4.2.1", "destination": "wwwroot/lib/bootstrap", "files": [ "js/bootstrap.bundle.js", "css/bootstrap.min.css" ] }, { "library": "jquery@3.3.1", "destination": "wwwroot/lib/jquery", "files": [ "jquery.min.js" ] }, { "provider": "unpkg", "library": "font-awesome@4.7.0", "destination": "wwwroot/lib/font-awesome/" }, { "provider": "unpkg", "library": "toastr@2.1.4", "destination": "wwwroot/lib/toastr/" } ] }
We then need some client side code to load data from GraphQL server of ours.
 
  <script>

    function LoadGraphQLDataIntoUi(result) {

        var tableBody = $("#mountainsTableBody");
        tableBody.empty();

        var tableHeaderRow = $("#mountainsTableHeaderRow");
        tableHeaderRow.empty();

        var rowIndex = 0;

        result.data.mountains.forEach(mountain => {

            if (rowIndex == 0) {
                Object.keys(mountain).forEach(key => {
                    if (key === '__typename') {
                        return;
                    }
                    tableHeaderRow.append(`<th>${key}</th>`);
                });;
            }

            tableBody.append('<tr>');

            Object.keys(mountain).forEach(key => {
                if (key === '__typename') {
                    return;
                }
                if (key === 'id') {
                    tableBody.append(`<td><a href='/home/mountaindetails/?id=${mountain[key]}'><i class='fa fa-arrow-right'></i></a> ${mountain[key]}</td>`);
                    return;
                }
                tableBody.append(`<td>${mountain[key]}</td>`);

            });

            tableBody.append('</tr>');

            rowIndex++;

        });

        toastr.success('Loaded GraphQL data from server into the UI successfully.');


    }

    $("#btnConnect").click(function () {
        ConnectDemo();

    });


    $("#btnLoadData").click(function () {
        var gqlQueryContents = $("#GraphQLQuery").val();
        LoadGraphQLData(gqlQueryContents, LoadGraphQLDataIntoUi);
        toastr.info('Retrieving data from API using GraphQL.');
    });

    $(document).ready(function () {

        console.log('loading');

        var initialQuery = `
                {
                    mountains {
                        id
                        fylke: county
                        kommune: muncipiality
                        hoydeOverHavet: calculatedMetresAboveSeaLevel
                        offisieltNavn: officialName
                        primaerfaktor: calculatedPrimaryFactor
                        referansePunkt: referencePoint
                    }
                }`;

        $("#GraphQLQuery").val(initialQuery);

    });

</script>
 
 
And then a method using Apollo client lib to load the data:
 
 /**
 * Loads GraphQL data specified by the query expression and passes the result to the callBackFunction.
 * callBackFunction should be a JS function that accepts one parameter, preferably called result, which is an object
 * that contains a result.data object.
 */
function LoadGraphQLData(gqlQuery, callBackFunction) {

    var apolloClient = new Apollo.lib.ApolloClient({
        networkInterface: Apollo.lib.createNetworkInterface({
            uri: 'http://localhost:2542/graphql',
            transportBatching: true,
        }), connectToDevTools: true
    });
    var query = Apollo.gql(gqlQuery);

    apolloClient.query({
        query: query,
        variables: {}
    }).then(result => {
        callBackFunction(result);
    }).catch(error => {
        //debugger
        toastr.error(error, 'GraphQL loading failed');
    });
}
 

Friday, 25 December 2020

Generic memory cache and middleware in Asp.Net Core

In this article I will present code that adds functionality on top of IMemoryCache in ASP.NET Core, or in .NET Core in general. The code has been tested in ASP.NET Core 3.1. I have built a generic memory cache and middleware for adding, removing and listing cached values. Usually you do not want to expose caching through a public API, but perhaps your API resides in a safe(r) intranet zone and you want to cache different objects. This article will teach you the principles of building a generic memory cache for (ASP).NET Core and of wiring cache functionality up to REST API(s). The code of this article is available on GitHub:

 git clone https://github.com/toreaurstadboss/GenericMemoryCacheAspNetCore.git

We start with our Generic Memory cache. It has some features:
  • The primary feature is to offer generic functionality and STRONGLY TYPED access to the IMemoryCache
  • Strongly typed access means you can use the (memory) cache as a repository and easily add, remove, update and get multiple items in a strongly typed fashion, including compound objects (class instances, nested objects - whatever you want here; keeping it serializable to JSON is highly suggested if you want to use the generic memory cache together with REST APIs)
  • You add homogeneous objects of the same type to a prefixed part of the cache (using prefixed keys) to help avoid collisions in the same process
  • If you add the same key twice, the item will not be added again - you must update instead
  • Additional methods exist for removing, updating and clearing the memory cache.
  • The generic memory cache wraps IMemoryCache in ASP.NET Core, which does the actual in-memory caching on the workstation or server your application runs on.
GenericMemoryCache.cs
using Microsoft.Extensions.Caching.Memory; using Microsoft.Extensions.Primitives; using System; using System.Collections.Generic; using System.Diagnostics; using System.Linq; using System.Threading; namespace SomeAcme.SomeUtilNamespace { /// <summary> /// Thread safe memory cache for generic use - wraps IMemoryCache /// </summary> /// <typeparam name="TCacheItemData">Payload to store in the memory cache</typeparam> /// multiple paralell importing sessions</remarks> public class GenericMemoryCache<TCacheItemData> : IGenericMemoryCache<TCacheItemData> { private readonly string _prefixKey; private readonly int _defaultExpirationInSeconds; private static readonly object _locker = new object(); public GenericMemoryCache(IMemoryCache memoryCache, string prefixKey, int defaultExpirationInSeconds = 0) { defaultExpirationInSeconds = Math.Abs(defaultExpirationInSeconds); //checking if a negative value was passed into the constructor. _prefixKey = prefixKey; Cache = memoryCache; _defaultExpirationInSeconds = defaultExpirationInSeconds; } /// <summary> /// Cache object if direct access is desired. Only allow exposing this for inherited types. /// </summary> protected IMemoryCache Cache { get; } public string PrefixKey(string key) => $"{_prefixKey}_{key}"; //to avoid IMemoryCache collisions with other parts of the same process, each cache key is always prefixed with a set prefix set by the constructor of this class. /// <summary> /// Adds an item to memory cache /// </summary> /// <param name="key"></param> /// <param name="itemToCache"></param> /// <returns></returns> public bool AddItem(string key, TCacheItemData itemToCache) { try { if (!key.StartsWith(_prefixKey)) key = PrefixKey(key); lock (_locker) { if (!Cache.TryGetValue(key, out TCacheItemData existingItem)) { var cts = new CancellationTokenSource(_defaultExpirationInSeconds > 0 ? _defaultExpirationInSeconds * 1000 : -1); var cacheEntryOptions = new MemoryCacheEntryOptions().AddExpirationToken(new CancellationChangeToken(cts.Token)); Cache.Set(key, itemToCache, cacheEntryOptions); return true; } } return false; //Item not added, the key already exists } catch (Exception err) { Debug.WriteLine(err); return false; } } public virtual List<T> GetValues<T>() { lock (_locker) { var values = Cache.GetValues<ICacheEntry>().Where(c => c.Value is T).Select(c => (T)c.Value).ToList(); return values; } } /// <summary> /// Retrieves a cache item. Possible to set the expiration of the cache item in seconds. 
/// </summary> /// <param name="key"></param> /// <returns></returns> public TCacheItemData GetItem(string key) { try { if (!key.StartsWith(_prefixKey)) key = PrefixKey(key); lock (_locker) { if (Cache.TryGetValue(key, out TCacheItemData cachedItem)) { return cachedItem; } } return default(TCacheItemData); } catch (Exception err) { Debug.WriteLine(err); return default(TCacheItemData); } } public bool SetItem(string key, TCacheItemData itemToCache) { try { if (!key.StartsWith(_prefixKey)) key = PrefixKey(key); lock (_locker) { if (GetItem(key) != null) { AddItem(key, itemToCache); return true; } UpdateItem(key, itemToCache); } return true; } catch (Exception err) { Debug.WriteLine(err); return false; } } /// <summary> /// Updates an item in the cache and set the expiration of the cache item /// </summary> /// <param name="key"></param> /// <param name="itemToCache"></param> /// <returns></returns> public bool UpdateItem(string key, TCacheItemData itemToCache) { if (!key.StartsWith(_prefixKey)) key = PrefixKey(key); lock (_locker) { TCacheItemData existingItem = GetItem(key); if (existingItem != null) { //always remove the item existing before updating RemoveItem(key); } AddItem(key, itemToCache); } return true; } /// <summary> /// Removes an item from the cache /// </summary> /// <param name="key"></param> /// <returns></returns> public bool RemoveItem(string key) { if (!key.StartsWith(_prefixKey)) key = PrefixKey(key); lock (_locker) { if (Cache.TryGetValue(key, out var item)) { if (item != null) { } Cache.Remove(key); return true; } } return false; } public void AddItems(Dictionary<string, TCacheItemData> itemsToCache) { foreach (var kvp in itemsToCache) AddItem(kvp.Key, kvp.Value); } /// <summary> /// Clear all cache keys starting with known prefix passed into the constructor. /// </summary> public void ClearAll() { lock (_locker) { List<string> cacheKeys = Cache.GetKeys<string>().Where(k => k.StartsWith(_prefixKey)).ToList(); foreach (string cacheKey in cacheKeys) { if (cacheKey.StartsWith(_prefixKey)) Cache.Remove(cacheKey); } } } } }
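Here is a quick standalone sketch of the cache in use; the key names, prefix and expiration are just examples (the usings are the same as in GenericMemoryCache.cs above):

var cache = new GenericMemoryCache<string>(new MemoryCache(new MemoryCacheOptions()), "DEMO", 600);

bool added = cache.AddItem("greeting", "Hello cache");         //true  - stored under the prefixed key "DEMO_greeting"
bool addedAgain = cache.AddItem("greeting", "Hello again");    //false - the key already exists, use UpdateItem instead
string value = cache.GetItem("greeting");                      //"Hello cache"
bool updated = cache.UpdateItem("greeting", "Hello again");    //removes the existing entry and re-adds it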
There are different ways of making use of the generic memory cache above. The simplest use-case is to instantiate it in a controller and add cache items as needed. As you can see, the generic memory cache offers strongly typed access to the memory cache. Let's look at how we can register the memory cache as a service too.
startup.cs
// This method gets called by the runtime. Use this method to add services to the container.
public void ConfigureServices(IServiceCollection services)
{
    services.AddControllers();
    services.AddMemoryCache();
    services.AddSingleton<GenericMemoryCache<WeatherForecast>>(genmen =>
        new GenericMemoryCache<WeatherForecast>(new MemoryCache(new MemoryCacheOptions()), "WEATHER_FORECASTS", 120));
}
In the sample above we register the memory cache as a singleton (the memory is shared either way, so a transient or scoped generic memory cache would be less logical). We can then inject it like this:
WeatherForecastController.cs
private readonly GenericMemoryCache<WeatherForecast> _genericMemoryCache;

public WeatherForecastController(ILogger<WeatherForecastController> logger, GenericMemoryCache<WeatherForecast> genericMemoryForecast)
{
    _logger = logger;
    _genericMemoryCache = genericMemoryForecast;
    if (_logger != null) { }
}
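A hypothetical action showing the injected, strongly typed cache in use (the key name is just an example):

[HttpGet]
public IEnumerable<WeatherForecast> Get()
{
    var cached = _genericMemoryCache.GetItem("forecast-of-the-day");
    if (cached != null)
    {
        return new[] { cached }; //served from the memory cache
    }

    var forecast = new WeatherForecast(); //build or fetch the forecast as usual
    _genericMemoryCache.AddItem("forecast-of-the-day", forecast);
    return new[] { forecast };
}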
This way of injecting the generic memory cache is cumbersome, since we need a more dynamic way of specifying the type of the memory cache. We could register the generic memory cache with object as the type argument, but then we lose the strong typing by boxing the items in the cache to object. Instead, I have looked into defining custom middleware for working against the generic memory cache. Of course, in production you would add some protection around this cache so it is not readily available to everyone, such as requiring a token or similar in the REST API calls. The middleware shown next is just a suggestion for how we can build up a generic memory cache in ASP.NET Core via REST API calls. It should be very handy in case you have consumers/clients that have data they want to store in a cache on demand. The applications of this in an ASP.NET Core environment could be endless - that is, if you want to offer such functionality. In many cases you would instead use the GenericMemoryCache directly where needed and not expose it. But for those who want to see how it can be made available in a REST API, the following middleware offers a suggestion.
Startup.cs
// This method gets called by the runtime. Use this method to configure the HTTP request pipeline.
public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
{
    if (env.IsDevelopment())
    {
        app.UseDeveloperExceptionPage();
    }
    ..
    app.UseGenericMemoryCache(new GenericMemoryCacheOptions
    {
        PrefixKey = "volvoer",
        DefaultExpirationInSeconds = 600
    });
    ..
We first call UseGenericMemoryCache to register the middleware, and we initially set the PrefixKey to "volvoer" and the default expiration to 600 seconds (ten minutes). Afterwards we will just use Postman to send some REST API calls to build up the contents of the cache. The UseMiddleware extension method is used in the extension method that offers this functionality:
GenericMemoryCacheExtensions.cs
using Microsoft.AspNetCore.Builder;

namespace SomeAcme.SomeUtilNamespace
{
    public static class GenericMemoryCacheExtensions
    {
        public static IApplicationBuilder UseGenericMemoryCache<TItemData>(this IApplicationBuilder builder, GenericMemoryCacheOptions options) where TItemData : class
        {
            return builder.UseMiddleware<GenericMemoryCacheMiddleware<TItemData>>(options);
        }
    }
}
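The GenericMemoryCacheOptions class passed around above is not shown in this article; a minimal sketch based on the properties read in Startup.cs and in the middleware constructor:

namespace SomeAcme.SomeUtilNamespace
{
    public class GenericMemoryCacheOptions
    {
        public string PrefixKey { get; set; }
        public int DefaultExpirationInSeconds { get; set; }
    }
}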
The middleware looks like this (it could be easily extended to cover more functions of the API):
GenericMemoryCacheMiddleware.cs
using Microsoft.AspNetCore.Http; using Microsoft.Extensions.Caching.Memory; using Newtonsoft.Json; using System; using System.IO; using System.Text; using System.Threading.Tasks; namespace SomeAcme.SomeUtilNamespace { public class GenericMemoryCacheMiddleware<TCacheItemData> where TCacheItemData: class { private readonly RequestDelegate _next; private readonly string _prefixKey; private readonly int _defaultExpirationTimeInSeconds; public GenericMemoryCacheMiddleware(RequestDelegate next, GenericMemoryCacheOptions options) { if (options == null) { throw new ArgumentNullException(nameof(options)); } _next = next; _prefixKey = options.PrefixKey; _defaultExpirationTimeInSeconds = options.DefaultExpirationInSeconds; } public async Task InvokeAsync(HttpContext context, IMemoryCache memoryCache) { context.Request.EnableBuffering(); //do this to be able to re-read the body multiple times without consuming it. (asp.net core 3.1) if (context.Request.Method.ToLower() == "post") { if (IsDefinedCacheOperation("addtocache", context)) { // Leave the body open so the next middleware can read it. using (var reader = new StreamReader( context.Request.Body, encoding: Encoding.UTF8, detectEncodingFromByteOrderMarks: false, bufferSize: 4096, leaveOpen: true)) { var body = await reader.ReadToEndAsync(); // Do some processing with body if (body != null) { string cacheKey = context.Request.Query["cachekey"].ToString(); if (context.Request.Query.ContainsKey("type")) { var typeArgs = CreateGenericCache(context, memoryCache, out var cache); var payloadItem = JsonConvert.DeserializeObject(body, typeArgs[0]); var addMethod = cache.GetType().GetMethod("AddItem"); if (addMethod != null) { addMethod.Invoke(cache, new[] {cacheKey, payloadItem}); } } else { var cache = new GenericMemoryCache<object>(memoryCache, cacheKey, 0); if (cache != null) { //TODO: implement } } } } // Reset the request body stream position so the next middleware can read it context.Request.Body.Position = 0; } } if (context.Request.Method.ToLower() == "delete") { if (IsDefinedCacheOperation("removeitemfromcache", context)) { var typeArgs = CreateGenericCache(context, memoryCache, out var cache); var removeMethod = cache.GetType().GetMethod("RemoveItem"); string cacheKey = context.Request.Query["cachekey"].ToString(); if (removeMethod != null) { removeMethod.Invoke(cache, new[] { cacheKey }); } } } if (context.Request.Method.ToLower() == "get") { if (IsDefinedCacheOperation("getvaluesfromcache", context)) { var typeArgs = CreateGenericCache(context, memoryCache, out var cache); var getValuesMethod = cache.GetType().GetMethod("GetValues"); if (getValuesMethod != null) { var genericGetValuesMethod = getValuesMethod.MakeGenericMethod(typeArgs); var existingValuesInCache = genericGetValuesMethod.Invoke(cache, null); if (existingValuesInCache != null) { context.Response.ContentType = "application/json"; await context.Response.WriteAsync(JsonConvert.SerializeObject(existingValuesInCache)); } else { context.Response.ContentType = "application/json"; await context.Response.WriteAsync("{}"); //return empty object literal } return; //terminate further processing - return data } } } await _next(context); } private static bool IsDefinedCacheOperation(string cacheOperation, HttpContext context, bool requireType = true) { return context.Request.Query.ContainsKey(cacheOperation) && context.Request.Query.ContainsKey("prefix") && (!requireType || context.Request.Query.ContainsKey("type")); } private static Type[] CreateGenericCache(HttpContext context, IMemoryCache 
memoryCache, out object cache) { Type genericType = typeof(GenericMemoryCache<>); string cacheitemtype = context.Request.Query["type"].ToString(); string prefix = context.Request.Query["prefix"].ToString(); Type[] typeArgs = {Type.GetType(cacheitemtype)}; Type cacheType = genericType.MakeGenericType(typeArgs); cache = Activator.CreateInstance(cacheType, memoryCache, prefix, 0); return typeArgs; } } }
The middleware above supports adding items to the cache, removing them and listing them. I have used this business model to test it out:
 
  namespace GenericMemoryCacheAspNetCore.Models
{

    public class Car
    {
        public Car()
        {
            NumberOfWheels = 4;
        }

        public string Make { get; set; }
        public string Model { get; set; }
        public int NumberOfWheels { get; set; }
    }

}

 
The following requests were tested to add three cars and then delete one and then list them up:
 
  # add three cars
  POST https://localhost:44391/caching/addcar?addtocache&prefix=volvoer&cachekey=240&type=GenericMemoryCacheAspNetCore.Models.Car,GenericMemoryCacheAspNetCore
  POST https://localhost:44391/caching/addcar?addtocache&prefix=volvoer&cachekey=Amazon&type=GenericMemoryCacheAspNetCore.Models.Car,GenericMemoryCacheAspNetCore
  POST https://localhost:44391/caching/addcar?addtocache&prefix=volvoer&cachekey=Pv&type=GenericMemoryCacheAspNetCore.Models.Car,GenericMemoryCacheAspNetCore
  
  #remove one
  DELETE https://localhost:44391/caching?removeitemfromcache&prefix=volvoer&cachekey=Amazon&type=GenericMemoryCacheAspNetCore.Models.Car,GenericMemoryCacheAspNetCore
  
  # list up the cars in the cache (items)
  GET https://localhost:44391/caching/addcar?getvaluesfromcache&prefix=volvoer&type=GenericMemoryCacheAspNetCore.Models.Car,GenericMemoryCacheAspNetCore
 
 
About the POST requests: I have posted payloads in the body via Postman such as this:
 
  {
    "Make": "Volvo",
    "Model": "Amazon"
  }
 
Finally, we can see that we get the cached data in our generic memory cache. As you can see, the REST API specifies the type arguments by giving the type name with namespace and, after the comma, also the assembly name (a fully qualified type name). So building up a generic memory cache via a REST API is fully feasible in ASP.NET Core. However, it should only be used in scenarios where such functionality is desired and the clients can be trusted in some way (or by restricting access to such functionality to privileged users via a token or other mechanism). You would of course never allow clients to just send data into a server's memory cache only to see it bogged down by memory usage - that was not the purpose of this article. The purpose was to acquaint the reader with IMemoryCache, a generic memory cache and middleware in ASP.NET Core. A generic memory cache gives you strongly typed access to the memory cache in ASP.NET Core, and the concepts shown here should carry over to .NET Core in general.
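To make the fully qualified type name handling concrete, this is roughly what happens inside CreateGenericCache when the type query string parameter from the requests above is resolved (memoryCache is the injected IMemoryCache):

Type itemType = Type.GetType("GenericMemoryCacheAspNetCore.Models.Car, GenericMemoryCacheAspNetCore");
Type cacheType = typeof(GenericMemoryCache<>).MakeGenericType(itemType);  //GenericMemoryCache<Car>
object cache = Activator.CreateInstance(cacheType, memoryCache, "volvoer", 0);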

Sunday, 20 December 2020

Outputting runnable SQL from Entity Framework 6.x

This article will describe how you can output runnable SQL from Entity Framework. The output will be sent to the Console and to Debug. You can easily modify this to output to other targets, such as tracing or files for that matter. What is important is that we need to interpolate the parameters from Entity Framework so that we get runnable SQL. Entity Framework parameterizes the SQL queries so that SQL injection is avoided. WHERE conditions and the like are put into parameters, notably with the p__linq naming convention. We will interpolate these parameters into runnable SQL such that you can paste the SQL into SQL Server Management Studio (SSMS). Or you could save the runnable SQL to a .sql file and let SQLCMD run it from the command line. Either way, we must set up the DbContext to do this. I am using Entity Framework 6.2.0; it should be possible to use this technique with all EF 6.x versions. In Entity Framework Core and Entity Framework Core 2, the techniques will be similar. First, define a DbConfiguration and attribute the DbContext class you are using with the DbConfigurationType attribute like this (we are not considering ObjectContext in this article, but DbContext is a wrapper around that class anyway, so you should be able to apply the techniques taught here to other scenarios as well).
SomeAcmeDbContext.cs
namespace SomeAcme.Data.EntityFramework
{
    [DbConfigurationType(typeof(SomeAcmeDataContextConfiguration))]
    public partial class SomeAcmeDataContext : System.Data.Entity.DbContext, ISomeAcmeDataContext
    {
    ..
OK, so our configuration class just inherits from DbConfiguration and sets up a custom DatabaseLogFormatter like this:
  
SomeAcmeDataContextConfiguration.cs
using System.Data.Entity;

namespace SomeAcme.Data.EntityFramework.DbContext
{
    public class SomeAcmeDataContextConfiguration : DbConfiguration
    {
        public SomeAcmeDataContextConfiguration()
        {
            SetDatabaseLogFormatter((context, logAction) => new SomeAcmeDbLogFormatter(context, logAction));
        }
    }
}
SetDatabaseLogFormatter is a protected method of DbConfiguration. Our DatabaseLogFormatter implementation then looks like this:
 
SomeAcmeDbLogFormatter.cs
using System;
using System.Data.Common;
using System.Data.Entity.Infrastructure.Interception;
using SomeAcme.Data.EntityFramework.DbContext.Extensions;

namespace SomeAcme.Data.EntityFramework.DbContext
{
    public class SomeAcmeDbLogFormatter : DatabaseLogFormatter
    {
        public SomeAcmeDbLogFormatter(System.Data.Entity.DbContext dbContext, Action<string> loggingAction)
            : base(dbContext, loggingAction)
        {
        }

        public override void LogCommand<TResult>(DbCommand command, DbCommandInterceptionContext<TResult> interceptionContext)
        {
            string cmdText = command.CommandText;
            if (string.IsNullOrEmpty(cmdText))
                return;
            if (cmdText.StartsWith("Opened connection", StringComparison.InvariantCultureIgnoreCase) ||
                cmdText.StartsWith("Closed connection", StringComparison.InvariantCultureIgnoreCase))
                return;
            Write($"--DbContext {Context.GetType().Name} is executing command against DB {Context.Database.Connection.Database}: {Environment.NewLine}{command.GetGeneratedQuery().Replace(Environment.NewLine, "")} {Environment.NewLine}");
        }

        public override void LogResult<TResult>(DbCommand command, DbCommandInterceptionContext<TResult> interceptionContext)
        {
            //empty by intention
        }
    }
}
We also have a helper extension method called GetGeneratedQuery on DbCommand objects to give us the crux of this article - the interpolated, runnable query. From my testing we can just interpolate the parameters as-is in most use cases. However, some data types in the T-SQL world must be quoted (like strings), and we need to adjust the date and time data types to a runnable format too. In case you find cases where this helper method needs adjusting, please let me know. Our helper method GetGeneratedQuery looks like this:
  
SomeAcmeDbCommandExtensions.cs
using System;
using System.Data;
using System.Data.Common;
using System.Data.SqlClient;
using System.Linq;
using System.Text;

namespace SomeAcme.Data.EntityFramework.DbContext.Extensions
{
    public static class DbCommandExtensions
    {
        /// <summary>
        /// Returns the generated SQL string where parameters are replaced by value, i.e. a runnable
        /// SQL script. Note that this is an approximation anyway, but it gives us a runnable query.
        /// The database server query engine optimizer will possibly rewrite even simple queries if it
        /// sees it can rearrange the query to predictively create a more efficient query.
        /// </summary>
        /// <param name="dbCommand"></param>
        /// <returns></returns>
        public static string GetGeneratedQuery(this DbCommand dbCommand)
        {
            DbType[] quotedParameterTypes = new DbType[]
            {
                DbType.AnsiString, DbType.Date, DbType.DateTime, DbType.DateTime2, DbType.Guid,
                DbType.String, DbType.AnsiStringFixedLength, DbType.StringFixedLength
            };
            var sb = new StringBuilder();
            sb.AppendLine(dbCommand.CommandText);

            //copy dbCommand parameters into another collection to avoid mutating the query
            //and to be able to run a foreach loop
            var arrParams = new SqlParameter[dbCommand.Parameters.Count];
            dbCommand.Parameters.CopyTo(arrParams, 0);

            foreach (SqlParameter p in arrParams.OrderByDescending(p => p.ParameterName.Length))
            {
                string value = p.Value.ToString();
                if (p.DbType == DbType.Date || p.DbType == DbType.DateTime || p.DbType == DbType.DateTime2)
                {
                    value = DateTime.Parse(value).ToString("yyyy-MM-dd HH:mm:ss.fff");
                }
                if (quotedParameterTypes.Contains(p.DbType))
                    value = "'" + value + "'";
                sb.Replace("@" + p.ParameterName, value);
            }
            return sb.ToString();
        }
    }
}
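To illustrate the effect, here is a hypothetical example (the entity and property names are made up) of a LINQ query, the parameterized SQL that EF 6 would log for it, and the runnable SQL after GetGeneratedQuery has interpolated the parameters:

//hypothetical query against a DbContext with an Orders DbSet
var recentOrders = context.Orders
    .Where(o => o.Customer == "Contoso" && o.Created > cutoff)
    .ToList();

//EF 6 logs a parameterized command along these lines:
//  SELECT ... FROM [dbo].[Orders] AS [Extent1]
//  WHERE ([Extent1].[Customer] = @p__linq__0) AND ([Extent1].[Created] > @p__linq__1)
//
//After interpolation by GetGeneratedQuery, the logged SQL can be pasted straight into SSMS:
//  SELECT ... FROM [dbo].[Orders] AS [Extent1]
//  WHERE ([Extent1].[Customer] = 'Contoso') AND ([Extent1].[Created] > '2020-12-01 00:00:00.000')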
We also need to activate database logging in the first place. Database logging to the console and debug output should normally be avoided in production, as it has a performance impact. Instead, it is handy to turn it on or off via an app setting. I have decided to only allow it while debugging, so the constructors of the DbContext where I have tested it call this method:
SomeAcmeDbContext.cs
private void SetupDbContextBehavior()
{
    Configuration.AutoDetectChangesEnabled = true;
    Configuration.LazyLoadingEnabled = true;
    ObjectContext.CommandTimeout = 10 * 60;

#if DEBUG
    //To enable outputting database traffic to the console, set the app setting OutputDatabaseTrafficLogging in web.config to true.
    //This must not be activated in production. To safeguard this, the block below is wrapped in the DEBUG preprocessor directive.
    bool outputDatabaseTrafficLogging = ConfigurationManagerWrapper.GetAppsetting(SomeAcme.Common.Constants.OutputDatabaseTrafficLogging);
    if (outputDatabaseTrafficLogging)
    {
        Database.Log = s =>
        {
            if (s.StartsWith("Opened connection", StringComparison.InvariantCultureIgnoreCase) ||
                s.StartsWith("Closed connection", StringComparison.InvariantCultureIgnoreCase))
                return;
            Console.WriteLine(s);
            Debug.WriteLine(s);
        };
    }
#endif
Never mind the first three lines; they are just added here as tips for additional settings you CAN set if you want to. The important bit is the Database.Log delegate property, which accepts a lambda where you set up what to do with the logging. Here we just tell the DbContext that if the app setting OutputDatabaseTrafficLogging is set to true, we output the runnable SQL from Entity Framework to the console. That's all there is to it! You can now activate the app setting and see runnable SQL in the debug output (or in the console). You can paste the SQL into SSMS, for example, to check for performance issues such as missing indexes, or to tune the size of the result sets and alter the SQL. You should also consider making your DbContext runnable in LINQPad for easier tuning of EF queries, but that is for another article. Happy coding!

Saturday, 19 December 2020

Running Angular on a fixed port on Windows platform

This article is for Angular developers running on the Windows platform. We will use Windows tools such as netstat and PowerShell in this article. A cross-platform version of PowerShell exists, but I have not tested this approach on other OSes, such as Linux. Linux developers using Angular might follow my approach here with success, as it should be possible to do something similar on *nix systems. Also note that this article is meant for those using an Angular SPA with .NET Core - this is the standard setup for Angular development on the Windows platform. When developing an Angular app locally, sometimes you want to run on a fixed port. For example, your app might federate its access towards ADFS (Active Directory Federation Services) and you want a fixed port. This makes it possible to set up a fixed callback URL instead of the standard setup where Angular and webpack pick a random port for you. Here is how I managed to override this and set up a fixed port. First off, we create a short PowerShell script with a function to stop (kill) the process running at a given port, with the following contents:


KillAngularApp.ps1
param (
    [Parameter(Mandatory=$true)][string] $portToFind
)
# This PS script requires the parameter $portToFind to be passed into it
pwd
Write-Host Probing for Angular App running at $portToFind
$runningEudAppProcessLocal = 'netstat -ano | findstr "$portToFind"'
$arr = Invoke-Expression $runningEudAppProcessLocal
# $runningEudAppProcessLocal
$arr = $arr -split '\s+'
Write-Host Probing complete: $arr
if ($arr.Length -ge 5) {
    $runningAngularAppPort = $arr[5]
    $runningAngularAppPort
    Write-Host Killing the process..
    $killScript = "taskkill /PID $runningAngularAppPort /F"
    Invoke-Expression $killScript
    Write-Host probing once more
    $arr = Invoke-Expression $runningEudAppProcessLocal
    if ($arr.Length -eq 0) {
        Write-Host There is no running process any more at $portToFind
    }
}
The PowerShell script above runs 'netstat -ano | findstr "someportnumber"'. It splits the matching line using the '\s+' expression, i.e. whitespace, to find the PID (process id) listening on that port. If we find a PID, we stop that process using the 'taskkill' command with the '/F' (force) flag. We then need a C# class that .NET Core can call. The code here has been tested successfully with .NET Core 3.0 and 3.1.


KillAngularPortHelper.cs
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Hosting;
using System;
using System.Diagnostics;

namespace EndUserDevice
{
    public static class AngularKillPortHelper
    {
        /// <summary>
        /// Kills Angular app running with for example node at configured port Host:SpaPort
        /// </summary>
        public static void KillPort()
        {
            try
            {
                string environmentName = Environment.GetEnvironmentVariable("ASPNETCORE_ENVIRONMENT");
                string configFile =
                    environmentName == Environments.Development ? "appsettings.json" :
                    environmentName == Environments.Staging ? "appsettings.Staging.json" :
                    environmentName == Environments.Production ? "appsettings.Production.json" :
                    "appsettings.json";

                // Set up configuration sources.
                var config = new ConfigurationBuilder()
                    .AddJsonFile(configFile, optional: false)
                    .Build();

                string angularAppPort = config.GetValue<string>("Configuration:Host:SpaPort");

                if (environmentName == Environments.Development)
                {
                    string killAngularAppRunningAtPortMessage =
                        $"Trying to shutdown existing running Angular SPA if it is running at reserved fixed port: {angularAppPort}";
                    Debug.WriteLine(killAngularAppRunningAtPortMessage);
                    Console.WriteLine(killAngularAppRunningAtPortMessage);

                    //requires Nuget Packages:
                    //Microsoft.Powershell.SDK
                    //System.Management.Automation
                    string ps1File = config.GetValue<string>("Configuration:Host:CloseSpaPortScriptPath");

                    var startInfo = new ProcessStartInfo();
                    startInfo.FileName = "powershell.exe";
                    startInfo.Arguments = "-noprofile \"& \"\"" + ps1File + "\"\"\"";
                    startInfo.Arguments += " -portToFind " + angularAppPort;
                    startInfo.UseShellExecute = false;
                    //WARNING!!! If the powershell script outputs lots of data, this code could hang.
                    //You will need to output using a stream reader and purge the contents from time to time
                    startInfo.RedirectStandardOutput = !startInfo.UseShellExecute;
                    startInfo.RedirectStandardError = !startInfo.UseShellExecute;
                    //startInfo.CreateNoWindow = true;

                    var process = new System.Diagnostics.Process();
                    process.StartInfo = startInfo;
                    process.Start();
                    process.WaitForExit(3 * 1000);
                    //if you want to limit how long you wait for the program to complete, input is in milliseconds:
                    //var seconds_to_wait_for_exit = 120;
                    //process.WaitForExit(seconds_to_wait_for_exit * 1000);

                    string output = "";
                    if (startInfo.RedirectStandardOutput)
                    {
                        output += "Standard Output";
                        output += Environment.NewLine;
                        output += process.StandardOutput.ReadToEnd();
                    }
                    if (startInfo.RedirectStandardError)
                    {
                        var error = process.StandardError.ReadToEnd();
                        if (!string.IsNullOrWhiteSpace(error))
                        {
                            if (!string.IsNullOrWhiteSpace(output))
                            {
                                output += Environment.NewLine;
                                output += Environment.NewLine;
                            }
                            output += "Error Output";
                            output += Environment.NewLine;
                            output += error;
                        }
                    }
                    Console.WriteLine(output);
                    Debug.WriteLine(output);
                }
            }
            catch (Exception err)
            {
                Console.WriteLine(err);
            }
        }
    }
}
Finally we can configure our app to use the ports we want to use like this:

appsettings.json
"Configuration": { "Host": { "SpaPort": "44394", "ApiPort": "44364", "CloseSpaPortScriptPath": "c:\\dev\\someapp\\somesubdir\\KillAngularApp.ps1" }, }
The ApiPort is not used by the code in this article; the SpaPort, however, is. We then call the helper class like this in Program.cs of the ASP.NET Core application hosting the Angular SPA:

Program.cs
public class Program
{
    public static void Main(string[] args)
    {
        AngularKillPortHelper.KillPort();
        IWebHost host = CreateWebHostBuilder(args).Build();
        host.Run();
    }
    //..
This makes sure the port is ready and not busy with another app (such as your previous debugging session of the app!): by killing the app running with node at that port, we make sure that Angular can run at a fixed port. Are we done yet? No! We must also update the package.json file to use the port we want. This must correspond to the port we configured in appsettings.json and kill in the first place (i.e. make sure the port is freely available and not busy):

package.json
{ "name": "enduserdevice", "version": "0.0.0", "scripts": { "ng": "ng", "start": "ng serve -o --ssl true --ssl-key /node_modules/browser-sync/certs/server.key --ssl-cert /node_modules/browser-sync/certs/server.crt --port=44394 &REM", "build": "ng build --base-href /eud/ --source-map --prod", "build-staging": "ng build --base-href /eud/ --source-map --staging", "build-mobile": "ng build --base-href /eud2/ --source-map -c mobile", "prod-build-dev": "ng build --prod --source-map --base-href /eud/ --prod", "build:ssr": "ng run EndUserDevice:server:dev", "test": "ng test", "lint": "ng lint", "e2e": "ng e2e" },
Never mind much of the setup above; the important bit to note here is the part: --port=44394 &REM. The adjustment of the ng start script makes sure our Angular app uses a fixed port. Sadly, as you can see, this is not easily configurable, as we hard-code it into our package.json. Maybe we could set this up as an environment variable in an Angular environment file, or in the CLI configuration instead (see the sketch below)? Hope you found this article interesting. Having a fixed port is always handy when developing an Angular app locally and you do not like the random port that is otherwise set up by default. Works on my PC!
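As a sketch of such an alternative (not something used in the setup above), the Angular CLI also lets you set a default port and SSL options for ng serve in angular.json under the serve target of your project, roughly along these lines:

"architect": {
  "serve": {
    "options": {
      "port": 44394,
      "ssl": true
    }
  }
}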

Saturday, 10 October 2020

Eslint Standalone in Azure Devops Build task

I have created a standalone tool that can run ESLint from the command line. The tool is a Node.js application packaged with Pkg for the node10-win target, producing a standalone EXE executable. You can find the repository here:

https://github.com/toreaurstadboss/eslint-standalone

Here you can also alter the application to your needs, if necessary. The application is available as an npm package and as a Nuget package on the official repos (npmjs.org and nuget.org). This article will focus on using the application via Nuget and activating the tool in Azure DevOps. First off, make sure you add the official Nuget repo to your Nuget.config file like this:
<?xml version="1.0" encoding="utf-8"?>
<configuration>
  <packageRestore>
    <add key="enabled" value="True" />
  </packageRestore>
  <activePackageSource>
    <!-- some other nuget repo in addition if desired -->
  </activePackageSource>
  <packageSources>
    <clear />
    <!-- some other nuget repo in addition if desired -->
    <add key="Nuget official repo" value="https://nuget.org/api/v2/" />
  </packageSources>
</configuration>

Now you can add a PackageReference to the EslintStandalone.Cli tool in the .csproj project file (or .vbproj if you use Visual Basic) like this:

<PackageReference Include="EslintStandalone.Cli" Version="1.1.0" GeneratePathProperty="true" />
Also add the following copy step to copy the eslint-standalone.exe tool inside the Nuget package out to the bin folder of your project:

<ItemGroup>
  <Content Include="$(PkgEslintStandalone_Cli)\eslint-standalone.exe">
    <CopyToOutputDirectory>Always</CopyToOutputDirectory>
  </Content>
</ItemGroup>

This is possible since we set GeneratePathProperty to true, which lets us refer to the folder of the Nuget package on disk via $(PkgEslintStandalone_Cli). The Nuget package is called EslintStandalone.Cli: to get the path property name we replace '.' with '_', always prefix the variable with Pkg, and reference the whole thing with the $() expression. The next step is to add the execution of the tool as a task in Azure DevOps. You can either define a single task or a task group. I like task groups, since we can then easily share tasks among projects. The following command should be added:
dir
echo Starting Eslint tool to analyzing for compability issues in Javascript files
cd Source\SomeProject\bin
echo Current folder
dir *.exe
move eslint-standalone.exe ..
cd ..
echo Navigated to root folder of SomeProject. Starting the eslint-standalone tool. 
eslint-standalone.exe

Here we move the standalone tool one level up, to the root folder of the project (the parent folder of the bin folder). This is where we usually have our target files, i.e. the Javascript files of the project (e.g. an MVC project or another web project). Finally, we must supply a .eslintrc.js file, a config file for ESLint. At my work, I have customers that use Internet Explorer 11, so I check for ECMAScript 5 compatibility. This tool can handle such a scenario. The following .eslintrc.js should suffice:

module.exports = {
    "plugins": ["ie11"],
    "env": {
      "browser": true,
      "node": true,
      "es6": false
    },
    "parserOptions": {
      "ecmaVersion": 5,
    },
    "rules": {
      "ie11/no-collection-args": ["error"],
      "ie11/no-for-in-const": ["error"],
      //"ie11/no-loop-func": ["warn"],
      "ie11/no-weak-collections": ["error"],
      "curly": ["off"]
    }
};

//A list of rules that can be applied is here: https://eslint.org/docs/rules/
//The rules can have the following severity in EsLint: "warn", "error" and "off".


You can find the available ESLint rules here: https://eslint.org/docs/rules/ - each rule can be set to the severity 'warn', 'error' or 'off'. The configuration guide is here: https://eslint.org/docs/user-guide/configuring

If you want to use the tool in an npm based project, you can see the npm page here: https://www.npmjs.com/package/eslint-standalone - install it with: npm i eslint-standalone (a short usage sketch follows after the source listing below). There are two versions of the tool. Version 1.1 is recommended, as you must supply a .eslintrc.js file yourself and therefore have control over how the linting is done. Version 1.2 bundles a .eslintrc.js in the same folder as the tool, with the ES5 support detection shown above included. The tool itself is quite simple Node.js code:

#!/usr/bin/env node

const CLIEngine = require("eslint").CLIEngine;
const minimist = require("minimist");
const path = require("path");
const chalk = require("chalk");
const eslintPluginCompat = require("eslint-plugin-compat");
const eslintIe11 = require("eslint-plugin-ie11");
const fs = require("fs");
const { promisify } = require("util");

const fsAccessAsync = promisify(fs.access);

var runEsLint = function(baseConfig, args) {
  const cli = new CLIEngine({ baseConfig });

  let filesDir = [];

  if (args.dir) {
    // Dir can be a string or an array, we do a preprocessing to always have an array
    filesDir = []
      .concat(args.dir)
      .map((item) => path.resolve(process.cwd(), item));
  } else {
    filesDir = ["./."];
  }

  console.log(`> eslint is checking the following dir: ${filesDir}`);

  const report = cli.executeOnFiles(filesDir);

  if (report.errorCount > 0) {
    const formatter = cli.getFormatter();

    console.log(
      chalk.bold.redBright(`> eslint has found ${report.errorCount} error(s)`)
    );
    console.log(formatter(report.results));

    process.exitCode = 1; //eslint errors encountered means the process should exit not with exit code 0.

    return;
  }
  console.log(chalk.bold.greenBright("> eslint finished without any errors!"));
  process.exitCode = 0; //exit with success code

}

var tryLoadConfigViaKnownSystemFolder = function(){

  let configFileFound = null;
try {
  let knownHomeDirectoryOnOSes =
    process.env.HOME || process.env.HOMEPATH || process.env.USERPROFILE;
  let knownHomeDirectoryOnOSesNormalized = path.normalize(
    knownHomeDirectoryOnOSes + "/.eslintrc"
  );
  configPath = path.resolve(knownHomeDirectoryOnOSesNormalized);
  if (checkIfFileExistsAndIsAccessible(configPath)){
    configFileFound = true;
    errorEncountered = false;
  }

} catch (error) {
  errorEncountered = true;
  console.error(error);  
  process.exitCode = 1; //signal an error has occured. https://stackoverflow.com/questions/5266152/how-to-exit-in-node-js
  return configFileFound;
}
  return configFileFound; //return the result also when no error occurred
};


var checkIfFileExistsAndIsAccessible = function(configPathFull) {
  try {
   fs.accessSync(configPathFull, fs.F_OK);
  return true;
  }
  catch (Error){
    return false;
   }  
}


var tryLoadFileInDirectoryStructure = function(curDir){

  let configFullPathFound = null;
  for (let i = 0; i < 100; i++) {
    try {
      if (i > 0) {
        console.info("Trying lib folder of eslint-standalone: " + curDir);
        let oldCurDir = curDir;
        curDir = path.resolve(curDir, ".."); //parent folder
        if (oldCurDir == curDir) {
          //at the top of media disk volume - exit for loop trying to retrieve the .eslintrc.js file from parent folder
          console.info(
            "It is recommended to save an .eslintrc.js file in the folder structure where you run this tool."
          );
          break;
        }
      }
      configPath = path.join(curDir + "/.eslintrc.js");
      configPath = path.normalize(configPath);
      if (checkIfFileExistsAndIsAccessible(configPath)){
       baseConfig = require(configPath);
       errorEncountered = false;
       configFullPathFound = configPath;
       break; //exit the for loop
      }
    } catch (error) {
      process.stdout.write(".");
      errorEncountered = true;
    }
  }
  return configFullPathFound;
}

var inspectArgs = function(args) {
  let fix = false;

  console.log("Looking at provided arguments:");
  for (var i = 0; i < args.length; i++) {
    console.log(args[i]);
    if (args[i] === "--fix") {
      fix = true;
      console.log("Fix option provided: " + fix);
      console.warn("Fix is not supported yet, you must manually adjust the files."
      );
    }
  }
}


module.exports = (() => {
  //parse named arguments such as --dir and --conf with minimist; keep the raw argv array for inspectArgs
  const rawArgs = process.argv.slice(2);
  const args = minimist(rawArgs);

  inspectArgs(rawArgs);

  // Read a default eslint config
  //console.log("Dirname: " + __dirname);

  let configPath = "";
  let baseConfig = "";
  let errorEncountered = false;

  console.info("Trying to resolve .eslintrc.js file");

  console.info("Trying current working directory:", process.cwd());

  let curDir = process.cwd();

  let configFilefound = tryLoadFileInDirectoryStructure(curDir);
  
  if (configFilefound === null) {
   curDir = __dirname;
   configFilefound = tryLoadFileInDirectoryStructure(curDir);
  }


  // Check if the path to a client config was specified
  if (args.conf) {
    if (Array.isArray(args.conf)) {
      const error = chalk.bold.redBright(
        `> eslint requires a single config file`
      );
      errorEncountered = true;
      console.warn(error);
    }

    try {
      configPath = path.resolve(process.cwd(), args.conf);
      baseConfig = require(configPath);
      errorEncountered = false;
    } catch (error) {
      errorEncountered = true;
      console.log(error);
    }
  }

  if (errorEncountered === true) {
    configFilefound = tryLoadConfigViaKnownSystemFolder();
    if (configFilefound) {
      //CLIEngine expects baseConfig to be an object, so extend from the config file that was found
      baseConfig = {
        "extends": configPath
      };
    }
  }

  console.log(`> eslint has loaded config from: ${configFilefound}`);

  runEsLint(baseConfig, args);

})();
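If you use the npm package instead of the Nuget package, a minimal usage sketch could be a script entry in your package.json that runs the tool against a source folder. The script name and the src folder below are just examples; --dir and --conf are the arguments the tool reads, as the source above shows:

"scripts": {
  "lint:ie11": "eslint-standalone --dir ./src --conf .eslintrc.js"
}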



Sunday, 27 September 2020

Generic Memory Cache for .Net Framework

The following sample code shows how to create a generic memory cache for .NET Framework. This lets you cache items of a specific type given by the TCacheItemData type argument, i.e. cache the same type of data, such as instances of a class or arrays of instances. Inside your .csproj you should see something like the reference below:
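This is a sketch of the assembly reference I am assuming here, since MemoryCache lives in the System.Runtime.Caching assembly in a classic .NET Framework project:

<ItemGroup>
  <!-- assumed reference; MemoryCache and ObjectCache live in this assembly -->
  <Reference Include="System.Runtime.Caching" />
</ItemGroup>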
    
Over to the implementation. Since the memory cache may be shared with other parts of your application, it is important to prefix your cached contents, i.e. prefix the keys. This makes it easier to partition the memory cache. Some isolation is of course already provided across processes; the prefix just makes it easier, within your application and running process, to group the cached elements with a prefix key that the generic memory cache operations use.

using System;
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Runtime.Caching;

namespace SomeAcme.SomeUtilNamespace
{
    /// <summary>
    /// Thread safe memory cache for generic use
    /// </summary>
    /// <typeparam name="TCacheItemData">Payload to store in the memory cache</typeparam>
    /// <remarks>Uses MemoryCache.Default which defaults to an in-memory cache. All cache items are prefixed with an 'import cache session guid' to compartmentalize
    /// multiple parallel importing sessions</remarks>
    public class GenericMemoryCache<TCacheItemData> where TCacheItemData : class
    {
        private readonly string _prefixKey;
        private readonly ObjectCache _cache;
        private readonly CacheItemPolicy _cacheItemPolicy;

        public GenericMemoryCache(string prefixKey, int defaultExpirationInSeconds = 0)
        {
            defaultExpirationInSeconds = Math.Abs(defaultExpirationInSeconds); //checking if a negative value was passed into the constructor.

            _prefixKey = prefixKey;
            _cache = MemoryCache.Default;
            _cacheItemPolicy = defaultExpirationInSeconds == 0
                ? new CacheItemPolicy { Priority = CacheItemPriority.NotRemovable }
                : new CacheItemPolicy
                { AbsoluteExpiration = DateTime.Now.AddSeconds(Math.Abs(defaultExpirationInSeconds)) };
        }

        /// <summary>
        /// Cache object if direct access is desired
        /// </summary>
        public ObjectCache Cache => _cache;

        public string PrefixKey(string key) => $"{_prefixKey}_{key}";


        /// <summary>
        /// Adds an item to memory cache
        /// </summary>
        /// <param name="key"></param>
        /// <param name="itemToCache"></param>
        /// <returns></returns>
        public bool AddItem(string key, TCacheItemData itemToCache)
        {
            try
            {
                if (!key.StartsWith(_prefixKey))
                    key = PrefixKey(key);

                var cacheItem = new CacheItem(key, itemToCache);
                _cache.Add(cacheItem, _cacheItemPolicy);
                return true;
            }
            catch (Exception err)
            {
                Debug.WriteLine(err);
                return false;
            }
        }

        public virtual List<T> GetValues<T>()
        {
            List<T> list = new List<T>();
            IDictionaryEnumerator cacheEnumerator = (IDictionaryEnumerator)((IEnumerable)_cache).GetEnumerator();

            while (cacheEnumerator.MoveNext())
            {
                if (cacheEnumerator.Key == null)
                    continue;
                if (cacheEnumerator.Key.ToString().StartsWith(_prefixKey))
                    list.Add((T)cacheEnumerator.Value);
            }
            return list;
        }

        /// <summary>
        /// Retrieves a cache item from the memory cache.
        /// </summary>
        /// <param name="key"></param>
        /// <returns></returns>
        public TCacheItemData GetItem(string key)
        {
            try
            {
                if (!key.StartsWith(_prefixKey))
                    key = PrefixKey(key);
                if (_cache.Contains(key))
                {
                    CacheItem cacheItem = _cache.GetCacheItem(key);
                    object cacheItemValue = cacheItem?.Value;
                    UpdateItem(key, cacheItemValue as TCacheItemData);
                    TCacheItemData item = _cache.Get(key) as TCacheItemData;
                    return item;
                }
                return null;
            }
            catch (Exception err)
            {
                Debug.WriteLine(err);
                return null;
            }
        }

        public bool SetItem(string key, TCacheItemData itemToCache)
        {
            try
            {
                if (!key.StartsWith(_prefixKey))
                    key = PrefixKey(key);
                if (GetItem(key) == null) //add the item if it is not already cached, otherwise update it below
                {
                    AddItem(key, itemToCache);
                    return true;
                }

                UpdateItem(key, itemToCache);
                return true;
            }
            catch (Exception err)
            {
                Debug.WriteLine(err);
                return false;
            }
        }


        /// <summary>
        /// Updates an item in the cache and set the expiration of the cache item 
        /// </summary>
        /// <param name="key"></param>
        /// <param name="itemToCache"></param>
        /// <returns></returns>
        public bool UpdateItem(string key, TCacheItemData itemToCache)
        {
            if (!key.StartsWith(_prefixKey))
                key = PrefixKey(key);
            CacheItem cacheItem = _cache.GetCacheItem(key);
            if (cacheItem != null)
            {
                cacheItem.Value = itemToCache;
                _cache.Set(key, itemToCache, _cacheItemPolicy);
            }
            else
            {
                //if we can't find the cache item, just set the cache directly
                _cache.Set(key, itemToCache, _cacheItemPolicy);
            }
            return true;
        }

        /// <summary>
        /// Removes an item from the cache 
        /// </summary>
        /// <param name="key"></param>
        /// <returns></returns>
        public bool RemoveItem(string key)
        {
            if (!key.StartsWith(_prefixKey))
                key = PrefixKey(key);

            if (_cache.Contains(key))
            {
                _cache.Remove(key);
                return true;
            }
            return false;
        }

        public void AddItems(Dictionary<string, TCacheItemData> itemsToCache)
        {
            foreach (var kvp in itemsToCache)
                AddItem(kvp.Key, kvp.Value);
        }

        /// <summary>
        /// Clear all cache keys starting with known prefix passed into the constructor.
        /// </summary>
        public void ClearAll()
        {
            var cacheKeys = _cache.Select(kvp => kvp.Key).ToList();
            foreach (string cacheKey in cacheKeys)
            {
                if (cacheKey.StartsWith(_prefixKey))
                    _cache.Remove(cacheKey);
            }
        }

    }
}
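To round things off, here is a minimal usage sketch of the cache. The SomePayload class and the key and prefix values below are made up for illustration purposes:

public class SomePayload
{
    public string Name { get; set; }
}

//...

var cache = new GenericMemoryCache<SomePayload>("ImportSession42", defaultExpirationInSeconds: 60);
cache.AddItem("Customer1001", new SomePayload { Name = "Foo" });   //stored under the key "ImportSession42_Customer1001"
SomePayload cached = cache.GetItem("Customer1001");                //the same prefix is applied when reading
cache.SetItem("Customer1001", new SomePayload { Name = "Bar" });   //updates the existing entry
cache.RemoveItem("Customer1001");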