Tuesday, 18 March 2025
Converting a .NET DateTime to a DateTime2 in T-SQL
This article presents a handy T-SQL script that extracts a DateTime value stored by .NET as a numeric value and converts it into a SQL Server DateTime2 value.
The T-SQL converts the value into a DateTime2 with SECOND precision.
An assumption here is that any numeric value larger than 100000000000 contains a DateTime value. This is an acceptable assumption when you log data, as very large values usually indicate a DateTime stored as ticks, but of course you might want additional checks beyond what is shown in the example T-SQL script.
Here is the T-SQL that shows how we can convert the .NET DateTime into a SQL Server DateTime2.
-- Last updated: March 18, 2025
-- Synopsis: This script retrieves detailed change log information from the ObjectChanges, PropertyChanges, and ChangeSets tables.
-- It filters the results based on specific identifiers stored in a table variable, in this example GUIDs.
-- In this example the library FrameLog is used to store the change log.
-- DateTime columns are reconstructed from the number of ticks elapsed since DateTime.MinValue,
-- since that is the numeric value stored for them in the log.
DECLARE @EXAMPLEGUIDS TABLE (ID NVARCHAR(36))
INSERT INTO @EXAMPLEGUIDS (Id)
VALUES
('1968126a-64c1-4d15-bf23-8cb8497dcaa9'),
('3e11aad8-95df-4377-ad63-c2fec3d43034'),
('acbdd116-b6a5-4425-907b-f86cb55aeedd') -- Tip: define which form GUIDs to fetch from the ChangeLog database tables that 'FrameLog' uses. Each form GUID can be retrieved from the URL showing the form in MRS in the browser.
SELECT
o.Id as ObjectChanges_Id,
o.ObjectReference as ObjectReference,
o.TypeName as ObjectChanges_TypeName,
c.Id as Changeset_Id,
c.[Timestamp] as Changeset_Timestamp,
c.Author_UserName as Changeset_AuthorName,
p.[Id] as PropertyChanges_Id,
p.[PropertyName],
p.[Value],
p.[ValueAsInt],
CASE
WHEN p.Value IS NOT NULL
AND ISNUMERIC(p.[Value]) = 1
AND CAST(p.[Value] AS decimal) > 100000000000
THEN
DATEADD(SECOND,
CAST(CAST(p.[Value] AS decimal) / 10000000 AS BIGINT) % 60,
DATEADD(MINUTE,
CAST(CAST(p.[Value] AS decimal) / 10000000 / 60 AS BIGINT),
CAST('0001-01-01' AS datetime2)
)
)
ELSE NULL
END AS ValueAsDate,
o.ChangeType as ObjectChanges_ChangeTypeIfSet
FROM PropertyChanges p
LEFT OUTER JOIN ObjectChanges o on o.Id = p.ObjectChange_Id
LEFT OUTER JOIN ChangeSets c on o.ChangeSet_Id = c.Id
WHERE ObjectChange_Id in (
SELECT ObjectChanges.Id
FROM PropertyChanges
LEFT OUTER JOIN ObjectChanges on ObjectChanges.Id = PropertyChanges.ObjectChange_Id
LEFT OUTER JOIN ChangeSets on ObjectChanges.ChangeSet_Id = ChangeSets.Id
WHERE ObjectChange_Id in (SELECT Id FROM ObjectChanges where ObjectReference IN (
SELECT Id FROM @EXAMPLEGUIDS
))) -- Find the changesets where ObjectChange_Id equals the Id of an ObjectChanges row whose ObjectReference equals one of the identifiers in @EXAMPLEGUIDS
ORDER BY ObjectReference, Changeset_Id DESC, Changeset_Timestamp DESC
This T-SQL is handy whenever you come across .NET DateTime values that are saved as ticks in a numeric column, and it shows how to do the conversion.
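To see why the arithmetic above works: a .NET DateTime stores the number of 100-nanosecond ticks since DateTime.MinValue (0001-01-01 00:00:00), so dividing the stored value by 10,000,000 gives whole seconds since that date. A minimal C# sketch of the same conversion, for illustration only (the tick value below is just an example, not data from a real log):
TicksToDateTimeDemo.cs | C# source code
using System;

class TicksToDateTimeDemo
{
    static void Main()
    {
        // Example tick count (hypothetical value, as FrameLog would store it in the Value column)
        long storedTicks = 638_781_984_000_000_000;

        // 1 tick = 100 nanoseconds, so 10,000,000 ticks = 1 second
        long totalSeconds = storedTicks / 10_000_000;

        // Rebuild the DateTime the same way the T-SQL does: whole minutes first, then the remaining seconds
        DateTime fromArithmetic = DateTime.MinValue
            .AddMinutes(totalSeconds / 60)
            .AddSeconds(totalSeconds % 60);

        // The framework equivalent, for comparison
        DateTime fromTicks = new DateTime(storedTicks);

        Console.WriteLine(fromArithmetic); // second precision, matches the T-SQL result
        Console.WriteLine(fromTicks);      // full tick precision
    }
}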
Labels: .net, Database technical, Entity Framework, Framelog, Sql-Server, T-SQL
Wednesday, 12 March 2025
Rebuild indexes in all tables in Sql Server for a set of databases
Here is a convenient SQL script to rebuild all indexes in all tables across a set of databases in SQL Server.
RebuildAllDatabaseIndexesInSetOfDatabases.sql
/*
This script loops through all databases with names containing a specified prefix or matching a specific name.
For each database, it rebuilds indexes on all tables to improve performance by reducing fragmentation.
Variables:
- @SomeAcmeUnitDatabaseNamePrefix: The prefix to match database names.
- @SomeSpecialSharedDatabaseName: The name of a specific shared database to include in the loop.
- @DatabaseName: The name of the current database in the loop.
- @TableName: The name of the current table in the loop.
- @SQL: The dynamic SQL statement to execute.
Steps:
1. Declare the necessary variables.
2. Create a cursor to loop through the databases.
3. For each database, use another cursor to loop through all tables and rebuild their indexes.
4. Print progress messages for each database and table.
*/
DECLARE @SomeAcmeUnitDatabaseNamePrefix NVARCHAR(255) = 'SomeAcmeUnit';
DECLARE @SomeSpecialSharedDatabaseName NVARCHAR(255) = 'SomeAcmeShared';
DECLARE @DatabaseName NVARCHAR(255);
DECLARE @TableName NVARCHAR(255);
DECLARE @SQL NVARCHAR(MAX);
DECLARE @DatabaseCount INT;
DECLARE @CurrentDatabaseCount INT = 0;
DECLARE @TableCount INT;
DECLARE @CurrentTableCount INT = 0;
DECLARE @StartTime DATETIME2;
DECLARE @EndTime DATETIME2;
DECLARE @ElapsedTime NVARCHAR(100);
SET @StartTime = SYSDATETIME();
-- Get the total number of databases to process
SELECT @DatabaseCount = COUNT(*)
FROM sys.databases
WHERE [name] LIKE @SomeAcmeUnitDatabaseNamePrefix + '%' OR [name] = @SomeSpecialSharedDatabaseName;
DECLARE DatabaseCursor CURSOR FOR
SELECT [name]
FROM sys.databases
WHERE [name] LIKE @SomeAcmeUnitDatabaseNamePrefix + '%' OR [name] = @SomeSpecialSharedDatabaseName;
OPEN DatabaseCursor;
FETCH NEXT FROM DatabaseCursor INTO @DatabaseName;
WHILE @@FETCH_STATUS = 0
BEGIN
SET @CurrentDatabaseCount = @CurrentDatabaseCount + 1;
-- Switch database context so that sys.tables and ALTER INDEX target the current database in the loop
SET @SQL = N'USE ' + QUOTENAME(@DatabaseName) + N';
RAISERROR(''*****************************************************************'', 0, 1) WITH NOWAIT;
RAISERROR(''REBUILDING ALL INDEXES OF TABLES INSIDE DB: %s'', 0, 1, @DatabaseName) WITH NOWAIT;
RAISERROR(''*****************************************************************'', 0, 1) WITH NOWAIT;
DECLARE @TableName NVARCHAR(255);
DECLARE @SQL NVARCHAR(MAX);
-- Get the total number of tables to process in the current database
SELECT @TableCount = COUNT(*)
FROM sys.tables;
DECLARE TableCursor CURSOR FOR
SELECT QUOTENAME(SCHEMA_NAME(schema_id)) + ''.'' + QUOTENAME(name)
FROM sys.tables ORDER BY QUOTENAME(name) ASC;
OPEN TableCursor;
FETCH NEXT FROM TableCursor INTO @TableName;
WHILE @@FETCH_STATUS = 0
BEGIN
SET @CurrentTableCount = @CurrentTableCount + 1;
PRINT ''Rebuilding database indexes on table: '' + @TableName + ''... (DB: '' + CAST(@CurrentDatabaseCount AS NVARCHAR) + ''/'' + CAST(@DatabaseCount AS NVARCHAR) + '')'' + ''... (Table: '' + CAST(@CurrentTableCount AS NVARCHAR) + ''/'' + CAST(@TableCount AS NVARCHAR) + '')'';
--RAISERROR(''..Indexing (hit Ctrl+End to go to latest message inside this buffer)'',0,1) WITH NOWAIT;
SET @SQL = ''ALTER INDEX ALL ON '' + @TableName + '' REBUILD'';
EXEC sp_executesql @SQL;
FETCH NEXT FROM TableCursor INTO @TableName;
--PRINT ''Rebuilt database indexes on table: '' + @TableName + ''.'';
END;
CLOSE TableCursor;
DEALLOCATE TableCursor;';
EXEC sp_executesql @SQL,
N'@CurrentDatabaseCount INT, @DatabaseCount INT, @TableCount INT, @CurrentTableCount INT, @DatabaseName NVARCHAR(255)',
@CurrentDatabaseCount, @DatabaseCount, @TableCount, @CurrentTableCount, @DatabaseName;
FETCH NEXT FROM DatabaseCursor INTO @DatabaseName;
END;
SET @EndTime = SYSDATETIME()
-- Calculate the elapsed time
DECLARE @ElapsedMilliseconds INT = DATEDIFF(MILLISECOND, @StartTime, @EndTime);
-- Combine seconds and milliseconds
SET @ElapsedTime = CAST(@ElapsedMilliseconds / 1000 AS NVARCHAR) + '.' + RIGHT('000' + CAST(@ElapsedMilliseconds % 1000 AS NVARCHAR), 3);
-- Print the elapsed time using RAISERROR
RAISERROR('Total execution time: %s seconds', 0, 1, @ElapsedTime) WITH NOWAIT;
CLOSE DatabaseCursor;
DEALLOCATE DatabaseCursor;
The SQL script uses loops defined by T-SQL database cursors and the sys.databases and sys.tables system views to loop through all the databases matching the given prefix for 'unit databases', such as 'SomeAcmeUnit_0' and 'SomeAcmeUnit_1', plus the shared database 'SomeAcmeShared'.
This matches a setup with many databases, common in companies, where you want to rebuild all the indexes in all the tables across the whole set of databases. It is a handy way to be sure that every index has been rebuilt.
Labels: Database admin, DbAdmin, Performance, SQL Server, T-SQL
Sunday, 9 March 2025
Generating dropdowns for enums in Blazor
This article looks into generating dropdowns for enums in Blazor.
The repository for the source code listed in the article is here:
https://github.com/toreaurstadboss/DallEImageGenerationImgeDemoV4
First off, a helper class for enums that uses the InputSelect control. The helper class supports setting the display text for enum options via resource files using the Display attribute.
EnumHelper.cs | C# source code
using DallEImageGenerationImageDemoV4.Models;
using Microsoft.AspNetCore.Components;
using Microsoft.AspNetCore.Components.Forms;
using System.ComponentModel.DataAnnotations;
using System.Linq.Expressions;
using System.Resources;
namespace DallEImageGenerationImageDemoV4.Utility
{
public static class EnumHelper
{
public static RenderFragment GenerateEnumDropDown<TEnum>(
object receiver,
TEnum selectedValue,
Action<TEnum> valueChanged)
where TEnum : Enum
{
Expression<Func<TEnum>> onValueExpression = () => selectedValue;
var onValueChanged = EventCallback.Factory.Create<TEnum>(receiver, valueChanged);
return builder =>
{
// Set the selectedValue to the first enum value if it is not set
if (EqualityComparer<TEnum>.Default.Equals(selectedValue, default))
{
object? firstEnum = Enum.GetValues(typeof(TEnum)).GetValue(0);
if (firstEnum != null)
{
selectedValue = (TEnum)firstEnum;
}
}
builder.OpenComponent<InputSelect<TEnum>>(0);
builder.AddAttribute(1, "Value", selectedValue);
builder.AddAttribute(2, "ValueChanged", onValueChanged);
builder.AddAttribute(3, "ValueExpression", onValueExpression);
builder.AddAttribute(4, "class", "form-select"); // Adding Bootstrap class for styling
builder.AddAttribute(5, "ChildContent", (RenderFragment)(childBuilder =>
{
foreach (var value in Enum.GetValues(typeof(TEnum)))
{
childBuilder.OpenElement(6, "option");
childBuilder.AddAttribute(7, "value", value?.ToString());
childBuilder.AddContent(8, GetEnumOptionDisplayText(value)?.ToString()?.Replace("_", " ")); // Ensure the display text is clean
childBuilder.CloseElement();
}
}));
builder.CloseComponent();
};
}
/// <summary>
/// Retrieves the display text of an enum alternative
/// </summary>
private static string? GetEnumOptionDisplayText<T>(T value)
{
string? result = value!.ToString()!;
var displayAttribute = value
.GetType()
.GetField(value!.ToString()!)
?.GetCustomAttributes(typeof(DisplayAttribute), false)?
.OfType<DisplayAttribute>()
.FirstOrDefault();
if (displayAttribute != null)
{
if (displayAttribute.ResourceType != null && !string.IsNullOrWhiteSpace(displayAttribute.Name))
{
result = new ResourceManager(displayAttribute.ResourceType).GetString(displayAttribute!.Name!);
}
else if (!string.IsNullOrWhiteSpace(displayAttribute.Name))
{
result = displayAttribute.Name;
}
}
return result;
}
}
}
The following razor component shows how to use this helper.
<div class="form-group">
<label for="Quality" class="form-class fw-bold">GeneratedImageQuality</label>
@EnumHelper.GenerateEnumDropDown(this, homeModel.Quality,v => homeModel.Quality = v)
<ValidationMessage For="@(() => homeModel.Quality)" class="text-danger" />
</div>
<div class="form-group">
<label for="Size" class="form-label fw-bold">GeneratedImageSize</label>
@EnumHelper.GenerateEnumDropDown(this, homeModel.Size, v => homeModel.Size = v)
<ValidationMessage For="@(() => homeModel.Size)" class="text-danger" />
</div>
<div class="form-group">
<label for="Style" class="form-label fw-bold">GeneratedImageStyle</label>
@EnumHelper.GenerateEnumDropDown(this, homeModel.Style, v => homeModel.Style = v)
<ValidationMessage For="@(() => homeModel.Style)" class="text-danger" />
</div>
It would also be possible to make a component instead of such a helper method, passing a type parameter for the enum type.
The programmatic helper instead returns a RenderFragment. As the code shows, the returned builder uses the
RenderTreeBuilder to register the render tree: OpenComponent and CloseComponent wrap the InputSelect,
AddAttribute adds its attributes,
and a child builder emits the option values.
Sometimes it is easier to make such a helper class instead of a component. The downside is that it is a more manual process, similar to how MVC uses HtmlHelpers. Which is the better option, a component or such a RenderFragment helper, is not clear-cut, but it is a technique many Blazor developers should be aware of.
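For completeness, here is a minimal sketch of what an enum consumed by this helper could look like. The enum name and members are hypothetical and not taken from the demo repository; the Display attribute's Name (and, optionally, ResourceType) is what GetEnumOptionDisplayText reads when resolving the display text.
GeneratedImageQualityOption.cs | C# source code
using System.ComponentModel.DataAnnotations;

namespace DallEImageGenerationImageDemoV4.Models
{
    // Hypothetical enum for illustration; not part of the demo repository.
    public enum GeneratedImageQualityOption
    {
        // Plain display name, no resource lookup
        [Display(Name = "Standard quality")]
        Standard,

        // A resource-based display name would instead set ResourceType and Name,
        // e.g. [Display(ResourceType = typeof(SomeResourceClass), Name = "HighQuality")],
        // assuming such a resx-backed class exists in the project.
        [Display(Name = "High quality (HD)")]
        High
    }
}
When no Display attribute is present, the helper simply falls back to the enum member name.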
Generating images with the generative AI service Dall-e-3
This article presents code showing how to generate images using the DALL-E-3 image model.
The source code presented in this article can be cloned from my Github repo here:
https://github.com/toreaurstadboss/DallEImageGenerationImgeDemoV4
First, let's look at the following extension class that generates the image. The method returning a string will be used. In this sample code the image is requested in the response format Bytes; the returned BinaryData is then converted into a base-64 string. A browser can display base-64 image strings, and the DALL-E-3 service delivers images in PNG format.
DallEImageExtensions.cs | C# source code
using OpenAI.Images;
namespace DallEImageGenerationDemo.Utility
{
public static class DallEImageExtensions
{
/// <summary>
/// Generates an image from a description in <paramref name="imagedescription"/>
/// This uses OpenAI DALL-e-3 AI
/// </summary>
/// <param name="imageClient"></param>
/// <param name="imagedescription"></param>
/// <param name="options">Send in options for the image generation. If no options are sent, a 512x512 natural image in response format bytes will be returned</param>
/// <returns></returns>
public static async Task<GeneratedImage> GenerateDallEImageAsync(this ImageClient imageClient,
string imagedescription, ImageGenerationOptions? options = null)
{
options = options ?? new ImageGenerationOptions
{
Quality = GeneratedImageQuality.High,
Size = GeneratedImageSize.W1024xH1024,
Style = GeneratedImageStyle.Vivid,
};
options.ResponseFormat = GeneratedImageFormat.Bytes;
return await imageClient.GenerateImageAsync(imagedescription, options);
}
/// <summary>
/// Generates an image from a description in <paramref name="imagedescription"/>
/// This uses OpenAI DALL-e-3 AI. Base-64 string is extracted from the bytes in the image for easy display of
/// image inside a web application (e.g. Blazor WASM)
/// </summary>
/// <param name="imageClient"></param>
/// <param name="imagedescription"></param>
/// <param name="options">Send in options for the image generation. If no options are sent, a 512x512 natural image in response format bytes will be returned</param>
/// <returns></returns>
public static async Task<string> GenerateDallEImageB64StringAsync(this ImageClient imageClient,
string imagedescription, ImageGenerationOptions? options = null)
{
GeneratedImage generatedImage = await GenerateDallEImageAsync(imageClient, imagedescription, options);
string preamble = "data:image/png;base64,";
return $"{preamble}{Convert.ToBase64String(generatedImage.ImageBytes.ToArray())}";
}
}
}
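Before looking at how the ImageClient is wired up, here is a minimal usage sketch of the extension method above. The API key and the prompt are placeholders, not values from the demo, and a valid OpenAI key is of course required for the call to succeed.
using DallEImageGenerationDemo.Utility;
using OpenAI.Images;

// Minimal usage sketch; API key and prompt are placeholders.
var imageClient = new ImageClient("dall-e-3", "openai_api_key_inserted_here");

string imageBase64 = await imageClient.GenerateDallEImageB64StringAsync(
    "A cozy cabin by a Norwegian fjord at sunset");

// The returned string already includes the data:image/png;base64 preamble,
// so it can be bound directly to an <img> src attribute in a Razor page.
Console.WriteLine(imageBase64.Length);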
As we can see, a DALL-E-3 image is created using an OpenAI.Images.ImageClient. The ImageClient is set up in Program.cs, registered as a scoped service.
Program.cs | C# source code
builder.Services.AddScoped(sp =>
{
var config = sp.GetRequiredService<IConfiguration>();
string imageModelName = "dall-e-3";
return new ImageClient(imageModelName, config["OpenAI:DallE3:ApiKey"]);
});
The API key for DALL-E-3 is set up in the appsettings.json file.
appsettings.json | .json file
{
"OpenAI": {
"DallE3": {
"ApiKey": "openapi_api_key_inserted_here"
},
"ChatGpt4": {
"ApiKey": "chat_gpt_4_api_key_inserted_here",
"Endpoint": "chat_gpt_4_endpoint_inserted_here"
}
}
}
To generate suggestions for what image to create, we can use GPT-4. Here is the code that builds an OpenAI-enabled ChatClient.
OpenAIChatClientBuilder.cs | C# source code
using Azure.AI.OpenAI;
using OpenAI.Chat;
using System.ClientModel;
namespace DallEImageGenerationImageDemoV4.Utility
{
/// <summary>
/// Creates AzureOpenAIClient or ChatClient (default ai model (LLM) is set to "gpt-4")
/// </summary>
public class OpenAIChatClientBuilder(IConfiguration configuration)
{
private string? _endpoint = null;
private ApiKeyCredential? _key = null;
private readonly IConfiguration _configuration = configuration;
/// <summary>
/// Set the endpoint for Open AI Chat GPT-4 chat client. Defaults to config setting 'ChatGpt4:Endpoint' inside the appsettings.json file
/// </summary>
public OpenAIChatClientBuilder WithEndpoint(string? endpoint = null)
{
_endpoint = endpoint ?? _configuration["OpenAI:ChatGpt4:Endpoint"];
return this;
}
/// <summary>
/// Set the key for Open AI Chat GPT-4 chat client. Defaults to config setting 'ChatGpt4:ApiKey' inside the appsettings.json file
/// </summary>
public OpenAIChatClientBuilder WithApiKey(string? key = null)
{
string? keyToUse = key ?? _configuration["OpenAI:ChatGpt4:ApiKey"];
if (!string.IsNullOrWhiteSpace(keyToUse))
{
_key = new ApiKeyCredential(keyToUse!);
}
return this;
}
/// <summary>
/// In case the derived AzureOpenAIClient is to be used, use this Build method to get a specific AzureOpenAIClient
/// </summary>
/// <returns></returns>
public AzureOpenAIClient? BuildAzureOpenAIClient() => !string.IsNullOrWhiteSpace(_endpoint) && _key != null ? new AzureOpenAIClient(new Uri(_endpoint), _key) : null;
/// <summary>
/// Returns the ChatClient that is set up to use OpenAI. The default AI model (LLM) is 'gpt-4'.
/// </summary>
/// <returns></returns>
public ChatClient? Build(string aiModel = "gpt-4") => BuildAzureOpenAIClient()?.GetChatClient(aiModel);
}
}
We create the builder for the chat client through a factory, so that IConfiguration can first be obtained via dependency injection and then passed to the builder of the OpenAI-enabled chat client.
OpenAiChatClientBuilderFactory.cs | C# source code
namespace DallEImageGenerationImageDemoV4.Utility
{
public class OpenAiChatClientBuilderFactory : IOpenAiChatClientBuilderFactory
{
private readonly IConfiguration _configuration;
public OpenAiChatClientBuilderFactory(IConfiguration configuration)
{
_configuration = configuration;
}
public OpenAIChatClientBuilder Create()
{
var openAiChatClient = new OpenAIChatClientBuilder(_configuration)
.WithApiKey()
.WithEndpoint();
return openAiChatClient;
}
}
}
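The IOpenAiChatClientBuilderFactory interface itself is not listed in the article; a minimal sketch of what it presumably looks like (an assumption, it only needs to expose Create) is:
IOpenAiChatClientBuilderFactory.cs | C# source code
namespace DallEImageGenerationImageDemoV4.Utility
{
    // Assumed shape of the factory interface (not shown in the article)
    public interface IOpenAiChatClientBuilderFactory
    {
        OpenAIChatClientBuilder Create();
    }
}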
The factory is registered into Program.cs
Program.cs | C# source code
builder.Services.AddSingleton<IOpenAiChatClientBuilderFactory, OpenAiChatClientBuilderFactory>();
A helper method is also added to get a streamed reply.
OpenAIChatClientExtensions.cs | C# source code
using OpenAI.Chat;
using System.ClientModel;
namespace OpenAIDemo
{
public static class OpenAIChatClientExtensions
{
/// <summary>
/// Provides a stream result from the Chatclient service using AzureAI services.
/// </summary>
/// <param name="chatClient">ChatClient instance</param>
/// <param name="message">The message to send and communicate to the ai-model</param>
/// <param name="systemMessage">Set the system message to instruct the chat response. Defaults to 'You are an helpful, wonderful AI assistant'.</param>
/// <returns>Streamed chat reply / result. Consume using 'await foreach'</returns>
public static async IAsyncEnumerable<string?> GetStreamedReplyStringAsync(this ChatClient chatClient, string message, string? systemMessage = null)
{
await foreach (var update in GetStreamedReplyInnerAsync(chatClient, message, systemMessage))
{
foreach (var textReply in update.ContentUpdate.Select(cu => cu.Text))
{
yield return textReply;
}
}
}
private static AsyncCollectionResult<StreamingChatCompletionUpdate> GetStreamedReplyInnerAsync(this ChatClient chatClient, string message, string? systemMessage = null) =>
chatClient.CompleteChatStreamingAsync(
[new SystemChatMessage(systemMessage ?? "You are an helpful, wonderful AI assistant"), new UserChatMessage(message)]);
}
}
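As a quick illustration, here is a minimal console-style sketch of using the builder and the streaming extension together. The endpoint, key, and prompt are placeholders matching the appsettings.json keys shown earlier; the Blazor code-behind below shows the full usage in the demo.
using DallEImageGenerationImageDemoV4.Utility;
using Microsoft.Extensions.Configuration;
using OpenAI.Chat;
using OpenAIDemo;

// Minimal sketch; configuration values are placeholders, not real credentials.
IConfiguration configuration = new ConfigurationBuilder()
    .AddInMemoryCollection(new Dictionary<string, string?>
    {
        ["OpenAI:ChatGpt4:Endpoint"] = "chat_gpt_4_endpoint_inserted_here",
        ["OpenAI:ChatGpt4:ApiKey"] = "chat_gpt_4_api_key_inserted_here"
    })
    .Build();

ChatClient? chatClient = new OpenAIChatClientBuilder(configuration)
    .WithEndpoint()
    .WithApiKey()
    .Build();

if (chatClient is not null)
{
    // Stream the reply chunk by chunk, same pattern as in the Blazor code-behind below
    await foreach (string? part in chatClient.GetStreamedReplyStringAsync("Suggest a scenic place in Norway"))
    {
        Console.Write(part);
    }
}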
Here is the client-side code in the code-behind file of the page that displays the DALL-E-3 image and the OpenAI GPT-4 response.
using DallEImageGenerationDemo.Components.Pages;
using DallEImageGenerationDemo.Utility;
using DallEImageGenerationImageDemoV4.Models;
using DallEImageGenerationImageDemoV4.Utility;
using Microsoft.AspNetCore.Components;
using Microsoft.JSInterop;
using OpenAI.Images;
using OpenAIDemo;
namespace DallEImageGenerationImageDemoV4.Pages;
public partial class Home : ComponentBase
{
[Inject]
public required IConfiguration Config { get; set; }
[Inject]
public required IJSRuntime JSRuntime { get; set; }
[Inject]
public required ImageClient DallEImageClient { get; set; }
[Inject]
public required IOpenAiChatClientBuilderFactory OpenAIChatClientFactory { get; set; }
private readonly HomeModel homeModel = new();
private bool IsLoading { get; set; }
private string ImageData { get; set; } = string.Empty;
private const string modelName = "dall-e-3";
protected async Task HandleGenerateText()
{
var openAiChatClient = OpenAIChatClientFactory
.Create()
.Build();
if (openAiChatClient == null)
{
await JSRuntime.InvokeAsync<string>("alert", "Sorry, the OpenAI Chat client did not initiate properly. Cannot generate text.");
return;
}
string description = """
You are specifying instructions for generating a DALL-e-3 image.
Do not always choose Bergen! Also choose among smaller cities, villages and different locations in Norway.
Just generate one image, not a montage. Only provide one suggestion.
The suggestion should be based from this input and randomize what to display:
Suggests a cozy vivid location set in Norway showing outdoor scenery in good weather at different places
and with nice weather aimed to attract tourists. Note - it should also display both urban,
suburban or nature scenery with a variance of which of these three types of locations to show.
It should also include some Norwegian animals and flowers and show people. It should pick random cities and places in Norway to display.
""";
homeModel.Description = string.Empty;
await foreach (var updateContentPart in openAiChatClient.GetStreamedReplyStringAsync(description))
{
homeModel.Description += updateContentPart;
StateHasChanged();
await Task.Delay(20);
}
}
protected async Task HandleValidSubmit()
{
IsLoading = true;
string generatedImageBase64 = await DallEImageClient.GenerateDallEImageB64StringAsync(homeModel.Description!,
new ImageGenerationOptions
{
Quality = MapQuality(homeModel.Quality),
Style = MapStyle(homeModel.Style),
Size = MapSize(homeModel.Size)
});
ImageData = generatedImageBase64;
if (!string.IsNullOrWhiteSpace(ImageData))
{
// Open the modal
await JSRuntime.InvokeVoidAsync("showModal", "imageModal");
}
IsLoading = false;
StateHasChanged();
}
private static GeneratedImageSize MapSize(ImageSize size) => size switch
{
ImageSize.W1024xH1792 => GeneratedImageSize.W1024xH1792,
ImageSize.W1792H1024 => GeneratedImageSize.W1792xH1024,
_ => GeneratedImageSize.W1024xH1024,
};
private static GeneratedImageStyle MapStyle(ImageStyle style) => style switch
{
ImageStyle.Vivid => GeneratedImageStyle.Vivid,
_ => GeneratedImageStyle.Natural
};
private static GeneratedImageQuality MapQuality(ImageQuality quality) => quality switch
{
ImageQuality.High => GeneratedImageQuality.High,
_ => GeneratedImageQuality.Standard
};
}
Finally, a screenshot of the app!
The app can also be used on a mobile device, as it uses Bootstrap 5 and responsive design.
Sunday, 16 February 2025
Outputting tags/objects using Azure AI
This article presents a way to output tags for an image to the console. Azure AI is used, more specifically the ImageAnalysisClient.
The article shows how you can expose the data as an IAsyncEnumerable, so you can consume it with await foreach. I would recommend this approach
for many services in Azure AI (and similar) where there is no out-of-the-box support for async enumerables: hide away the details in a helper extension method as shown in this article.
First, here is a method that creates the ImageAnalysisClient and consumes the tags using await foreach:
// Reads the Azure AI Vision key and endpoint from environment variables, creates the client and outputs the tags
public static async Task ExtractImageTagsAsync()
{
string visionApiKey = Environment.GetEnvironmentVariable("VISION_KEY")!;
string visionApiEndpoint = Environment.GetEnvironmentVariable("VISION_ENDPOINT")!;
var credentials = new AzureKeyCredential(visionApiKey);
var serviceUri = new Uri(visionApiEndpoint);
var imageAnalysisClient = new ImageAnalysisClient(serviceUri, credentials);
await foreach (var tag in imageAnalysisClient.ExtractImageTagsAsync("Images/Store.png"))
{
Console.WriteLine(tag);
}
}
The code creates an ImageAnalysisClient, defined in the Azure.AI.Vision.ImageAnalysis NuGet package. Two environment variables are used here to store the key and the endpoint.
Note that not all Azure AI features are available in all regions. If you just want to test out some Azure AI features, you can start with the US East region, as that region
will most likely have all the features you want to test; then you can switch to a more local region if you are planning to run more workloads using Azure AI.
Then an await foreach pattern is used to extract the image tags asynchronously. This is a custom extension method I created so I can output the tags using await foreach and also specify a wait time between each tag being output, defaulting to 200 milliseconds here.
The extension method looks like this:
using Azure.AI.Vision.ImageAnalysis;
namespace UseAzureAIServicesFromNET.Vision;
public static class ImageAnalysisClientExtensions
{
/// <summary>
/// Extracts the tags for image at specified path, if existing.
/// The results are returned as async enumerable of strings.
/// </summary>
/// <param name="client"></param>
/// <param name="imagePath"></param>
/// <param name="waitTimeInMsBetweenOutputTags">Default wait time in ms between output. Defaults to 200 ms.</param>
/// <returns></returns>
public static async IAsyncEnumerable<string?> ExtractImageTagsAsync(this ImageAnalysisClient client,
string imagePath, int waitTimeInMsBetweenOutputTags = 200)
{
if (!File.Exists(imagePath))
{
yield return default(string); //just return null if a file is not found at provided path
yield break; //stop here, there is nothing to analyze
}
using FileStream imageStream = new FileStream(imagePath, FileMode.Open);
var analysisResult =
await client.AnalyzeAsync(BinaryData.FromStream(imageStream), VisualFeatures.Tags | VisualFeatures.Caption);
yield return $"Description: {analysisResult.Value.Caption.Text}";
foreach (var tag in analysisResult.Value.Tags.Values)
{
yield return $"Tag: {tag.Name}, Confidence: {tag.Confidence:F2}";
await Task.Delay(waitTimeInMsBetweenOutputTags);
}
}
}
The console output of the tags looks like this:
In addition to tags, we can also output objects in the image in a very similar extension method:
/// <summary>
/// Extracts the objects for image at specified path, if existing.
/// The results are returned as async enumerable of strings.
/// </summary>
/// <param name="client"></param>
/// <param name="imagePath"></param>
/// <param name="waitTimeInMsBetweenOutputTags">Default wait time in ms between output. Defaults to 200 ms.</param>
/// <returns></returns>
public static async IAsyncEnumerable<string?> ExtractImageObjectsAsync(this ImageAnalysisClient client,
string imagePath, int waitTimeInMsBetweenOutputTags = 200)
{
if (!File.Exists(imagePath))
{
yield return default(string); //just return null if a file is not found at provided path
yield break; //stop here, there is nothing to analyze
}
using FileStream imageStream = new FileStream(imagePath, FileMode.Open);
var analysisResult =
await client.AnalyzeAsync(BinaryData.FromStream(imageStream), VisualFeatures.Objects | VisualFeatures.Caption);
yield return $"Description: {analysisResult.Value.Caption.Text}";
foreach (var objectInImage in analysisResult.Value.Objects.Values)
{
yield return $"""
Object tag: {objectInImage.Tags.FirstOrDefault()?.Name} Confidence: {objectInImage.Tags.FirstOrDefault()?.Confidence},
Position (bbox): {objectInImage.BoundingBox}
""";
await Task.Delay(waitTimeInMsBetweenOutputTags);
}
}
The code is nearly identical; we set the VisualFeatures to extract from the image and read out the objects (not the tags).
The console output of the objects looks like this:
Labels: AI, AsyncEnumerable, Azure AI, c#, Computer Vision, dotnet 8, IAsyncEnumerable, ImageAnalysis
Monday, 27 January 2025
Chinese New Year in .NET
新年快乐
Chinese New Year 2025 is approaching! This year it falls on 29 January. This article presents methods for calculating Chinese New Year in .NET. There is quite a bit of calculation involved; luckily .NET has a helper class for exactly this. Xīn nián kuài lè: Xīn - new, nián - year, kuài lè - happy. Pronunciation is roughly: "Xin nien kwai le".
Calculating Chinese New Year
In this article we will look at how to calculate Chinese New Year in .NET! The calculation is intricate: Chinese New Year is defined as the second new moon after the winter solstice, so it usually falls between 21 January and 20 February and therefore varies from year to year. In .NET we have a class called ChineseLunisolarCalendar which can convert to our Gregorian calendar, in use in the West since
1582. (Some countries, such as Russia, Serbia and Ethiopia, still use the Julian calendar in certain religious contexts.) Note: this class can calculate Chinese New Year only up to and including the year 2100, which is a good way off. But in 76 years the source code here will probably have to change! Perhaps a "V2" of this class? Let us look at the calculation itself in .NET.
ChineseCalendarUtils.cs
/// <summary>
/// Provides methods to calculate the date of Chinese New Year.
/// </summary>
public class ChineseNewYearCalculator
{
/// <summary>
/// Gets the date of Chinese New Year for a given year.
/// </summary>
/// <param name="year">The Gregorian year.</param>
/// <returns>The date of Chinese New Year as a <see cref="DateTime"/>.</returns>
public static DateTime GetChineseNewYear(int year)
{
System.Globalization.ChineseLunisolarCalendar chinese = new ChineseLunisolarCalendar();
DateTime chineseNewYear = chinese.ToDateTime(year, 1, 1, 0, 0, 0, 0);
return chineseNewYear;
}
}
Here we use ChineseLunisolarCalendar and its ToDateTime method for the first day of the first month of the specified year. This performs a transformation that gives us Chinese New Year expressed in our Western Gregorian calendar.
The next 10 years of Chinese New Year
| Year | Date | Animal |
|---|---|---|
| 2025 | January 29 | Snake |
| 2026 | February 17 | Horse |
| 2027 | February 6 | Goat |
| 2028 | January 26 | Monkey |
| 2029 | February 13 | Rooster |
| 2030 | February 3 | Dog |
| 2031 | January 23 | Pig |
| 2032 | February 11 | Rat |
| 2033 | January 31 | Ox |
| 2034 | February 19 | Tiger |
The Chinese twelve-year cycle
The Chinese twelve-year cycle stems from a myth (being a myth, it is not placed at any specific time or location, but it dates from very ancient China) in which the Jade Emperor Yù Huáng Dà Dì (玉皇大帝) invited all the animals of the kingdom to a race across a river. The first 12 animals to cross the river each got a year named after them in the Chinese 12-year zodiac cycle. Calculating the Chinese year, that is, the animal the year is "dedicated" to, is then a simple modulo-12 calculation, as shown below.
ChineseCalendarUtils.cs
string GetChineseZodiac(int year)
{
string[] zodiacAnimals =
{
"Rat", "Ox", "Tiger", "Rabbit", "Dragon", "Snake",
"Horse", "Goat", "Monkey", "Rooster", "Dog", "Pig"
};
int index = (year - 4) % 12;
return zodiacAnimals[index];
}
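As a small illustration, the table above can be reproduced by combining the two methods from this article, GetChineseNewYear and GetChineseZodiac, in a simple loop. A minimal sketch (assuming both methods are in scope; the date format is culture-dependent):
// Minimal sketch: reproduces the table above using the two methods shown in this article
for (int year = 2025; year <= 2034; year++)
{
    DateTime newYear = ChineseNewYearCalculator.GetChineseNewYear(year); // Gregorian date of Chinese New Year
    string animal = GetChineseZodiac(year);                              // zodiac animal for that year
    Console.WriteLine($"{year}: {newYear:MMMM d} - {animal}");
}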
The Jade Emperor Yù Huáng Dà Dì (玉皇大帝) and his zodiac wheel (DALL-E 3 generated art)