Saturday, 3 December 2016

Parallel execution with threads in C# - Old Stars Finder

This article presents parallel execution of threads in C# to find old stars in the star-forming region known as W5, in the constellation Cassiopeia, imaged by the Spitzer Space Telescope. The code is from the book "C# Multithreaded and Parallel Programming" by Rodney Ringler et al., published by Packt Publishing.

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Threading;
using System.Drawing.Imaging;


namespace OldStarsFinder
{
    public partial class Form1 : Form
    {

        //The number of processors or cores available in the computer for this application
        private int priProcessorCount = Environment.ProcessorCount;
        //The bitmaps list
        private List<Bitmap> prloBitmapList;
        //The long list with the old stars count 
        private List<long> prliOldStarsCount;
        //The threads list
        private List<Thread> prloThreadList;
        //The original huge infrared bitmap portrait
        Bitmap proOriginalBitmap; 

        public Form1()
        {
            InitializeComponent();
        }

        public bool IsOldStar(Color poPixelColor)
        {
            //Hue between 150 and 258
            //Saturation more than 0.10 
            //Brightness more than 0.90
            return ((poPixelColor.GetHue() >= 150 && (poPixelColor.GetHue() <= 258)) &&
                (poPixelColor.GetSaturation() >= 0.10) &&
                (poPixelColor.GetBrightness() >= 0.90)); 
        }

        private Bitmap CropBitmap(Bitmap proBitmap, Rectangle proRectangle)
        {
            //Create a new bitmap copying the portion of the original defined by proRectangle and keeping the PixelFormat 
            var loCroppedBitmap = proBitmap.Clone(proRectangle, proBitmap.PixelFormat);
            //Return the cropped bitmap 
            return loCroppedBitmap;
        }

        private void ThreadOldStarsFinder(object poThreadParameter)
        {
            //Retrieve the thread number received in object poThreadParameter 
            int liThreadNumber = (int) poThreadParameter;
            //The pixel matrix (bitmap) row number (Y)
            int liRow;
            //The pixel matrix (bitmap) column number (X)
            int liCol;
            //The pixel color 
            Color loPixelColor;
            //Get my bitmap part from the bitmap list 
            Bitmap loBitmap = prloBitmapList[liThreadNumber];

            //Reset my old stars counter 
            prliOldStarsCount[liThreadNumber] = 0;
            //Iterate through each pixel matrix (bitmap) row 
            for (liRow = 0; liRow < loBitmap.Height; liRow++)
            {
                //Iterate through each pixel matrix (bitmap) column 
                for(liCol = 0; liCol < loBitmap.Width; liCol++)
                {
                    //Get the pixel color for liCol and liRow 
                    loPixelColor = loBitmap.GetPixel(liCol, liRow);
                    //Check whether the pixel color corresponds to an old star 
                    if (IsOldStar(loPixelColor))
                    {
                        //The color range corresponds to an old star
                        //Change its color to a pure blue 
                        loBitmap.SetPixel(liCol, liRow, Color.Blue);
                        //Increase the old stars counter 
                        prliOldStarsCount[liThreadNumber]++;
                    }
                    else
                    {
                        loBitmap.SetPixel(liCol, liRow, Color.FromArgb(128, loPixelColor));
                    }
                }
            }
            //Simulate heavy processing
            Random rnd = new Random();
            Thread.Sleep(rnd.Next(2000, 2500)); 
        }

        private void WaitForThreadsToDie()
        {
            //A bool flag 
            bool lbContinue = true;
            int liDeadThreads = 0;
            int liThreadNumber;
            while (lbContinue)
            {
                for(liThreadNumber = 0; liThreadNumber < priProcessorCount; liThreadNumber++)
                {
                    if (prloThreadList[liThreadNumber].IsAlive)
                    {
                        //One of the threads is still alive
                        //exit the for loop and sleep 100 milliseconds 
                        break;
                    }
                    else
                    {
                        //Increase the dead threads count 
                        liDeadThreads++;

                        progressBar1.Value = (int) ((liDeadThreads * 1.0 / priProcessorCount * 1.0) * 100.0);
                    }
                }

                if (liDeadThreads == priProcessorCount)
                {
                    //All the threads are dead, exit the while loop 
                    break; 
                }
                Thread.Sleep(100);
                liDeadThreads = 0; 
            }
        }

        private void ShowBitmapWithOldStars()
        {
            int liThreadNumber;
            //Each bitmap portion 
            Bitmap loBitmap;
            //The starting row in each iteration 
            int liStartRow = 0;

            //Calculate each bitmap's height 
            int liEachBitmapHeight = ((int) (proOriginalBitmap.Height / priProcessorCount)) + 1;

            //Create a new bitmap with the whole width and height 
            loBitmap = new Bitmap(proOriginalBitmap.Width, proOriginalBitmap.Height);
            Graphics g = Graphics.FromImage((Image) loBitmap);
            g.InterpolationMode = System.Drawing.Drawing2D.InterpolationMode.HighQualityBicubic;

            for (liThreadNumber = 0; liThreadNumber < priProcessorCount; liThreadNumber++)
            {
                //Draw each portion in its corresponding absolute starting row 
                g.DrawImage(prloBitmapList[liThreadNumber], 0, liStartRow);
                //Increase the starting row 
                liStartRow += liEachBitmapHeight;        
            }

            //Show the bitmap in the PictureBox picStarsBitmap 
            picStarsBitmap.Image = loBitmap;

            g.Dispose(); 
        }

        private void butFindOldStars_Click(object sender, EventArgs e)
        {
            progressBar1.Visible = true;

            proOriginalBitmap = new Bitmap(pictureBox1.Image);

            //Thread number 
            int liThreadNumber;
            //Create the thread list, the long list and the bitmap list 
            prloThreadList = new List<Thread>(priProcessorCount);
            prliOldStarsCount = new List<long>(priProcessorCount);
            prloBitmapList = new List<Bitmap>(priProcessorCount);

            int liStartRow = 0;

            int liEachBitmapHeight = ((int) (proOriginalBitmap.Height / priProcessorCount)) + 1;

            int liHeightToAdd = proOriginalBitmap.Height;
            Bitmap loBitmap; 

            //Initialize the threads 

            for (liThreadNumber = 0; liThreadNumber < priProcessorCount; liThreadNumber++)
            {
                //Just to occupy the number 
                prliOldStarsCount.Add(0); 

                if (liEachBitmapHeight > liHeightToAdd)
                {
                    //The last bitmap's height may be smaller than the others' height
                    liEachBitmapHeight = liHeightToAdd; 
                }

                loBitmap = CropBitmap(proOriginalBitmap, new Rectangle(0, liStartRow, proOriginalBitmap.Width, liEachBitmapHeight));
                liHeightToAdd -= liEachBitmapHeight;
                liStartRow += liEachBitmapHeight;
                prloBitmapList.Add(loBitmap);

  

                //Add the new thread, with a parameterized start (to allow parameters)
                prloThreadList.Add(new Thread(new ParameterizedThreadStart(ThreadOldStarsFinder))); 
            }

            //Now, start the threads
            for (liThreadNumber = 0; liThreadNumber < priProcessorCount; liThreadNumber++)
            {
                prloThreadList[liThreadNumber].Start(liThreadNumber); 
            }

            WaitForThreadsToDie();

            ShowBitmapWithOldStars();

            progressBar1.Visible = false;
        }

    }
}

The code is available as a Windows Forms application in a Visual Studio 2015 solution, available for download (zip) below:
Old Stars Finder (.zip) W5 image (NASA website): W5 image

Sunday, 27 November 2016

Making a simple accordion in Bootstrap 3


Creating an accordion for a web site is a breeze with Bootstrap 3. By just including the Bootstrap CSS and JavaScript files together with jQuery, we can start building an accordion. An accordion is a menu that shows one menu item at a time. The menu items are panels, so an accordion resembles a tab control with tabs, but the panels are stacked vertically by default. The following HTML page renders a simple accordion with Bootstrap, CSS, JavaScript and HTML.

<!DOCTYPE html>
<html>
<head>
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">
  <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
  <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>
</head>
<body>

<div class="container">

  <div id="accordion" class="panel-group">
  
   <div class="panel panel-success">
    <div class="panel-heading">
     <h4 class="panel-title"><a href="#collapse1" data-toggle="collapse" data-parent="#accordion">Collapsible panel 1</a></h4>
    </div>
    <div id="collapse1" class="panel-collapse collapse">
     <div class="panel-body"><div class="well">This is a nice collapsible panel.</div><p>This is a test.</p></div>
     <div class="panel-footer">Panel footer</div>
    </div>   
   </div>
   
    <div class="panel panel-warning">
    <div class="panel-heading">
     <h4 class="panel-title"><a href="#collapse2" data-toggle="collapse" data-parent="#accordion">Collapsible panel 2</a></h4>
    </div>
    <div id="collapse2" class="panel-collapse collapse">
     <div class="panel-body">This is another nice panel</div>
     <div class="panel-footer">Hey a panel footer too!</div>
    </div>   
   </div>
   
    <div class="panel panel-default">
    <div class="panel-heading">
     <h4 class="panel-title"><a href="#collapse3" data-toggle="collapse" data-parent="#accordion">Collapsible panel 3</a></h4>
    </div>
    <div id="collapse3" class="panel-collapse collapse">
     <div class="panel-body">Omg a third panel!</div>
     <div class="panel-footer">Let's have another Panel footer too!</div>
    </div>   
   </div>
  
  </div>  
  
</div>

</body>
</html>

Note that we use the data-parent attribute to point to the parent element, which gives the accordion effect of only showing one panel at a time. Each panel consists of a panel heading containing a panel title, followed by a panel body and finally a panel footer. We use the Bootstrap CSS framework to achieve this.

Sunday, 6 November 2016

Twitter Bootstrap 3 Autocomplete control for MVC 5 - Typeahead.js



This article presents a reusable control for MVC applications using the Twitter Bootstrap autocomplete feature, known as the Twitter Bootstrap typeahead. There are a lot of articles covering this topic on the Internet; my version presents a simple reusable control built as an MVC HTML helper that generates an input field of type text and one of type hidden, i.e. a textbox and a hidden field to hold the selected value. The MVC model binder is therefore able to bind the value selected in the autocomplete-enabled textbox to the target property you specify. Your autocomplete needs will of course vary, but the control described in this article should suit many developers' needs: it calls a controller action to get the data, filters the dropdown and uses Twitter's Bloodhound suggestion engine to mark up the matches quite nicely. The end result is a very useful and nicely styled autocomplete textbox that also supports keyboard navigation with the arrow keys and Enter. Read on! Let's first look at the MVC HTML helper itself:

using System;
using System.Linq.Expressions;
using System.Web.Mvc;

namespace TwitterBootstrapAutoCompleteControl.HtmlHelpers
{

    public static class CustomMvcHelpers
    {

        public static MvcHtmlString AutoCompleteFor<TModel, TResult>(this HtmlHelper<TModel> htmlHelper,
            Expression<Func<TModel, TResult>> propertyToSet, string fetchUrl)
        {
            var metaData = ModelMetadata.FromLambdaExpression(propertyToSet, htmlHelper.ViewData);
            string propertyName = metaData.PropertyName;
            string jsComponent = string.Format(
            @"
              <input type ='hidden' id='{0}' />   
              <input type='text' id='{1}' class='typeahead form-control' placeholder='Search some values' />             
              <script type='text/javascript'> 
              <!-- AutoCompleteFor -->
              $(function() {{

                var suggestionEngine = new Bloodhound({{
                limit: 300,
                datumTokenizer: function(datum) {{
                    return Bloodhound.tokenizers.whitespace(datum.Text);
                }},
                queryTokenizer: Bloodhound.tokenizers.whitespace,
                remote:
                {{
                    url: '{2}',
                    filter: function(response) {{
                    var matches = [];
                    $.map(response, function(item) {{
                            var query = $('#{1}').val().toLowerCase();
                            var itemKey = item.Text.toLowerCase();                          
                            if (itemKey.indexOf(query) >= 0)
                            {{
                                matches.push(item);
                                //console.log(item);
                            }}
                        }});

                        return matches;
                    }}
                }}
            }});

            suggestionEngine.initialize();

            $('#{1}').typeahead({{
                hint: true,
                highlight: true,
                minLength: 1,           
        }}   , {{
                limit: 30,
                displayKey: 'Text',
                source: suggestionEngine.ttAdapter(),
                filter: function(data) {{
                    console.log(data);
                    return data;
                }},            
                templates:
                {{
                    suggestion: function(data) {{
                        return '<p>' + data.Text + '</p>';
                    }},
                empty: [
                '<div>',
                'No results matching',
                '</div>'
                ].join('\n'),
            }}
            }});

            $('#{1}').bind('typeahead:select', function(ev, suggestion) {{
             //console.log('Selection: ' + suggestion.Text);
         
            $('#{0}').val(suggestion.Id); 

           }});

          }});

        </script>

     ", propertyName, propertyName + "TextBox", fetchUrl);


            return MvcHtmlString.Create(jsComponent);
        }

    }

}

The MVC HTML helper generates the HTML and JavaScript required to produce a textbox and a hidden field with the autocomplete feature. Your MVC solution needs to include both jQuery and Twitter Bootstrap, plus the Twitter typeahead.js Nuget package. In addition, you need to include the Bloodhound JavaScript file. Let's look at a controller action returning Json data to our HTML helper, which uses JavaScript to call that action:

        public ActionResult SomeData()
        {
            var countries = new List<IdTextItem>
            {
            new IdTextItem {Id = "US", Text = "United States"},
            new IdTextItem {Id = "CA", Text = "Canada"},
            new IdTextItem {Id = "AF", Text = "Afghanistan"},
            new IdTextItem {Id = "AL", Text = "Albania"},
            new IdTextItem {Id = "DZ", Text = "Algeria"},
            new IdTextItem {Id = "DS", Text = "American Samoa"},
            new IdTextItem {Id = "AD", Text = "Andorra"},
            new IdTextItem {Id = "AO", Text = "Angola"},
            new IdTextItem {Id = "AI", Text = "Anguilla"},
            new IdTextItem {Id = "AQ", Text = "Antarctica"},
            new IdTextItem {Id = "AG", Text = "Antigua and/or Barbuda"}
            };

            return Json(countries, JsonRequestBehavior.AllowGet);

        }

The Json method returns a JsonResult that is called once by this HTML helper; we filter the data on the client, as can be seen in the HTML helper code. Let's look at the script bundle added in BundleConfig:

   bundles.Add(new ScriptBundle("~/bundles/typeahead").Include(
                "~/Scripts/bloodhound.js",
                "~/Scripts/typeahead.bundle.js"
                ));

This HTML helper fetches the data on load, while the text the user types filters the autocomplete list on the client. If you need to pass a text parameter in your fetchUrl, the helper's filtering can most likely be adjusted; also look into the prefetch property of typeahead. We also move the jQuery bundle to the top, since the default MVC template places this bundle in the footer and not in the header, which the typeahead feature requires. Place this in the <head> section of _Layout.cshtml: @Scripts.Render("~/bundles/jquery") And at the bottom of the <body> element of that same file: @Scripts.Render("~/bundles/typeahead") The HTML helper expects that your class contains the properties Id and Text. You can of course use an anonymous type to avoid creating a new type in your C# code, and the Id could be a string or an integer.
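As a hedged sketch (reusing the SomeData action name from above), the action could just as well return anonymous objects, since the JavaScript only cares about the Id and Text members of the JSON:

        public ActionResult SomeData()
        {
            //Anonymous objects only need to expose Id and Text,
            //matching what the typeahead script reads on the client
            var countries = new[]
            {
                new { Id = "US", Text = "United States" },
                new { Id = "CA", Text = "Canada" }
            };
            return Json(countries, JsonRequestBehavior.AllowGet);
        }

Otherwise, a small DTO such as the IdTextItem class below works fine: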

 public class IdTextItem
    {

        public string Id { get; set; }

        public string Text { get; set; }

    }

We also make some adjustments to the style, since the default look of the typeahead is rather plain. Add the following CSS styling to your view - or better - put it in a standalone CSS file included from _Layout.cshtml. Save the following content into a typeahead.css file which you place in the Content folder:
 

.tt-query {
  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
     -moz-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
          box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
}

.tt-hint {
  color: #999
}

.tt-menu {    /* used to be tt-dropdown-menu in older versions */
  width: 422px;
  margin-top: 4px;
  cursor: pointer;
  padding: 4px 0;
  background-color: #fff;
  border: 1px solid #ccc;
  border: 1px solid rgba(0, 0, 0, 0.2);
  -webkit-border-radius: 4px;
     -moz-border-radius: 4px;
          border-radius: 4px;
  -webkit-box-shadow: 0 5px 10px rgba(0,0,0,.2);
     -moz-box-shadow: 0 5px 10px rgba(0,0,0,.2);
          box-shadow: 0 5px 10px rgba(0,0,0,.2);
}

.tt-suggestion {
  padding: 3px 20px;
  line-height: 24px;
}

.tt-suggestion.tt-cursor,.tt-suggestion:hover {
  color: #fff;
  background-color: #0097cf;

}

.tt-suggestion p {
  margin: 0;
}

Then adjust the StyleBundle in BundleConfig to include this css file:
      bundles.Add(new StyleBundle("~/Content/css").Include(
                      "~/Content/bootstrap.css",
                      "~/Content/site.css",
                      "~/Content/typeahead.css"));
Finally, here is an example of how to use this Html Helper:

<div class="row">
    <div class="col-md-4">    
        <h2>Autocomplete control html helper:</h2>   
        @Html.AutoCompleteFor(m => m.SomeProperty, Url.Action("SomeData", "Home"))
    </div>
</div>

The call to the HTML helper provides the property of the MVC view's model as the first argument and a URL to the action that fetches the data as the second. This HTML helper should match a lot of developers' needs, but can of course be adjusted. The benefit of using an MVC HTML helper is reusability: you avoid having to fiddle with JavaScript for each field in your MVC views where you want an autocomplete feature. Maybe you want to adjust the HTML helper to fit your needs. I have provided a link to a zip file with this HTML helper in a Visual Studio 2015 solution below; let me know if you have tips or improvements after evaluating and testing it out. Note that the value chosen in the autocomplete list is set on a hidden field. The textbox will be named "propertyname"TextBox and the hidden field will be named "propertyname".
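For illustration, here is a minimal sketch of a view model that the usage above could bind against (the class name DemoViewModel is made up; only the SomeProperty name matters):

    public class DemoViewModel
    {
        //The generated hidden field is named "SomeProperty", so the model binder
        //writes the Id selected in the typeahead list into this property on post back
        public string SomeProperty { get; set; }
    }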

Download the source code for the autocomplete control (VS 2015 solution):

Download zip file [.zip | 31,7 MB]

Monday, 3 October 2016

Disposing objects instantiated by MEF

Experienced developers have likely worked with the official extensibility framework in .NET, the Managed Extensibility Framework (MEF), which allows different parts to be combined into more composite parts through composition. MEF has similarities to other IoC frameworks, where you register components and then make use of them in other components. However, with MEF there is a caveat, and an important one too!
MEF beautifully abstracts away the IoC container and lets you specify parts that can be exported and imported. But if you inspect your application or system with a memory profiler such as JetBrains DotMemory or Red Gate Memory Profiler, you soon find out that much of the memory used by your application is not properly disposed, i.e. not freed up after use. This is the case for nonshared (non-singleton) objects that are exported and then instantiated (imported). This means that your application will, through continued use, hold more and more memory. It will leak memory. By inspecting the memory dependency chain, one can see that MEF is the reason why the nonshared objects it instantiated are not released, even after the objects have been told to dispose.

In this code I use the ServiceLocator found in the Enterprise Library. Note that my code breaks the dependency chain that holds on to the objects, but that does not necessarily mean the objects will be disposed right away. After all, .NET is managed and decides itself when objects are really reclaimed. But if you struggle with objects that stay tied to memory even after use, and you also use MEF, read on.

I use the Factory pattern here to instantiate objects. I also use a feature introduced in .NET 4.5 called ExportLifetimeContext, together with MEF's ExportFactory, inside a class called ExportFactoryInstantiator that does the actual instantiation of the objects and keeps track of these ExportLifetimeContext objects. As noted, you need at least .NET 4.5 to make this work. For .NET 4.0 users, sorry - you are out of luck as far as I know. Upgrade your application to .NET 4.5 if possible and get the newer version of MEF.

The code below shows how you can accomplish control over memory resources using MEF:

MefFactory.cs

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.ComponentModel.Composition;
using System.Linq;
using Microsoft.Practices.ServiceLocation;

namespace SomeAcme.Client.Infrastructure.IoC
{

    /// <summary>
    /// Factory for MEF Parts that is able to actually dispose MEF instantiated objects and get around bug in MEF 
        /// where objects never get properly GC-ed when they should be disposed
    /// </summary>
    /// <typeparam name="T"></typeparam>
    [Export]
    [PartCreationPolicy(CreationPolicy.Shared)]
    public class MefFactory<T> : IPartImportsSatisfiedNotification 
    {

        /// <summary>
        /// Backlog that keeps track of mef parts that are instantiated via this factory 
        /// </summary>
        private static readonly ConcurrentBag<ExportLifetimeContext<T>> MefParts = new ConcurrentBag<ExportLifetimeContext<T>>();

        /// <summary>
        /// Disposes parts added to the mef factory backlog of type T
        /// </summary>
        public static void DisposeMefParts()
        {
            ExportLifetimeContext<T> item;
            while (MefParts.TryTake(out item))
            {
                if (item != null)
                    item.Dispose();
            }
        }

        /// <summary>
        /// Disposes parts added to the mef factory backlog of type T by a given predicate condition
        /// </summary>
        public static void DisposeMefParts(Predicate<T> condition)
        {
            ExportLifetimeContext<T> item;
            List<ExportLifetimeContext<T>> lifeTimeProlonged = new List<ExportLifetimeContext<T>>();
            while (MefParts.TryTake(out item))
            {
                if (item != null && condition(item.Value))
                    item.Dispose();
                else 
                    lifeTimeProlonged.Add(item);
            }
            if (lifeTimeProlonged.Any())
            {
                //Add back again the parts not matching condition to the Concurrent bag
                foreach (var part in lifeTimeProlonged)
                {
                    MefParts.Add(part);
                }
            }
        }

        public void OnImportsSatisfied()
        {
            //marker interface
        }
   
        /// <summary>
        /// Resolves the mef part
        /// </summary>
        /// <returns></returns>
        public static T Resolve()
        {
            var factoryInstantiator = ServiceLocator.Current.GetInstance<ExportFactoryInstantiator<T>>();
            MefParts.Add(factoryInstantiator.Lifetime);
            return factoryInstantiator.Instance;
        }

    }
}



using System.ComponentModel.Composition;

namespace SomeAcme.Client.Infrastructure.IoC
{

    [Export]
    [PartCreationPolicy(CreationPolicy.NonShared)]
    public class ExportFactoryInstantiator<T> : IPartImportsSatisfiedNotification
    {

        [Import]
        public ExportFactory<T> Factory { get; set; }

        public T Instance { get; private set; }

        private ExportLifetimeContext<T> _lifeTime;

        public ExportLifetimeContext<T> Lifetime
        {
            get { return _lifeTime; }
        } 

        public void OnImportsSatisfied()
        {
            _lifeTime = Factory.CreateExport();
            Instance = _lifeTime.Value;
        }

        public bool DisposeOnDemand()
        {
            if (_lifeTime == null)
                return false;
            _lifeTime.Dispose();
            return Instance == null;
        }

    }

}

To instantiate an object, you do the following (SomePart here stands in for your own exported part type):

 var somepart = MefFactory<SomePart>.Resolve();

When you are done using the object you can dispose it with:

 MefFactory<SomePart>.DisposeMefParts(); 

Please note that you can use a Predicate here to filter which objects you want to keep and which ones to dispose.
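A minimal sketch of such a call, assuming SomePart exposes a boolean property named IsFinished (a hypothetical property used only for illustration):

 //Dispose only the parts that report they are finished; the rest are put back in the backlog
 MefFactory<SomePart>.DisposeMefParts(part => part.IsFinished);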

And once more, immediate disposal of the objects is not guaranteed, since the GC still controls the true lifetime of objects. You can use GC.Collect() to force collection of disposed objects, but that will usually degrade application performance.

But the technique shown here will, over time, really improve your application by giving you control over its memory footprint.

Resources

[1] Enterprise Library: https://msdn.microsoft.com/library/cc467894.aspx
[2] ServiceLocator class: https://msdn.microsoft.com/en-us/library/microsoft.practices.servicelocation.servicelocator(v=pandp.51).aspx
[3] ServiceLocator pattern: https://msdn.microsoft.com/en-us/library/ff648968.aspx
[4] Managed Extensibility Framework: https://msdn.microsoft.com/en-us/library/dd460648(v=vs.110).aspx

Thursday, 30 June 2016

Creating TPL Dataflow meshes to construct pipelines of computations

The TPL Dataflow library allows the creation of simple and more complex data meshes that propagate data, computations and exceptions, and it is available through the Nuget package Microsoft.Tpl.Dataflow. Let's look at how we can create a compound mesh that performs three calculations but is treated as a single block. For computations as simple as these, the approach adds a lot of overhead in complexity; you would use TPL Dataflow for more complex scenarios, and the simple example is just used for clarity. Consider the following code. First off, make sure you add a reference to Microsoft.Tpl.Dataflow, since TPL Dataflow is not part of the Base Class Library (BCL) in .NET. In the Package Manager Console in VS:
Install-Package Microsoft.Tpl.DataFlow

using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using System.Threading.Tasks.Dataflow;

namespace DataFlowDemo
{

    class Program
    {

        static void Main(string[] args)
        {
            //TplDataDemo();
            SecondTplDataDemo();
            Console.WriteLine("Press any key to continue ..");
            Console.ReadKey();
        }

        private static async void SecondTplDataDemo()
        {
            int[] nums = { 1, 13, 26, 14, 29, 15 };
            Console.WriteLine("Input numbers: ");
            foreach (var n in nums)
                Console.WriteLine(n);
            IPropagatorBlock<int, int> compountBlock = GetPropagatorBlock();
            Console.WriteLine("Pipeline: " + "x = (x * 2) => (x + 2) => (x / 2)");
            foreach (var num in nums)
            {
                compountBlock.Post(num);
            }
            try
            {

                while (true)
                {
                    try
                    {
                        Task<int> f = compountBlock.ReceiveAsync(TimeSpan.FromSeconds(1));
                        await f;
                        await Task.Delay(1000);
                        Console.WriteLine(f.Result);
                    }
                    catch (TimeoutException err)
                    {
                        //Console.WriteLine(err.Message);
                        break;
                    }
                    catch (Exception err)
                    {
                        //Console.WriteLine(err.Message);
                        throw err;
                    }
                }

            }
            catch (Exception err)
            {
                Console.WriteLine(err.Message);
            }
        }

        private static IPropagatorBlock<int, int> GetPropagatorBlock()
        {
            var multiplyBlock = new TransformBlock<int, int>(item => item * 2);
            var addBlock = new TransformBlock<int, int>(item => item + 2);
            var divideBlock = new TransformBlock<int, int>(item => item / 2);

            var flowCompletion = new DataflowLinkOptions { PropagateCompletion = true };
            multiplyBlock.LinkTo(addBlock, flowCompletion);
            addBlock.LinkTo(divideBlock, flowCompletion);

            return DataflowBlock.Encapsulate(multiplyBlock, divideBlock);
        }
    }
}

We build up each step of the computation pipeline as a TransformBlock. The multiplyBlock is linked to the addBlock, and the addBlock is then linked to the divideBlock. We get a pipeline like this: multiplyBlock - addBlock - divideBlock. Each computation will then be: y = (x * 2) => z = y + 2 => w = z / 2. We also use the Encapsulate method to glue the start step and the end step together into a single propagator block. We then get the following output:
Input numbers:
1
13
26
14
29
15
Pipeline: x = (x * 2) => (x + 2) => (x / 2)
2
14
27
15
30
16
Press any key to continue ..
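As a side note, here is a hedged sketch of an alternative way to drain the pipeline without relying on the ReceiveAsync timeout, assuming the same compound block and that the code runs inside an async method:

            compountBlock.Complete();       //Signal the head (multiply) block that no more input is coming
            while (await compountBlock.OutputAvailableAsync())
            {
                Console.WriteLine(await compountBlock.ReceiveAsync());
            }
            await compountBlock.Completion; //Completion has propagated through the links to the tail block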
Test out the TPL Dataflow sample above (VS 2015 solution): VS Solution with the sample code above (.zip)

Wrapping Asynchronous Programming Model (APM) to Task-based Asynchronous Pattern (TAP)

Let's look at how we can wrap the classic Begin and End methods used in the Asynchronous Programming Model (APM) into the newer Task-based Asynchronous Pattern (TAP). Many methods in older versions of the .NET Framework expose such APM methods, and we want to wrap or adapt them to support TAP and async/await. Example code:

using System;
using System.IO;
using System.Net;
using System.Text;
using System.Threading.Tasks;

namespace ApmToTap
{
    class Program
    {

        static void Main(string[] args)
        {
            DownloadDemo();

            Console.WriteLine();
            Console.ReadKey();
        }

        private static async void DownloadDemo()
        {
            WebRequest wr = WebRequest.Create("https://t.co/UrkiLgN1BC");
            try
            {
                var response = await wr.GetResponseFromAsync();
                using (Stream stream = response.GetResponseStream())
                {
                    StreamReader reader = new StreamReader(stream, Encoding.UTF8);
                    Console.WriteLine(reader.ReadToEnd());
                }
            }
            catch (Exception err)
            {
                Console.WriteLine(err.Message);
            }
        }
    }

    public static class WebRequestExtensions
    {

        public static Task<WebResponse> GetResponseFromAsync(this WebRequest request)
        {
            return Task<WebResponse>.Factory.FromAsync(request.BeginGetResponse,
                request.EndGetResponse, null);
        }

    }

}

We use the Task<T>.Factory.FromAsync method and provide the delegates for the Begin and End methods used in APM. We pass null as the AsyncState parameter, as it is not needed here. We can then await the Task we create and get the functionality a Task provides, such as information about how the asynchronous operation went, exceptions and so on. And of course we also get the result we would usually retrieve in the End method when using APM. So there you have it: to use TAP with APM methods, you can use the Task<T>.Factory.FromAsync method.
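As another hedged sketch, the same technique applies to other APM pairs, for example Stream.BeginRead/EndRead (the extension method name ReadAsyncWrapped is made up for illustration):

    public static class StreamExtensions
    {
        //Wraps the classic BeginRead/EndRead APM pair into a Task<int> returning the number of bytes read
        public static Task<int> ReadAsyncWrapped(this Stream stream, byte[] buffer)
        {
            return Task<int>.Factory.FromAsync(stream.BeginRead, stream.EndRead,
                buffer, 0, buffer.Length, null);
        }
    }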

Wednesday, 29 June 2016

High performance Producer-Consumer scenario using Nito.AsyncEx

The Nuget library Nito.AsyncEx contains powerful collections that make it possible to create asynchronous collections supporting producer-consumer scenarios. First off, let us install the Nuget package:

Install-Package Nito.AsyncEx


using Nito.AsyncEx;
using System;
using System.Collections.Concurrent;
using System.Threading.Tasks;

namespace AsyncCollectionDemo
{
    class Program
    {

        private static readonly AsyncCollection<int> _asyncStack = new AsyncCollection<int>(new ConcurrentStack<int>(), maxCount: 1);


        static void Main(string[] args)
        {
            ProducerConsumerDemo();
        }

        private static async void ConsumerThread()
        {
            await Task.Run(async () =>
             {
                 //Consumer code 
                 while (await _asyncStack.OutputAvailableAsync())
                 {
                     Console.WriteLine(await _asyncStack.TakeAsync());
                     //Thread.Sleep(1000);
                 }
             });
        }

        private static async void ProducerConsumerDemo()
        {
            ConsumerThread();
            //Producer code 
            await _asyncStack.AddAsync(7);
            await _asyncStack.AddAsync(13);
            await _asyncStack.AddAsync(19);
            _asyncStack.CompleteAdding();


            Console.WriteLine("Press any key to continue ..");
            Console.ReadKey(); 
        }

    }
}

We can use the AsyncCollection to create one or more producer threads and then deliver the results to the consumer threads. The benefit of this collection compared to the BlockingCollection in the BCL is that, since it supports async, the consumer can for example be a UI thread. So you can have code that produces results and delivers them back to the user interface. You can of course skip the ConcurrentStack if you want FIFO ordering instead of the stack's LIFO ordering, as shown below. Nito.AsyncEx is created by Stephen Cleary, who is also the author of the good O'Reilly book "Concurrency in C# Cookbook".
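A minimal sketch of the FIFO variant: just swap the backing store for a ConcurrentQueue (which, as far as I know, is also the default backing store when none is passed in):

        private static readonly AsyncCollection<int> _asyncQueue =
            new AsyncCollection<int>(new ConcurrentQueue<int>(), maxCount: 1);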

Producer-Consumer scenario in .NET using BlockingCollection

The BCL contains helpful classes for building producer-consumer scenarios: we want one or several threads that produce data and one or several threads that consume that data. In this article I will show a simple single-producer, single-consumer scenario. BlockingCollection contains the necessary logic to support this. Consider the following code:


using System;
using System.Collections.Concurrent;
using System.Threading;
using System.Threading.Tasks;

namespace BlockingCollection
{
    class Program
    {

        private static readonly BlockingCollection<int> _blockingQueue = new BlockingCollection<int>(boundedCapacity: 1); 


        static void Main(string[] args)
        {
            Task.Run(() =>
            {
                foreach (var item in _blockingQueue.GetConsumingEnumerable())
                {
                    Console.WriteLine("Hey I got this item: " + item + "!");
                    Thread.Sleep(1000);
                }

            });

            int[] nums = { 1, 3, 8, 11, 94, 28 };
            foreach (var n in nums)
                _blockingQueue.Add(n);
            _blockingQueue.CompleteAdding();

            Console.WriteLine("Hit the any key!");
            Console.ReadKey(); 
        }

    }
}


We create a blocking collection with a bounded capacity of one item, so it will only accept a single item at a time. This means that our second call to the .Add() method will actually block. We also use the .CompleteAdding() method to signal that we are finished adding values. It is also important to start up our consumer before the blocking call; I use Task.Run() here to start up a new thread, and inside that thread we call .GetConsumingEnumerable() to set up our consumer. Note that I also use Thread.Sleep to make the consumer wait a bit; this is only for testing, and normally you would of course not wait here. Note also that you could spawn multiple consumers with Task.Run or similar to support a single-producer, multiple-consumers scenario, as sketched below. For multiple producers you could use a single shared blocking collection or possibly multiple blocking collections. BlockingCollection is very flexible. It is important that you call .CompleteAdding() on the producer side to allow the consumers to finish their execution as well.
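A minimal sketch of the multiple-consumers variant, with two consumers sharing the same collection (each item is handed to exactly one of them):

            for (int i = 0; i < 2; i++)
            {
                int consumerId = i;
                Task.Run(() =>
                {
                    //GetConsumingEnumerable can safely be called from several threads;
                    //the produced items are distributed between the consumers
                    foreach (var item in _blockingQueue.GetConsumingEnumerable())
                    {
                        Console.WriteLine("Consumer " + consumerId + " got item: " + item);
                    }
                });
            }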
What if you do not want FIFO ordering in your producer-consumer scenario, but rather LIFO? As you can probably see, the BlockingCollection constructor has overloads taking something called an IProducerConsumerCollection. So we just adjust our instantiation as follows (note that I had to remove the boundedCapacity of 1 here to make it work!):

   private static readonly BlockingCollection<int> _blockingQueue = new BlockingCollection<int>(
            new ConcurrentStack<int>()); 

So now we get our new output:

Hit the any key!
Hey I got this item: 28!
Hey I got this item: 94!
Hey I got this item: 11!
Hey I got this item: 8!
Hey I got this item: 3!
Hey I got this item: 1!

The BCL contains several interesting classes in the System.Collections.Concurrent namespace. Happy .NET coding!

Tuesday, 21 June 2016

How to open up a SQL connection with SQL Management studio 2012 through Powershell

Sometimes it is nice to just log into a SQL Server database by running a command. To maintain security, we let the user input the password manually and use PowerShell to start up SQL Server Management Studio 2012. If you have another version of SQL Server Management Studio (SSMS), just adjust the path to ManagementStudio.

$domain="SOMEDOMAIN"
$user="SomeUser"
$workingdir="c:\Program Files (x86)\Microsoft SQL Server\110\Tools\Binn\ManagementStudio"
$databaseserver="somedbserver.somedomain.net"
$cmd="ssms.exe"
$arguments=" -S $databaseserver" 

$domainuser = $domain + "\" + $user

$response = Read-host "Enter password" -AsSecureString 
#$secpasswd = [Runtime.InteropServices.Marshal]::PtrToStringAuto([Runtime.InteropServices.Marshal]::SecureStringToBSTR($response))

$credential = New-Object System.Management.Automation.PSCredential ($domainuser, $response)

Start-Process -WorkingDirectory $workingdir -FilePath $cmd -Argument $arguments -Credential $credential 

You can add a shortcut to PowerShell on your desktop, paste the script above into a .ps1 file, and then point to that .ps1 file as the argument of the shortcut.

Wednesday, 1 June 2016

How to display the SQL generated by Entity Framework programmatically with DbContext and ObjectContext

As developers, we often use an Object-Relational Mapper (ORM) to abstract away the way we work with a database. Gone are the days of building a SqlCommand object, setting the CommandText and reading the result set row by row, as with ADO.NET. Today, most .NET developers use Entity Framework to work with the database. This abstraction is all well and good and makes us work more efficiently. Sadly, many developers are oblivious to the fact that even though they get the results out of the database, they often do so in a slow manner. The reason is often not that there is a lot of data in the database, but that we write Entity Framework queries that generate the wrong SQL, i.e. we get the results, but the SQL involved performs poorly. You can use LinqPad, for example, to display the SQL involved in Entity Framework queries. But we can also achieve this programmatically, with C#. Here is an extension class I wrote to do that. The extension methods work both with data contexts that inherit from DbContext and with data contexts that inherit from ObjectContext.

using System;
using System.Data.Entity.Core.Objects;
using System.Linq;
using System.Reflection;

namespace Hemit.OpPlan.Data.EntityFramework
{
    
    public static class IQueryableExtensions
    {

        /// <summary>
        /// Shows the sql the IQueryable query will be generated into and executed on the database
        /// </summary>
        /// <param name="query">The IQueryable to analyze</param>
        /// <param name="decodeParameters">Set to true if this method should try decoding the parameters</param>
        /// <remarks>This is the generated SQL query in use for Entity Framework. This works using ObjectContext</remarks>
        public static string ShowSqlUsingObjectContext(this IQueryable query, bool decodeParameters = false)
        {
            var objectQuery = (ObjectQuery)query; 

            string result = objectQuery.ToTraceString();

            if (!decodeParameters)
                return result; 

            foreach (var p in objectQuery.Parameters)
            {
                string valueString = p.Value != null ? p.Value.ToString() : string.Empty;
                if (p.ParameterType == typeof(string) || p.ParameterType == typeof(DateTime))
                    valueString = "'" + valueString + "'";
                result = result.Replace("@" +p.Name, p.Value != null ? valueString : string.Empty); 
            }
            return result; 
        }

        public static string ShowSqlUsingDbContext(this IQueryable query, bool decodeParameters = false)
        {
            var memberInfo = query.GetType().BaseType;
            if (memberInfo != null)
            {
                var internalQueryField = 
                memberInfo.GetFields(BindingFlags.NonPublic
              | BindingFlags.Instance).FirstOrDefault(f => f.Name.Equals("_internalQuery"));
                if (internalQueryField != null)
                {
                    var internalQuery = internalQueryField.GetValue(query);
                    var objectQueryField =
                        internalQuery.GetType().GetProperty("ObjectQuery"); 

                    // Here's your ObjectQuery!
                    if (objectQueryField != null)
                    {
                        var objectQuery = objectQueryField.GetValue(internalQuery) as ObjectQuery;
                        string sql = ShowSqlUsingObjectContext(objectQuery, decodeParameters);
                        return sql;
                    }
                }
            }

            return null;
        }

    }
}

Note that when we use an IQueryable created from a DbContext, its BaseType is actually DbQuery, which wraps the ObjectQuery inside a field called "_internalQuery". That field in turn exposes a property called "ObjectQuery", so we can get hold of the ObjectQuery inside a DbQuery. Once we have the ObjectQuery, it is easy to get the SQL using the ToTraceString() method, and if we want to further decode the parameters Entity Framework generates, we can do so using the Parameters property of ObjectQuery. We can then inline the SQL parameters and get the SQL string in its most readable form, if we prefer that; some developers rather like the parametrized version. Some queries may not have any parameters at all, and that is fine. Of course, all this parametrization business exists to hinder SQL injection. Please do not resort to creating methods that accept such "clean SQL"; you may easily open an attack vector into your system if you adjust queries this way without being careful. With this extension method we can easily test it out:

using SomeAcme.EntityFramework;
using NUnit.Framework; 

[Test]
public void TestGettingSomeSql(){

 using (var context = new SomeAcmeContext()){
  IQueryable query = context.SomeDataEntity.Where(x => x.SomeProperty == 123).AsQueryable();
  
  string sql = string.Empty; 

  //If this is a dbContext: 

   sql = query.ShowSqlUsingDbContext(decodeParameters: true);

   //Or if this is an ObjectContext: 

   sql = query.ShowSqlUsingObjectContext(decodeParameters: true); 

   Console.WriteLine(sql);

 }
}

We create a query using the .AsQueryable() extension method in LINQ, and we then pass the IQueryable object to the extension methods of the class shown earlier. Which extension method to use depends on the type of data context you work with: either a DbContext or an ObjectContext.

Monday, 16 May 2016

How to ensure the integrity of information in .NET using Digital Signature Algorithm DSA

This article concerns the topic of digital signatures. There are several ways to ensure the integrity of the information or data that is sent. This touches on the concept of non-repudiation: the sender cannot deny that he or she is the true sender of the data. We can also check that the data is correct, so a digital signature can act as a sort of checksum - but for the entire message. We are also concerned that the information is authentic, original and not tampered with by an attacker. The Digital Signature Algorithm (DSA) in .NET uses the SHA-1 secure hash algorithm. There are more powerful ways to sign data today, such as the RSACryptoServiceProvider, but in this article we will use DSACryptoServiceProvider. DSA is not considered failsafe today; there are known security weaknesses. However, for ordinary use it is not that easy to break. Just as in RSA, there is a public and a private key, and the API is very similar to RSACryptoServiceProvider. The following console application shows some central API calls on a DSACryptoServiceProvider.

using System;
using System.Security.Cryptography;
using System.Text;

namespace DSASignDemo
{
    class Program
    {

        // ReSharper disable once UnusedParameter.Local
        static void Main(string[] args)
        {
            var dsa = new DSACryptoServiceProvider(1024);
            var publicDsaParameters = dsa.ExportParameters(false);
            var privateDsaParameters = dsa.ExportParameters(true);
            string inputText = "Burgers and coca cola";
            byte[] inputData = Encoding.Unicode.GetBytes(inputText);
            byte[] signedBytes = SignData(inputData, privateDsaParameters);
            bool isVerified = VerifyData(inputData, signedBytes, publicDsaParameters);

            Console.WriteLine("Input text: " + inputText);
            Console.WriteLine("Signed text: " + Convert.ToBase64String(signedBytes));

            if (isVerified)
                Console.WriteLine("The message was verified");
            else
                Console.WriteLine("The message was not verified");

            byte[] hashData = ComputeHash(inputData);
            Console.WriteLine("SHA-1 computed hash: " + Convert.ToBase64String(hashData));

            bool isHashSame = CompareHash(inputText, Convert.ToBase64String(hashData)); 
            if (isHashSame)
                Console.WriteLine("Hash is the same");
            else 
                Console.WriteLine("Hash is not same");

            byte[] signedHashData = dsa.SignHash(hashData, "SHA1");

            Console.WriteLine("Signed hash: ");
            Console.WriteLine(Convert.ToBase64String(signedHashData));

            bool isVerifiedHash = dsa.VerifyHash(hashData, "SHA1", signedHashData);
            if (isVerifiedHash)
                Console.WriteLine("Hash is verified");
            else
                Console.WriteLine("Hash is not verified");



            Console.WriteLine("Press the any key to continue ..");
            Console.ReadKey();  
        }

        static bool CompareHash(string inputText, string hashText)
        {
            string computedHash = Convert.ToBase64String(ComputeHash(Encoding.Unicode.GetBytes(inputText)));
            StringComparer comparer = StringComparer.OrdinalIgnoreCase;
            return comparer.Compare(computedHash, hashText) == 0; 
        }

        static byte[] ComputeHash(byte[] inputData)
        {
            var shaManaged = new SHA1Managed();
            byte[] hashBytes = shaManaged.ComputeHash(inputData);
            return hashBytes;
        }

        static byte[] SignData(byte[] inputData, DSAParameters dsaParameters)
        {
            try
            {
                var dsa = new DSACryptoServiceProvider();
                dsa.ImportParameters(dsaParameters);
                return dsa.SignData(inputData);
            }
            catch (CryptographicException cge)
            {
                Console.WriteLine(cge.Message);
                return null;
            }
        }

        static bool VerifyData(byte[] inputData, byte[] signedData, DSAParameters dsaParmeters)
        {
            try
            {
                var dsa = new DSACryptoServiceProvider();
                dsa.ImportParameters(dsaParmeters);
                return dsa.VerifyData(inputData, signedData); 
            }
            catch (Exception err)
            {
                Console.WriteLine(err.Message);
                return false;
            }
        }




    }
}


A sample output of running this console application is:
 


Input text: Burgers and coca cola
Signed text: su7Qv+O58MyOzFjWXXx6bq9xAz9GtJ30+N8pmEYA4qFwmCdU04+qWg==
The message was verified
SHA-1 computed hash: b4o//84sCZ5cUY6cfewNia9yHYI=
Hash is the same
Signed hash:
xWExD3udQWayE2nfVDY+w8o/VuuBlKRng5Oe5XZ1zBAJO90BG+dbcA==
Hash is verified
Press the any key to continue ..


Note that the output will differ per run where it says signed text, SHA-1 computed hash and signed hash. The reason is that the DSA algorithm chooses a random number in part of its steps, so the resulting output will give different values. The boolean values here should of course be consistent, i.e. give consistent checks. Some things to note here:
- Using the constructor, we ask for a key size of at least 1024 bits for the DSACryptoServiceProvider, which controls the size of the large primes involved in the DSA algorithm.
- We actually use the private key of DSA to sign data and the public key to verify the data. DSA is an asymmetric cryptographic algorithm, and the order in which the keys are used is kind of reversed compared to RSA encryption: it is the sender that signs the data and the receiver that verifies the data with the public key.
- For speed, we can sometimes choose to just compute a hash such as SHA-1 and then sign this hash, which is much quicker than signing large data. So we first compute a SHA-1 hash, sign the hash and append the signed hash to the message; the receiver then uses the hash and the signed hash to verify that the message integrity is kept. We must tell the method VerifyHash which algorithm is used. An overview of the hash algorithm names you can use in the arguments of the SignHash and VerifyHash methods is available here: https://msdn.microsoft.com/en-us/library/system.security.cryptography.hashalgorithmname(v=vs.110)

How to do async calls without locking the UI thread in WPF

WPF developers who have worked with async/await have most likely run into problems with race conditions on the UI thread, either locking up the entire UI or burdening the UI thread so the client stops responding. This article will show you how to avoid this. The way to do it is to await on another thread and afterwards use the result back on the WPF UI thread to do the updates in the UI. We use ThreadPool.QueueUserWorkItem for this. As you can see, we use an inner method marked with the async keyword to await the results. This is done to avoid the classic "async proliferation" seen in async code, where the async keyword spreads upwards to all callers. We instead use an outer method that calls the async method and uses the Result of the returned Task. We could do some quality checking here, to check whether the Task succeeded: the Task object contains status information about whether the results are really available, and Result will throw an exception if there was an error in the retrieval of the async results from the lower layers of the application. Example code:

DispatcherUtil.AsyncWorkAndUiThreadUpdate(Dispatcher.CurrentDispatcher, () => GetSomeItems(someId),
 x => GetSomeItemsUpdateUIAfterwards(x), displayWaitCursor:true);
//Note here that we retrieve the data not on the UI thread, but on a dedicated thread and after retrieved the
//result, we do an update in the GUI. 
private List<SomeItemDataContract> GetSomeItems(int someId)
        {
         var retrieveTask = GetSomeItemsInnerAsync(someId);
         return retrieveTask.Result;
        }
 
private async Task<List<SomeItemDataContract>> GetSomeItemsInnerAsync(int someId)
        {
         List<SomeItemDataContract> retrieveTask = await SomeServiceAgent.GetSomeItems(someId);
         return retrieveTask;
        }

private void GetSomeItemsUpdateUIAfterwards(List<SomeItemDataContract> x){
 if (x != null){
  //Do some UI stuff - remember RaisePropertyChanged
 }
}


Utility method:

public static void AsyncWorkAndUiThreadUpdate<T>(Dispatcher currentDispatcher, Func<T> threadWork, Action<T> guiUpdate, 
bool displayWaitCursor = false)
        {
         if (displayWaitCursor)
          PublishMouseCursorEvent<T>(Cursors.Wait);

         // ReSharper disable once UnusedAnonymousMethodSignature 
         ThreadPool.QueueUserWorkItem(delegate(object state)
            {
              T resultAfterThreadWork = threadWork();
              // ReSharper disable once UnusedAnonymousMethodSignature
              currentDispatcher.BeginInvoke(DispatcherPriority.Normal, new Action<T>(delegate {
       
              if (displayWaitCursor)
               PublishMouseCursorEvent<T>(Cursors.Arrow);
 
              guiUpdate(resultAfterThreadWork);
           }), resultAfterThreadWork);

            });
 
        }

The PublishMouseCursorEvent method publishes a Prism event that is captured by a Bootstrapper class, but what you choose to do here is of course up to you. One way is to subscribe to such an event (either a CompositePresentationEvent as in Prism or an ordinary CLR event, for example):
private void OnCursorEvent(CursorEventArg eventArg)
{
 if (eventArg != null)
 {
 Mouse.OverrideCursor = eventArg.Cursor;
 }
}

Sunday, 8 May 2016

RSA algorithm demo in MonoDevelop and GtkSharp

This article will present a demo of using RSA in MonoDevelop using the GtkSharp UI framework. As you know, the Mono project offers an implementation of the .NET Framework, including the BCL, CLR, MSIL and so on - and also the classes in System.Security.Cryptography! So let us delve into the details of doing some RSA crypto! First off, the GUI looks like this:


In MonoDevelop we use the Stetic GUI Designer to build the GUI.

Cool! We can build apps that run on Linux and Windows with MonoDevelop! Now over to the code of this app:

using System;
using Gtk;
using System.Text;
using System.Security.Cryptography; 
using System.Security; 
using System.IO; 

public partial class MainWindow: Gtk.Window
{

 private RSACryptoServiceProvider _rsa;
 private RSAParameters _rsaPrivateKey;
 private RSAParameters _rsaPublicKey;
 private byte[] _cipherBytes; 
 private byte[] _decipherBytes; 
 private byte[] _plainTextBytes;

 public MainWindow () : base ("Pango")
 {
  Application.Init ();
  Build ();
  SetupControls ();
  Application.Run ();
 }

 private void SetupControls(){
  Gdk.Color color = new Gdk.Color (255, 30, 80);
     lblP.ModifyFont (Pango.FontDescription.FromString ("Purisa 10")); 
  //lblP.ModifyBg (StateType.Normal, new Gdk.Color (255, 80, 10));
 }

 protected void OnDeleteEvent (object sender, DeleteEventArgs a)
 {
  Application.Quit ();
  a.RetVal = true;
 }

 protected void btnRsaSetupClick (object sender, EventArgs e)
 {
  _rsa = new RSACryptoServiceProvider ();
 
  StringWriter writer = new StringWriter (); 
  string rsaSetupXml = _rsa.ToXmlString (true);
  writer.Write (rsaSetupXml); 
  //tbRsaSetup.Buffer.Text = writer.ToString ();
  writer.Close ();

  _rsaPrivateKey = _rsa.ExportParameters (true);
  _rsaPublicKey = _rsa.ExportParameters (false); 

  SetupControls ();
  DisplayRsaSetup (_rsaPrivateKey);
 } 

 private void DisplayRsaSetup (RSAParameters rsaParams){
  try {
   lblPValue.Text = Convert.ToBase64String (rsaParams.P);
   lblQValue.Text = Convert.ToBase64String (rsaParams.Q);
   lblModulusValue.Text = Convert.ToBase64String (rsaParams.Modulus);
   lblDValue.Text = Convert.ToBase64String(rsaParams.D);
   lblEValue.Text = Convert.ToBase64String (rsaParams.Exponent);

  } catch (Exception err) {
   Console.WriteLine (err.Message);
  }

 }

 protected void btnEncryptClicked (object sender, EventArgs e)
 {
  if (_rsa == null)
   return;
  _plainTextBytes = Encoding.UTF8.GetBytes (textViewPlainText.Buffer.Text);
  _cipherBytes = _rsa.Encrypt (_plainTextBytes, false);
  textviewEncrypted.Buffer.Text = Convert.ToBase64String(_cipherBytes);
 }

 protected void btnDecryptClicked (object sender, EventArgs e)
 {
  textviewDecrypted.Buffer.Text = string.Empty; 

  if (_rsa == null)
   return;
  if (_cipherBytes == null)
   return; 
  _decipherBytes = _rsa.Decrypt (_cipherBytes, false); 

  textviewDecrypted.Buffer.Text = Encoding.UTF8.GetString(_decipherBytes); 
 }

}





As you can see in the code, we instantiate a new RSACryptoServiceProvider instance. We use the Encrypt and Decrypt methods with the second argument set to false, so OAEP padding (Optimal Asymmetric Encryption Padding) is not used, for compatibility; passing false here means PKCS#1 v1.5 padding is used instead. PKCS stands for Public Key Cryptography Standards. I have also tested with the parameter set to true, i.e. OAEP, and it seems to work nicely in MonoDevelop as well - so you can use both types of padding. Note that we use the ExportParameters method of the RSACryptoServiceProvider to get the RSAParameters object. In asymmetric encryption we must guard our private key and expose our public key, so we call ExportParameters with the parameter set to false when we do not want to include the private key. To export the RSA parameters in a more portable way, you can export them as XML: the ToXmlString() method returns the parameters as an XML string, which you can keep as a string or write to a file, and FromXmlString() imports them again.
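
As a small illustration of that last point, the public parameters can be written to a file and later imported into a fresh RSACryptoServiceProvider that can encrypt but not decrypt; the file name here is just an example:

//Export only the public key (false = exclude the private parameters) to a file
File.WriteAllText ("rsaPublicKey.xml", _rsa.ToXmlString (false));

//Somewhere else: import the public key and encrypt with it
var publicOnlyRsa = new RSACryptoServiceProvider ();
publicOnlyRsa.FromXmlString (File.ReadAllText ("rsaPublicKey.xml"));
byte[] cipher = publicOnlyRsa.Encrypt (Encoding.UTF8.GetBytes ("Hello RSA"), false);
//Decrypting cipher requires an instance loaded with the full key, i.e. the output of ToXmlString (true)

Below, the relevant part of btnRsaSetupClick is repeated, showing how the setup XML can be written with a StringWriter: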

 {
  _rsa = new RSACryptoServiceProvider ();
 
  StringWriter writer = new StringWriter (); 
  string rsaSetupXml = _rsa.ToXmlString (true);
  writer.Write (rsaSetupXml); 
  //tbRsaSetup.Buffer.Text = writer.ToString ();
  writer.Close ();

As you can see in the code above, you can use a StringWriter to write the XML to a string, but you can also use a FileStream to write the contents out to a file. With ToXmlString you export only the information needed for the public key by setting the argument to false; to include the private key information, you pass true. In the RSA algorithm, the following belongs to the public part of the key:
  • Modulus
  • Public exponent E
The "PRIVATE Domain" contains the additional information:
  • Private exponent D
  • Prime P
  • Prime Q
The private part will also reveal the values DP, DQ and InverseQ, which are derived from P, Q and D. The security of the RSA algorithm relies on the difficulty of factoring the product of two large prime numbers. The public key only contains the modulus (the product of the primes) and the public exponent E, and the sender uses this public key to encrypt the information. The receiver, who knows the private key, can then decrypt the information. So the key point here is: guard your private key and share your public key! And of course, you can do RSA encryption when making applications for Linux with MonoDevelop - the .NET Framework classes are already there for you to use. To work with this sample, a download link is shown below: Monodevelop project with RsaDemo. Bunzip the file using the command:

tar xjvf RsaDemo.tar.bz2 

Just so you know:
tar - Tape ARchiver
And the options:
x - extract
v - verbose output (lists all files as they are extracted)
j - deal with bzipped file
f - read from a file, rather than a tape device

"tar --help" will give you more options and info

After unpacking, just open the solution in MonoDevelop.

So .NET Developers - Start your engines - Start developing for Linux!

Friday, 6 May 2016

Symmetric crypto algorithms in C# with MonoDevelop and GTK-Sharp

MonoDevelop and GTK-Sharp (GTK#) let a .NET developer build applications for other platforms, such as Linux and other OS-es. Let's look closer at the very capable MonoDevelop IDE and GTK-Sharp. As a .NET developer who has written .NET applications for many years, I find that MonoDevelop has matured into a very good platform for developing a multitude of applications, and GTK# resembles .NET Windows Forms in many ways. We will here use some of the symmetric crypto algorithms in .NET that are available with the Mono framework. The demo provides the Data Encryption Standard (DES) and Triple-DES, plus the Advanced Encryption Standard (AES) - Rijndael. The GUI will look like this:

The GUI is designed with Stetic, the GUI designer in MonoDevelop for developing GTK# applications. We can choose the mode of the cryptographic algorithm; the default here is Cipher Block Chaining (CBC). We can also set the padding of the cryptographic algorithm. Note that not all combinations are legal. I have tested with Rijndael, Cipher Block Chaining and padding set to Zeros, which seems to work fine. You can use the demo to test out other combinations, and you can also generate different initialization vectors and keys to use with the algorithm.
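
If you are unsure which key sizes a given algorithm accepts before you pick a mode and padding, you can ask the SymmetricAlgorithm instance itself; a small sketch:

//Prints the legal key sizes (in bits) and the block size for a symmetric algorithm instance
private static void PrintAlgorithmInfo (SymmetricAlgorithm algorithm)
{
    foreach (KeySizes keySizes in algorithm.LegalKeySizes)
        Console.WriteLine ("Key sizes: {0}-{1} bits in steps of {2}",
            keySizes.MinSize, keySizes.MaxSize, keySizes.SkipSize);
    Console.WriteLine ("Block size: {0} bits", algorithm.BlockSize);
}

//Example: PrintAlgorithmInfo (RijndaelManaged.Create ()) reports 128-256 bit keys in steps of 64 and a 128 bit block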

The code to achieve the encryption and decryption is listed below:

using System;
using System.Security.Cryptography;
using Gtk;
using System.IO;

public partial class MainWindow: Gtk.Window
{

 private SymmetricAlgorithm _symmetricAlgorithm; 
 private byte[] _intializationVector; 
 private byte[] _key;
 private byte[] _cipherBytes;



 public MainWindow () : base (Gtk.WindowType.Toplevel)
 {
  Build ();
 }

 protected void OnDeleteEvent (object sender, DeleteEventArgs a)
 {
  Application.Quit ();
  a.RetVal = true;
 }

 protected void btnGenerateIVClick (object sender, EventArgs e)
 {
     _symmetricAlgorithm = CreateSymmetricAlgorithm (); 

  _symmetricAlgorithm.GenerateIV ();
  _intializationVector = _symmetricAlgorithm.IV;

  lbInitializationVector.Text = Convert.ToBase64String(_symmetricAlgorithm.IV);

//  MessageDialog msgBox = new MessageDialog (null, DialogFlags.Modal, 
//                      MessageType.Info, ButtonsType.Ok, "Why hello world!");
//  msgBox.Run ();
//  msgBox.Destroy ();
 }

 protected void btnEncrypt_Click (object sender, EventArgs e)
 {
  _symmetricAlgorithm = CreateSymmetricAlgorithm (); //ensure that we use the selected algorithm
  _cipherBytes = Encrypt(textviewPlainText.Buffer.Text);
  textviewCipher.Buffer.Text = Convert.ToBase64String(_cipherBytes); 
 }

 private byte[] Encrypt(string text){
  byte[] encrypted;
  ICryptoTransform encryptor = _symmetricAlgorithm.CreateEncryptor (_key, _intializationVector);
  using (MemoryStream msEncrypt = new MemoryStream ()) {
   using (CryptoStream csEncrypt = new CryptoStream (msEncrypt, encryptor, CryptoStreamMode.Write)) {
    using (StreamWriter swWriter = new StreamWriter (csEncrypt)) {
     swWriter.Write (text);
    }
    encrypted = msEncrypt.ToArray (); 
   }

  }
  return encrypted;
 }

 private string Decrypt(byte[] cipherBytes){
  try {
   ICryptoTransform decryptor = _symmetricAlgorithm.CreateDecryptor (_key, 
    _intializationVector);
   using (MemoryStream msEncrypt = new MemoryStream (cipherBytes)) {
    using (CryptoStream csEncrypt = new CryptoStream (msEncrypt, decryptor,
     CryptoStreamMode.Read)) {
     using (StreamReader sReader = new StreamReader (csEncrypt)) {
      string decrypted = sReader.ReadToEnd();
      return decrypted;
     }
    } 
   }
  } catch (Exception err) {
   Console.WriteLine (err.Message);
  }
  return string.Empty;
 }

 private SymmetricAlgorithm CreateSymmetricAlgorithm(){
  SymmetricAlgorithm sa = null;
  if (rbDES.Active)
   sa = DESCryptoServiceProvider.Create ();
  if (rbThreeDES.Active)
   sa = TripleDESCryptoServiceProvider.Create ();
  if (rbRijndael.Active)
   sa = RijndaelManaged.Create ();

  if (sa == null)
   sa = DESCryptoServiceProvider.Create (); 

  if (_intializationVector != null)
   sa.IV = _intializationVector;
  if (_key != null)
   sa.Key = _key;

  sa.Mode = GetCipherMode ();
  sa.Padding = GetPadding ();
  return sa;
 }

 private PaddingMode GetPadding(){
  if (rbPaddingNone.Active)
   return PaddingMode.None;
  if (rbPaddingZeros.Active)
   return PaddingMode.Zeros;
  if (rbPaddingAnsiX923.Active)
   return PaddingMode.ANSIX923;
  if (rbPaddingISO1126.Active)
   return PaddingMode.ISO10126;
  //Fall back to PKCS7, the default padding in .NET, if no radio button matches
  return PaddingMode.PKCS7;
 }

 private CipherMode GetCipherMode(){
  if (rbCBC.Active)
   return CipherMode.CBC;
  if (rbCFB.Active)
   return CipherMode.CFB;
  if (rbCTS.Active)
   return CipherMode.CTS;
  if (rbECB.Active)
   return CipherMode.ECB;
  if (rbOFB.Active)
   return CipherMode.OFB;

  return CipherMode.CBC;
 }

 protected void btnKeyClick (object sender, EventArgs e)
 {
  _symmetricAlgorithm = CreateSymmetricAlgorithm ();     

  _symmetricAlgorithm.GenerateKey ();  
  _key = _symmetricAlgorithm.Key;
  lblKey.Text = Convert.ToBase64String (_symmetricAlgorithm.Key);
 }

 protected void btnDecryptClick (object sender, EventArgs e)
 {
  _symmetricAlgorithm = CreateSymmetricAlgorithm (); //ensure that we use the selected algorithm
  string decrypted = Decrypt(Convert.FromBase64String(textviewCipher.Buffer.Text));
  textviewDecrypted.Buffer.Text = decrypted; 
 }
}











To open up this sample, I have uploaded the MonoDevelop project as a tar.bz2 file, available for download here: Sample project Monodevelop in this article. To unpack the tar.bz2 file, just use the following command:
tar xjvf Symmetric.tar.bz2 

Just so you know:
tar - Tape ARchiver
And the options:
x - extract
v - verbose output (lists all files as they are extracted)
j - deal with bzipped file
f - read from a file, rather than a tape device

"tar --help" will give you more options and info

After unpacking, just open the solution in MonoDevelop.





Wednesday, 4 May 2016

Creating a simple MD5 application using GTK# and Monodevelop

Let's look at building a simple application using GTK# and MonoDevelop! I created this application on an Ubuntu 16.04 Xenial AMD64 distribution of Linux, running inside Oracle VM VirtualBox on my Windows 10 machine! This article will show a very simple application written in the MonoDevelop IDE, using GTK# to build the GUI. GTK# somewhat resembles Windows Forms if you come from a Visual Studio background, as I do. The application just takes some text input (plaintext) and computes an MD5 hash. Simple stuff. After defining the following form in MainWindow of the GTK# application, we move over to the code and define the following in MainWindow (the source of the form), which is the code-behind:

using System;
using System.Linq;
using Gtk;
using System.Security.Cryptography;
using System.Text;


public partial class MainWindow: Gtk.Window
{
 public MainWindow () : base (Gtk.WindowType.Toplevel)
 {
  Build ();
  btnMd5.Clicked += OnBtnClick;
 }

 protected void OnDeleteEvent (object sender, DeleteEventArgs a)
 {
  Application.Quit ();
  a.RetVal = true;
 }

 protected void OnBtnClick (object sender, EventArgs args)
 {
  var md5 = MD5CryptoServiceProvider.Create ();
  byte[] plainTextBytes = Encoding.UTF8.GetBytes (tbPlainText.Buffer.Text); 
  byte[] hashBytes = md5.ComputeHash (plainTextBytes); 
  var sbuilder = new StringBuilder (); 

  sbuilder.Append(string.Join("",
  hashBytes.Select(x => x.ToString("x2")))); 
  tbHash.Buffer.Text = sbuilder.ToString();
 }

}



The code above instantiates an MD5CryptoServiceProvider instance and then computes a hash. We get a string representation of the MD5 hash using a StringBuilder and ToString("x2"), which assembles a hexadecimal string for us - the common way to represent an MD5 hash. An MD5 hash is 128 bits = 16 bytes = 32 hexadecimal digits, since a hexadecimal digit can be 0-9 or A-F = 16 different values = half a byte.
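
A quick way to verify the hashing code is to compare against the known MD5 test vector from RFC 1321, where MD5("abc") is 900150983cd24fb0d6963f7d28e17f72. A minimal console sketch:

using System;
using System.Linq;
using System.Security.Cryptography;
using System.Text;

class Md5Check
{
 static void Main ()
 {
  using (var md5 = MD5.Create ())
  {
   byte[] hash = md5.ComputeHash (Encoding.UTF8.GetBytes ("abc"));
   string hex = string.Join ("", hash.Select (b => b.ToString ("x2")));
   //Should print 900150983cd24fb0d6963f7d28e17f72, the RFC 1321 test vector for "abc"
   Console.WriteLine (hex);
  }
 }
}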

We build up our GUI using the GUI designer inside Monodevelop. The GUI designer for GTK# in Monodevelop is called Stetic.

Friday, 22 April 2016

Transferring a commit message from Git to Target Process

It is possible to transfer a commit message from Git to Target Process using the REST API of TP. This is done using hooks in Git. The catch is that such hooks are written as bash shell scripts (or Perl), and unlike Mercurial, Git does not offer an obvious choice of tool for developing them. Below I present a hook I wrote in bash to transmit your commit message to TP. I chose to do this in the post-commit hook. First, go to the source repository you are working with and into the .git subfolder, then into the hooks folder. Add a new file called post-commit. Now we add our hook:

#!/bin/bash
#
# Transfers the commit message from Git to Target Process
# Usage - format the commit message as:
# TP TASKID: My check-in comment here
#
# This will then transfer the comment to TP
# Note:
# Replace the values TP_LOGON and TP_PASSWORD in the script
# with your own TP login. If you do not want to include your TP password,
# you can remove the password argument from the curl command
#
# Note that you must install Cygwin first, together with curl and curl-lib. Nano is recommended as the editor
# Cygwin - https://www.cygwin.com 
 
 
NAME=$(git branch | grep '*' | sed 's/* //') 
DESCRIPTION=$(git config branch."$NAME".description)
 
regex='TP ([0-9]+):*'
 
# melding = the subject line of the latest commit
melding=$(git log -1 --pretty=format:%s)
 
echo "Showing the commit message: "
echo $melding
 
tpnum=0
 
if [[ $melding =~ $regex ]]
then
 tpnum="${BASH_REMATCH[1]}"
        echo "TP number: $tpnum"
        curl -H "Content-Type: application/json" -X POST --data '{"Description":"'"$melding"'", "General": { "Id": "'"$tpnum"'"}}' https://someacme.tpondemand.com/api/v1/Comments?resultFormat=json -u TP_LOGON:TP_PASSWORD
 else 
  echo "Not sending the message to Target Process. Usage: Write TP TASKID: message"
fi

Note that the curl command needs to be a one-liner. To use this hook, just make some changes in your code and commit: git commit -m "TP 123: This is a checkin comment for the task with task Id 123 and will be shown in TP via a Git hook!" If you are working on a Windows system, you can download Cygwin (64-bit tested) together with the curl and curl-lib packages. I like the Nano editor very much. Happy coding in Git and sharing your check-in comments in TP - pretty nifty to share progress with others!
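
If you would rather post the comment from C# than from curl (for example from a small utility of your own), the same request can be expressed with HttpClient; the URL, credentials and IDs below are placeholders, just like in the hook above:

using System;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;
using System.Threading.Tasks;

class TpCommentPoster
{
 static void Main ()
 {
  PostCommentAsync ().GetAwaiter ().GetResult ();
 }

 static async Task PostCommentAsync ()
 {
  using (var client = new HttpClient ())
  {
   //Basic authentication with your TP login, equivalent to the -u option of curl
   var credentials = Convert.ToBase64String (Encoding.ASCII.GetBytes ("TP_LOGON:TP_PASSWORD"));
   client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue ("Basic", credentials);

   //Same JSON body as the curl command in the hook
   var json = "{\"Description\":\"TP 123: My checkin comment\", \"General\": { \"Id\": \"123\"}}";
   var content = new StringContent (json, Encoding.UTF8, "application/json");

   var response = await client.PostAsync (
    "https://someacme.tpondemand.com/api/v1/Comments?resultFormat=json", content);
   Console.WriteLine (response.StatusCode);
  }
 }
}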

Tuesday, 26 January 2016

Paged IQueryable ObjectContext EntityFramework

The following article shows how we can retrieve data from Entity Framework as paged results with ObjectContext, while staying inside IQueryable<T>. Let's review the extension method first:

public static class EntityExtensions
{
    //Note: requires using System.Linq.Expressions so the ordering can be translated to SQL by Entity Framework
    public static IQueryable<TEntity> PagedResult<TEntity, TKey>(
        this IQueryable<TEntity> query,
        Expression<Func<TEntity, TKey>> sortingExpression,
        int pageIndex = 1,
        int pageSize = 20)
    {
        //Order the query, skip past the preceding pages and take a single page
        var pagedResult = query.OrderBy(sortingExpression)
            .Skip(Math.Max(pageIndex, 0) * pageSize)
            .Take(pageSize);
        return pagedResult;
    }
}

And using the AdventureWorks2008R2 database, here is a sample query that shows how we can use this extension method in LINQ to Entities:

using (var ctx = new AdventureWorks2008R2Entities())
{
    var mountainStuff = from product in ctx.Products
                        where product.Name.Contains("Mountain")
                        select product;
    var firstMountainStuffPage = mountainStuff.PagedResult(p => p.Name, 1, 20);

    foreach (var item in firstMountainStuffPage)
        Console.WriteLine(item.Name);
}

Console.WriteLine("Press any key to continue ..");
Console.ReadKey();

Output

LL Mountain Frame - Black, 42
LL Mountain Frame - Black, 44
LL Mountain Frame - Black, 48
LL Mountain Frame - Black, 52
LL Mountain Frame - Silver, 40
LL Mountain Frame - Silver, 42
LL Mountain Frame - Silver, 44
LL Mountain Frame - Silver, 48
LL Mountain Frame - Silver, 52
LL Mountain Front Wheel
LL Mountain Handlebars
LL Mountain Pedal
LL Mountain Rear Wheel
LL Mountain Rim
LL Mountain Seat Assembly
LL Mountain Seat/Saddle
LL Mountain Tire
ML Mountain Frame - Black, 38
ML Mountain Frame - Black, 40
ML Mountain Frame - Black, 44
Press any key to continue ..

Conclusion

So there we are: we now have a reusable query extension that gives us a paged result, and we can pass in our own sorting key. We can now retrieve paged data from queries that return large result sets and only display a single page at a time, which means quicker fetches from the server, less data transferred to the clients, and better support for mobile clients by fetching the data page by page.
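
If the client also needs to know how many pages exist, the total row count can be fetched with the same filtered query before paging; a small sketch based on the AdventureWorks query above:

using (var ctx = new AdventureWorks2008R2Entities())
{
    var mountainStuff = from product in ctx.Products
                        where product.Name.Contains("Mountain")
                        select product;

    int pageSize = 20;
    //Count() is translated to a SQL COUNT, so only a single value comes back from the server
    int totalItems = mountainStuff.Count();
    int totalPages = (int)Math.Ceiling(totalItems / (double)pageSize);
    Console.WriteLine("{0} matching products spread over {1} pages", totalItems, totalPages);
}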