// Minimal Node.js HTTP server on port 4242, used to demonstrate NODE_DEBUG=http tracing.
// NOTE(review): the res.end() string literal below appears to contain article prose
// fused in by text extraction; the intended payload is presumably just
// "Why hello world!\n" — confirm against the original post before reusing this code.
const http = require('http'); const server = http.createServer((req, res) => { res.end("This sets up a http server in Node.js. The http module is built-in in Node.js, we only have to require it (import in ES6). Now, just enter the following (I use Windows Subsystem for Linux - WSL in this case) in your shell to export NODE_DEBUG environment variable: export NODE_DEBUG=http We can now see requests and responses in our Node.js server! (Image below uses Wsl-Terminal as our terminal against WSL).Why hello world!
\n"); }); server.listen(4242, () => { console.log("Server is running..."); });
Friday, 28 December 2018
Debugging http Node.js server requests and responses from shell
Here is a cool tip to inspect Node.js running an http process / server.
First off, we start a simple http server in Node.js like this:
Sunday, 23 December 2018
Canceling Promise in Javascript
Canceling a Promise in JS is an often sought-after functionality that can be provided by wrapping the Promise async function and providing canceling abilities. This will in functionality be similar to what we can do
in C# with a CancellationTokenSource using in System.Threading.Task objects. We can invoke asynchronous function in Js with Promise, but if the user navigates away from a View or Page in for example React Native component, clicking a button to go to another Component, we must tidy up already started Promise operations such as fetch and here is the code to achieve that.
First off, we define and export a makeCancelable method to be able to cancel a Promise.
/**
 * Wraps a promise into a cancelable promise, allowing it to be canceled.
 * Useful in scenarios such as navigating away from a view or page while a
 * fetch is already started.
 * @param {Promise} promise Promise object to cancel.
 * @return {{promise: Promise, cancel: function(): void}} The wrapped promise and a cancel function.
 */
export const makeCancelable = (promise) => {
  let hasCanceled = false;
  const wrappedPromise = new Promise((resolve, reject) => {
    promise.then(
      value => hasCanceled ? reject({ isCanceled: true }) : resolve(value),
      error => hasCanceled ? reject({ isCanceled: true }) : reject(error)
    );
  });
  return {
    promise: wrappedPromise,
    cancel() {
      // BUG FIX: the original read `hasCanceled: true;`, which JavaScript parses
      // as a labeled expression statement (a no-op) — the flag was never set and
      // cancel() had no effect. It must be an assignment.
      hasCanceled = true;
    }
  };
};
// The promise is wrapped with additional logic that checks a boolean flag in the
// variable hasCanceled and either rejects the Promise if it has been canceled or
// resolves the Promise (fulfills the async operation). Returned is an object in
// JS with the Promise itself in a `promise` attribute and the function cancel(),
// which sets the boolean flag hasCanceled to true, effectively rejecting the
// Promise when it settles. Example usage below:
'use strict'; import React, { Component } from 'react'; import { TextInput, Text, View, StyleSheet, Image, TouchableHighlight, ActivityIndicator, FlatList, AsyncStorage } from 'react-native'; import AuthService from './AuthService'; import { makeCancelable } from './Util'; const styles = StyleSheet.create({ container: { backgroundColor: '#F5FCFF', flex: 1, paddingTop: 40, alignItems: 'center' }, heading: { fontSize: 30, fontWeight: '100', marginTop: 20 }, input: { height: 50, marginTop: 10, padding: 4, margin: 2, alignSelf: 'stretch', fontSize: 18, borderWidth: 1, borderColor: '#48bbec' }, button: { height: 50, backgroundColor: '#48bbec', alignSelf: 'stretch', marginTop: 10, justifyContent: 'center' }, buttonText: { fontSize: 22, color: '#FFF', alignSelf: 'center' }, error: { fontWeight: '300', fontSize: 20, color: 'red', paddingTop: 10 } }); const cancelableSearchRepositoriesPromiseFetch = makeCancelable(fetch('https://api.github.com/search/repositories?q=react')); class LoginForm extends Component { constructor(props) { super(props); this.state = { showProgress: false, username: '', password: '', repos: [], badCredentials: false, unknownError: false, }; } onLoginPressed() { this.setState({ showProgress: true }); var reposFound = []; var authService = new AuthService(); authService.login({ username: this.state.username, password: this.state.password }, (results) => { this.setState(Object.assign({ showProgress: false }, results)); if (this.state.success && this.props.onLogin) { this.props.onLogin(); } }); cancelableSearchRepositoriesPromiseFetch.promise.then((response) => { return response.json(); }) .then((results) => { results.items.forEach(item => { reposFound.push(item); }); this.setState({ repos: reposFound, showProgress: false }); }); } componentWillMount() { this._retrieveLastCredentials(); cancelableSearchRepositoriesPromiseFetch.cancel(); } _retrieveLastCredentials = async () => { var lastusername = await AsyncStorage.getItem("GithubDemo:LastUsername"); 
var lastpassword = await AsyncStorage.getItem("GithubDemo:LastPassword"); this.setState({ username: lastusername, password: lastpassword }); } _saveLastUsername = async (username) => { if (username != null) { await AsyncStorage.setItem("GithubDemo:LastUsername", username); } } _savePassword = async (password) => { if (password != null) { await AsyncStorage.setItem("GithubDemo:LastPassword", password); } } componentWillUnmount() { } render() { var errorCtrl = <View />; if (!this.state.success && this.state.badCredentials) { errorCtrl = <Text color='#FF0000' style={styles.error}>That username and password combination did not work</Text> } if (!this.state.success && this.state.unknownError) { errorCtrl = <Text color='#FF0000' style={styles.error}>Unexpected error while logging in. Try again later</Text> } return ( <View style={styles.container}> <Image style={{ width: 66, height: 55 }} source={require('./assets/Octocat.png')} /> <Text style={styles.heading}>Github browser</Text> <TextInput value={this.state.username} onChangeText={(text) => { this._saveLastUsername(text); this.setState({ username: text }); }} style={styles.input} placeholder='Github username' /> <TextInput value={this.state.password} textContentType={'password'} multiline={false} secureTextEntry={true} onChangeText={(text) => { this._savePassword(text); this.setState({ password: text }); }} style={styles.input} placeholder='Github password' /> <TouchableHighlight style={styles.button} onPress={this.onLoginPressed.bind(this)}> <Text style={styles.buttonText}>Log in</Text> </TouchableHighlight> {errorCtrl} <ActivityIndicator animating={this.state.showProgress} size={"large"} /> <FlatList keyExtractor={item => item.id} data={this.state.repos} renderItem={({ item }) => <Text>{item.full_name}</Text>} /> </View> ); } } export default LoginForm;
Thursday, 20 December 2018
React native - Checkboxes and date pickers for iOS
This article will look at checkboxes and date pickers for iOS apps in React Native. For checkboxes, we use the built-in Switch control. For date pickers we use the built-in DatePickerIOS control.
This is the basic JS / JSX to get you started! Note the use of a basic state variable to keep track of the chosenDate. The onDateChange callback uses an arrow inline function to update the state variable. We also set the minimum and maximum datetime allowed and some other nice props of the control!
App.js
import React from 'react'; import { StyleSheet, Text, View, DatePickerIOS, Switch } from 'react-native'; export default class App extends React.Component { constructor(props){ super(props); this.state = { chosenDate : new Date(), isDatePickerVisible: false }; this.setDate = this.setDate.bind(this); } setDate(newDate){ this.setState({chosenDate: newDate }) } render() { return ( <View style={styles.container}> <Switch onValueChange={(val) => this.setState({ isDatePickerVisible : val })} value={this.state.isDatePickerVisible} ios_backgroundColor={"aliceblue"} trackColor={true} /> <Text style={{ display: this.state.isDatePickerVisible ? "none" : "flex"}}>Choose datetime..</Text> <View style={{ display: this.state.isDatePickerVisible ? "flex" : "none" }}> <DatePickerIOS locale="no" mode={"datetime"} minuteInterval={10} minimumDate={new Date(2018,11,1)} maximumDate={new Date(2018,11,31)} style={{ borderWidth: 1, borderColor: 'gray'}} onDateChange={this.setDate} date={this.state.chosenDate} /> <Text>{this.state.chosenDate.toLocaleDateString() + " " + this.state.chosenDate.toLocaleTimeString()}</Text> </View> </View> ); } } const styles = StyleSheet.create({ container: { flex: 1, justifyContent: 'center', }, });
React Native apps with Expo - Radio buttons
This article will present how you can add Radio buttons into your React Native app. I am using Expo to build the React Native app and Visual Studio code as the editor and IDE.
You can run this app by downloading the Expo app from App Store and run the following sample by opening the Camera on your mobile and scan the QR code. This will launch Expo
app and run the app.
WidgetDemos - React Native app with radio buttons demo
First off, you can install Expo using npm and the following command:
App.js
npm install expo-cli --global
You can initialize a project using these two commands:
expo init MyRadioButtonProject
cd MyRadioButtonProject
expo start
Expo's website contains good documentation here:
We can use the built in React Native control SegmentedControlIOS from React native and the following code:
import React, { Component } from 'react'; import { StyleSheet, Text, View, SegmentedControlIOS } from 'react-native'; export default class Widget extends Component { constructor(props) { super(props); this.state = { selectedMoreFruitIndex: 0, }; } setMoreFruitIndex(event) { this.setState({ selectedMoreFruitIndex: event.nativeEvent.selectedSegmentIndex }) }; render(){ return ( <View> <SegmentedControlIOS values={['banana', 'apple']} selectedIndex={this.state.selectedMoreFruitIndex} onChange={(event) => this.setMoreFruitIndex(event)} /> </View > ); }; ..The SegmentControlIOS is simple to get started with, but it is not so easy to set up with compound objects, it only supports a string array. Hence, you cannot specify a label and a value to each radio button item. Instead use the SegmentedControls from the 'react-native-radio-buttons' package. We import this npm package using:
npm install react-native-radio-buttons --save
The npm package has got a home and documentation here:
https://www.npmjs.com/package/react-native-radio-buttons
First off, we define an array of fruit objects and put these into a state variable (array of objects) inside the constructor
constructor(props) { .. let moreFruits = [{ label: 'Strawberry', value: 'Strawberry' }, { label: 'Melon', value: 'Melon' }, { label: 'Pineapple', value: 'Pineapple' }, { label: 'Grapes', value: 'Grapes' }, ]; this.state = { selectedFruit: 'Pick your favorite fruit', selectedMoreFruitIndex: 0, selectedMoreFruit: null, moreFruits: moreFruits } .. }We use the SegmentedControls by importing at the top and defining it inside the View returned in the render method:
import RadioButtons, { SegmentedControls } from 'react-native-radio-buttons'; //RadioButtons not needed in this example //..inside views of render <SegmentedControls options={this.state.moreFruits} direction='row' onSelection={option => this.setSelectedOption(option)} selectedOption={this.state.selectedMoreFruit} extractText={(option) => option.label} ></SegmentedControls>The method setSelectedOption and more can be read in the code listing below of Widget.js. I include also App.js below, the starting component: Widget.js
import React, { Component } from 'react'; import { StyleSheet, Text, View, TextInput, Button, RadioGroup, TouchableWithoutFeedback, SegmentedControlIOS } from 'react-native'; import { Dropdown } from 'react-native-material-dropdown'; import PropTypes from 'prop-types'; import { withStyles } from '@material-ui/core/styles'; import RadioButtons, { SegmentedControls } from 'react-native-radio-buttons'; export default class Widget extends Component { constructor(props) { super(props); let moreFruits = [{ label: 'Strawberry', value: 'Strawberry' }, { label: 'Melon', value: 'Melon' }, { label: 'Pineapple', value: 'Pineapple' }, { label: 'Grapes', value: 'Grapes' }, ]; this.state = { selectedFruit: 'Pick your favorite fruit', reasonFruit: '', isFruitButtonClicked: 'no', selectedMoreFruitIndex: 0, moreFruits: moreFruits } } setSelectedOption(selectedOption) { var selectedMoreFruit = this.state.moreFruits.find(item => item.label == selectedOption.label); this.setState({ selectedMoreFruit: selectedMoreFruit }) } renderOption(option, selected, onSelect, index) { const style = selected ? { backgroundColor: 'blue' } : { backgroundColor: 'red' }; return ( <TouchableWithoutFeedback onPress={onSelect} key={index}> <Text style={style}>{option.label}</Text> </TouchableWithoutFeedback> ); } renderContainer(optionNodes) { return <View>optionNodes</View>; } setMoreFruitIndex(event) { this.setState({ selectedMoreFruitIndex: event.nativeEvent.selectedSegmentIndex }) } render() { let data = [ { label: 'Banana', value: 'Banana' }, { label: 'Apple', value: 'Apple' }, { label: 'Kiwi', value: 'Kiwi' } ]; return ( <View> <SegmentedControlIOS values={['one', 'two']} selectedIndex={this.state.selectedMoreFruitIndex} onChange={(event) => this.setMoreFruitIndex(event)} /> <Text>Selected option of react-native-radio-buttons: {this.state.selectedMoreFruit ? 
this.state.selectedMoreFruit.value : 'no fruit'}</Text> <SegmentedControls options={this.state.moreFruits} direction='row' onSelection={option => this.setSelectedOption(option)} selectedOption={this.state.selectedMoreFruit} extractText={(option) => option.label} ></SegmentedControls> <Text style={{ fontWeight: 'bold' }}>Select favorite fruit then</Text> <Dropdown onChangeText={(val) => this.setState({ selectedFruit: val })} style={{ backgroundColor: 'aliceblue' }} data={data} /> <Text>You selected: {this.state.selectedFruit}</Text> <Text></Text> <Text>Reason for fruit choice: {this.state.reasonFruit}</Text> <TextInput onChangeText={(val) => this.setState({ reasonFruit: val })} style={{ borderColor: 'black', borderRadius: 4, borderWidth: 1, backgroundColor: 'aliceblue' }} value={this.state.reasonFruit}></TextInput> <Text></Text> {/* <Button color="#841584" accessibilityLabel="Click this button!" onPress={(val) => this.setState({ isFruitButtonClicked: 'yes' })} title="Fruit button" /> <Text>Is Fruit button clicked: {this.state.isFruitButtonClicked}</Text> */} </View > ); } sayHello(val) { this.setState({ selectedFruit: 'You selected: ' + val }) } }
App.js
import React from 'react'; import { StyleSheet, Text, View } from 'react-native'; import Widget from './Widget' import { Col, Row, Grid } from 'react-native-easy-grid'; export default class App extends React.Component { render() { return ( // Try setting `alignItems` to 'flex-start' // Try setting `justifyContent` to `flex-end`. // Try setting `flexDirection` to `row`. <View style={{ flex: 1, flexDirection: 'column', justifyContent: 'center', alignItems: 'stretch', }}> <View style={{ width: '80%', height: 50 }}> <Text style={{ fontSize: 18, flexWrap: 'wrap' }} >Welcome to the React Native apps demo!</Text> </View> <View style={{ height: 150 }}> <Widget /> </View> <View style={{ height: 100 }} /> </View> ); } } const styles = StyleSheet.create({ container: { flex: 1, justifyContent: 'center', flexDirection: 'column', backgroundColor: '#fff', alignItems: 'stretch' }, });Lastly, remember to set the selected object in the correct manner, using the find method for example on the state variable. This to ensure we are pointing at the right selected object in the SegmentedControls control.
setSelectedOption(selectedOption) { var selectedMoreFruit = this.state.moreFruits.find(item => item.label == selectedOption.label); this.setState({ selectedMoreFruit: selectedMoreFruit }) }
Thursday, 22 November 2018
Displaying branch history in Git
I desired to have an easy branch log today. The following alias commands makes this easier. These go under the [alias] section in the .gitconfig file you are using in your repository.
# Git [alias] entries for quick history views:
#   latest    – pretty log of the newest N commits across all branches (default 11).
#   logbranch – commits in a branch that are not yet in master.
# NOTE(review): in `logbranch`, the expansion ${1:git branch} is not valid bash
# parameter substitution — presumably ${1:-$(git rev-parse --abbrev-ref HEAD)}
# (default to the current branch) was intended; confirm before relying on it.
latest = "!f() { echo "Latest \"${1:-11}\" commits accross all branches:"; git log --abbrev-commit --date=relative --branches --all --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset%n' -n ${1:-11}; } ; f" logbranch = "!f() { echo "Latest \"${1:-11}\" commits in current branch against master:"; git log master..${1:git branch} --abbrev-commit --date=relative --pretty=format:'%C(yellow)%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(white blue bold)<%an>%Creset%n' -n ${1:-11}; } ; f"git logbranch will display only the latest commit for the specified branch or defaulting to the current branch, defaulting to last 11 commits using a shell function. Note that we compare against the master branch. And we get the following sample output:
Sunday, 4 November 2018
Closing branches in Git
Git unlike Mercurial has no builtin support for closing branches. This leads to a proliferation of branches, and running git branch -a to view remote branches or git branch will show ever more branches. Actually, closing a branch in Git can be supported through the use of tags. We decide to keep the tag for future use, so that we can use it to check out a new branch from this tag. Another way would of course be to just delete a branch locally and/or remotely, but that is not the same as closing a branch. Closing a branch in Mercurial still makes it possible to reopen it again for later work. Anyways, in this article, I will show a few aliases which can be used to close a branch, either both local and remote or just remote.
Put the following into the [alias] section of your .gitConfig file:
# Git [alias] entries that emulate Mercurial-style branch closing via archive/ tags:
#   closebranch        – check out, tag as archive/<branch>, push the tag, then delete the local AND remote branch.
#   closebranchpassive – tag and push the tag only; the delete steps are echoed but not executed.
#   closeremotebranch  – tag, push the tag, and delete only the remote branch (local branch is kept).
# Each alias ends by echoing how to restore the branch later from the archive/ tag.
closebranch = "!w() { echo Attempting to close local and remote branch: $1 Processing...; echo Checking the branch $1 out..; git checkout $1; echo Trying to create a new tag archive/$1; git tag archive/\"$1\"; git push origin archive/\"$1\"; echo Deleting the local branch $1; git branch -d $1; echo Deleting the remote branch $1; git push origin --delete $1; echo Done. To restore the closed branch later, enter: git checkout -b MyNewBranch archive/\"$1\"; }; w" closebranchpassive = "!w() { echo Attempting to close local and remote branch: $1 Processing...; echo Checking the branch $1 out..; git checkout $1; echo Trying to create a new tag archive/$1; git tag archive/\"$1\"; git push origin archive/$1; echo Deleting the local branch $1; echo Deleting the remote branch $1; echo Done. To restore the closed branch later, enter: git checkout -b MyNewBranch archive/\"$1\"; }; w" closeremotebranch = "!w() { echo Attempting to close remote branch: $1 Processing...; echo Checking the branch $1 out..; git checkout $1; echo Trying to create a new tag archive/$1; git tag archive/\"$1\"; git push origin archive/\"$1\"; echo Deleting the remote branch $1; git push origin --delete $1; echo Done. To restore the closed branch later, enter: git checkout -b MyNewBranch archive/\"$1\"; }; w"What we do here is the following:
- Check out the branch to close
- Tag this branch as archive/branchname
- Important - push the tag the remote e.g. origin in the provided aliased commands above
- (Delete the local branch)
- Delete the remote branch
- Display a friendly output message how to restore the branch later through a tag
git closebranch MyBranchToBeClosed
If you just want to close the remote branch and keep the local one, enter:
git closeremotebranch MyBranchToBeClosed
To restore the branch MyBranchToBeClosed (which now is actually closed!) later, just enter:
git checkout -b MyRestoredBranch archive/MyBranchToBeClosed
This lets you keep old branch around as tags and not proliferate the branch listings. We however have moved the branch(es) over to tags prefixed with archive/
I wish Git was simpler to use sometimes so we did not have to use such hacks, closing branches should be easy.
Wednesday, 17 October 2018
Working with Netsh http sslcert setup and SSL bindings through Powershell
I am working with a solution at work where I need to enable IIS Client certificates. I am not able to get past the "Provide client certificate" dialog, but
it is possible to alter the setup of SSL cert bindings on your computer through the Netsh command. This command is not in Powershell, but at the command line.
I decided to write some Powershell functions to be able to alter this setup at least in an easier way. One annoyance with the netsh command is that you have to keep track of the
Application Id and Certificate hash values. Here, we can easier keep track of this through Powershell code.
The Powershell code to display, modify, delete and add SSL cert bindings is as follows:
# Retrieves the netsh http SSL certificate binding for the given ip:port and
# parses the "Key : Value" output lines into a hashtable keyed by e.g.
# 'Certificate Hash' and 'Application ID'.
function Get-NetshSetup($sslBinding='0.0.0.0:443') {
    # BUG FIX: the original hardcoded 0.0.0.0:443 here instead of using $sslBinding.
    # NOTE(review): the original invoked `netsh http show ssl <ipport>`; the
    # documented form is `show sslcert ipport=...` — confirm on the target OS.
    $sslsetup = netsh http show sslcert ipport=$sslBinding
    $sslsetupKeys = @{}
    foreach ($line in $sslsetup) {
        if ($line -ne $null -and $line.Contains(': ')) {
            # Naive split keeps only the text around the first ':' — fine for the
            # keys used by the callers below, but values containing ':' (such as
            # the IP:port line) are truncated.
            $key = $line.Split(':')[0].Trim()
            $value = $line.Split(':')[1].Trim()
            if (!$sslsetupKeys.ContainsKey($key)) {
                $sslsetupKeys.Add($key, $value)
            }
        }
    }
    # BUG FIX: return the parsed hashtable. The original returned the raw netsh
    # output ($sslsetup) even though every caller indexes the result like a
    # hashtable (e.g. $sslsetup['Certificate Hash']).
    return $sslsetupKeys
}

# Prints the parsed SSL binding setup as key/value pairs.
function Display-NetshSetup($sslBinding='0.0.0.0:443') {
    Write-Host SSL-Setup is:
    $sslsetup = Get-NetshSetup($sslBinding)
    # BUG FIX: enumerate .Keys — iterating a hashtable directly yields the
    # hashtable itself as a single object, not its entries.
    foreach ($key in $sslsetup.Keys) {
        Write-Host $key $sslsetup[$key]
    }
}

# Re-creates the SSL cert binding for $sslBinding, reusing the currently bound
# certificate hash and application id but applying new negotiation/revocation flags.
function Modify-NetshSetup($sslBinding='0.0.0.0:443', $certstorename='My',
    $verifyclientcertrevocation='disable', $verifyrevocationwithcachedcleintcertonly='disable',
    $clientCertNegotiation='enable', $dsmapperUsage='enable') {
    $sslsetup = Get-NetshSetup($sslBinding)
    echo Deleting sslcert netsh http binding for $sslBinding ...
    netsh http delete sslcert ipport=$sslBinding
    echo Adding sslcert netsh http binding for $sslBinding...
    # BUG FIX: index expressions must be wrapped in $(...) to expand inside a
    # native command's arguments; the original passed the literal text
    # `$sslsetup['Certificate Hash']` to netsh.
    netsh http add sslcert ipport=$sslBinding certhash=$($sslsetup['Certificate Hash']) appid=$($sslsetup['Application ID']) certstorename=$certstorename verifyclientcertrevocation=$verifyclientcertrevocation verifyrevocationwithcachedclientcertonly=$verifyrevocationwithcachedcleintcertonly clientcertnegotiation=$clientCertNegotiation dsmapperusage=$dsmapperUsage
    echo Done. Inspect output.
    Display-NetshSetup $sslBinding
}

# Adds a brand new SSL cert binding from an explicit certificate hash and app id.
function Add-NetshSetup($sslBinding, $certstorename, $certhash, $appid,
    $verifyclientcertrevocation='disable', $verifyrevocationwithcachedcleintcertonly='disable',
    $clientCertNegotiation='enable', $dsmapperUsage='enable') {
    echo Adding sslcert netsh http binding for $sslBinding...
    netsh http add sslcert ipport=$sslBinding certhash=$certhash appid=$appid clientcertnegotiation=$clientCertNegotiation dsmapperusage=$dsmapperUsage certstorename=$certstorename verifyclientcertrevocation=$verifyclientcertrevocation verifyrevocationwithcachedclientcertonly=$verifyrevocationwithcachedcleintcertonly
    echo Done. Inspect output.
    Display-NetshSetup $sslBinding
}

#Get-NetshSetup('0.0.0.0:443');
Display-NetshSetup
#Modify-NetshSetup
Add-NetshSetup '0.0.0.0:443' 'MY' 'c0fe06da89bcb8f22da8c8cbdc97be413b964619' '{4dc3e181-e14b-4a21-b022-59fc669b0914}'
Display-NetshSetup
Saturday, 29 September 2018
Injecting text into HTML lists using Sass
I wanted to test out if I could inject text via Sass style sheets to see if Sass could somehow support this. Of course, this is already possible in CSS using the nth-child selector and the ::after selector.
Here is a sample of the technique used in this article!
In Sass, however, we can inject text into a large list if we want. Note that the DOM must match the data you want to inject: if you want to inject, for example, five strings into a <ul> list
of <li> items, you must also have five <li> items.
Anyways, this is what I ended up with:
// SCSS demo: applies a Baskerville-style serif font stack to list items and
// injects the five strings in $coolitems into the first five <li> elements via
// li:nth-child(i)::before { content: ... } — explained in the prose below.
$baskerville-font-stack: "Big Caslon", "Book Antiqua", "Palatino Linotype", Georgia, serif !default; ul li { font-family: $baskerville-font-stack; } $coolitems: ('wig', 'makeup', 'lipstick', 'rouge', 'botox'); @for $i from 1 to length($coolitems)+1{ $coolitem: nth($coolitems, $i); li:nth-child(#{$i})::before { content: $coolitem; } } $blogtitle: "Tores jævlige rosablogg";We first declare an array in Sass using
$coolitems: ('wig', 'makeup', 'lipstick', 'rouge', 'botox')
. We then use the @for loop in Sass to get loop through this array. The syntax is:
@for $i from 1 to length($coolarray)+1
I had to use the +1 here in my sample.. Then we grab hold of the current item using the nth function, passing in the array $coolitems and specifying the index $i. Now that we have the
item of the $coolitems array, we can set the content CSS property of the n-th child and use the ::before (or ::after) selector. I tried to avoid using the +1 in the for loop, but then the last item of my array was not included in the list.
Note - the use of the #{$i} in the syntax above. This is called variable interpolation in Sass. It is similar syntax-wise to variable interpolation in shell scripts, actually. We use it so that we can refer to the variable $i inside the nth-child operator of CSS selector.
And the nth operator is as noted used to grab hold of the nth item of the Sass array.
And your HTML will then look like this:
<ul> <li></li> <li></li> <li></li> <li></li> <li></li> </ul>So you actually now have a list in HTML which is empty, but the values are injected through CSS! And Sass makes supporting such scenarios easier. Now why the heck would you want such a thing? I do not know, I am more a backend developer than webdev frontend developer, it is though cool that you can load up data into the DOM using Sass and CSS
Thursday, 6 September 2018
Some handy Git tips - show latest commits and searching the log and more
This article will present some tips around Git and how you can add functionality for showing the latest commits and search the log.
I would like to share these aliased commands to show you how they can ease your everyday use of Git from the command line.
[alias] glog = log --all --decorate --oneline --graph glogf = log --all --decorate --oneline --graph --pretty=fuller st = status out = !git fetch && git log FETCH_HEAD.. outgoing = !git fetch && git log FETCH_HEAD.. in = !git fetch && git log ..FETCH_HEAD incoming = !git fetch && git log ..FETCH_HEAD com = "!w() { git commit --all --message \"$1\"; }; w" undopush = "!w() { git revert HEAD~\"$1\"..HEAD; }; w" searchlog = "!f() { git --no-pager log --color-words --all --decorate --graph -i --grep \"$1\"; }; f" branches = branch --verbose --sort=-committerdate --format '%(HEAD)%(color:yellow)%(refname:short)%(color:reset) -%(color:red)%(objectname:short)%(color:reset) - %(contents:subject) -%(authorname) (%(color:green)%(committerdate:relative)%(color:reset))' allbranches = "!g() { git branch --all --verbose --sort=-committerdate --format '%(HEAD) %(color:yellow)%(refname:short)%(color:reset) -%(color:red)%(objectname:short)%(color:reset) - %(contents:subject) -%(authorname) (%(color:green)%(committerdate:relative)%(color:reset))' --color=always | less -R; }; g" verify = fsck clearlocal = clean -fd && git reset stash-unapply = !git stash show -p | git apply -R lgb = log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset%n' --abbrev-commit --date=relative --branches tree = log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset%n' --abbrev-commit --date=relative --branches alltree = log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset%n' --date=relative --branches --all latest = "!f() { echo "Latest \"${1:-11}\" commits accross all branches:"; git log --abbrev-commit --date=relative --branches --all --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset%n' -n ${1:-11}; } ; f" add-commit = !git add -A && git commit showconfig = config --global -e [merge] tool = kdiff3 
[mergetool "kdiff3"] cmd = \"C:\\\\Program Files\\\\KDiff3\\\\kdiff3\" $BASE $LOCAL $REMOTE -o $MERGED [core] editor = 'c:/program files/sublime text 3/subl.exe' -w [core] editor = 'c:/Program Files/Sublime Text 3/sublime_text.exe'The best aliases are how you set up Sublime Text 3 as the Git editor and also how you can show the latest commits. The latest commits use a parametrized shell function. I set the default value to 11 in this case, if you do not give a parameter. You can for example show the latest 2 commits by typing: git latest 2
latest = "!f() { echo "Latest \"${1:-11}\" commits accross all branches:"; git log --abbrev-commit --date=relative --branches --all --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset%n' -n ${1:-11}; } ; f"Note the use of a shell function and also that we refer to the first parameter as ${1} i bash shell script, with a :-11 to set the first param as 11. The syntax is ${n:-p} where n is the nth parameter (not starting with zero!) and p is the default value. A special syntax, but that is how bash works. Also note that a git alias with a shell function can do multiple functions, separated with semi-colon ;. The searchlog alias / shell function is also handy:
searchlog = "!f() { git --no-pager log --color-words --all --decorate --graph -i --grep \"$1\"; }; f"Also, multiple aliases here are similar to Mercurial's in and out commands to detect incoming pushed commits and outgoing local commits. Happy Git-ing!
Tuesday, 21 August 2018
Creating a validation attribute for multiple enum values in C#
This article will present a validation attribute for multiple enum values in C#. In C#, generics are not supported in attributes.
The following class therefore specifies the type of enum and provides a list of invalid enum values as an example of such an attribute.
using System;
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations;

namespace ValidateEnums
{
    /// <summary>
    /// Validation attribute that rejects a configured set of enum values.
    /// Generics are not supported in attributes, so the enum type is passed
    /// as a <see cref="Type"/> together with the disallowed values.
    /// </summary>
    public sealed class InvalidEnumsAttribute : ValidationAttribute
    {
        private readonly List<object> _invalidValues = new List<object>();

        /// <param name="enumType">The enum type this attribute validates.</param>
        /// <param name="enumValues">Values of <paramref name="enumType"/> that are considered invalid.</param>
        public InvalidEnumsAttribute(Type enumType, params object[] enumValues)
        {
            foreach (var enumValue in enumValues)
            {
                // Parse via the string representation so both boxed enum values
                // and their names are accepted.
                var invalidValueParsed = Enum.Parse(enumType, enumValue.ToString());
                _invalidValues.Add(invalidValueParsed);
            }
        }

        /// <summary>Returns false when <paramref name="value"/> is one of the configured invalid values.</summary>
        public override bool IsValid(object value)
        {
            // IDIOM FIX: the original looped calling Enum.Equals(a, b), which is
            // just the inherited static object.Equals; List<T>.Contains performs
            // the same boxed-value comparison and states the intent directly.
            return !_invalidValues.Contains(value);
        }
    }
}
// Let us make use of this attribute in a sample class.
// Sample model: validation rejects IceCream.None and IceCream.All on the IceCream property.
public class Snack { [InvalidEnums(typeof(IceCream), IceCream.None, IceCream.All )] public IceCream IceCream { get; set; } }We can then test out this attribute easily in NUnit tests for example:
[TestFixture] public class TestEnumValidationThrowsExpected { [Test] [ExpectedException(typeof(ValidationException))] [TestCase(IceCream.All)] [TestCase(IceCream.None)] public void InvalidEnumsAttributeTest_ThrowsExpected(IceCream iceCream) { var snack = new Snack { IceCream = iceCream }; Validator.ValidateObject(snack, new ValidationContext(snack, null, null), true); } [Test] public void InvalidEnumsAttributeTest_Passes_Accepted() { var snack = new Snack { IceCream = IceCream.Vanilla }; Validator.ValidateObject(snack, new ValidationContext(snack, null, null), true); Assert.IsTrue(true, "Test passed for valid ice cream!"); }
Sunday, 19 August 2018
ConfigurationManager for .Net Core
.Net Core is changing a lot of the underlying technology for .Net developers migrating to this development environment. System.Configuration.ConfigurationManager class is gone and web.config and app.config
files, which are XML-based, are primarily replaced with .json files, at least in ASP.NET Core 2 for example.
Let's look at how we can implement a class to let you at least be able to read AppSettings in your applicationSettings.json file which can be later refined. This implementation is my first version.
using Microsoft.AspNetCore.Hosting;
using Microsoft.Extensions.Configuration;
using System.IO;
using System.Linq;

namespace WebApplication1
{
    /// <summary>
    /// Minimal stand-in for the classic System.Configuration.ConfigurationManager
    /// on .NET Core: exposes app settings read from JSON configuration files via
    /// a statically cached IConfiguration root.
    /// NOTE(review): static mutable state, not guarded for concurrent access —
    /// confirm this is only initialized during startup.
    /// </summary>
    public static class ConfigurationManager
    {
        private static IConfiguration _configuration;   // cached configuration root, built lazily
        private static string _basePath;                // content root used to resolve config files
        private static string[] _configFileNames;       // optional explicit list of JSON config files

        /// <summary>
        /// Sets the base path from the hosting environment and immediately
        /// rebuilds the cached configuration so it picks up the new path.
        /// </summary>
        public static void SetBasePath(IHostingEnvironment hostingEnvironment)
        {
            _basePath = hostingEnvironment.ContentRootPath;
            _configuration = null; // drop the cache so it is rebuilt with the new base path
            _configuration = GetConfigurationObject();
        }

        /// <summary>
        /// Overrides which JSON files are loaded. Takes effect the next time the
        /// configuration is (re)built; it does NOT invalidate an existing cache.
        /// </summary>
        public static void SetApplicationConfigFiles(params string[] configFileNames)
        {
            _configFileNames = configFileNames;
        }

        /// <summary>
        /// Lazily built configuration root; index it like AppSettings["SomeKey"],
        /// or "Section:SubKey" for nested settings.
        /// </summary>
        public static IConfiguration AppSettings
        {
            get
            {
                if (_configuration != null) return _configuration;
                _configuration = GetConfigurationObject();
                return _configuration;
            }
        }

        /// <summary>
        /// Builds the configuration from the explicit file list when one was set,
        /// otherwise from the conventional appsettings.json.
        /// </summary>
        private static IConfiguration GetConfigurationObject()
        {
            var builder = new ConfigurationBuilder()
                .SetBasePath(_basePath ?? Directory.GetCurrentDirectory());
            if (_configFileNames != null && _configFileNames.Any())
            {
                foreach (var configFile in _configFileNames)
                {
                    // optional: true, reloadOnChange: true — explicitly listed
                    // files may be missing without failing startup
                    builder.AddJsonFile(configFile, true, true);
                }
            }
            else
                // default file is mandatory (optional: false), reloads on change
                builder.AddJsonFile("appsettings.json", false, true);
            return builder.Build();
        }
    }
}We can then easily get app settings from our config file:
string endPointUri = ConfigurationManager.AppSettings["EmployeeWSEndpointUri"];Sample appsettings.json file:
{
"Logging": {
"IncludeScopes": false,
"Debug": {
"LogLevel": {
"Default": "Warning"
}
},
"Console": {
"LogLevel": {
"Default": "Warning"
}
}
},
"EmployeeWSEndpointUri": "https://someserver.somedomain.no/someproduct/somewcfservice.svc"
}
If you have nested config settings, you can refer to these using the syntax SomeAppSetting:SomeSubAppSetting, like "Logging:Debug:LogLevel:Default".
Creating a VM in Azure with Powershell core in Linux
This article will describe how to create a virtual machine in Azure from the command line in Linux. Powershell core will be used and I have tested this procedure using the Linux distribution MX-17 Horizon.
First, we will update the package list for APT (Advanced Packaging Tool), get the latest versions of cURL and apt-transport-https and add the
GPG key for the Microsoft repository for Powershell core. Then APT is once more updated and the package powershell is installed. The following script works on Debian-based distributions:
The last command is executed to check that the Powershell module is available. Type [Y] to allow the module installation in the first step. Next off, logging into the Azure Resource Manager. Type the following command in Powershell core:
sudo apt-get update
sudo apt-get install curl apt-transport-https
curl https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add -
sudo sh -c 'echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-debian-jessie-prod jessie main" > /etc/apt/sources.list.d/microsoft.list'
sudo apt-get update
sudo apt-get install -y powershell
On my system however, I need to make use of Snap packages. This is because MX-Horizon fails to install powershell due to restrictions on libssl1.0 and libicu52.
Anyways this route let me start up Powershell on MX-17 Horizon:
sudo apt-get install snapd
sudo snap install powershell-preview --classic
sudo snap run powershell-preview
Logging into Azure RM account
Running powershell-preview allows you to both run Powershell commands such as Get-ChildItem ("ls") and Unix tools such as ls (!) from the Powershell command line. We will first install the Powershell module AzureRM.NetCore inside the Powershell session, which is running.
Install-Module AzureRM.Netcore
Get-Command -Module AzureRM.Compute.Netcore
The last command is executed to check that the Powershell module is available. Type [Y] to allow the module installation in the first step. Next off, logging into the Azure Resource Manager. Type the following command in Powershell core:
Login-AzureRMAccount
Running this command will prompt a code and tell you to open up a browser window and log on to: https://microsoft.com/devicelogin
Then you log in to your Azure account and if you are successfully logged in, your Powershell core session is authenticated and you can access your Azure account and its resources!
Creating the Virtual machine in Azure
Creating the virtual machine is done in several steps. The script below is to be saved into a Powershell script file, for example AzureVmCreate.ps1. There are many steps involved in establishing an installation of a VM in Azure. We will in this sample set up all the necessities to get an Ubuntu Server LTS. If you already have got for example a resource group in Azure, the script below can use this resource group instead of creating a new one. The list of steps to create a Linux VM in Azure is as follows:- Create a resource group
- Create a subnet config
- Create a virtual network, set it up with the subnet config
- Create a public IP
- Create a security rule config to allow port 22 (SSH)
- Create a network security group nsg and add in the security rule config
- Create a network interface card nic and associate it with the public ip and the nsg
- Create a VM config and set the operating system, OS image and nic
- Create a VM config's SSH public key config
- Create a VM - Virtual Machine in Azure !
# AzureVmCreate.ps1 — creates a Linux (Ubuntu Server LTS) VM in Azure:
# resource group, vnet + subnet, public IP, NSG with an SSH rule, NIC,
# VM config with an SSH public key, and finally the VM itself.
param([string]$resourceGroupName,
[string]$resourceGroupLocation,
[string]$vmComputerName,
[string]$vmUser,
[string]$vmUserPassword,
[string]$virtualNetworkName)
#Write-Host $resourceGroupName
#Write-Host $resourceGroupLocation
#Write-Host $vmComputerName
#Write-Host $vmUser
#Write-Host $vmUserPassword
# Define user name and blank password.
# NOTE(review): $vmUser and $vmUserPassword are accepted but never used below —
# the credential is hard-coded to "azureuser" with a blank password, and password
# logon is disabled further down (-DisablePasswordAuthentication enforces SSH
# key authentication). Confirm this is intended.
$securePassword = ConvertTo-SecureString ' ' -AsPlainText -Force
$cred = New-Object System.Management.Automation.PSCredential ("azureuser", $securePassword)
# Create the resource group that will own every resource created below
New-AzureRmResourceGroup -Name $resourceGroupName -Location $resourceGroupLocation
# Subnet (10.0.0.0/24) inside the virtual network (10.0.0.0/16)
$subnetConfig = New-AzureRmVirtualNetworkSubnetConfig -Name default -AddressPrefix 10.0.0.0/24
$virtualNetwork = New-AzureRMVirtualNetwork -ResourceGroupName $resourceGroupName -Name `
$virtualNetworkName -AddressPrefix 10.0.0.0/16 -Location $resourceGroupLocation `
-Subnet $subnetConfig
Write-Host "Subnet id: " $virtualNetwork.Subnets[0].Id
# Create a public IP address and specify a DNS name
$pip = New-AzureRmPublicIpAddress -ResourceGroupName $resourceGroupName -Location $resourceGroupLocation `
-Name "mypublicdns$(Get-Random)" -AllocationMethod Static -IdleTimeoutInMinutes 4
# Create an inbound network security group rule for port 22
$nsgRuleSSH = New-AzureRmNetworkSecurityRuleConfig -Name myNetworkSecurityGroupRuleSSH -Protocol Tcp `
-Direction Inbound -Priority 1000 -SourceAddressPrefix * -SourcePortRange * -DestinationAddressPrefix * `
-DestinationPortRange 22 -Access Allow
# Create a network security group
$nsg = New-AzureRmNetworkSecurityGroup -ResourceGroupName $resourceGroupName -Location $resourceGroupLocation `
-Name myNetworkSecurityGroup -SecurityRules $nsgRuleSSH
# Create a virtual network card and associate with public IP address and NSG
$nic = New-AzureRmNetworkInterface -Name myNic -ResourceGroupName $resourceGroupName -Location $resourceGroupLocation `
-SubnetId $virtualNetwork.Subnets[0].Id -PublicIpAddressId $pip.Id -NetworkSecurityGroupId $nsg.Id
# Create a virtual machine configuration (Ubuntu Server 14.04 LTS from Canonical)
$vmConfig = New-AzureRmVMConfig -VMName $vmComputerName -VMSize Standard_D1 | `
Set-AzureRmVMOperatingSystem -Linux -ComputerName $vmComputerName -Credential $cred -DisablePasswordAuthentication | `
Set-AzureRmVMSourceImage -PublisherName Canonical -Offer UbuntuServer -Skus 14.04.2-LTS -Version latest | `
Add-AzureRmVMNetworkInterface -Id $nic.Id
# Configure SSH Keys — requires an existing key pair at $HOME/.ssh/id_rsa.pub.
# (PowerShell variable names are case-insensitive, so $vmconfig is $vmConfig.)
$sshPublicKey = Get-Content "$HOME/.ssh/id_rsa.pub"
Add-AzureRmVMSshPublicKey -VM $vmconfig -KeyData $sshPublicKey -Path "/home/azureuser/.ssh/authorized_keys"
# Create a virtual machine
New-AzureRmVM -ResourceGroupName $resourceGroupName -Location $resourceGroupLocation -VM $vmConfig
We can then instantiate a new VM in Azure running the script above, which will create a standard D1 blade server in Azure
with approx. 3,5 GB RAM and 30 GB disk space with Ubuntu Server LTS latest from the publisher Canonical by calling this script
like for example:
./AzureVmCreate.ps1 -resourceGroupName "SomeResourceGroup" -resourceGroupLocation "northcentralus" -vmComputerName "SomeVmComputer" -vmUser "adminUser" -vmUserPassword "s0m3CoolPassw0rkzzD" -virtualNetworkName "SomeVirtualNetwork"
After the VM is created, which for me took about 5 minutes time before the VM was up and running in Azure, you can access it by visiting the Azure portal at
https://portal.azure.com
You can then take notice of the public IP address that the script in this article created and connect with: ssh azureuser@ip_address_to_linux_VM_you_just_created_in_Azure!
The following images shows me logging into the Ubuntu Server LTS Virtual Machine that was created with the Powershell core script in this article!
A complete list of available Linux images can be seen in the Azure marketplace or in the Microsoft Docs: Linux VMs in Azure overview
After installation, you can run the following cmdlet to clean up the resource group and all its resources, including the VM you created for testing.
Remove-AzureRmResourceGroup -Name myResourceGroup
Thursday, 9 August 2018
Detect USB drives in a WPF application
We can use the class System.IO.DriveInfo to retrieve all the drives on the system and look for drives where the DriveType is Removable. In addition, the removable drive (USB usually) must be ready, which is accessible as the property IsReady. First off, we define a provider to retrieve the removable drives:
using System.Collections.Generic;
using System.IO;
using System.Linq;

namespace TestPopWpfWindow
{
    /// <summary>
    /// Supplies the removable drives (typically USB sticks) currently known
    /// to the operating system.
    /// </summary>
    public static class UsbDriveListProvider
    {
        /// <summary>
        /// Returns every drive whose <see cref="DriveInfo.DriveType"/> is
        /// <see cref="DriveType.Removable"/>.
        /// </summary>
        public static IEnumerable<DriveInfo> GetAllRemovableDrives()
        {
            return DriveInfo.GetDrives()
                .Where(candidate => candidate.DriveType == DriveType.Removable);
        }
    }
}
Let us use the MVVM pattern also, so we define a ViewModelbase class, implementing INotifyPropertyChanged.
using System.ComponentModel;

namespace TestPopWpfWindow
{
    /// <summary>
    /// Minimal INotifyPropertyChanged base class for the demo view models.
    /// </summary>
    public class ViewModelBase : INotifyPropertyChanged
    {
        public event PropertyChangedEventHandler PropertyChanged;

        /// <summary>
        /// Notifies WPF bindings that the named property changed.
        /// </summary>
        /// <param name="propertyName">Name of the property that changed.</param>
        public void RaisePropertyChanged(string propertyName)
        {
            // Copy the delegate to a local so an unsubscribe between the null
            // check and the invocation cannot cause a NullReferenceException.
            PropertyChangedEventHandler handler = PropertyChanged;
            if (handler != null)
            {
                handler(this, new PropertyChangedEventArgs(propertyName));
            }
        }
    }
}It is also handy to have an implementation of ICommand:
using System;
using System.Windows.Input;

namespace TestPopWpfWindow
{
    /// <summary>
    /// ICommand implementation that forwards CanExecute/Execute to the
    /// delegates supplied at construction time.
    /// </summary>
    public class RelayCommand : ICommand
    {
        private readonly Predicate<object> _canExecutePredicate;
        private readonly Action<object> _executeAction;

        // Required by ICommand; this class never raises it itself.
        public event EventHandler CanExecuteChanged;

        /// <summary>
        /// Builds the command from an enablement predicate and an action.
        /// </summary>
        public RelayCommand(Predicate<object> canExecute, Action<object> execute)
        {
            _canExecutePredicate = canExecute;
            _executeAction = execute;
        }

        /// <summary>Evaluates the predicate passed to the constructor.</summary>
        public bool CanExecute(object parameter)
        {
            return _canExecutePredicate(parameter);
        }

        /// <summary>Runs the action passed to the constructor.</summary>
        public void Execute(object parameter)
        {
            _executeAction(parameter);
        }
    }
}We also set the DataContext of MainWindow to an instance of a demo view model defined afterwards:
namespace TestPopWpfWindow
{
    /// <summary>
    /// Interaction logic for MainWindow.xaml
    /// </summary>
    public partial class MainWindow : Window
    {
        public MainWindow()
        {
            InitializeComponent();
            // Wire the demo view model up as the binding root for the window.
            DataContext = new UsbDriveInfoDemoViewModel();
        }
    }
}We then define the view model itself and use System.Management.ManagementEventWatcher to look for changes in the drives mounted onto the system.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Management;
using System.Windows;
using System.Windows.Input;

namespace TestPopWpfWindow
{
    /// <summary>
    /// View model that lists the removable (USB) drives on the system and
    /// refreshes the list whenever Windows reports a volume change via WMI.
    /// </summary>
    public class UsbDriveInfoDemoViewModel : ViewModelBase, IDisposable
    {
        public UsbDriveInfoDemoViewModel()
        {
            DriveInfos = new List<DriveInfo>();
            ReloadDriveInfos();
            RegisterManagementEventWatching();
            // NOTE(review): @"E:<" looks like a typo for @"E:\" — DriveInfo.Name
            // values end with a backslash, so the converter comparison would
            // never match "E:<". Confirm against UsbDriveAvailableEnablerConverter.
            TargetUsbDrive = @"E:<";
            AccessCommand = new RelayCommand(x => true, x => MessageBox.Show("Functionality executed."));
        }

        // Number of removable drives currently detected (bound in the view).
        public int UsbDriveCount { get; set; }

        private string _targetUsbDrive;

        // Drive name the "Access functionality" button is gated on.
        public string TargetUsbDrive
        {
            get { return _targetUsbDrive; }
            set
            {
                if (_targetUsbDrive != value)
                {
                    _targetUsbDrive = value;
                    RaisePropertyChanged("TargetUsbDrive");
                    RaisePropertyChanged("DriveInfo");
                }
            }
        }

        public ICommand AccessCommand { get; set; }

        // Re-queries removable drives. Runs the list mutation through the
        // Dispatcher because the WMI event callback arrives on a worker thread.
        private void ReloadDriveInfos()
        {
            var usbDrives = UsbDriveListProvider.GetAllRemovableDrives();
            Application.Current.Dispatcher.Invoke(() =>
            {
                DriveInfos.Clear();
                foreach (var usbDrive in usbDrives)
                {
                    DriveInfos.Add(usbDrive);
                }
                UsbDriveCount = DriveInfos.Count;
                RaisePropertyChanged("UsbDriveCount");
                RaisePropertyChanged("DriveInfos");
            });
        }

        public List<DriveInfo> DriveInfos { get; set; }

        private ManagementEventWatcher _watcher;

        // Subscribes to WMI volume-change events (drive mounted/unmounted).
        private void RegisterManagementEventWatching()
        {
            _watcher = new ManagementEventWatcher();
            var query = new WqlEventQuery("SELECT * FROM Win32_VolumeChangeEvent");
            _watcher.EventArrived += watcher_EventArrived;
            _watcher.Query = query;
            _watcher.Start();
        }

        private void watcher_EventArrived(object sender, EventArrivedEventArgs e)
        {
            Debug.WriteLine(e.NewEvent);
            ReloadDriveInfos();
        }

        // Stops the WMI watcher and unhooks the event handler.
        // NOTE(review): _watcher.Dispose() is never called — consider disposing
        // the watcher here as well.
        public void Dispose()
        {
            if (_watcher != null)
            {
                _watcher.Stop();
                _watcher.EventArrived -= watcher_EventArrived;
            }
        }
    }
}We also define a WPF multi-converter next to enable the button:
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Windows.Data;

namespace TestPopWpfWindow
{
    /// <summary>
    /// Multi-value converter that enables the "Access functionality" button only
    /// when the targeted drive is among the detected removable drives and that
    /// drive reports IsReady.
    /// Binding order (see MainWindow.xaml): values[0] = TargetUsbDrive (string),
    /// values[1] = DriveInfos (List of DriveInfo).
    /// </summary>
    public class UsbDriveAvailableEnablerConverter : IMultiValueConverter
    {
        public object Convert(object[] values, Type targetType, object parameter, CultureInfo culture)
        {
            // Defensive: disable the button on any malformed/empty binding input.
            if (values == null || values.Count() != 2) return false;
            var driveInfos = values[1] as List<DriveInfo>;
            var targetDrive = values[0] as string;
            if (driveInfos == null || !driveInfos.Any() || string.IsNullOrEmpty(targetDrive)) return false;
            // Enabled only if the named drive is present, ready, and matches exactly.
            return driveInfos.Any(d => d.IsReady && d.Name == targetDrive);
        }

        // One-way converter: converting back is intentionally unsupported.
        public object[] ConvertBack(object value, Type[] targetTypes, object parameter, CultureInfo culture)
        {
            throw new NotImplementedException();
        }
    }
}And we define a GUI to test this code:
<Window x:Class="TestPopWpfWindow.MainWindow" xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:local="clr-namespace:TestPopWpfWindow" Title="MainWindow" Height="350" Width="525"> <Window.Resources> <Style x:Key="usbLabel" TargetType="Label"> <Style.Triggers> <DataTrigger Binding="{Binding IsReady}" Value="False"> <Setter Property="Background" Value="Gray"></Setter> </DataTrigger> <DataTrigger Binding="{Binding IsReady}" Value="True"> <Setter Property="Background" Value="Green"></Setter> </DataTrigger> </Style.Triggers> </Style> <local:UsbDriveAvailableEnablerConverter x:Key="usbDriveAvailableEnablerConverter"></local:UsbDriveAvailableEnablerConverter> </Window.Resources> <Grid> <Grid.RowDefinitions> <RowDefinition Height="Auto"></RowDefinition> <RowDefinition Height="Auto"></RowDefinition> <RowDefinition Height="Auto"></RowDefinition> </Grid.RowDefinitions> <StackPanel Orientation="Vertical"> <TextBlock Text="USB Drive-detector" FontWeight="DemiBold" HorizontalAlignment="Center" FontSize="14" Margin="2"></TextBlock> <TextBlock Text="Removable drives on the system" FontWeight="Normal" HorizontalAlignment="Center" Margin="2"></TextBlock> <TextBlock Text="Drives detected:" FontWeight="Normal" HorizontalAlignment="Center" Margin="2"></TextBlock> <TextBlock Text="{Binding UsbDriveCount, UpdateSourceTrigger=PropertyChanged}" FontWeight="Normal" HorizontalAlignment="Center" Margin="2"></TextBlock> <ItemsControl Grid.Row="0" ItemsSource="{Binding DriveInfos, UpdateSourceTrigger=PropertyChanged}" Width="100" BorderBrush="Black" BorderThickness="1"> <ItemsControl.ItemTemplate> <DataTemplate> <StackPanel Orientation="Vertical"> <Label Style="{StaticResource usbLabel}" Width="32" Height="32" FontSize="18" Foreground="White" Content="{Binding Name}"> </Label> </StackPanel> </DataTemplate> </ItemsControl.ItemTemplate> </ItemsControl> </StackPanel> <Button Grid.Row="1" Height="24" Width="130" 
VerticalAlignment="Top" Margin="10" Content="Access functionality" Command="{Binding AccessCommand}"> <Button.IsEnabled> <MultiBinding Converter="{StaticResource usbDriveAvailableEnablerConverter}"> <MultiBinding.Bindings> <Binding Path="TargetUsbDrive"></Binding> <Binding Path="DriveInfos"></Binding> </MultiBinding.Bindings> </MultiBinding> </Button.IsEnabled> </Button> <StackPanel Grid.Row="2" Orientation="Horizontal" HorizontalAlignment="Center"> <TextBlock Margin="2" Text="Target this USB-drive:"></TextBlock> <TextBox Margin="2" Text="{Binding TargetUsbDrive, UpdateSourceTrigger=LostFocus}" Width="100"></TextBox> </StackPanel> </Grid> </Window>I have now provided the Visual Studio 2013 solution with the code above available for download here:
Download VS 2013 Solution with source code above
Explore contents of a running container in Docker
This is a short tip on how to explore the contents of a running container in Docker.
Get Docker for windows from here: Docker for Windows First off, we can list our containers with the following command:
Note the CONTAINER ID value. Use this to explore the contents of the running container:
Note that in this example I just have to type the necessary part of the Guid of the CONTAINER ID to discern it from others and issue an exec command with the parameters -it (interactive and pseudo tty session) followed with bash. From now on, I can explore the container easily: ls -al The following image sums this up: Another cool tip, how about installing a new nginx web server on port 81 on your Windows Dev Box? Simple!
And the following command pulls a windowsservercore docker image (10,7 GB size) and starts it up and gives you a pseudo-terminal which is interactive:
docker run -it microsoft/windowsservercore
Get Docker for windows from here: Docker for Windows First off, we can list our containers with the following command:
docker container ls
Note the CONTAINER ID value. Use this to explore the contents of the running container:
docker exec -it 231 bash
Note that in this example I just have to type the necessary part of the Guid of the CONTAINER ID to discern it from others and issue an exec command with the parameters -it (interactive and pseudo tty session) followed with bash. From now on, I can explore the container easily: ls -al The following image sums this up: Another cool tip, how about installing a new nginx web server on port 81 on your Windows Dev Box? Simple!
docker run --detach --publish 81:80 nginx
And the following command pulls a windowsservercore docker image (10,7 GB size) and starts it up and gives you a pseudo-terminal which is interactive:
docker run -it microsoft/windowsservercore
Wednesday, 8 August 2018
Doubly Linked List in C#
I am reading the book "C#7 and .NET Core 2.0 High Performance" about data structures and came across Doubly linked lists. This is an interesting data structure. We most often
use arrays and lists in .NET in everyday use, but both are not as high performant as linked lists when it comes to inserting and removing items in their structure. Removing an item
and inserting an item in a list is only quick, if we add to the end of the list or remove from the end of the list. Arrays have the same behavior, if you have a large data structure
with many items, consider using a LinkedList instead. .NET already got a good implementation of linked lists in System.Collections.Generic with the LinkedListNode class, so this
article just presents a class I wrote for fun on my own. If you want to see the source code of the .Net class, it is available here:
LinkedListNode implementation(Reference Source /.NET)
Now how fun is it to just use .NET's implementation, we want to learn something and do things ourselves as devoted coders? Therefore I present my own implementation! You can find the source code by cloning the following Git repo: Actually, the implementation is easy, the most cumbersome part is to be careful with the next and previous pointers. Just like the .NET implementation, this class supports generic and a payload of different types, in the demo I will use string as the payload of each node.
As we can see, with our ToString implementation we can deduce that the first and last node is special, the first one lacks a prev pointer illustrated by "HEAD->" and the last node lacks a next pointer illustrated with "->NULL". You will find this in the implementation of the class. I have decided to actually revert the order if the client wants to insert multiple values, as that ordering behaves more naturally. The client can look after a value or multiple values or search with a given predicate, or pass in a node and use it to search. Also, it is possible to remove a node. We end up with an implementation of a Doubly Linked list that can be used in many scenarios. I would advice you to use the .NET version as it supports more features, such as a pointer to the HEAD node. But this implementation is compact and easy to understand for many developers. You will usually use linked list in scenarios where you have much data and want to quickly insert or remove one or several nodes in the linked list. It also supports quick navigation from a node to its previous or next node. Imagine for example working with a class called Book which needs to have an iterable structure ("Pages") to move to the next and previous page and at the same time insert new pages or removing a page from the middle of the data structure. Using an array or a list would be low performant and slow, while a doubly linked list would actually allow the developer to create code that quickly inserts a new page or removes a page at an arbitrary position in the data structure of the Book. This class can of course be optimized to support for example circular linked list with a pointer always to HEAD, or maybe you want to have pointer to HEAD and TAIL and not have a circular list? The source code should be relatively self explanatory for the intermediate C#-developer to revise and improve. I hope you found this article interesting.
LinkedListNode implementation(Reference Source /.NET)
Now how fun is it to just use .NET's implementation, we want to learn something and do things ourselves as devoted coders? Therefore I present my own implementation! You can find the source code by cloning the following Git repo: Actually, the implementation is easy, the most cumbersome part is to be careful with the next and previous pointers. Just like the .NET implementation, this class supports generic and a payload of different types, in the demo I will use string as the payload of each node.
git clone https://toreaurstad@bitbucket.org/toreaurstad/doublylinkedlist.git
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
// ReSharper disable ArrangeThisQualifier
// ReSharper disable RedundantNameQualifier
namespace DoublyLinkedList
{
    /// <summary>
    /// Node in a doubly linked list data structure with pointers to the previous and next item in the linked list, if any.
    /// Allows quick insertion and removal of values in even large data structures.
    /// Note: the search and ToString methods are recursive and only walk forward
    /// from the node they are invoked on, so very long lists may exhaust the stack.
    /// </summary>
    /// <typeparam name="T">The type of Data in each node of the doubly linked list</typeparam>
    public class LinkedListNode<T>
    {
        public LinkedListNode(T data)
        {
            _data = data;
            _prev = null; //Init previous reference to null
            _next = null; //Init next reference to null
        }

        // Payload carried by this node; read-only after construction.
        public T Data { get { return _data; } }

        /// <summary>
        /// Attempts to find a value in the doubly linked list, searching forward from this node. Uses object.Equals for comparison. O(N) complexity.
        /// </summary>
        /// <param name="value">Value to find</param>
        /// <returns>The first node with the matching value, if any.</returns>
        public LinkedListNode<T> Find(T value)
        {
            if (object.Equals(Data, value)) return this;
            if (this._next is null) return null;
            return Find(this._next, value);
        }

        /// <summary>
        /// Attempts to find a node matching a given predicate, searching forward from this node. Returns null if no node matches. O(N) complexity.
        /// </summary>
        /// <param name="searchCondition">Predicate evaluated against each node.</param>
        /// <returns>The first node satisfying the predicate, or null.</returns>
        public LinkedListNode<T> Find(Predicate<LinkedListNode<T>> searchCondition)
        {
            if (searchCondition(this)) return this;
            if (this._next != null) return this._next.Find(searchCondition);
            return null;
        }

        /// <summary>
        /// Searches for multiple values and returns the nodes found. The search returns the first match, if any, for every search value.
        /// O(N*M) complexity for M search values (one O(N) scan per value); values that are not found are silently skipped.
        /// </summary>
        /// <param name="values">Values to look up; must be non-null and non-empty.</param>
        /// <returns>Array of the nodes found.</returns>
        public LinkedListNode<T>[] FindMultiple(params T[] values)
        {
            if (values is null) throw new ArgumentNullException(nameof(values));
            if (!values.Any()) throw new ArgumentException("Please provide a nonempty array of values!");
            var foundValues = new List<LinkedListNode<T>>();
            foreach (T value in values)
            {
                LinkedListNode<T> foundValue = Find(value);
                if (foundValue != null) foundValues.Add(foundValue);
            }
            return foundValues.ToArray();
        }

        // Predicate-based recursive search helper; currently unreferenced.
        // ReSharper disable once UnusedMember.Local
        private LinkedListNode<T> Find(LinkedListNode<T> node, Predicate<T> searchCondition)
        {
            if (node is null) return null;
            if (searchCondition(node.Data)) return node;
            if (node._next != null) return Find(node._next, searchCondition);
            return null;
        }

        // Value-based recursive search helper used by Find(T value).
        private LinkedListNode<T> Find(LinkedListNode<T> node, T value)
        {
            if (node is null) return null;
            if (object.Equals(node.Data, value)) return node;
            if (node._next != null) return Find(node._next, value);
            return null;
        }

        /// <summary>
        /// Inserts a node into the doubly linked list directly after this node. Adjusts the prev and next pointers of the inserted node. O(1) complexity.
        /// </summary>
        /// <param name="node">The node to insert; the node's prev and next pointers will be overwritten if already set.</param>
        /// <returns>The inserted node with updated prev and next pointers</returns>
        public LinkedListNode<T> Insert(LinkedListNode<T> node)
        {
            if (node is null) throw new ArgumentNullException(nameof(node));
            LinkedListNode<T> nextNode = this._next;
            node._prev = this;
            this._next = node;
            node._next = nextNode;
            if (nextNode != null) nextNode._prev = node;
            return node;
        }

        /// <summary>
        /// Inserts multiple nodes into the doubly linked list by building nodes with the passed-in values.
        /// O(K) complexity for K values (each individual insertion is O(1)); the values are reversed first
        /// so they end up after this node in the order they were given.
        /// </summary>
        /// <param name="values">Values to insert; must be non-null and non-empty.</param>
        /// <returns>The inserted nodes.</returns>
        public LinkedListNode<T>[] Insert(params T[] values)
        {
            if (values is null) throw new ArgumentNullException(nameof(values));
            if (!values.Any()) throw new ArgumentException("Please provide a nonempty array of values!");
            values = values.Reverse().ToArray(); //Reverse order so insertion behaves sequentially
            var inserted = new List<LinkedListNode<T>>();
            foreach (T value in values)
            {
                LinkedListNode<T> node = new LinkedListNode<T>(value);
                inserted.Add(Insert(node));
            }
            return inserted.ToArray();
        }

        /// <summary>
        /// Removes a node from the linked list. Adjusts the previous and next pointers of its neighbours. O(1) complexity.
        /// </summary>
        /// <param name="node">Node to unlink; must be non-null.</param>
        /// <returns>The removed node with prev/next cleared.</returns>
        public LinkedListNode<T> Remove(LinkedListNode<T> node)
        {
            if (node is null) throw new ArgumentNullException(nameof(node));
            if (node._prev != null) node._prev._next = node._next;
            if (node._next != null) node._next._prev = node._prev;
            //Set unneeded references to null now and to avoid misuse
            node._prev = null;
            node._next = null;
            return node;
        }

        // Removes this node itself from the list (instance counterpart of Remove(node)).
        public void Remove()
        {
            if (this._prev != null) this._prev._next = this._next;
            if (this._next != null) this._next._prev = this._prev;
            //Set unneeded references to null now and to avoid misuse
            this._prev = null;
            this._next = null;
        }

        // Renders the list from this node forward, e.g. "HEAD->a<->b->NULL".
        public override string ToString()
        {
            StringBuilder sb = new StringBuilder();
            if (this._prev is null) sb.Append(Head);
            sb.Append(_data + GetArrow(this));
            IterateLinkedList(this._next, sb);
            return sb.ToString();
        }

        /// <summary>
        /// Iterates the doubly linked list and builds a string to output in the ToString() method
        /// </summary>
        /// <param name="node">LinkedListNode</param>
        /// <param name="sb">StringBuilder</param>
        private void IterateLinkedList(LinkedListNode<T> node, StringBuilder sb)
        {
            if (node != null)
            {
                sb.Append(node.Data + GetArrow(node));
                if (node._next != null) IterateLinkedList(node._next, sb);
            }
        }

        // Chooses the arrow glyph after a node: "<->" for a healthy double link,
        // "->NULL" at the tail, and "??MISSINGLINK??" if the back-pointer is inconsistent.
        private string GetArrow(LinkedListNode<T> node)
        {
            if (node != null)
            {
                if (node._next != null && node._next._prev != null) return DoubleArrow;
                if (node._next != null && node._next._prev == null) return Arrow;
                if (node._next == null) return Arrow + NullString;
            }
            return ArrowUndefined;
        }

        private const string Head = "HEAD->";
        private const string Arrow = "->";
        private const string DoubleArrow = "<->";
        private const string ArrowUndefined = "??MISSINGLINK??";
        private const string NullString = "NULL";

        private readonly T _data;
        private LinkedListNode<T> _prev;
        private LinkedListNode<T> _next;
    }
}This implementation has not got a specific pointer to the head of the list like a circular linked list can provide. The following code makes use of this class to demonstrate its usage:
using System;
using System.Diagnostics;
using System.Linq;

namespace DoublyLinkedList
{
    class Program
    {
        // ReSharper disable once UnusedParameter.Local
        static void Main(string[] args)
        {
            // Build a doubly linked list rooted in "Hello".
            // (The <string> type arguments were stripped by the HTML extraction and
            // are restored here; the prose sentence fused into the code was moved below.)
            var root = new LinkedListNode<string>("Hello");
            root.Insert(new LinkedListNode<string>("world"));
            root.Insert(new LinkedListNode<string>("Testing"));
            root.Insert(new LinkedListNode<string>("Double linked list!"));
            root.Insert("Inserting", "some", "values!");
            root.Insert("Delete", "me", "please");
            // Remove nodes both via the static-style Remove(node) and the instance Remove().
            root.FindMultiple("Delete", "me").ToList().ForEach(n => n.Remove(n));
            root.Find(n => n.Data.Contains("pl")).Remove();
            var mismatch = root.Find("Nonexisting value");
            Debug.Assert(mismatch is null, "Expected to not find any item in this doubly linked list with this search value");
            string rootRepresentation = root.ToString();
            Debug.Assert(rootRepresentation == @"HEAD->Hello<->Inserting<->some<->values!<->Double linked list!<->Testing<->world->NULL");
            Console.WriteLine(rootRepresentation);
            Console.ReadKey();
        }
    }
}
The output of this linked list is displaying the contents of the doubly linked list:
HEAD->Hello<->Inserting<->some<->values!<->Double linked list!<->Testing<->world->NULL
As we can see, with our ToString implementation we can deduce that the first and last nodes are special, the first one lacks a prev pointer illustrated by "HEAD->" and the last node lacks a next pointer illustrated with "->NULL". You will find this in the implementation of the class. I have decided to actually reverse the order if the client wants to insert multiple values, as that ordering behaves more naturally. The client can search for a value or multiple values or search with a given predicate, or pass in a node and use it to search. Also, it is possible to remove a node. We end up with an implementation of a Doubly Linked list that can be used in many scenarios. I would advise you to use the .NET version as it supports more features, such as a pointer to the HEAD node. But this implementation is compact and easy to understand for many developers. You will usually use linked lists in scenarios where you have much data and want to quickly insert or remove one or several nodes in the linked list. It also supports quick navigation from a node to its previous or next node. Imagine for example working with a class called Book which needs to have an iterable structure ("Pages") to move to the next and previous page and at the same time insert new pages or removing a page from the middle of the data structure. Using an array or a list would be slow and perform poorly, while a doubly linked list would actually allow the developer to create code that quickly inserts a new page or removes a page at an arbitrary position in the data structure of the Book. This class can of course be optimized to support for example circular linked list with a pointer always to HEAD, or maybe you want to have pointer to HEAD and TAIL and not have a circular list? The source code should be relatively self explanatory for the intermediate C#-developer to revise and improve. I hope you found this article interesting.
Tuesday, 7 August 2018
Swapping variables in C# Unmanaged / Managed
This article will shortly present two ways of swapping variables in C#. Specifically, the unsafe pointer-based swap is only available for unmanaged types such as value types and unmanaged structs, while the managed swap using ref works for value types, structs and managed (reference) types alike.
The method UnsafeSwap swaps two variables by using unsafe code and pointers. By passing in the address of the two variables to swap, it is possible to use the dereference operator * to not only copy into
a temporary variable but also use the syntax *a = *b to exchange the values stored at the addresses the variables point to; effectively we swap two variables with their content, here int is used.
Another way is to do a SafeSwap where we pass in the ref of the variables and just change their contents.
Note that if you want to exchange two strings, you must pin the pointer of the chars using the fixed keyword.
// Demonstrates swapping two ints both with raw pointers (unsafe) and with ref parameters (safe).
unsafe void Main() { int one = 20; int two = 30; Console.WriteLine("Before swap, one: {0}, two: {1}", one, two); UnsafeSwap(&one, &two); Console.WriteLine("Unsafe swap, one: {0}, two: {1}", one, two); SafeSwap(ref one, ref two); Console.WriteLine("Safe swap back again, one: {0}, two: {1}", one, two); }

// Swaps the values stored at the two addresses via the dereference operator.
unsafe void UnsafeSwap(int* a, int* b){ int temp = *a; *a = *b; *b = temp; }

// Swaps the two variables through managed ref parameters - no pointers needed.
void SafeSwap(ref int a, ref int b){ int temp = a; a = b; b = temp; }To exchange two string variables, we must use the fixed keyword and of course only be able to exchange the characters from the char array consisting the first variable to be exchanged with the char array of the second variable. In addition, the string in C# is at a low level actually a character array null terminated with the '\0' sequence, at least when a string is treated as a character array.. The following method will exchange two string variables using unsafe / unmanaged code. Note that the two strings differ in length, so the last character is not exchanged. That is - our string is actually a character array and consists of multiple memory addresses.
// Swaps two strings character by character through pinned char pointers,
// stopping when either string hits its '\0' terminator.
// NOTE(review): this writes through pointers into string literals, which mutates
// normally-immutable (and possibly interned) strings - fine as a demo, but
// dangerous in real code; confirm the runtime tolerates it before reuse.
unsafe void Main() { fixed (char* one = "hello??") { fixed (char* two = "world!") { char* ptrone = one; char* ptrtwo = two; while (*ptrone != '\0' && *ptrtwo != '\0') { Swap(ptrone, ptrtwo); Console.WriteLine("one: {0}", GetString(one)); Console.WriteLine("two: {0}", GetString(two)); ++ptrone; ++ptrtwo; } } } }

// Exchanges the two chars at the given addresses.
unsafe void Swap(char* first, char* second){ char temp = *first; *first = *second; *second = temp; }

// Rebuilds a managed string from a '\0'-terminated char pointer.
unsafe string GetString(char* input){ char* ptr = input; var sw = new StringBuilder(); while (*ptr != '\0') { sw.Append(ptr->ToString()); ++ptr; } return sw.ToString(); }The output in Linqpad gives as the exchange of the string or actually a null terminated character array progresses:
one: wello??
two: horld!
one: wollo??
two: herld!
one: worlo??
two: helld!
one: worlo??
two: helld!
one: world??
two: hello!
one: world!?
two: hello?
Monday, 6 August 2018
Simple Base64 encoder in C#
Just read a bit about Base64 encoding and decided to try out writing my own in Linqpad for strings!
The way you Base64 encode is to treat each char in the input string as a byte value (zero padded left to eight binary digits), process the combined bit string in groups of six bits,
and then map each of the 2^6 possible group values into a Base64 table, outputting the corresponding characters into a resulting string. Note that you also
pad the Base64 string with '=' chars so that the underlying byte length is treated as divisible by three (making the encoded length a multiple of four characters). That is why Base64 strings in .NET always have 0, 1 or 2
'=' chars at the end. The characters used in the Base64 encoding is in .NET the chars [A-z] and [0-9] plus + and slash /.
// Compares the hand-rolled Base64 encoder below against .NET's Convert.ToBase64String.
void Main() { string wordToBase64Encode = "Hello world from base64 encoder. This is a sample input string, does it work?"; string wordBase64EncodedAccordingToNet = Convert.ToBase64String(Encoding.ASCII.GetBytes(wordToBase64Encode)).Dump(); string wordBase64EncodedAccordingToCustomBase64 = wordToBase64Encode.ToBase64String().Dump(); (("Are the two strings equal?: ") + string.Equals(wordBase64EncodedAccordingToNet, wordBase64EncodedAccordingToCustomBase64)).Dump(); }

public static class LowLevelBase64Extensions
{
    // Standard Base64 alphabet: A-Z, a-z, 0-9, '+', '/' (index = 6-bit group value).
    private static readonly char[] _base64Table = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/', };

    // Base64-encodes a string: each ASCII byte becomes 8 binary digits, the combined
    // bit string is consumed in 6-bit groups mapped through _base64Table, and the
    // result is right-padded with '=' chars.
    public static string ToBase64String(this string toBeEncoded){ byte[] toBeEncodedByteArray = Encoding.ASCII.GetBytes(toBeEncoded); string toBeEncodedBinaryString = string.Join("", toBeEncodedByteArray.Select(b => (Convert.ToString(b, 2).PadLeft(8, '0')))); //toBeEncodedBinaryString.Length.Dump();
        // bitLength % 3 equals (3 - byteLength % 3) % 3 because 8n mod 3 == -n mod 3,
        // so this yields the correct '=' pad count (0, 1 or 2) even though it looks odd.
        int padCharacters = toBeEncodedBinaryString.Length % 3; //toBeEncoded.Dump();
        StringWriter sw = new StringWriter(); for (var i = 0; i < toBeEncodedBinaryString.Length; i = i + 6) { string encodingToMap = toBeEncodedBinaryString.Substring(i, Math.Min(toBeEncodedBinaryString.Length - i, 6)) .PadRight(6, '0'); //encodingToMap.Dump();
            byte encodingToMapByte = Convert.ToByte(encodingToMap, 2); char encodingMapped = _base64Table[(int)encodingToMapByte]; sw.Write(encodingMapped); } sw.Write(new String('=', padCharacters)); //Check 24 bits / 3 byte padding
        return sw.ToString(); } }Note - the casting to int of the encodingToMapByte also works if you cast the byte representation to short or byte. 
I have compared this custom Base64Encoder with .NET's own Convert.ToBase64String(), and they give equal strings in the use cases I have tried out. Make note that this is just playing around, .NET's own method is optimized for speed. This is more to show what happens with the input string to generate a Base64 encoded string. You can use this code as a basis for custom encoding shemes like Base32. I have used Encoding.ASCII.GetBytes to look up the ASCII code. So this implementation primarily supports input strings in languages using ASCII and Extended ASCII characters, at least that is what I have tested it with. The output in Linqpad shows the following corresponding results:
Sunday, 5 August 2018
Consuming video from WCF as a Base64 encoded strings
This article follows up our quest of loading up video byte arrays from WCF efficiently, but now the byte array is converted into a Base64 encoded string.
You can start by cloning the repository I have prepared from here:
Base64 encoded strings take six bits from the byte array and designate it as a char where the char can be one of 64 characters, in MIME implementation it is [A-Za-z][0-9] and + and /, which is 64 different characters that the six bits are encoded into. This means that 24 bits can be represented as four Base64 encoded characters, that is 3 bytes in our byte array can be represented as FOUR base64 encoded chars. This 3:4 ratio is the rationale behind the MTOM optimization in WCF (to be discussed later). However, recap from the previous article where we downloaded a video sizing 4 MB on disk and Fiddler reporting it to be about 3 times larger. This is bloated data that I want to explore if we can fix up a bit. The bloated data is because I have managed not to truly enforce the XMLHttpRequest to send the data through the WCF REST binding (webHttpBinding) as true binary data - binary data is still sent as a string object to Javascript. This is sad, and I want to try to speed this up and avoid bloated data. To get this size down, we can start by using a Base64 encoded string instead in our XmlHttpRequest. First off, we define a service contract operation to return our string with Base64 data:
git clone https://toreaurstad@bitbucket.org/toreaurstad/wcfaudiostreamdemo.git
git fetch && git checkout VideBase64String_05082018
Base64 encoded strings take six bits from the byte array and designate it as a char where the char can be one of 64 characters, in MIME implementation it is [A-Za-z][0-9] and + and /, which is 64 different characters that the six bits are encoded into. This means that 24 bits can be represented as four Base64 encoded characters, that is 3 bytes in our byte array can be represented as FOUR base64 encoded chars. This 3:4 ratio is the rationale behind the MTOM optimization in WCF (to be discussed later). However, recap from the previous article where we downloaded a video sizing 4 MB on disk and Fiddler reporting it to be about 3 times larger. This is bloated data that I want to explore if we can fix up a bit. The bloated data is because I have managed not to truly enforce the XMLHttpRequest to send the data through the WCF REST binding (webHttpBinding) as true binary data - binary data is still sent as a string object to Javascript. This is sad, and I want to try to speed this up and avoid bloated data. To get this size down, we can start by using a Base64 encoded string instead in our XmlHttpRequest. First off, we define a service contract operation to return our string with Base64 data:
// REST GET endpoint (webHttpBinding) that returns the video file content
// as a Base64 encoded string instead of a raw byte stream.
[OperationContract]
[WebGet(UriTemplate = "mediabytes/{videofile}")]
string GetVideoAsBase64(string videofile);Now our Service Implementation looks like this:
using System;
using System.IO;
using System.ServiceModel.Activation;
using System.ServiceModel.Web;
using System.Web;
using WcfStreamAudioDemo.Common;

namespace WcfStreamAudioDemo.Host
{
    [AspNetCompatibilityRequirements (RequirementsMode = AspNetCompatibilityRequirementsMode.Allowed)]
    public class VideoService : IVideoServiceContract
    {
        /// <summary>
        /// Returns the raw video file stream for the streamed REST endpoint.
        /// </summary>
        public Stream GetVideo(string videofile)
        {
            return GetVideoStream(videofile, ".mp4");
        }

        /// <summary>
        /// Reads the whole video file and returns its content as a Base64 encoded string.
        /// </summary>
        public string GetVideoAsBase64(string videofile)
        {
            // Fix: the original never disposed the FileStream returned by
            // GetVideoStream, leaking a file handle per request. CopyTo also
            // replaces the manual 32 KB read/write loop with the same behavior.
            using (Stream videoStream = GetVideoStream(videofile, ".mp4"))
            using (var memoryStream = new MemoryStream())
            {
                videoStream.CopyTo(memoryStream);
                return Convert.ToBase64String(memoryStream.ToArray());
            }
        }

        /// <summary>
        /// Opens a read-only stream over a file in the site's ~/media folder.
        /// </summary>
        private Stream GetVideoStream(string videofile, string extension)
        {
            string mediaFolder = HttpContext.Current.Server.MapPath("~/media");
            string videoFileFullPath = Path.Combine(mediaFolder, videofile) + extension;
            return File.OpenRead(videoFileFullPath);
        }
    }
}The client side code will now retrieve a Base64Encoded string. The following client side scripts loads up our video:
<script type="text/javascript">
    window.onload = init;
    var source; //Video buffer

    // Kicks off the download of the Base64 encoded video from the WCF REST endpoint.
    function init() { loadByteArray('http://myserver/WcfStreamAudioDemo.Host/VideoService.svc/mediabytes/sintel_trailer-480p'); }

    // Decodes a Base64 string (via atob) into an ArrayBuffer by copying each
    // decoded char code into a Uint8Array.
    function base64ToArrayBuffer(base64) { var binaryString = window.atob(base64); var len = binaryString.length; var bytes = new Uint8Array( len ); for (var i = 0; i < len; i++) { bytes[i] = binaryString.charCodeAt(i); } return bytes.buffer; }

    // Fetches the Base64 payload, strips the XML wrapper the WCF REST binding adds,
    // decodes it and feeds the resulting Blob URL to the HTML5 video control.
    function loadByteArray(url) { var request = new XMLHttpRequest(); //request.overrideMimeType('text\/plain; charset=x-user-defined'); //request.responseType = 'blob'; request.open('GET', url, true); //request.setRequestHeader("Content-Type", "video/mp4"); request.onload = function() { //console.log(request.response);
        debugger; // NOTE(review): leftover debug statement - remove before production use
        var responseData = request.response; if (responseData === undefined || responseData === null) return;
        // Chop off the XML envelope around the Base64 string; the offsets 9 and 68
        // match this service's wrapper element - TODO confirm against the actual response
        responseData = responseData.slice(0, -9); responseData = responseData.substring(68);
        var videoByteArrayView = base64ToArrayBuffer(responseData); var blob = new Blob([videoByteArrayView], { type: "text/plain;charset=utf-8" }); var blobUrl = URL.createObjectURL(blob); var videoCtrlFedByByteArray = document.getElementById("videoCtrlFedByByteArray"); videoCtrlFedByByteArray.setAttribute("src", blobUrl); } //request.onload
        request.send(); }
</script>There is still a lot of manual Js scripting here to prepare the video here, I will try to explain. We have now managed to drastically reduce the bloated data of our original 4.4 MB video down from 15+ MB to just 5.8 MB using Base64 encoded string. The retrieved Base64 encoded string is first decoded with the Js atob() function ("ASCII to BINARY"). A Uint8Array is initialized with the length of this decoded binary string and the binary string is iterated by using the charCodeAt method, returning the ArrayBuffer inside the constructed Uint8Array. 
Note that I got the Base64 encoded string as a XML first, which is why I do some chopping off the string using slice and substring. Anyways, after getting an ArrayBuffer from the Uint8Array's buffer property, we can construct a Blob object (Binary large object) and create an object url and then set the "src" attribute of our HTML5 Video control to this blobUrl. This is still not very elegant, I have seen examples using "arraybuffer" as responseType of the XmlHttpRequest object in Js to retrieve binary data, but I got garbled data back trying to use it with WCF REST. So in this article you have seen a way to send binary data from a WCF method by using a Base64 encoded string as an intermediary between the Server and the client. We got the file size of the request according to Fiddler inspection down to much less than the bloated binary array transfer that actually sent the byte array as a text string back to the client.
Wcf byte array + HTML 5 Video + Custombinding
Again, this article looks at different ways to load byte array from WCF representing video data to web clients, this approach will return a byte array and
use a CustomBinding in WCF.
You can start by cloning the repository I have prepared from here:
The following method is added to our WCF service contract, note that now we leave the webHttpBinding and use a CustomBinding.
git clone https://toreaurstad@bitbucket.org/toreaurstad/wcfaudiostreamdemo.git
git fetch && git checkout VideBase64String_05082018
The following method is added to our WCF service contract, note that now we leave the webHttpBinding and use a CustomBinding.
// SOAP operation returning the raw video bytes; no [WebGet] attribute because
// this one is exposed over a CustomBinding with binary message encoding.
[OperationContract]
byte[] GetVideoBytes(string videofile);There is no [WebGet] attribute this time, as noted we will not use WCF REST but SOAP instead. The web.config of the host website for the WCF services exposes this CustomBinding:
<system.serviceModel> <services> <service name="WcfStreamAudioDemo.Host.AudioService"> <endpoint behaviorConfiguration="RestBehaviorConfig" binding="webHttpBinding" bindingConfiguration="HttpStreaming" contract="WcfStreamAudioDemo.Common.IAudioServiceContract" /> </service> <service name="WcfStreamAudioDemo.Host.VideoService"> <endpoint behaviorConfiguration="RestBehaviorConfig" binding="webHttpBinding" bindingConfiguration="HttpStreaming" contract="WcfStreamAudioDemo.Common.IVideoServiceContract" /> <endpoint address="custom" binding="customBinding" bindingConfiguration="CustomBinding" contract="WcfStreamAudioDemo.Common.IVideoServiceContract" /> </service> </services> <bindings> <customBinding> <binding name="CustomBinding"> <binaryMessageEncoding> <readerQuotas maxArrayLength="100000000" maxStringContentLength="100000000"/> </binaryMessageEncoding> <httpTransport /> </binding> </customBinding> <webHttpBinding> <binding name="HttpStreaming" transferMode="Streamed" maxReceivedMessageSize="1000000000" maxBufferPoolSize="100000000"> <readerQuotas maxArrayLength="100000000" maxStringContentLength="100000000"/> </binding> </webHttpBinding> </bindings> <behaviors> <serviceBehaviors> <behavior> <!-- To avoid disclosing metadata information, set the values below to false before deployment --> <serviceMetadata httpGetEnabled="true" httpsGetEnabled="true"/> <!-- To receive exception details in faults for debugging purposes, set the value below to true. 
Set to false before deployment to avoid disclosing exception information --> <serviceDebug includeExceptionDetailInFaults="true"/> </behavior> </serviceBehaviors> <endpointBehaviors> <behavior name="RestBehaviorConfig"> <webHttp /> </behavior> </endpointBehaviors> </behaviors> <protocolMapping> <add binding="basicHttpsBinding" scheme="https" /> </protocolMapping> <serviceHostingEnvironment aspNetCompatibilityEnabled="true" multipleSiteBindingsEnabled="true" /> </system.serviceModel>Note that the custom binding allows us to set up a binaryMessageEncoding. The next step is to go to the client project and add a service reference to the host project containing the WCF service. The app.config file is then updated, relevant parts shown here:
<system.webServer> <directoryBrowse enabled="true" /> <security> <requestFiltering> <requestLimits maxAllowedContentLength="1073741824" /> </requestFiltering> </security> <staticContent> <mimeMap fileExtension=".mp4" mimeType="video/mp4" /> </staticContent> </system.webServer> <system.serviceModel> <bindings> <customBinding> <binding name="CustomBinding"> <binaryMessageEncoding> <readerQuotas maxArrayLength="100000000" maxStringContentLength="100000000" /> </binaryMessageEncoding> <httpTransport maxReceivedMessageSize="100000000" /> </binding> </customBinding> </bindings> <client> <endpoint address="http://he139920.helsemn.no/WcfStreamAudioDemo.Host/VideoService.svc/custom" contract="VideoService.IVideoServiceContract" binding="customBinding" bindingConfiguration="CustomBinding" /> </client> </system.serviceModel>Note the changes of setting up a MIME map for .mp4 files and setting up requestlimits. I will explain this soon. The client script now actually consists both of a server side script run in ASP.NET that sets up the video control in two ways. One way is to construct a HTML5 Data Url. This clearly was demanding for the browser to cope with and is not recommended. The video is actually embed on the page as a base64 encoded string! For a small video such as our video, it is actually possible still. It is of course fascinating that we can embed an entire video on our ASPX web page just as a Base64 encoded string, like it or not. The way to do this anyways is like this:
<script runat="server">
    // Fetches the video bytes through the generated WCF proxy and embeds them
    // directly in the page as an HTML5 data URI.
    private void btnInvokeWcfService_OnClick(object sender, EventArgs e) { using (var proxy = new VideoServiceContractClient()) { byte[] payload = proxy.GetVideoBytes("sintel_trailer-480p"); SetVideoSourceToHtmlDataUri(payload); } }

    // Sets the video control's src to a "data:video/mp4;base64,..." URI -
    // the entire video ends up inline in the rendered page.
    private void SetVideoSourceToHtmlDataUri(byte[] payload) { //Set a base64 Html data uri
        videoCtrlFedByByteArrayThroughProxy.Attributes["type"] = "video/mp4"; string base64String = Convert.ToBase64String(payload); videoCtrlFedByByteArrayThroughProxy.Attributes["src"] = "data:video/mp4;base64," + base64String; }
</script>The following image shows how this actually works! Of course, this gives rendering and performance issues, as the web page now got very large content - the entire video is embedded into the page! Another way is to write the byte array to a temporary file on the server, and set the src attribute to this temporary file.
<script runat="server">
    // Fetches the video bytes through the generated WCF proxy and serves them
    // from a temp file under the site's media folder.
    private void btnInvokeWcfService_OnClick(object sender, EventArgs e) { using (var proxy = new VideoServiceContractClient()) { byte[] payload = proxy.GetVideoBytes("sintel_trailer-480p"); SetVideoSourceToTempFile(payload); } }

    // Writes the payload to a randomly named .mp4 under media/ and points the
    // video control at it.
    // NOTE(review): nothing deletes these temp files, so they accumulate on the
    // web server - consider a cleanup job or IsolatedStorage as the article suggests.
    private void SetVideoSourceToTempFile(byte[] payload) { //write to a temp file
        string tempfile = Path.GetRandomFileName() + ".mp4"; string tempfilePathForWebServer = HttpContext.Current.Server.MapPath("media/") + tempfile; File.WriteAllBytes(tempfilePathForWebServer, payload); videoCtrlFedByByteArrayThroughProxy.Attributes["src"] = "media/" + tempfile; }
</script>Note that in these two samples, I have adjusted the HTML5 video control to be accessible to ASP.NET like this:
<asp:Button runat="server" ID="btnInvokeWcfService" Text="Load Video" OnClick="btnInvokeWcfService_OnClick"/> <video id="videoCtrlFedByByteArrayThroughProxy" type="video/mp4" runat="server" controls width="320" height="240"> <p>Your browser doesn't support HTML5 video. Here is a <a href="http://wcfaudiodemohost.azurewebsites.net/VideoService.svc/mediabytes/sintel_trailer-480p">link to the video</a> instead.</p> </video>This method of retrieving video from WCF is the quickest way, it only fetches the original byte array data and it loads quickly. An adjusted version would not write to a file directly accessible on the web server, but use for example IsolatedStorage instead. I will look into that in the future. Hope you found this article interesting. CustomBindings in WCF gives you a lot of flexibility!