Monday 24 November 2014

Reducing complexity in Entity Framework queries with IQueryable and LINQKit

I have added a presentation written in Norwegian that explains how one can reduce complexity in Entity Framework, using IQueryable decomposition and LINQKit functionality. Reduce Entity Framework

Tuesday 14 October 2014

Asynchronous Entity Framework operations

Sample code:

using System.Collections.Generic;
using System.Data.Entity;
using System.Linq;
using System.Threading.Tasks;

namespace TestAsyncEntityFramework6
{
   
    /// <summary>
    /// CRUD helper for Customer entities in the Northwind database, demonstrating
    /// asynchronous Entity Framework 6 operations next to a synchronous baseline.
    /// </summary>
    public class CustomHelper
    {

        /// <summary>Synchronous baseline: returns all customers ordered by CustomerID.</summary>
        public static List<Customer> SelectAll()
        {
            using (var context = new NorthWindEntities())
            {
                var query = from c in context.Customers
                    orderby c.CustomerID ascending
                    select c;
                return query.ToList();
            }
        }

        /// <summary>Returns all customers ordered by CustomerID without blocking the calling thread.</summary>
        public static async Task<List<Customer>> SelectAllAsync()
        {
            using (var context = new NorthWindEntities())
            {
                var query = from c in context.Customers
                    orderby c.CustomerID ascending
                    select c;
                return await query.ToListAsync();
            }
        }

        /// <summary>Returns the customer with the given id, or null when no such customer exists.</summary>
        public static async Task<Customer> SelectByIdAsync(string id)
        {
            using (var context = new NorthWindEntities())
            {
                var query = from c in context.Customers
                    where c.CustomerID == id
                    select c;
                return await query.SingleOrDefaultAsync();
            }
        }

        /// <summary>
        /// Inserts a new customer and returns a status message.
        /// A failing insert (e.g. duplicate key) surfaces as an exception for the caller to
        /// handle; the commented-out catch-all of the original would have hidden the cause.
        /// </summary>
        public static async Task<string> InsertAsync(Customer obj)
        {
            using (var context = new NorthWindEntities())
            {
                context.Customers.Add(obj);
                await context.SaveChangesAsync();
                return "Customer added successfully!";
            }
        }

        /// <summary>
        /// Updates CompanyName and Country of an existing customer.
        /// Fix: guards against a missing row - the original dereferenced FindAsync's
        /// result unconditionally and threw NullReferenceException for an unknown id.
        /// </summary>
        public static async Task<string> UpdateAsync(Customer obj)
        {
            using (var context = new NorthWindEntities())
            {
                Customer existing = await context.Customers.FindAsync(obj.CustomerID);
                if (existing == null)
                {
                    return "Customer not found";
                }
                existing.CompanyName = obj.CompanyName;
                existing.Country = obj.Country;
                await context.SaveChangesAsync();
                return "Customer updated successfully";
            }
        }

        /// <summary>
        /// Deletes the customer with the given id.
        /// Fix: a null guard covers unknown ids instead of crashing in Remove.
        /// </summary>
        public static async Task<string> DeleteAsync(string id)
        {
            using (var context = new NorthWindEntities())
            {
                Customer existing = await context.Customers.FindAsync(id);
                if (existing == null)
                {
                    return "Customer not found";
                }
                context.Customers.Remove(existing);
                await context.SaveChangesAsync();
                return "Customer deleted successfully!";
            }
        }

        /// <summary>Looks up a customer by primary key; returns null when not found.</summary>
        public static async Task<Customer> GetCustomerByIdAsync(string id)
        {
            using (var context = new NorthWindEntities())
            {
                return await context.Customers.FindAsync(id);
            }
        }

    }
}


using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace TestAsyncEntityFramework6
{
    class Program
    {

        /// <summary>
        /// Console demo driving the async CRUD helpers: select all, insert, read back,
        /// update and delete a sample customer.
        /// Blocking with Wait()/Result is tolerated here only because this is a console
        /// entry point with no synchronization context (no deadlock risk); in library or
        /// UI code the calls should be awaited instead.
        /// </summary>
        static void Main(string[] args)
        {
            Stopwatch sw = Stopwatch.StartNew();
            var task = CustomHelper.SelectAllAsync();
            task.Wait();
            Console.WriteLine("Got data!");
            List<Customer> data = task.Result;
            Console.WriteLine(data.Count);
            Console.WriteLine("Async op took: " + sw.ElapsedMilliseconds);
            // Fix: Restart instead of the original Stop()+Start(), which merely resumed
            // the previous measurement and would have accumulated both timings.
            sw.Restart();

            var c = new Customer { CustomerID = "TEIT", Country = "Burkina Faso", CompanyName = "Tore Aurstad IT" };
            try
            {
                var task2 = CustomHelper.InsertAsync(c);
                task2.Wait();
                Console.WriteLine(task2.Result);
            }
            catch (AggregateException ae)
            {
                // Wait() wraps any insert failure (e.g. duplicate key) in an AggregateException.
                Console.WriteLine(ae.Message);
            }

            var c3 = CustomHelper.GetCustomerByIdAsync("TEIT");
            c3.Wait();
            if (c3.Result == null)
            {
                // FindAsync returns null for an unknown key; bail out instead of
                // crashing with a NullReferenceException below.
                Console.WriteLine("Customer TEIT not found - skipping update/delete demo.");
            }
            else
            {
                Console.WriteLine(c3.Result.Country);

                c3.Result.Country = "Norway";

                var c4 = CustomHelper.UpdateAsync(c3.Result);
                c4.Wait();
                Console.WriteLine(c4.Result);

                var c5 = CustomHelper.DeleteAsync(c3.Result.CustomerID);
                c5.Wait();
                Console.WriteLine(c5.Result);
            }

            Console.WriteLine("Press any key to continue ...");
            Console.ReadKey();
        }

    }
}


Monday 13 October 2014

EntityFramework.Extended library performance capabilities

EntityFramework.Extended is an additional library that can be installed alongside Entity Framework 6 and gives additional performance capabilities with Entity Framework, such as: - Batch updates - Batch deletes - Batch queries - Query caching. The following demo code shows some simple usage of the EntityFramework.Extended library to use these new capabilities, which the "core" Entity Framework lacks good support for:

using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using EntityFramework.Caching;
using EntityFramework.Extensions;

namespace TestEntityFrameworkExtended
{

    /// <summary>
    /// Samples based on the EntityFramework.Extended Github site here: https://github.com/loresoft/EntityFramework.Extended
    /// </summary>
    /// <remarks>Nuget package page here: https://www.nuget.org/packages/EntityFramework.Extended/ Created by LoreSoft, open source
    /// Entity Framework 6 assumed to be a requirement here (TODO: Inspect, don't expect)</remarks>
    class Program
    {

        static void Main()
        {
            DemoBatchUpdates();
            DemoBatchQueries();
            DemoQueryResultCache();
            DemoBatchDeletion();
        }

        /// <summary>
        /// Demonstrates FromCache query result caching: a tagged entry with a 60 second
        /// duration expiration, mid-loop eviction via CacheManager, and finally an
        /// untagged (never expiring) cached query.
        /// </summary>
        private static void DemoQueryResultCache()
        {
            using (var ctx = new SampleDbContext())
            {
                var vipUsers =
                    ctx.Users.Where(u => u.MiddleName == "VIPUser").FromCache(
                        CachePolicy.WithDurationExpiration(TimeSpan.FromSeconds(60)), new[]{ "Viktigperer" });
                foreach (var i in Enumerable.Range(1, 20))
                {
                    // Halfway through, evict the tagged entry to force a database reload.
                    if (i == 10)
                        CacheManager.Current.Expire("Viktigperer");
                    vipUsers = ctx.Users.Where(u => u.MiddleName == "VIPUser").FromCache();
                    Console.WriteLine("# of Vip Users: " + vipUsers.Count());
                    Thread.Sleep(1000);
                }
            }

            //Note that the FromCache call now caches the vip users indefinitely (no expiration), until one explicitly expires the
            //tagged cache (or avoids using the FromCache extension). Therefore, tagging cached results is good practice such that the
            //cache can be evicted by using CacheManager.Current.Expire. The cache tags can be set to const strings for example.
            //The FromCache makes it easy to cache particular result sets that are known to be accessed by many users and therefore
            //can be cached. For example type registers in OpPlan or Theaters. The database round trip will then be avoided and the
            //result is cached in the w3wp process server side. Reducing traffic and returning cached results will scale better, but
            //the cache will also use more memory. Be critical to what to cache (OpPlan 4 Theaters for example can be cached and then
            //expired on demand using CacheManager.Current.Expire, reducing overall traffic between OpPlanWAS and database and also
            //pressure on the CPU)
            using (var ctx = new SampleDbContext())
            {
                var vipUsers =
                    ctx.Users.Where(u => u.MiddleName == "VIPUser").FromCache();
            }
        }

        /// <summary>
        /// Demonstrates batched ("future") queries: nothing is sent to the database until
        /// the first .Value access, which then executes all pending future queries in a
        /// single round trip.
        /// </summary>
        private static void DemoBatchQueries()
        {
            using (var ctx = new SampleDbContext())
            {
                var aQuery = ctx.Users.Where(u => u.FirstName == "Pedro" && u.MiddleName == "VIPUser").FutureFirstOrDefault();
                var bQuery =
                    ctx.Users.Where(u => u.FirstName == "Olivini").FutureFirstOrDefault();
                User a = aQuery.Value;
                User b = bQuery.Value;
                PrintUsers("Batch query results: ", new[]{ a, b});
            }
        }

        /// <summary>
        /// Seeds four users, then batch-updates the short-named ones with a single UPDATE
        /// statement that bypasses the change tracker.
        /// </summary>
        private static void DemoBatchUpdates()
        {
            using (var ctx = new SampleDbContext())
            {
                ctx.Users.Delete(); //clear 

                ctx.Users.Add(new User
                {
                    FirstName = "Pedro",
                    LastName = "Hauginho"
                });
                ctx.Users.Add(new User
                {
                    FirstName = "Julio",
                    LastName = "Cannevaro"
                });
                ctx.Users.Add(new User
                {
                    FirstName = "Jono",
                    MiddleName = "Pedro",
                    LastName = "Salvatini"
                });
                ctx.Users.Add(new User
                {
                    FirstName = "Olivini",
                    LastName = "Hepsado"
                });
                ctx.SaveChanges();

                ctx.Users.Where(u => u.FirstName.Length <= 4).Update(u => new User {MiddleName = "VIPUser"});  //Batch update
            }

            //Important! Reading the database AFTER a batch update should create a NEW db context instance to get refreshed data!
            using (var ctx = new SampleDbContext())
            {
                PrintUsers("Batch update result: ", ctx.Users);
            }
        }

        /// <summary>Writes the given users to the console and waits for a key press.</summary>
        private static void PrintUsers(string header, IEnumerable<User> users)
        {
            Console.WriteLine(Environment.NewLine + header);
            foreach (var user in users)
            {
                Console.WriteLine("{0} {1} {2}", user.FirstName, user.MiddleName, user.LastName);
            }

            Console.WriteLine("Press the Any Key to Continue ... Now where's that Any Key?");
            Console.ReadKey();
        }

        /// <summary>
        /// Seeds four users, then batch-deletes a subset with a single DELETE statement.
        /// </summary>
        private static void DemoBatchDeletion()
        {
            using (var ctx = new SampleDbContext())
            {

                ctx.Users.Delete(); //clear (first demo of batch delete)

                ctx.Users.Add(new User
                {
                    FirstName = "Rudolf",
                    LastName = "Holgedra"
                });
                ctx.Users.Add(new User
                {
                    FirstName = "Mario",
                    LastName = "Madraminho"
                });
                ctx.Users.Add(new User
                {
                    FirstName = "Vittorio",
                    MiddleName = "Pedro",
                    LastName = "Salinas"
                });
                ctx.Users.Add(new User
                {
                    FirstName = "Fernando",
                    LastName = "Torres"
                });
                ctx.SaveChanges();

                ctx.Users.Where(u => u.FirstName == "Mario" || u.MiddleName == "Pedro").Delete(); //second batch delete 
            }

            //Fix: the original printed ctx.Users from the SAME context that issued the batch
            //delete, so stale tracked entities could still be listed. Batch operations bypass
            //the change tracker - read through a fresh context to show the true database state
            //(the same rule this file already states for batch updates).
            using (var ctx = new SampleDbContext())
            {
                PrintUsers("Batch delete users result: ", ctx.Users.ToList());
            }

        }

    }

}




using System.Data.Entity;

namespace TestEntityFrameworkExtended
{
    
    /// <summary>
    /// Minimal EF code-first context for the EntityFramework.Extended samples;
    /// exposes the Users set targeted by the batch update/delete/query/cache demos.
    /// Connection string is presumably resolved by the "SampleDbContext" naming
    /// convention - TODO confirm against App.config.
    /// </summary>
    public class SampleDbContext : DbContext
    {

        // The single table used by all demos in this sample project.
        public DbSet<User> Users { get; set; }

    }

}


namespace TestEntityFrameworkExtended
{
    
    /// <summary>
    /// POCO entity for the Users table used in the EntityFramework.Extended demos.
    /// </summary>
    public class User
    {

        // Primary key (EF convention: type name + "Id").
        public int UserId { get; set; }

        public string LastName { get; set; }

        public string FirstName { get; set; }

        // Set to "VIPUser" by the batch-update demo and filtered on by the cache demo.
        public string MiddleName { get; set; }

    }

}


After running the code above, one experience from the batch updates was that one should create a new db context / object context such that the data is refreshed after the batch update is performed. The batch queries will send multiple queries towards the database simultaneously. Note that after a batch delete, a new db context / object context should also be instantiated. The query caching mechanism will indefinitely cache all queries with the same query content with the default FromCache() overload. To expire a query cache, use CacheManager.Current.Expire("MyCacheKey"). It is good practice to tag the cache and also set a timeout expiration, which can be set either absolute or sliding. Caching queries will mean that your server side will use more memory, such as the w3wp worker process running WCF services. Do not cache large result sets, and also be aware of when the cache should be evicted / expired such that correct, fresh data can be reloaded. Using query caching, batch updates, batch deletes and batch queries, EntityFramework.Extended gives EF a good performance boost. Entity Framework should consider incorporating these features into EF, as they are general performance enhancements. Extra note - it is not necessary to create a new DbContext if you explicitly refresh the context, as in the following sample code:

    //Alternatively force update after batch update (same for batch delete)
    ObjectContext octx = ((IObjectContextAdapter) ctx).ObjectContext; 
    octx.Refresh(RefreshMode.StoreWins, ctx.Users);

Monday 29 September 2014

Java 8 - using collect in the Stream API

The following code provides some examples of how to use collect() in the Java Stream API.



import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.security.cert.PKIXRevocationChecker.Option;
import java.time.LocalDate;
import java.time.Month;
import java.time.Period;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
import java.time.temporal.TemporalUnit;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 *
 * @author José
 */
public class DataAndTime {

    public static void main(String[] args) {

        List<Person> persons = new ArrayList<>();
        
        try (
            BufferedReader reader = 
                new BufferedReader(
                    new InputStreamReader(
                        DataAndTime.class.getResourceAsStream("people.txt")));
            Stream<String> stream = reader.lines();
        ) {
            
            stream.map(
               line -> {
                   String[] s = line.split(" ");
                   String name = s[0].trim();
                   int year = Integer.parseInt(s[1]);
                   Month month = Month.of(Integer.parseInt(s[2]));
                   int day = Integer.parseInt(s[3]);
                   Person p = new Person(name, LocalDate.of(year, month, day));
                   persons.add(p);
                   return p;
               })
               .forEach(System.out::println);
            
            Optional<Person>opt = persons.stream().filter(p -> p.getAge() >= 20)
            .min(Comparator.comparing(Person::getAge));
            
            System.out.println("Youngest person among all persons: " + opt); 
            
            Optional<Person>optSecond = persons
              .stream()
              .max(Comparator.comparing(Person::getAge)); 
            
            System.out.println("Oldest person among all persons: " + optSecond);    
            
            Map<Integer, Long> map = 
              persons.stream()
              .collect(Collectors.groupingBy(
                Person::getAge, Collectors.counting())); 
            
            System.out.println(map);
            
            Map<Integer, List<String>> mapSecond = 
              persons.stream()
              .collect(Collectors.groupingBy(
                Person::getAge, 
                Collectors.mapping(
                  Person::getName,
                  Collectors.toList()
                  )
                )); 
            
            System.out.println(mapSecond);
            
            Map<Integer, Set<String>> mapThird = 
              persons.stream()
              .collect(Collectors.groupingBy(
                Person::getAge, 
                Collectors.mapping(
                  Person::getName,
                  Collectors.toCollection(TreeSet::new)
                  )
                )); 
            
            System.out.println(mapThird);
            
            Map<Integer, String> mapFourth = 
              persons.stream()
              .collect(Collectors.groupingBy(
                Person::getAge, 
                Collectors.mapping(
                  Person::getName,
                  Collectors.joining(", ")
                  )
                )); 
            
            System.out.println(mapFourth);
            
            
            
        } catch (IOException ioe) {
            System.out.println(ioe);
        }

        LocalDate now = LocalDate.of(2014, Month.MARCH, 12);
        
        persons.stream().forEach(
                p -> {
                    Period period = Period.between(p.getDateOfBirth(), now);
                    System.out.println(p.getName() + " was born " +
                            period.get(ChronoUnit.YEARS) + " years and " + 
                            period.get(ChronoUnit.MONTHS) + " months " + 
                            "[" + p.getDateOfBirth().until(now, ChronoUnit.MONTHS) 
                            + " months]"
                            );
                    
                });
    }
}

The definition of Person class:

/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */


import java.time.LocalDate;
import java.time.chrono.IsoChronology;

/**
 *
 * @author José
 */
/**
 * Simple value object holding a person's name and date of birth, with a derived
 * age and a diagnostic toString.
 *
 * @author José
 */
public class Person {

    private String name;
    private LocalDate dateOfBirth;

    /** No-arg constructor kept for frameworks that require it. */
    public Person(){}

    /** Creates a person with the given name and date of birth. */
    public Person(String name, LocalDate dateOfBirth) {
        this.name = name;
        this.dateOfBirth = dateOfBirth;
    }

    /** @return the person's name */
    public String getName() {
        return this.name;
    }

    /** @return the person's date of birth */
    public LocalDate getDateOfBirth() {
        return this.dateOfBirth;
    }

    /** @return whole years between the birth date and today's ISO-chronology date */
    public int getAge(){
        return this.dateOfBirth.until(IsoChronology.INSTANCE.dateNow()).getYears();
    }

    @Override
    public String toString() {
        return "Person{" + "name=" + name + ", dateOfBirth=" + dateOfBirth + '}';
    }
}


The sample persons are collected from this text file:

Sarah 1999 12 15
Philip 1993 8 12
Beth 1991 6 5
Simon 1990 3 23
Nina 1991 7 12
Allan 1985 2 14
Leonard 1996 10 27
Barbara 1988 4 19

Here is a sample Stream API query to retrieve a comma separated string of the names of the persons in the list, as an additional example of how to retrieve information from an entire collection without using collect, but with map and reduce:

   Optional personNames = persons.stream().map(p->p.getName()).reduce((p1,p2)-> p1 + "," + p2);
            System.out.println(personNames);       

Sunday 28 September 2014

Using immutability in Java to support functional programming and avoid side effects

The following article is based on Venkat Subramaniam's article "Programming with immutability in Java" in "The Developer" magazine No. 3 2013. The code presented below shows how Java 8 supports immutability with different techniques. Consider the following Java 8 code sample:

import java.util.*;
import java.util.stream.*;

/**
 * Shows how Java 8 stream pipelines and (effectively) final locals support an
 * immutable, side-effect-free programming style.
 */
public class ImmutableTest1 {

    /**
     * Sums twice each even value greater than 3 from the stream and prints the
     * total together with the elapsed wall-clock time in seconds.
     */
    public static void printTotal(Stream<Integer> values){
        final long start = System.nanoTime();
        final int total = values
            .filter(v -> v > 3)
            .filter(ImmutableTest1::isEven)
            .mapToInt(v -> v * 2)
            .sum();

        final long end = System.nanoTime();
        System.out.printf("Total: %d Time: %.3f seconds\n", total, (end - start)/1.0e9);
    }

    /**
     * Deliberately slow even-check (sleeps 100 ms) so the sequential vs parallel
     * stream timing difference in main becomes visible.
     */
    public static boolean isEven(int number){
        try {
            Thread.sleep(100);
        }
        catch (Exception ex){
            System.out.println(ex.getMessage());
        }
        return (number & 1) == 0;
    }

    public static void main(String[] args){
        final List<Integer> values = Arrays.asList(1,2,3,4,5,6,7,8,9,10);
        printTotal(values.stream());
        printTotal(values.parallelStream());

        // The enhanced for loop variable is effectively final, so the lambda may
        // capture it (a classical indexed loop variable could not be captured).
        for (int element : values) {
            Runnable runnable = () -> System.out.println(element);
            runnable.run();
        }

        final List<String> namesList = Arrays.asList("Jake", "Raju", "Kim", "Kara", "Paul", "Brad", "Mike");
        System.out.println("Found a 3 letter name?: " +
            namesList.stream().anyMatch(name -> name.length() == 3));

        System.out.println("Found Kim?: " +
            namesList.stream().anyMatch(name -> name.contains("Kim")));
    }

}

The code above shows how library functions in the Stream API of Java 8 avoid the use of mutable variables through the functional programming style of chaining. Further, note that a classical indexed for loop cannot be used to create new Runnable instances that capture the loop variable, while the enhanced for loop over a collection allows this, because its loop variable is effectively final. The error given for the classical for loop is: "Local variable i defined in an enclosing scope must be final or effectively final" (ImmutableTest1.java /ImmutabilityTest1/src line 36, Java Problem), given this code:
 for (int i = 0; i < values.size(); i++) {
   Runnable runnable = () -> System.out.println(values.get(i));
   runnable.run();
        }
In addition, recursion is a technique that provides immutability to your code, by avoiding use of mutable control variables, example follows:

import java.util.Scanner;


/**
 * Console number guessing game illustrating recursion as an immutable alternative
 * to a mutable loop counter: each attempt is a new stack frame carrying a final
 * attempts argument.
 */
public class Guesser {

    // Secret number in [0, 99], chosen once when the class is loaded.
    final static int target = (int) (Math.random() * 100);

    /**
     * Reads one guess from standard input, prints a hint, and recurses with
     * attempts + 1 until the guess matches the target.
     */
    public static void play(final int attempts){
        System.out.print("Enter your guess:");
        final int guess = new Scanner(System.in).nextInt();
        if (guess == target) {
            System.out.printf("You got it in %d attempts\n", attempts);
            return;
        }
        System.out.println(guess < target ? "Aim higher" : "Aim lower");
        play(attempts + 1);
    }

    public static void main(String[] args){
        System.out.println("I've selected a number, can you guess?");
        play(1);
    }

}


Thursday 25 September 2014

Speeding up Java code execution using parallel execution

Disclaimer note: I am a C# programmer and only have some work experience with Java. The new features of Java 8 make me, as a coder, much more enthusiastic about this language, since parallel programming, functional programming and collection handling are now much better! In Java, using the Stream API makes parallel computing easily accessible. Consider this simple code:

import java.io.*; 
import java.util.*; 
import java.util.stream.Stream;

/**
 * Processes the entries of the current directory through a sequential stream;
 * each file is handled with a random delay to simulate slow work.
 */
public class FileTest {

    /**
     * Simulates slow per-file work (sleeps 100-2100 ms) and prints the file's
     * absolute path; if interrupted, the File itself is printed instead.
     */
    public static void processFile(File file){
        try {
            Thread.sleep(100 + (int)(Math.random()*2000));
            System.out.println(file.getAbsolutePath());
        }
        catch (Exception ex){
            System.out.println(file);
        }
    }

    public static void main(String[] args){

        File[] entries = new File(".").listFiles();

        Stream.of(entries).forEach(FileTest::processFile);

    }
}

To speed up the execution of this simple code, just add parallel() before the .forEach:

import java.io.*; 
import java.util.*; 
import java.util.stream.Stream;

/**
 * Same directory-listing demo as the sequential version, but processed through a
 * parallel stream (the work runs on the common ForkJoinPool, so completion order
 * is nondeterministic).
 */
public class FileTest {

    /**
     * Simulates slow per-file work (sleeps 100-2100 ms) and prints the file's
     * absolute path; if interrupted, the File itself is printed instead.
     */
    public static void processFile(File file){
        try {
            Thread.sleep(100 + (int)(Math.random()*2000));
            System.out.println(file.getAbsolutePath());
        }
        catch (Exception ex){
            System.out.println(file);
        }
    }

    public static void main(String[] args){

        File[] entries = new File(".").listFiles();

        Stream.of(entries).parallel().forEach(FileTest::processFile);

    }
}

All these features make Java 8 a much more attractive language again, and definitely more C# developers will look more into Java soon! :-) Some images from Sublime Text 3, which I use to compile and run the code above!

Introductory Java 8 Lambda and collection handling

As a disclaimer, I have coded C# for several years now and am familiar with .NET technology, but my experience with Java 8 is more limited, so this article is just introductory level Java 8 Lambda and collection handling. I will mainly present code examples for handling collections and using lambda expressions in Java 8, using the Eclipse Luna IDE. I also downloaded Jdk 8 to get the necessary Java Development Kit and Java 8 runtime required. The code presented here is heavily based on the tutorial Oracle provides on their websites: Lambda Expressions in Java Let's look at some code right away, defining some classes and enums:

import java.time.LocalDate;
import java.time.chrono.IsoChronology;
import java.util.ArrayList;
import java.util.List;

/**
 * Demo person with name, gender, birthday and email address, used by the
 * RosterTest lambda-expression examples.
 */
public class Person {

    String name;
    Sex gender;
    LocalDate birthday;
    String emailAddress;

    /** Creates a fully populated person. */
    Person(String nameArg, LocalDate birthdayArg, Sex genderArg, String emailArg){
        name = nameArg;
        birthday = birthdayArg;
        gender = genderArg;
        emailAddress = emailArg;
    }

    /** @return whole years from the birthday until today's ISO-chronology date */
    public int getAge(){
        return birthday.until(IsoChronology.INSTANCE.dateNow()).getYears();
    }

    /** Prints "name, age" to standard out. */
    public void printPerson(){
        System.out.println(name + ", " + this.getAge());
    }

    public Sex getGender(){
        return gender;
    }

    public String getName(){
        return name;
    }

    public String getEmailAddress(){
        return emailAddress;
    }

    public LocalDate getBirthday(){
        return birthday;
    }

    /** Orders two persons by birthday (earlier birthday first). */
    public static int compareByAge(Person a, Person b){
        return a.birthday.compareTo(b.birthday);
    }

    /**
     * Builds the fixed four-person demo roster.
     * Fix: returns a typed List&lt;Person&gt; instead of the raw List of the
     * original, removing unchecked-type warnings while staying source-compatible
     * for callers.
     */
    public static List<Person> createRoster(){
        List<Person> roster = new ArrayList<>();
        roster.add(new Person(
            "Fred",
            IsoChronology.INSTANCE.date(1991, 6, 20),
            Sex.MALE,
            "fred@example.com"));
        roster.add(new Person(
            "Jane",
            IsoChronology.INSTANCE.date(1990, 7, 15),
            Sex.FEMALE,
            "jane@example.com"));
        roster.add(new Person(
            "George",
            IsoChronology.INSTANCE.date(1993, 8, 13),
            Sex.MALE,
            "george@example.com"));
        roster.add(new Person(
            "Bob",
            IsoChronology.INSTANCE.date(2000, 9, 12),
            Sex.MALE,
            "bob@example.com"));
        return roster;
    }

}



/**
 * Lightweight projection of Person carrying only name and age, used by the
 * map/collect transformation examples.
 */
public class SimplePerson {

    String name;
    int age;

    /** Creates a SimplePerson with the given name and age. */
    public SimplePerson(String nameArg, int ageArg) {
        this.name = nameArg;
        this.age = ageArg;
    }

    /** @return the person's name */
    public String getName(){
        return this.name;
    }

    /** @return the person's age in years */
    public int getAge(){
        return this.age;
    }

}


/**
 * Gender of a Person in the roster demos; filtered on by the selective-service
 * examples in RosterTest.
 */
public enum Sex {
 
 MALE,
 
 FEMALE

}



/**
 * Functional interface (single abstract method) used by the search approaches in
 * RosterTest: implementations decide whether a given Person matches the search
 * criteria. Java 8's java.util.function.Predicate&lt;Person&gt; supersedes this;
 * it is kept to mirror the tutorial's progression.
 */
public interface ICheckPerson {
 
 /** @return true when the given person matches the search criteria */
 boolean test(Person p);

}




The following code makes use of these definitions and demonstrates use of lambda and collections in Java 8:

import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collector;
import java.util.stream.Collectors;


public class RosterTest {
 
 //Approach 1: Create methods that Search for Persons that match one characteristics
 
 /**
  * Approach 1: prints every person in the roster whose age is at least the given threshold.
  */
 public static void printPersonsOlderThan(List<Person> roster, int age){
  for (Person candidate : roster){
   if (candidate.getAge() < age){
    continue;
   }
   candidate.printPerson();
  }
 }

 
 //Approach 2: Create more generalized search methods 
 
 /**
  * Approach 2: prints every person whose age lies in [low, high).
  */
 public static void printPersonsWithinAgeRange(
   List<Person> roster, int low, int high){
  for (Person candidate : roster){
   final boolean inRange = low <= candidate.getAge() && candidate.getAge() < high;
   if (inRange){
    candidate.printPerson();
   }
  }
 }
 
 
 //Approach 3: Specify search criteria code in a local class
 //Approach 4: Specify Search criteria code in an anonymous class
 //Approach 5: Specify search criteria code with a lambda expression
 /**
  * Approaches 3-5: prints every person accepted by the supplied ICheckPerson tester
  * (local class, anonymous class or lambda).
  */
 public static void printPersons(
   List<Person> roster, ICheckPerson tester){
  for (Person candidate : roster){
   if (!tester.test(candidate)){
    continue;
   }
   candidate.printPerson();
  }
 }
 
 
 //Approach 6: Use standard functional interfaces with lambda expressions
 /**
  * Approach 6: prints every person matching the standard Predicate functional interface.
  */
 public static void processPersonsWithPredicate(
   List<Person> roster,
   Predicate<Person> tester){
  roster.stream().filter(tester).forEach(Person::printPerson);
 }
 
 
 //Approach 7, second example
 
 /**
  * Approach 7: filter with a Predicate, map the survivors with a Function, and
  * hand each mapped value to the Consumer.
  */
 public static void processPersonsWithFunction(
   List<Person> roster,
   Predicate<Person> tester,
   Function<Person, String> mapper,
   Consumer<String> block){
  for (Person candidate : roster){
   if (!tester.test(candidate)){
    continue;
   }
   block.accept(mapper.apply(candidate));
  }
 }
 
 
 //Approach 8: Use generics more extensively
 
 /**
  * Approach 8: fully generic pipeline - test each element, map the survivors,
  * and pass each mapped result to the consumer.
  */
 public static <X,Y> void processElements(
   Iterable<X> source,
   Predicate<X> tester,
   Function<X,Y> mapper,
   Consumer<Y> block) {
  for (X element : source){
   if (!tester.test(element)){
    continue;
   }
   block.accept(mapper.apply(element));
  }
 }
 
 
 
 public static void main(String... args){
  
  List<Person> roster = Person.createRoster(); 
  
  for (Person p : roster){
   p.printPerson();
  }
  
  //Approach 1: Create methods that Search for Persons that match one characteristic
  
  System.out.println("Persons older than 20:"); 
     printPersonsOlderThan(roster, 20);
     System.out.println();
     
     //Approach 2 : Create more generalized search methods 
     
     System.out.println("Persons between the ages of 14 and 30:"); 
     printPersonsWithinAgeRange(roster, 14, 30);
     System.out.println();
     
     //Approach 3 : Specify search criteria code in local class 
     
     System.out.println("Persons who are eligible for Selective Service:"); 
     
     class CheckPersonEligibleForSelectiveService implements ICheckPerson{

   @Override
   public boolean test(Person p) {
    return p.getGender() == Sex.MALE
      && p.getAge() >= 18 
      && p.getAge() <= 25;
   }      
     }
     
     printPersons(roster, new CheckPersonEligibleForSelectiveService());
     
     System.out.println();
     
     //Approach 4: Specify search Criteria code in an Anonymous class 
     
     System.out.println("Persons who are eligible for Selective Service " + 
      "(anonymous class)");
     
     printPersons(roster, new ICheckPerson() {   
   public boolean test(Person p) {
    return p.getGender() == Sex.MALE
      && p.getAge() >= 18
      && p.getAge() <= 25;
   }
     }); 
  
     //Approach 6: Use standard functional interfaces with lambda expressions 
     
     System.out.println("Persons who are eligible for Selective Service " + 
     "(with Predicate parameter):"); 
     
     processPersonsWithPredicate(roster,
       p -> p.getGender() == Sex.MALE
         && p.getAge() >= 18
         && p.getAge() <= 25
         );
     
     //Approach 7, second example 
     
     System.out.println();
     
     processPersonsWithFunction(roster, p -> 
     p.getGender() == Sex.MALE
     && p.getAge() >= 18 
     && p.getAge() <= 25, 
     p -> p.getEmailAddress(), 
     email -> System.out.println(email));
     
     System.out.println();
     
     //Approach 8: Use generics more extensively 
     
     System.out.println("Persons who are eligible for Selective Service " + 
     "(generic version):"); 
     processElements(roster, p -> 
     p.getGender() == Sex.MALE
     && p.getAge() >= 18 
     && p.getAge() <= 25, 
     p -> p.getEmailAddress(), 
     email -> System.out.println(email));
     
     //Approach 9: Bulk data operations that accept lambda expressions as parameters 
     
     System.out.println("Persons who are eligible for Selective Service " + 
      "(with bulk data operations):"); 
     
     roster.stream().filter(
       p -> p.getGender() == Sex.MALE
       && p.getAge() >= 18
       && p.getAge() <= 25)
       .map(p -> p.getEmailAddress())
       .forEach(email -> System.out.println(email));
     
     
     //Approach 10: Creating a new collection from a query 
     
     System.out.println("Persons who are eligible for selective Service " + 
     "(with collect to create new collection"); 
     
     List<Person> selectiveServicePersons = roster.stream().filter(
       p -> p.getGender() == Sex.MALE
       && p.getAge() >= 18 
       && p.getAge() <= 25)
       .collect(Collectors.<Person>toList()); 
     
     for(Person p: selectiveServicePersons){
      p.printPerson();
     }     
        
     
     System.out.println();
     
     //Approach 11: Using map to transform a collection to another type of collection    
     
     List<SimplePerson> personsTransformed = (List<SimplePerson>) roster.stream()
       .map(p->new SimplePerson(p.getName(), p.getAge()))
    .collect(Collectors.<SimplePerson>toList()); 
     
     for(SimplePerson p: personsTransformed){
      System.out.println(p.getName() + ", " + p.getAge()); 
     }
     
     System.out.println();
     
     //Approach 12: Using sorting for Approach 11 
     
     List<SimplePerson> personsTransformedSorted = (List<SimplePerson>) roster.stream()       
       .map(p->new SimplePerson(p.getName(), p.getAge()))
    .collect(Collectors.<SimplePerson>toList()); 
     
     class CustomComparator implements Comparator<SimplePerson>{
      @Override
      public int compare(SimplePerson o1, SimplePerson o2) {
       return o1.getAge() - o2.getAge();
      };
     }
     
     Collections.sort(personsTransformedSorted, new CustomComparator());
     
     for(SimplePerson p: personsTransformedSorted){
      System.out.println(p.getName() + ", " + p.getAge()); 
     }
     
     System.out.println();
     
     //Approach 13: Grouping sorting by gender 
     
     Map<Sex, List<Person>> mapByGenderMap = 
       roster.stream().collect(Collectors.groupingBy(Person::getGender));
     
     for (Map.Entry<Sex, List<Person>> entry : mapByGenderMap.entrySet()){
     
      System.out.println("Persons that are: " + entry.getKey());
      for (Person person : entry.getValue()) {
       person.printPerson();
      }      
     }  

     System.out.println();
     
     //Approach 14: Comparator that is implemented on the fly 
     List<Person> personsSortedByNameList = roster.stream()
       .sorted(new Comparator<Person>() {
        public int compare(Person x, Person y){
         return x.getName().compareTo(y.getName());
        }
    }).collect(Collectors.toList());
     
     System.out.println("Persons sorted by name");
     for (Person person : personsSortedByNameList){
      person.printPerson();
     }  

   //Approach 15: Comparator that is implemented on the fly using a lambda expression 
   List<Person> personsSortedByNameThroughLambdaList  =  roster.stream()
     .sorted((x,y)-> x.getName()
     .compareTo(y.getName())).collect(Collectors.toList()); 
     
    System.out.println("Persons sorted by name through lambda:");
     
    for (Person person : personsSortedByNameThroughLambdaList)
    {
      person.printPerson();
    }        
          
 } 
   

}


First perspectives of the Java 8 support

From looking through the sample code, one can see that Java 8 has got support for most of what LINQ offers, but there are also some major drawbacks or shortcomings that are clear. I am not experienced with Java 8, so perhaps some of my considerations here are either wrong or imprecise, but overall the syntax of Java 8 is much more verbose and less fluent than that of LINQ. Also, there are differences between Java 8 and C#. The Consumer concept of Java is great and looks like a missing part in C#. While Java 8 uses ->, C# uses =>, and the rest of the functional syntax is very similar. Runnable also looks like Action of C#. The anonymous class concept of Java is however flawed compared to C# true anonymous functions. Doing projections requires creating a new class definition or interface definition, which is not very flexible. Instantiating interfaces is supported in Java and not in C#; this is a positive feature of Java that C# lacks. Java also supports default implementations and static implementations in interfaces, which is very practical in many cases - C# does not support these default methods inside interfaces. To do grouping and sorting requires implementing comparators, and groupings go via maps. All in all, C# and LINQ are much more flexible, and the chaining syntax makes complicated functional programming easy. In Java 8, much functional programming is possible - in fact most functional programming that's supported in LINQ is also supported in Java 8 I guess - it is just so verbose, fragmented and indirect to do basic things such as sorting. I also like the true anonymous functions of C# using the new { } syntax, which Java 8 seems to lack. The dynamic type inference that the "var" keyword of C# supports also seems to be lacking when working with Java 8 - you often have to help the compiler by doing specific casts. 
Again, there might be some misunderstandings here, but all in all Java 8 functional programming seems "harder" than using C# and LINQ, but I guess this will improve as Java 8 now has got functional programming support. Also, Scala and Clojure are similar languages that are alternatives, where functional programming has been supported for years and has matured. It looks like Java's "marriage" with Oracle has halted progression and while Java 8 has evolved, there are still many parts of the language that can be improved to support functional programming. However, much about Java 8 is positive and it is to be expected that more C# developers (and VB?) will be attracted to Java since it now supports functional programming. But beware, C# programmers - here be dragons! There will be a learning curve and that compact syntax of C# you have learned to love is not the same experience when doing Java 8 functional programming!

Friday 18 July 2014

Implementing a fast dictionary for English words using a trie datastructure or radix tree in C#


The following article will present source code for implementing a fast dictionary for English words using a trie datastructure. The trie datastructure is basically a tree with fixed number of children, which in this case is kept as an array of Node instances. The trie is a tree of Node instances and will describe "paths", when resolving words. The application is itself a WPF application and will require .NET 4 or newer to execute. The following source code is the class Node, which is a specific node of the trie datastructure:

using System.Collections.Generic;

namespace AutoCompleteDictionary
{
    
    /// <summary>
    /// A single node of the trie: holds up to 26 child nodes, one slot per
    /// lowercase English letter, plus a flag marking the end of a word.
    /// For another alphabet, adjust the array size and the indexer base.
    /// </summary>
    public class Node
    {

        private readonly Node[] children = new Node[26];

        /// <summary>
        /// Enumerates only the non-null children, each paired with the
        /// letter its slot represents.
        /// </summary>
        public IEnumerable<KeyValuePair<Node, char>> AssignedChildren
        {
            get
            {
                for (int slot = 0; slot < 26; slot++)
                {
                    Node child = children[slot];
                    if (child == null)
                        continue;
                    yield return new KeyValuePair<Node, char>(child, (char)('a' + slot));
                }
            }
        }

        /// <summary>
        /// Returns the child for letter <paramref name="c"/>, creating and
        /// storing a fresh node first if the slot is still empty.
        /// </summary>
        public Node GetOrCreate(char c)
        {
            return this[c] ?? (this[c] = new Node());
        }

        /// <summary>
        /// Indexer mapping a lowercase letter ('a'..'z') to its child slot.
        /// </summary>
        public Node this[char c]
        {
            get { return children[c - 'a']; }
            set { children[c - 'a'] = value; }
        }

        /// <summary>True when a complete word ends at this node.</summary>
        public bool IsWordTerminator { get; set; }

    }

}

The Node class has a fixed-size Node array called children. The readonly property AssignedChildren returns which Node or letters (chars) are present at the current Node or "level", i.e. char position of the words that are mapped into the trie datastructure. In addition, the yield keyword is used here together with an iterator, which is also implemented in this class. The Node class will work with lowercase English letters, but could be adjusted for other alphabets as well. For a Norwegian alphabet, 29 letters would be used as the size of the children to accept the additional three Norwegian vowels, for example. Next, the code for the trie data structure is presented. It is itself a class.

using System.Collections.Generic;

namespace AutoCompleteDictionary
{
    
    /// <summary>
    /// Trie (prefix tree) over lowercase English words. Each path of Node
    /// instances from the root spells out a prefix; nodes flagged with
    /// IsWordTerminator mark complete words.
    /// </summary>
    public class Trie
    {

        private readonly Node root = new Node();

        /// <summary>
        /// Walks the trie along the letters of <paramref name="word"/>.
        /// When <paramref name="createPath"/> is true, missing nodes are
        /// created along the way; otherwise the walk returns null as soon
        /// as a letter has no child node.
        /// </summary>
        public Node NodeForWord(string word, bool createPath)
        {
            Node current = root;

            foreach (char c in word)
            {
                if (createPath)
                    current = current.GetOrCreate(c);
                else
                    current = current[c];

                if (current == null)
                    return null;
            }

            return current;
        }

        /// <summary>
        /// Registers <paramref name="word"/>, creating any missing nodes
        /// and marking the final node as a word terminator.
        /// </summary>
        public void AddNodeForWord(string word)
        {
            Node node = NodeForWord(word, true);
            node.IsWordTerminator = true; 
        }

        /// <summary>
        /// True when <paramref name="word"/> was added as a complete word
        /// (not merely as a prefix of some longer word).
        /// </summary>
        public bool ContainsWord(string word)
        {
            Node node = NodeForWord(word, false);
            return node != null && node.IsWordTerminator; 
        }

        /// <summary>
        /// Returns every registered word starting with
        /// <paramref name="prefix"/>; empty list when the prefix is unknown.
        /// </summary>
        public List<string> PrefixedWords(string prefix)
        {
            var prefixedWords = new List<string>();
            Node node = NodeForWord(prefix, false);
            if (node == null)
                return prefixedWords;

            PrefixedWordsAux(prefix, node, prefixedWords);
            return prefixedWords; 
        }

        // Depth-first collection of all words in the subtree below 'node',
        // where 'word' is the text spelled out so far.
        private void PrefixedWordsAux(string word, Node node, List<string> prefixedWords)
        {
            if (node.IsWordTerminator)
                prefixedWords.Add(word); 

            foreach (var child in node.AssignedChildren)
            {
                PrefixedWordsAux(word + child.Value, child.Key, prefixedWords); 
            }
        }

    }
}

The trie class or data structure will make use of Node instances as the nodes of its tree datastructure. It adds nodes with the AddNodeForWord method, which also sets the flag IsWordTerminator to true for the specific node. It will loop through the letters or chars of the passed in word or string and build up the Node subtree for the word. It is possible that "paths" are already registered. The method ContainsWord will not add new nodes but make use of the trie datastructure or Node tree and check whether the walk for a given word ended up with a Node which is not null and that the Node has a flag IsWordTerminator which is true. The method PrefixedWords uses recursion to find "paths" - or words, that is - in the trie or Node tree that can be reached from the Node that the typed word matches, if any. The recursion will visit the entire subtree of the trie or Node subtree below the Node that matches the typed word, so it is possible that the calculation will take some considerable time, if the user is not limited to typing at least some letters or chars for the prefix. In the application, three letters are set as a default minimum amount of letters to type. There are about 440,000 words in this English dictionary. It is not complete, but there is a considerable amount. However, the time to get the matching words with the inputted prefix will for three letters or above typically take a very few milliseconds.
The next class is a simple view model that is used in the WPF client making use of the code above. The WPF xaml view sets the DataContext property to an instance of this view model to have a simple MVVM scenario, or in this case View-ViewModel scenario, as the Model is trivially the view model itself.

using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.ComponentModel;
using System.Diagnostics;
using System.IO;
using System.Linq; 

namespace AutoCompleteDictionary
{
    
    /// <summary>
    /// View model for the auto-complete dictionary WPF client. Loads the
    /// bundled word list into a trie and exposes prefix lookups plus
    /// timing info through bindable properties.
    /// </summary>
    public class AutoCompleteViewModel : INotifyPropertyChanged
    {

        private Trie trie = new Trie(); 

        // Minimum number of typed letters before a prefix search is run.
        private int prefixMinSize;
        public int PrefixMinSize
        {
            get { return prefixMinSize; }
            set
            {
                if (prefixMinSize != value)
                {
                    prefixMinSize = value;
                    RaisePropertyChanged("PrefixMinSize"); 
                }
            }
        }

        // Human-readable summary of the last lookup (word count + duration).
        private string calculationInfo;
        public string CalculationInfo
        {
            get { return calculationInfo; }
            set
            {
                if (calculationInfo != value)
                {
                    calculationInfo = value;
                    RaisePropertyChanged("CalculationInfo");
                }
            }
        }

        // The typed prefix; normalized to lowercase a-z on assignment.
        private string inputWord;
        public string InputWord
        {
            get { return inputWord; }
            set
            {
                if (inputWord != value)
                {
                    value = value.ToLower(); 
                    // Strip every character outside 'a'..'z' so the trie
                    // indexer never sees an out-of-range letter.
                    inputWord = string.Join("", value.ToCharArray().Where(c => (int)c >= (int)'a' && (int)c <= (int)'z').ToArray()) ;
                    RaisePropertyChanged("InputWord");
                }
            }
        }

        /// <summary>Words matching the current prefix, bound to the view.</summary>
        public ObservableCollection<string> PrefixList { get; set; } 

        public void RaisePropertyChanged(string propertyName)
        {
            if (PropertyChanged != null)
                PropertyChanged(this, new PropertyChangedEventArgs(propertyName)); 
        }


        #region INotifyPropertyChanged Members

        public event PropertyChangedEventHandler PropertyChanged;

        #endregion

        public AutoCompleteViewModel()
        {
            PrefixMinSize = 3;
            InputWord = "Adv";
            PrefixList = new ObservableCollection<string>();
            ProcessDictionaryList();
        }

        // Loads the word list file into the trie, one word per line.
        private void ProcessDictionaryList()
        {
            foreach (var word in File.ReadLines("english-words"))
            {
                trie.AddNodeForWord(word);
            }
        }

        /// <summary>
        /// Recomputes PrefixList for the current InputWord and reports how
        /// long the lookup took via CalculationInfo.
        /// </summary>
        public void SetPrefixList()
        {
            Stopwatch stopWatch = Stopwatch.StartNew(); 
            PrefixList.Clear();
            var prefixes = GetPrefixList();
            
            foreach (var prefix in prefixes)
            {
                PrefixList.Add(prefix); 
            }
            //RaisePropertyChanged("PrefixList"); 

            CalculationInfo = string.Format("Retrieved {0} words prefixed with {1}. Operation took {2} ms", PrefixList.Count, inputWord, stopWatch.ElapsedMilliseconds); 
        }

        // Runs the trie lookup when the prefix is long enough; otherwise
        // returns an empty list. (Fixed: the return type must be the
        // generic List<string>; the non-generic List does not compile with
        // only System.Collections.Generic imported.)
        private List<string> GetPrefixList()
        {
            if (InputWord.Length >= PrefixMinSize)
            {
                var wordsStartingWithInputWord = trie.PrefixedWords(InputWord.ToLower());
                return wordsStartingWithInputWord;
            }
            return new List<string>(); 
        }
    }
}


If the English dictionary looks like a nice little toy, it is possible to download the program. A compiled version is in the folder bin\Debug if you want to run the program without compiling the source code. Requires Visual Studio 2012 or newer. Download the English dictionary WPF client presented above using a trie or radix tree data structure here

Implementing a fast point structure in C# for large-scale comparison checks and searches

The code below is extracted from Part I of the Pluralsight course "Making .NET Applications faster", discussed and presented below. Implementing a fast structure for 2D points in C# requires using a struct instead of a class, since this is value-based and not reference type, i.e making use of the stack and not the heap and avoiding expensive header fields of objects. In addition, it is necessary to:
  • Override the Equals method inherited from System.Object
  • Implement a method called Equals that returns true and has one input parameter, another instance of the same struct
  • Mark the struct with the generic IEquatable interface, IEquatable<PointV5>
  • Implement the operators == and != to make use of the Equals method receiving an instance of the struct
  • Implement GetHashCode, using Jon Skeet's advice of creating a weighted sum multiplied by prime numbers and the struct's fields
Implementing this equality regime will reduce overall size in memory about 4x and increase speed 4x to 10x for large scale scenarios. In the example code testing this code, 10 million 2D point structs of type PointV5 was tested. Most modern games will avoid reference types of course and stick to value types, i.e. structs. Since most of us are application developers, make sure if you create a LOT of objects, consider switching from class to struct type(s), if possible. Often, it is not needed to use classes, a struct will be sufficient (and faster and lighter). Check out the course page here: Pluralsight: Making .NET applications faster Pluralsight is a great course and IT resource for IT professionals!

using System;


    /// <summary>
    /// Fast value-type 2D point implementing the full equality regime:
    /// Equals(object) override, the generic IEquatable&lt;PointV5&gt;
    /// (fixed: the bare non-generic "IEquatable" does not exist),
    /// operators == / != and a matching GetHashCode.
    /// </summary>
    struct PointV5 : IEquatable<PointV5>
    {
        public int X;
        public int Y;

        /// <summary>Boxing overload; kept consistent with the typed Equals.</summary>
        public override bool Equals(object obj)
        {
            if (!(obj is PointV5)) return false;
            PointV5 other = (PointV5)obj;
            return X == other.X && Y == other.Y;
        }

        /// <summary>Typed, allocation-free equality check (IEquatable).</summary>
        public bool Equals(PointV5 other)
        {
            return X == other.X && Y == other.Y;
        }

        public static bool operator ==(PointV5 a, PointV5 b)
        {
            return a.Equals(b);
        }

        public static bool operator !=(PointV5 a, PointV5 b)
        {
            return !a.Equals(b);
        }

        public override int GetHashCode()
        {
            // 19 and 29 are primes, and this doesn't assume anything about
            // the distribution of X and Y.
            // Also see http://stackoverflow.com/questions/263400/what-is-the-best-algorithm-for-an-overridden-system-object-gethashcode
            int hash = 19;
            hash = hash * 29 + X;
            hash = hash * 29 + Y;
            return hash;
        }
    }


Actually, a simple class is in fact faster in this scenario, according to the testing I did. Consider this class:

/// <summary>
/// Reference-type 2D point used as the baseline in the struct-vs-class
/// benchmark above: heap-allocated, default reference equality and the
/// usual object header / method table overhead per instance.
/// </summary>
public class PointV0
{

        public int X { get; set; }

        public int Y { get; set; }

}

However, the price one pays here is higher memory overhead, as each point will have to be stored on the heap and have the object header fields and method table pointer fields, i.e. taking up more memory.

Elementary class
        Average time per lookup: 85,70ms
        Garbage collections: 0
Naked struct
        Average time per lookup: 435,10ms
        Garbage collections: 1018
With Equals override
        Average time per lookup: 248,70ms
        Garbage collections: 510
With Equals overload
        Average time per lookup: 239,50ms
        Garbage collections: 510
With IEquatable
        Average time per lookup: 168,60ms
        Garbage collections: 0
All bells and whistles
        Average time per lookup: 170,00ms
        Garbage collections: 0
Press any key to continue ..

I cannot conclude from these results that structs are always faster than classes, but there will always be more memory overhead when resorting to classes instead of structs. It looks, though, as if in this example a simple class was the fastest choice!

Wednesday 25 June 2014

A generic IEqualityComparer of T written in C#

When working with LINQ, often one has to pass in an IEqualityComparer of the class(es) being used. Implementing IEqualityComparer on classes can sometimes be unwanted to just make the computations work, therefore a generic implementation would be nice to invoke when performing the LINQ expression without either adding IEqualityComparer or worse - having to rewrite it. Sometimes also multiple implementations are desired.. The following class LambdaComparer is a generic implementation of IEqualityComparer of T.


using System;
using System.Collections.Generic;

namespace Hemit.OpPlan.Client.Infrastructure.Utility
{
    /// <summary>
    /// LambdaComparer - avoids the need for writing custom IEqualityComparers
    /// 
    /// Usage:
    /// 
    /// List<MyObject> x = myCollection.Except(otherCollection, new LambdaComparer<MyObject>((x, y) => x.Id == y.Id)).ToList();
    /// 
    /// or
    /// 
    /// IEqualityComparer comparer = new LambdaComparer<MyObject>((x, y) => x.Id == y.Id);
    /// List<MyObject> x = myCollection.Except(otherCollection, comparer).ToList();
    /// 
    /// </summary>
    /// <typeparam name="T">The type to compare</typeparam>
    /// <summary>
    /// Generic IEqualityComparer&lt;T&gt; built from lambda expressions, so
    /// LINQ operators that need a comparer (Except, Distinct, ...) can be
    /// used without writing a dedicated comparer class per type.
    /// 
    /// Usage:
    /// 
    /// List&lt;MyObject&gt; x = myCollection.Except(otherCollection, new LambdaComparer&lt;MyObject&gt;((x, y) =&gt; x.Id == y.Id)).ToList();
    /// 
    /// </summary>
    /// <typeparam name="T">The type to compare</typeparam>
    public class LambdaComparer<T> : IEqualityComparer<T>
    {
        private readonly Func<T, T, bool> _equalsFunc;
        private readonly Func<T, int> _hashFunc;

        /// <summary>
        /// Equality-only comparer. The hash function returns a constant so
        /// every candidate pair is routed through the equality lambda.
        /// </summary>
        public LambdaComparer(Func<T, T, bool> lambdaComparer)
            : this(lambdaComparer, o => 0)
        {
        }

        /// <summary>Comparer with explicit equality and hash lambdas.</summary>
        public LambdaComparer(Func<T, T, bool> lambdaComparer, Func<T, int> lambdaHash)
        {
            if (lambdaComparer == null) throw new ArgumentNullException("lambdaComparer");
            if (lambdaHash == null) throw new ArgumentNullException("lambdaHash");

            _equalsFunc = lambdaComparer;
            _hashFunc = lambdaHash;
        }

        public bool Equals(T x, T y)
        {
            return _equalsFunc(x, y);
        }

        public int GetHashCode(T obj)
        {
            return _hashFunc(obj);
        }
    }
}


The following unit tests uses this implementation of a generic IEqualityComparer of T:

using System;
using System.Collections.Generic;
using System.Linq;
using Hemit.OpPlan.Client.Infrastructure.Utility;
using NUnit.Framework;

namespace Hemit.OpPlan.Client.Infrastructure.Test.Utility
{
    
    [TestFixture]
    public class LambdaComparerTest
    {

        // Verifies Except() with a LambdaComparer keyed on Tuple.Item1:
        // only tuples whose Item1 key does not occur in the second list
        // should remain.
        [Test] 
        public void LambdaComparerPerformsExpected()
        {
            var countriesFirst = new List<Tuple<int, string>>{ 
                new Tuple<int, string>(1, "Spain"),
                new Tuple<int, string>(3, "Brazil"),
                new Tuple<int, string>(5, "Argentina"),
                new Tuple<int, string>(6, "Switzerland"),
                new Tuple<int, string>(7, "Uruguay"),
                new Tuple<int, string>(8, "Colombia")
            };
            var countriesSecond = new List<Tuple<int, string>>{
                new Tuple<int, string>(1, "Spain"),
                new Tuple<int, string>(4, "Portugal"),
                new Tuple<int, string>(7, "Uruguay"),
                new Tuple<int, string>(10, "England"),
                new Tuple<int, string>(11, "Belgium"),
                new Tuple<int, string>(12, "Greece")
            };

            // Expected survivors from the first list (keys 3, 5, 6, 8 are
            // absent from the second list).
            var expected = new List<Tuple<int, string>>
            {            
                new Tuple<int, string>(3, "Brazil"),
                new Tuple<int, string>(5, "Argentina"),
                new Tuple<int, string>(6, "Switzerland"),               
                new Tuple<int, string>(8, "Colombia")                
            }; 

            // Two tuples count as "equal" when their Item1 keys match.
            var countriesOnlyInFirst = countriesFirst.Except(countriesSecond, new LambdaComparer<Tuple<int, string>>((x, y) => x.Item1 == y.Item1));

            CollectionAssert.AreEqual(countriesOnlyInFirst, expected); 

        }


    }


}


In the unit test above, two lists containing the topmost FIFA ranking country teams in soccer in the world are being used in a LINQ Except expression, where the generic LambdaComparer class is being used. The class being passed in is Tuple of int and string: Tuple<int,string> - Note that an ordinary class could also be used here. The property Item1 of the Tuple is the "key" that is being used to compare two different objects - if it is the same, the objects are being considered to be the same. This results in a list of items from the first country list that are not present in the second list. Finally, a list of the expected result is being built up and the NUnit CollectionAssert.AreEqual method is used for checking a consistent result. The test passes. Much of .NET requires implementing interfaces such as IEqualityComparer, IComparer and more. Using a generic implementation class that expects Func expressions (the lambda expressions being passed) is a pattern that can give much flexibility.

Wednesday 7 May 2014

Generic type conversion in C#

Generic type conversion in C# is possible, but the type conversion must consider many different conversions. The following code below defines a generic method To<T> which is an extension method on object:

namespace Hemit.OpPlan.Common
{
    
    public static class ObjectExtensions
    {

        /// <summary>
        /// Converts <paramref name="value"/> to <typeparamref name="T"/>,
        /// handling null values, Nullable&lt;&gt; targets and enum targets.
        /// Returns default(T) when the value is null or the conversion
        /// fails (deliberate best-effort semantics).
        /// </summary>
        public static T To<T>(this object value)
        {
            if (value == null)
                return default(T);

            Type t = typeof(T); 

            if (t.IsGenericType && t.GetGenericTypeDefinition() == typeof(Nullable<>))
            {
                // Unwrap Nullable<TInner>: convert to the inner type and box
                // the result; the CLR boxes a Nullable<TInner> as TInner, so
                // the cast back to T succeeds.
                Type valueType = t.GetGenericArguments()[0];
                // Convert.ChangeType cannot target enum types, so nullable
                // enums are parsed by name/number instead.
                object result = valueType.IsEnum
                    ? Enum.Parse(valueType, value.ToString())
                    : Convert.ChangeType(value, valueType);
                return (T)result;
            }
            else if (t.IsEnum)
            {
                // Enum targets: parse by member name or numeric string.
                object result = Enum.Parse(t, value.ToString());
                return (T)result;
            }
            else
            {
                try
                {
                    object result = Convert.ChangeType(value, t);
                    return (T)result;
                }
                catch (Exception err)
                {
                    // Deliberate swallow: log and fall back to default(T)
                    // rather than propagating conversion failures.
                    Debug.WriteLine(err.Message);
                    return default(T); 
                }
            }
        }

    }
}

Next is a couple of unit tests written in NUnit for this extension method. All tests passes.

using System;
using System.Reflection;
using Hemit.OpPlan.Common.DataContract;
using NUnit.Framework;  

namespace Hemit.OpPlan.Common.Test.Extensions
{
    
    [TestFixture] 
    public class ObjectExtensionsTest
    {

        // Drives the generic To<T>() via reflection so the target type can
        // be supplied as a TestCase argument.
        [Test]
        [TestCase("4", typeof(int), 4)]
        [TestCase("true", typeof(bool), true)]
        [TestCase(null, typeof(bool), false)]
        [TestCase(null, typeof(Nullable<bool>), null)]
        [TestCase("true", typeof(Nullable<bool>), true)]
        [TestCase("FutureOperation", typeof(OperationStatusDataContract), OperationStatusDataContract.FutureOperation)]
        [TestCase("Postponed", typeof(OperationStatusDataContract), OperationStatusDataContract.Postponed)]
        public void ToReturnsExpected(object input, Type genericTypeArgument, object expected)
        {
            MethodInfo method = typeof(ObjectExtensions).GetMethod("To");
            MethodInfo genericMethod = method.MakeGenericMethod(new Type[] { genericTypeArgument });
            // The first Invoke argument (the instance) is ignored for static
            // methods; the actual value is passed through the args array.
            object actual = genericMethod.Invoke(input, new object[]{ input });
            // NOTE(review): NUnit's Assert.AreEqual signature is
            // (expected, actual); the arguments are swapped here, which only
            // affects failure messages, not pass/fail.
            Assert.AreEqual(actual, expected); 
        }

        // Conversion from a boxed int? should unwrap to the inner int.
        [Test] 
        public void ToReturnsExpectedWithNullableBoolean()
        {
            int? inputValue = new Nullable<int>(3);
            int actual = ((int?)inputValue).To<int>(); 
            Assert.AreEqual(actual, 3); 
        }

    }

}


The code above shows how one can call a generic method, shown in the unit test. Multiple test cases are passed into this unit test method. As is visible to the reader, the class ObjectExtensions contains the method To, which considers a conversion of an object to a designated type. The conversion itself must of course be valid; in addition, one should check that the value implements IConvertible or a similar interface. I have kept the code rather short and will elaborate on the code if I see there are conversions that fail which should work.

Wednesday 23 April 2014

Using LinqKit and PredicateBuilder to create reusable predicates or filters in Entity Framework

If one has worked with Entity Framework for a while, sooner or later it becomes clear that refactoring logic into separate methods is hard, because Entity Framework must transform the code one writes in Linq to Entities into SQL. Therefore, extracting logic in EF into method calls will compile, but give a runtime error. The solution is to use the library LinqKit, created by the brothers Joe and Ben Albahari. It is available as a Nuget package: install-package LinqKit or at the following web site: LinqKit

The following simple example shows how it is possible to create a reusable predicate separated into a method of its own and call this method in Linq to entities code. Make note of the use of the AsExpandable() extension method that transforms the DbSet into an IQueryable by returning a wrapper that allows this. The code is run inside LinqPad. Make note that LinqPad has got inherent support for PredicateBuilder, but the LinqKit DLL was added as a reference. Also the namespace imports added where: System.Linq, System.Linq.Expressions and LinqKit. Press F4 to set up the additional references and imports in Linqpad.

// Builds a reusable EF filter: matches patients whose Name contains any of
// the given keywords. With no keywords the predicate stays at
// PredicateBuilder.False and matches nothing.
public Expression<Func<Patient, bool>> InterestingPatient(params string[] keywords){
    var filter = PredicateBuilder.False<Patient>();
    foreach (string keyword in keywords){
        // Capture a per-iteration copy so each lambda sees its own keyword.
        string captured = keyword;
        filter = filter.Or(p => p.Name.Contains(captured));
    }
    return filter;
}

void Main()
{
   // AsExpandable() (LinqKit) wraps the query source so the invoked
   // predicate expression returned by InterestingPatient can be translated
   // by Entity Framework into SQL; Dump() is LINQPad's output helper.
   Patients.AsExpandable().Where(InterestingPatient("Olsen", "Nilsen")).Dump(); 
}




The following image shows the output. Make note that Patient here is a table in the context of my project. The data shown here is test data (not real patients, of course):

Wednesday 5 March 2014

Entity Framework Code First Generic Entity store operations

This article will discuss how generic entity store operations towards a data context can be used with EF Code First. This is tested with EF 6. First, define an interface with the often used generic entity store operations:

using System;
using System.Data.Entity;

namespace TestEFCodeFirst1
{

    /// <summary>
    /// Generic entity store operations over an EF Code First context.
    /// Implementations own a DbContext and release it on Dispose.
    /// </summary>
    public interface IRepo : IDisposable
    {

        /// <summary>Adds <paramref name="item"/>; persists immediately when <paramref name="saveNow"/> is true.</summary>
        T Insert<T>(T item, bool saveNow) where T : class;

        /// <summary>Marks <paramref name="item"/> as modified; persists immediately when <paramref name="saveNow"/> is true.</summary>
        T Update<T>(T item, bool saveNow) where T : class;

        /// <summary>Marks <paramref name="item"/> as deleted; persists immediately when <paramref name="saveNow"/> is true.</summary>
        T Delete<T>(T item, bool saveNow) where T : class;

        /// <summary>Persists all pending changes; returns the result of the underlying SaveChanges call.</summary>
        int Save(); 

    }

}

Then implement this interface:

using System.Data.Entity;

namespace TestEFCodeFirst1
{
    
    /// <summary>
    /// Generic repository over an EF Code First context. Creates and owns
    /// a TContext instance, and disposes it together with the repository.
    /// </summary>
    public class Repo<TContext> : IRepo
        where TContext : DbContext, new()
    {

        public Repo()
        {
            Context = new TContext(); 
        }

        /// <summary>The context all operations run against.</summary>
        protected TContext Context { get; private set; }

        #region IRepo Members

        /// <summary>Adds <paramref name="item"/>; saves immediately when requested.</summary>
        public T Insert<T>(T item, bool saveNow) where T : class
        {
            return ApplyState(item, EntityState.Added, saveNow);
        }

        /// <summary>Flags <paramref name="item"/> as modified; saves immediately when requested.</summary>
        public T Update<T>(T item, bool saveNow) where T : class
        {
            return ApplyState(item, EntityState.Modified, saveNow);
        }

        /// <summary>Flags <paramref name="item"/> for deletion; saves immediately when requested.</summary>
        public T Delete<T>(T item, bool saveNow) where T : class
        {
            return ApplyState(item, EntityState.Deleted, saveNow);
        }

        /// <summary>Persists all pending changes via the owned context.</summary>
        public int Save()
        {
            return Context.SaveChanges(); 
        }

        public void Dispose()
        {
            Context.Dispose(); 
        }

        #endregion

        // Shared implementation for Insert/Update/Delete: stamp the entry
        // with the requested state and optionally persist right away.
        private T ApplyState<T>(T item, EntityState state, bool saveNow) where T : class
        {
            Context.Entry(item).State = state;
            if (saveNow)
                Context.SaveChanges();
            return item; 
        }
    }

}

Next, create a simple DbContext to test out:

  
  /// <summary>
  /// Minimal Code First context exposing a single Students set.
  /// </summary>
  public class DemoDbContext : DbContext
    {


        // Uses "EFCodeFirstDemoDb1" as the connection-string name /
        // database name.
        public DemoDbContext() : base("EFCodeFirstDemoDb1")
        {
            
            
        }


        // NOTE(review): EF discovers DbSet properties by reflection; confirm
        // that a private setter is populated in this EF version - if
        // Students is null at runtime, make the setter public.
        public DbSet<Student> Students { get; private set; }

    }


An example of using this db context in a derived repo class is shown next:

// Concrete repository bound to the demo context. Inherits all CRUD behavior
// from Repo<TContext>; exists only to fix the context type parameter.
public class DemoRepo : Repo<DemoDbContext>
{

}

The next code shows how to use this extension method:

using System;
using System.Data.Entity;

namespace TestEFCodeFirst1
{
    class Program
    {
        // Console entry point: registers the migration-based database initializer
        // and inserts two sample Student rows through the repository, saving each
        // one immediately.
        static void Main(string[] args)
        {
            Database.SetInitializer(new Init());

            using (var repo = new DemoRepo())
            {
                foreach (var studentName in new[] { "Hardy", "Joey" })
                {
                    repo.Insert(new Student
                    {
                        Name = studentName,
                        DateAdded = DateTime.Now,
                        LastMod = DateTime.Now
                    }, true);
                }
            }
        }

    }
}


To automatically patch your database do this:

In package-manager console type: Enable-Migrations In the configuration class, set AutomaticMigrationsEnabled to true.

namespace TestEFCodeFirst1.Migrations
{
    using System;
    using System.Data.Entity;
    using System.Data.Entity.Migrations;
    using System.Linq;

    /// <summary>
    /// Code-first migrations configuration for <see cref="TestEFCodeFirst1.DemoDbContext"/>,
    /// generated by Enable-Migrations.
    /// </summary>
    internal sealed class Configuration : DbMigrationsConfiguration<TestEFCodeFirst1.DemoDbContext>
    {
        public Configuration()
        {
            // Let EF compute and apply schema changes automatically instead of
            // requiring an explicit migration file per change.
            AutomaticMigrationsEnabled = true;
        }

        protected override void Seed(TestEFCodeFirst1.DemoDbContext context)
        {
            //  This method will be called after migrating to the latest version.

            //  You can use the DbSet<T>.AddOrUpdate() helper extension method 
            //  to avoid creating duplicate seed data. E.g.
            //
            //    context.People.AddOrUpdate(
            //      p => p.FullName,
            //      new Person { FullName = "Andrew Peters" },
            //      new Person { FullName = "Brice Lambson" },
            //      new Person { FullName = "Rowan Miller" }
            //    );
            //
        }
    }
}


Add an Init class:

using TestEFCodeFirst1.Migrations;

namespace TestEFCodeFirst1
{

    /// <summary>
    /// Database initializer that migrates the demo database to the latest version
    /// using the generated migrations <see cref="Configuration"/>.
    /// </summary>
    /// <remarks>
    /// Fix: EF6's MigrateDatabaseToLatestVersion is generic — a non-generic base
    /// class does not exist, so the original declaration did not compile. The
    /// context and the migrations configuration must be supplied as type arguments.
    /// </remarks>
    internal class Init : System.Data.Entity.MigrateDatabaseToLatestVersion<DemoDbContext, Configuration>
    {

    }
}

This explains what is done in the line further up: Database.SetInitializer(new Init());

Tuesday 18 February 2014

Performing slow work outside the WPF UI thread and updating the WPF GUI afterwards

Developers using WPF are very familiar with the problem of doing slow work on the UI thread and trying to update the GUI thread afterwards. This is not only a problem in WPF, but also in Windows Forms and even web applications. If the UI thread is being kept busy with a heavy, synchronous calculation, other work such as resizing the window and even repainting the UI will suffer and this results in a non-responsive GUI that the user cannot work with. The solution is to do work in another thread or threads and update the GUI afterwards. To make this easier, I list an extension method below to do this. The extension method is an extension method on the dispatcher, which will usually be this.Dispatcher in code behind or Dispatcher.CurrentDispatcher in MVVM scenarios. The user must supply the thread work function that returns a type T (this can be either an object or a list of objects, for example) and also a GUI update method that receives an object of type T that the thread work method calculated. The GUI update method will usually specify what shall be performed in the GUI or in the ViewModel in MVVM scenarios, while the thread work method specifies how the object of type T is retrieved. It should be possible to use this extension method both in code-behind and MVVM scenarios.

public static class DispatcherUtil
{

    /// <summary>
    /// Runs <paramref name="threadWork"/> on a thread-pool thread and then invokes
    /// <paramref name="guiUpdate"/> with its result on <paramref name="currentDispatcher"/>
    /// (typically the UI thread's dispatcher).
    /// </summary>
    /// <param name="currentDispatcher">Dispatcher of the thread that owns the UI elements.</param>
    /// <param name="threadWork">Slow work to run off the UI thread; its return value feeds the update.</param>
    /// <param name="guiUpdate">Callback executed on the dispatcher thread with the computed result.</param>
    /// <remarks>
    /// NOTE(review): exceptions thrown by <paramref name="threadWork"/> are unhandled
    /// on the pool thread — callers should catch inside the delegate if that matters.
    /// </remarks>
    public static void AsyncWorkAndUIThreadUpdate<T>(this Dispatcher currentDispatcher, Func<T> threadWork, Action<T> guiUpdate)
    {
        ThreadPool.QueueUserWorkItem(delegate(object state)
        {
            T resultAfterThreadWork = threadWork();
            // Fix: the original delegate declared a parameter, received the result
            // as the BeginInvoke argument, then ignored it and captured the local
            // instead. Use the delegate argument so the data flow is explicit.
            currentDispatcher.BeginInvoke(DispatcherPriority.Normal,
                new Action<T>(result => guiUpdate(result)),
                resultAfterThreadWork);
        });
    }

}

Next, an example of using this:

  // Click handler: runs the slow FooService call on a thread-pool thread and,
  // when it finishes, sets the button content back on the UI (dispatcher) thread.
  private void Button_Click(object sender, RoutedEventArgs e)
        {

            // Thread work: blocking call (sleeps ~5 s in the sample service).
            // GUI update: assignment to btnFoo runs via this.Dispatcher.
            this.Dispatcher.AsyncWorkAndUIThreadUpdate(() => FooService.GetSomeFooData(),
                s => btnFoo.Content = s); 

        }

The FooService is very simple and simulates a resource intensive calculation that takes about 5 seconds:

public static class FooService
{

    /// <summary>
    /// Simulates an expensive calculation: blocks the calling thread for five
    /// seconds, then returns a greeting stamped with the current local time.
    /// </summary>
    public static string GetSomeFooData()
    {
        Thread.Sleep(5000);
        var stamp = DateTime.Now.ToString();
        return "Hello world! " + stamp;
    }

}

I use ThreadPool.QueueUserWorkItem here. Of course this is a very simple example, and in complex applications one needs to watch out for state problems in the UI. When it is not sure when the calculation is performed, you should perhaps consider to avoid refreshing the UI if the UI has changed, especially in more complex MVVM component based WPF User Interfaces, based on for example Prism. At the same time, locking up the UI is not good either. I have only tested the code above in a simple scenario, but expect it to work also with more complex WPF UI. There are two golden rules when it comes to threading in WPF: 1. Never access UI elements from another thread than the UI thread 2. Do not do slow work on the UI thread but in another thread Most difficulties doing off thread async work in WPF comes to updating the UI after the calcuations are done in the other thread(s). With this extension method, hopefully WPF developers will find it easier.

Traversing paged results using Massive data access wrapper

Massive, a library created by brilliant developer Rob Conery and Jon Skeet's frequent humorous side-kick, has got a lot of nice features. Its best features are ease of use and dynamic appeal. It is also fast and lightweight. After testing Massive, I wanted to create an extension method to traverse paged results returned from Massive. The code below is tested against the AdventureWorks2012 database. First off, install Massive. I have created a simple Console Project to test this out and then use the Library Package Manager to install Massive:

Install-Package Massive -Version 1.1.0 

Remember to add the -Version switch above. Then install the AdventureWorks 2012 database here: http://msftdbprodsamples.codeplex.com/releases/view/55330 Add a connection string against the Adventure Works 2012 database after installing this on your developer PC. Use Data Explorer or Server Explorer in Visual Studio. Rename the connection string in your app.config or web.config to "adventureworks2012". Define a model entity for the Production.Product table next:

    public class Products : DynamicModel
    {
        public Products()
            : base("adventureworks2012", "Production.Product", "productid")
        {

        }
    }

To add a table entity in Massive, specify the connection string name, then the name of the table and then the primary keys. If you omit the name of the connection string, the first connection string found is used. Next, the code for the extension method to work against this DynamicModel inherited class and other inherited classes from DynamicModel:

     public static class MassiveExtensions
    {

        /// <summary>
        /// Walks the paged results of a Massive <see cref="DynamicModel"/> and invokes
        /// the supplied callbacks for every page and/or every row.
        /// </summary>
        /// <param name="massiveObject">The Massive model to page through; must not be null.</param>
        /// <param name="pageSize">Rows per page; must be positive. Defaults to 20.</param>
        /// <param name="startPageIndex">First page to fetch (Massive page indexes are 1-based).</param>
        /// <param name="endPageIndex">Last page to fetch; -1 means "through the final page".</param>
        /// <param name="pageAction">Optional callback invoked once per page with (pageIndex, page).</param>
        /// <param name="itemAction">Optional callback invoked once per row.</param>
        public static void MassivePagedAction(this DynamicModel massiveObject, int pageSize = 20, int startPageIndex = 1, 
            int endPageIndex = -1, Action<int, dynamic> pageAction = null, Action<dynamic> itemAction = null)
        {
            // Fix: validate arguments up front — a null receiver or non-positive page
            // size previously surfaced later as an opaque dynamic-binder failure.
            if (massiveObject == null)
                throw new ArgumentNullException("massiveObject");
            if (pageSize <= 0)
                throw new ArgumentOutOfRangeException("pageSize", "Page size must be positive.");

            // Fetch every requested page up front, keyed by 1-based page index.
            var pages = new Dictionary<int, dynamic>(); 
            var firstPage = massiveObject.Paged(currentPage: startPageIndex, pageSize: pageSize);
            pages.Add(startPageIndex, firstPage);
            // The first page exposes TotalPages; clamp the requested end index to it.
            int endIndex = endPageIndex == -1 ? firstPage.TotalPages : Math.Min(firstPage.TotalPages, endPageIndex); 
            
            for (int currentPageIndex = startPageIndex + 1; currentPageIndex <= endIndex; currentPageIndex++)
            {
                var currentPage = massiveObject.Paged(currentPage: currentPageIndex, pageSize: pageSize);
                pages.Add(currentPageIndex, currentPage); 
            }

            foreach (var keyValuePair in pages)
            {
                if (pageAction != null)
                    pageAction(keyValuePair.Key, keyValuePair.Value);
                foreach (var item in keyValuePair.Value.Items)
                {
                    if (itemAction != null)
                        itemAction(item); 
                }
            }

        }

    }


The extension method puts the paged results in a dictionary, where the keys are the page index and the value is the dynamic object that Massive returns, containing the paged data. There are numerous optional parameters to this extension method. The page size is default set to 20, if not specified. The start page index is 1, which is actually the default value. Rob Conery should perhaps have chosen the page index to be 0-based, as this is the standard in C#, but querying for page index 0 will give empty results. The end page index is default set to -1. This means that all pages will be returned, until there are not more pages. If you set the endPageIndex to a value less than firstPage.TotalPages inside the extension method, i.e. the actual total pages in the database, only pages up to the endPageIndex is returned. To get a single page, pass in a start and end index that differs with 1 (where end index has a value of one larger than start index). It is also possible to pass in page actions and item actions here. They default to null, but obviously at least an item action is desired to be set. A variant of the method above to allow a Func to be passed in to for example return results to the caller is interesting. Using the extension method above is shown next:

//GET PAGES RESULTS OF THE PRODUCTS TABLE USING A PAGE SIZE OF FIVE. GET ENTIRE TABLE.

            DynamicModel table = new Products();
          
            table.MassivePagedAction(pageSize:5,pageAction: (indx, page) => 
             Console.WriteLine("\nProducts in page # {0}:\n", indx), 
             itemAction: item => Console.WriteLine(item.Name)); 


//SAMPLE OUTPUT:


------ Test started: Assembly: TestMassive.exe ------


Products in page # 1:

Adjustable Race
Bearing Ball
BB Ball Bearing
Headset Ball Bearings
Blade

Products in page # 2:

LL Crankarm
ML Crankarm
HL Crankarm
Chainring Bolts
Chainring Nut

Products in page # 3:

..

//This resulted in 101 pages - There are 504 products in the AdventureWorks 2012 database (primarily mountain bikes and clothes+equipment)

Products in page # 101:

HL Bottom Bracket
Road-750 Black, 44
Road-750 Black, 48
Road-750 Black, 52

//Final page contains only four items as expected

To get a paged result and work with this paged result, another extension method can be used:

  public static Dictionary<int,List<ExpandoObject>> MassivePagedRetrieval(this DynamicModel massiveObject, int pageSize = 20, int startPageIndex = 1,
           int endPageIndex = -1)
        {
            // Retrieves the requested range of pages and returns them keyed by their
            // 1-based page index, each page flattened into a List<ExpandoObject>.
            var pagedResult = new Dictionary<int, List<ExpandoObject>>();

            // The first page is fetched eagerly: its TotalPages member is needed to
            // resolve the -1 ("all pages") sentinel for endPageIndex.
            var firstPage = massiveObject.Paged(currentPage: startPageIndex, pageSize: pageSize);
            int lastIndex = endPageIndex == -1 ? firstPage.TotalPages : Math.Min(firstPage.TotalPages, endPageIndex);

            int pageIndex = startPageIndex;
            dynamic page = firstPage;
            while (true)
            {
                // Copy the dynamic page rows into a concrete list for the caller.
                var rows = new List<ExpandoObject>();
                foreach (var row in page.Items)
                {
                    rows.Add(row);
                }
                pagedResult[pageIndex] = rows;

                pageIndex++;
                if (pageIndex > lastIndex)
                    break;
                page = massiveObject.Paged(currentPage: pageIndex, pageSize: pageSize);
            }

            return pagedResult;
        }

To use this extension method, use:

     Dictionary<int, List<ExpandoObject>> pagedResult = table.MassivePagedRetrieval(pageSize: 10); 

The key is the page index again and a list of ExpandoObject objects are returned for each page (or key) in the dictionary. ExpandoObject is a dynamic object. To watch the resulting data in Visual Studio, use the debugger and choose Dynamic View when inspecting the result through debugging brekpoints in your code. This article has focused on paged data, as Massive often can be used to performed paged data retrieval in for example ASP.NET MVC-based solutions, but also other solutions can use this data access wrapper. The requirement is .NET 4.0 as System.Dynamics is the library which is used. Massive supports most features as more complex Object Relational Wrappers, such as Entity Framework. Its better speed and ease of use should be tempting. Rob Conery has got additonal information here: Massive Github page There is a trade off with Massive too, its dynamic nature is also its Achilles heel. If you rename fields in the database, you most likely must also update your code and since it is dynamic, chances are that errors can occur and be detected in runtime, out in production. This is easier to avoid using strongly typing such that Entity Framework also. Sadly, Entity Framework and many other ORMs are also slow. The following code shows how it is possible to retrieve a result from two tables. As you can see, one has to pass in SQL to output data.

var productsWithCategories = 
table.Query("SELECT p.Name, pc.Name As CategoryName FROM Production.Product p INNER JOIN Production.ProductCategory pc ON p.ProductSubCategoryId = pc.ProductCategoryId");

foreach (var ppc in productsWithCategories)
{
 Console.WriteLine("Product Name: {0}, Product category name: {1}", ppc.Name, ppc.CategoryName);
}

//RESULTING OUTPUT: 

Product Name: Road-150 Red, 62, Product category name: Components
Product Name: Road-150 Red, 44, Product category name: Components
Product Name: Road-150 Red, 48, Product category name: Components
Product Name: Road-150 Red, 52, Product category name: Components

..

This results in a quick way to access the database, but also loosing Intellisense (auto completion), strongly static typing and compile type checking. When returning results from the database in a service, it is possible to return the data as ExpandoObjects as shown in the extension method MassivePagedRetrieval shown above. If one uses ASP.NET MVC, the Model can bind to this data. This results in fewer application layers and less coding, but at the same time a risk for those pesky runtime errors out in production. Massive is very quick and for web developers, this looks very promising. Using ElasticObject or similar techniques, the ExpandoObjects can be converted to either XML or better - JSON - and processed using jQuery and Javascript. If Massive catches on or some other, ligtweight ORM is preferred by developers remains to be seen. This is a moving field. What is sure, the days of heavy weight ORMs like Entity Framework will not be bright if not the performance catches up with these lighter ORM frameworks. At the same time, it is hard to say that it actually will result in shorter time to market (TTM) for developers, as they loose Intellisens and other good features such as Entity Framework supports. Test out Massive and see if it matches your needs. Use the extension methods above to get started with paged result retrieval, if they look interesting. Massive also supports of course inserts, updates and deletes. In addition, validations and callbacks can be put in the class that inherit from DynamicModel. Massive also has specialized functions such as the standard aggregation methods and convenience method such as Find that is defined in the DynamicModel. Lastly, Massive is heavily based on System.Dynamics assembly in .NET 4.0 and newer framework versions. DynamicModel inherits itself from System.Data.DynamicObject. Also bulk updates are supported in Massive.

Monday 17 February 2014

Using ElasticObject to parse arbitrarily large XML documents into object graphs

ElasticObject is a great implementation of a DynamicObject, that allows the developer to work with an arbitrarily large XML document, using an object graph. The code below access the Norwegian Weather Forecast service Yr.no to get some forecast data for the location where I grew up in Norway, where a weather station nearby automatically collects meteorological data such as wind, temperature, pressure and wind direction, humidity and so on. The data is available among other ways as XML to download and ElasticObject can be used to handle the XML. When handling XML, it is possible to create an XSD from sample XML data and in for example Visual Studio create an XSD file through XML->Create Schema option in Visual Studio after opening the sample XML file. Using the Visual Studio Command line, it is possible to generate a class through the command:

xsd /classes myfile.xsd

This generates a file called myfile.cs, which will be a data contract.

The data contract generated can be used to deserialize the XML into an object. Often this is a preferred strategy, since one gets a strongly typed object (graph) to work with. ElasticObject is a dynamic object, and allows the developer to avoid having to generate a serialization class before deserializing the received XML. Sometimes, this also allows changes in the XML to occur without affecting the code. Often it is a limited part of the XML tree, which is the received XML the developer needs to retrieve for further processing. Also, not having to generate classes from the XML schema is also convenient. Still, the developer needs to refer more often to the XML to understand the structure of the object which is created. The Intellisense of ElasticObject is rather poor, so the developer needs to query the object graph using Immediate Window and query the object to find the right "path" to the information in the XML document. Similar techniques using System.Linq.Xml and XPath queries can be used. To start using ElasticObject, install first the NuGet package in Visual Studio. Type the following command in Package Manager Console.

PM> install-package AmazedSaint.ElasticObject
Installing 'AmazedSaint.ElasticObject 1.2.0'.
Successfully installed 'AmazedSaint.ElasticObject 1.2.0'.
Adding 'AmazedSaint.ElasticObject 1.2.0' to TestElasticObject.
Successfully added 'AmazedSaint.ElasticObject 1.2.0' to TestElasticObject.

The following code then illustrates the use:

using AmazedSaint.Elastic;
using System;
using System.IO;
using System.Net;
using System.Xml.Linq;

namespace TestElasticObject
{
    class Program
    {

        /// <summary>
        /// Downloads the Yr.no forecast XML for a fixed location and prints each
        /// textual forecast period to the console.
        /// </summary>
        static void Main(string[] args)
        {
            // Fix: WebClient is IDisposable and was never disposed in the original;
            // both the client and the response reader are now released deterministically.
            using (var wc = new WebClient())
            using (StreamReader sr = new StreamReader(wc.OpenRead(@"http://www.yr.no/stad/Norge/Nord-Tr%C3%B8ndelag/Steinkjer/S%C3%B8ndre%20Egge/varsel.xml")))
            {
                var data = sr.ReadToEnd();
                IterateForecasts(data);          
            }

            Console.WriteLine("Press any key to continue ...");
            Console.ReadKey(); 

        }

        /// <summary>
        /// Parses the weather XML into an ElasticObject graph and writes each
        /// forecast interval (its from/to attributes plus body text) to the console.
        /// </summary>
        private static void IterateForecasts(string data)
        {
            dynamic weatherdata = XElement.Parse(data).ToElastic();
            // The null index drills into the child collection of the location element;
            // the tilde (~) operator reads an element's inner value (ElasticObject conventions).
            foreach (var node in weatherdata.forecast.text.location[null])
            {
                Console.WriteLine(string.Format("Fra-Til: {0} - {1}", node.from, node.to)); 
                Console.WriteLine(~node.body);
                Console.WriteLine();
            }
        }

    }
}


The code above uses WebClient to download the target XML data, then uses a StreamReader to read the XML file into a string. Then using XElement and the ToElastic extension method in AmazedSaint.Elastic namespace, this is stored into a dynamic variable which can be worked on. One important gotcha here is how to drill down into the object graph of the ElasticObject, which I could not figure out following the documentation, since it only contained more simple examples but I found on StackOverFlow: To drill further down into the object graph than its immediate child node, type the path to the element which contains the data to work with using a dot separated syntax and in addition: use the null index to get to the child element which contains the data to process - for example to output. When traversing the child element in the foreach loop, its child elements will be stored into the loop variable node and then it is possible to get to the attributes of the node. To get the value inside the element, use the tilde (~) operator. ElasticObject implements some operators to make it easier to work with XML and object graphs. For example, to cast an ElasticObject to XML, the > operator can be used:

            dynamic store = new ElasticObject("Store");
            store.Name = "Acme Store";
            store.Location.Address = "West Avenue, Heaven Street Road, LA";
            store.Products.Count = 2;
            store.Owner.FirstName = "Jack";
            store.Owner.SecondName = "Reacher";
            store.Owner <<= "this is some internal content for owner";

            var p1 = store.Products.Product();
            p1.Name = "Acme Floor Cleaner";
            p1.Price = 20;

            var p2 = store.Products.Product();
            p2.Name = "Acme Bun";
            p2.Price = 22; 

            XElement el = store > FormatType.Xml; 
            System.Console.WriteLine(el.ToString());

The ElasticObject is defined dynamically and the <<= operator is used to set the value inside the property, which will be shown when converting the ElasticObject to XML. In addition, the code above shows how to create child elements, as the Products property contains two child Product objects, which will be Product XML elements. The > operator is used to "pipe" the object into an XElement variable, which is the converted XML object from the ElasticObject object graph. To convert the object from an XML document to an ElasticObject, the ToElastic() extension method can be used to convert the XML document or XElement variable into the ElasticObject - the object graph again. Using ElasticObject, it is possible to work with XML in object graphs and since it is dynamic, it is not necessary to create new types, such as serialization data contracts. ElasticObject source code is available on GitHub here: https://github.com/amazedsaint/ElasticObject
Its creator is Anoop Madhusudanan, AmazedSaint profile on GitHub. It originally was a project hosted on CodePlex. ElasticObject should be considered as an alternative when working with XML and object graphs. It is very tempting to avoid having to create new objects to work with the XML but work with a dynamic object that can be extended and changed. It should also be efficient. An alternative can be to use anonymous types, but why go the hard way when one can go the easy - elastic way?

IoC container example

This article will present an IoC container that can resolve concrete types via interfaces or instances recursively. It should not be used in production code without improved loop detection and error handling, but can in some scenarios be used if one needs a very simple (and fast) IoC container.

The code is heavily based on Jon Skeet's walkthrough of a IoC container ("IoC container on the fly").

First the code of the IoC container itself:



using System;
using System.Collections.Generic;
using System.Linq;
using System.Reflection;

namespace TestComposedIoCContainer
{
    
    /// <summary>
    /// Minimal IoC container: maps a key type to a provider that either returns a
    /// registered instance or constructs the bound concrete type, resolving
    /// constructor parameters recursively. No cycle detection — recursive
    /// bindings will recurse indefinitely.
    /// </summary>
    public class CompositionContainer
    {

        // Key type -> provider producing the resolved instance.
        private readonly Dictionary<Type, Func<object>> providers = new Dictionary<Type, Func<object>>();

        // Guards writes to the providers map; resolution is deliberately unlocked
        // (containers are typically populated on one thread, then read-only).
        private readonly object providersLocker = new object(); 


        /// <summary>Binds the key type to a concrete type constructed on each resolve.</summary>
        public void Bind<TKey, TConcrete>() where TConcrete : TKey
        {
            lock (providersLocker)
            {
                providers[typeof(TKey)] = () => ResolveByType(typeof(TConcrete));
            }
        }

        /// <summary>Binds the key type to a fixed, already-built instance.</summary>
        public void Bind<T>(T instance)
        {
            lock (providersLocker)
            {
                providers[typeof(T)] = () => instance;
            }
        }

        private object ResolveByType(Type type)
        {
            // Fix: Type.GetConstructors() returns an empty array, never null, so the
            // original "constructors != null" check made the "Instance"-field
            // fallback below unreachable.
            var constructors = type.GetConstructors();
            if (constructors.Length > 0)
            {
                // A single public constructor is used as-is; with several, the one
                // marked [ImportConstructor] wins.
                ConstructorInfo cInfo = constructors.Length == 1
                    ? constructors[0]
                    : constructors.FirstOrDefault(c =>
                        c.GetCustomAttributes(typeof(ImportConstructorAttribute), false).Length > 0);
                if (cInfo == null)
                    throw new InvalidOperationException(GetUsageMessage(type));
                var arguments = cInfo.GetParameters().Select(p => Resolve(p.ParameterType)).ToArray();
                return cInfo.Invoke(arguments);
            }

            // No public constructors: fall back to a public static "Instance" field
            // (singleton support).
            var instanceField = type.GetField("Instance");
            if (instanceField != null)
                return instanceField.GetValue(null);
            throw new InvalidOperationException(GetUsageMessage(type));
        }

        private static string GetUsageMessage(Type type)
        {
            // Fix: corrected the "ImportContructor" typo in the original message.
            return "Could not resolve a type implementing " + type.Name 
            + " - it must be registered through Bind to the composition container and either contain a single constructor or one constructor " 
            + "decorated with ImportConstructor attribute or a field named Instance";
        }

        /// <summary>Resolves an instance for the given key type.</summary>
        internal TKey Resolve<TKey>()
        {
            return (TKey)Resolve(typeof(TKey)); 
        }

        internal object Resolve(Type type)
        {
            // Registered providers take precedence; unregistered types are
            // constructed directly via reflection.
            Func<object> provider;
            if (providers.TryGetValue(type, out provider))
                return provider();
            return ResolveByType(type);
        }

    }

}


The IoC container contains a dictionary which has got a Type as the Key, which is the usually either the interface or the concrete type to register through the Bind calls to the container, and a function expression that returns either an instance or resolves an instance through the container. When resolving a type, it can be multiple constructors in the class. I have extended Jon Skeet's code a bit, and by decorating the constructor by a ImportConstructor attribute, it is possible to specify which constructor is the constructor that should be the inversion of control constructor. Pass in all dependencies that must be resolved in that constructor. If the importing type or "part" to use a phrase from MEF, has no constructors that should be used, a field called "Instance" can be used. This is to support singleton patterns or similar. I have chosen to add locking when performing binds, to make this thread safe. Resolving instances is not made thread safe, as this can give very much locking. Usually a composition container is anyways set up in a single thread through registering bindings and then multiple threads will possibly access the IoC container. It is possible to put the CompositionContainer itself as a singleton, which is what one usually wants. I have added a generic singleton implementation in my blog that can be used to support this. The import constructor attribute is very simple:

using System;

namespace TestComposedIoCContainer
{
    
    /// <summary>
    /// Marks which constructor the composition container should use when a type
    /// exposes more than one public constructor.
    /// </summary>
    /// <remarks>
    /// Fix: without [AttributeUsage] the marker could be silently applied to any
    /// program element, where the container would ignore it; restricting it to
    /// constructors turns such mistakes into compile errors.
    /// </remarks>
    [AttributeUsage(AttributeTargets.Constructor, AllowMultiple = false)]
    public class ImportConstructorAttribute : Attribute
    {
    }
}

A unit test to display the use of this composition container, followed by the class definitions:

using NUnit.Framework;
using System;

namespace TestComposedIoCContainer
{
   
    [TestFixture]
    public class Program
    {

        [Test]
        public void MainTest()
        {
            var container = new CompositionContainer();
            container.Bind<ICalculator, Calculator>();
            container.Bind<IAdder, Adder>();
            container.Bind<IMultiplier, Multiplier>(); 

            var calculator = container.Resolve<ICalculator>();
            Console.WriteLine("Calculator resolved!");
            int resultAdd = calculator.Add(3, 5);
            Console.WriteLine("3 + 5 = {0}", resultAdd);

            int resultMult = calculator.Multiply(4, 8);
            Console.WriteLine("4 * 8 = {0}", resultMult);
        }

        public static void Main(string[] args)
        {

        }

    }

}


//CLASS DEFINITIONS

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace TestComposedIoCContainer
{
    
    /// <summary>
    /// ICalculator implementation that delegates Add/Multiply to injected parts.
    /// </summary>
    public class Calculator : ICalculator
    {

        private IAdder additionPart;

        private IMultiplier multiplicationPart;

        // Parameterless constructor retained; when used, Add/Multiply will fail
        // until the parts are supplied through the other constructor.
        public Calculator()
        {

        }

        // The constructor the IoC container selects when several are present.
        [ImportConstructor]
        public Calculator(IAdder adder, IMultiplier multiplier)
        {
            additionPart = adder;
            multiplicationPart = multiplier;
        }

        /// <summary>Adds two integers via the injected adder part.</summary>
        public int Add(int x, int y)
        {
            return additionPart.Add(x, y);
        }

        /// <summary>Multiplies two integers via the injected multiplier part.</summary>
        public int Multiply(int x, int y)
        {
            return multiplicationPart.Multiply(x, y);
        }

    }


namespace TestComposedIoCContainer
{
    
    /// <summary>Arithmetic facade resolved through the composition container.</summary>
    public interface ICalculator
    {

        /// <summary>Returns the sum of the two operands.</summary>
        int Add(int x, int y);

        /// <summary>Returns the product of the two operands.</summary>
        int Multiply(int x, int y); 

    }

}


}


namespace TestComposedIoCContainer
{
    
    /// <summary>Default IAdder part: plain integer addition.</summary>
    public class Adder : IAdder
    {

        /// <summary>Returns the sum of <paramref name="x"/> and <paramref name="y"/>.</summary>
        public int Add(int x, int y)
        {
            int sum = x + y;
            return sum;
        }
    }

}


namespace TestComposedIoCContainer
{
    
    /// <summary>Part contract for addition, injected into <see cref="Calculator"/>.</summary>
    public interface IAdder
    {

        /// <summary>Returns the sum of the two operands.</summary>
        int Add(int x, int y);

    }

}


namespace TestComposedIoCContainer
{
    
    /// <summary>Part contract for multiplication, injected into <see cref="Calculator"/>.</summary>
    public interface IMultiplier
    {

        /// <summary>Returns the product of the two operands.</summary>
        int Multiply(int x, int y);

    }

}


namespace TestComposedIoCContainer
{
    
    /// <summary>Default IMultiplier part: plain integer multiplication.</summary>
    public class Multiplier : IMultiplier
    {

        // Explicit parameterless constructor retained from the original sample.
        public Multiplier()
        {

        }

        /// <summary>Returns the product of <paramref name="x"/> and <paramref name="y"/>.</summary>
        public int Multiply(int x, int y)
        {
            int product = x * y;
            return product;
        }

    }

}

//RESULT OF RUNNING NUNIT UNIT TEST ABOVE: 

------ Test started: Assembly: TestComposedIoCContainer.exe ------

Calculator resolved!
3 + 5 = 8
4 * 8 = 32

1 passed, 0 failed, 0 skipped, took 0,46 seconds (NUnit 2.6.2).



The code above should only be used for simple IoC scenarios. The IoC container uses a lot of reflection, which should not be considered the quickest way of resolving objects. The IoC container code can, though, resolve arbitrarily complex object graphs when the registering of binds is done correctly. However, there is a weakness here - the container does not do loop detection very well. For example, if a "part" or "component" of the composition container imports another part and this part again resolves that part or some other recursive relation, looping will start to occur. But at the same time - many IoC frameworks are poor at detecting such errors anyway and will also give infinite recursion here. If you have simple needs for IoC resolution, the code above can be used in simple scenarios, but most production code should instead choose among the many professional IoC frameworks out there.