
Implement SSE2 32 bit assembly algorithm as well.

Con Kolivas · 14 years ago
commit 7dc3db2340
7 changed files with 479 additions and 7 deletions
  1. Makefile.am (+7 -1)
  2. configure.ac (+2 -1)
  3. main.c (+32 -5)
  4. miner.h (+10 -0)
  5. sha256_sse2_i386.c (+136 -0)
  6. x86_32/Makefile.am (+8 -0)
  7. x86_32/sha256_xmm.asm (+284 -0)

+ 7 - 1
Makefile.am

@@ -22,7 +22,7 @@ cgminer_SOURCES	= elist.h miner.h compat.h bench_block.h	\
 		  ocl.c ocl.h findnonce.c findnonce.h 		\
 		  sha256_generic.c sha256_4way.c sha256_via.c	\
 		  sha256_cryptopp.c sha256_sse2_amd64.c		\
-		  sha256_sse4_amd64.c \
+		  sha256_sse4_amd64.c sha256_sse2_i386.c		\
 		  phatk110817.cl poclbm110817.cl
 
 cgminer_LDFLAGS	= $(PTHREAD_FLAGS)
@@ -35,4 +35,10 @@ SUBDIRS		+= x86_64
 cgminer_LDADD	+= x86_64/libx8664.a
 AM_CFLAGS	= -DHAS_YASM
 endif
+else
+if HAS_YASM
+SUBDIRS		+= x86_32
+cgminer_LDADD	+= x86_32/libx8632.a
+AM_CFLAGS	= -DHAS_YASM
+endif
 endif

+ 2 - 1
configure.ac

@@ -119,7 +119,7 @@ if test "x$YASM" != "xfalse" ; then
  fi
 fi
 if test "x$has_yasm" = "xfalse" ; then
-  AC_MSG_NOTICE([yasm is required for the sse2_64 algorithm. It will be skipped.])
+  AC_MSG_NOTICE([yasm is required for the assembly algorithms. They will be skipped.])
 fi
 
 AM_CONDITIONAL([HAS_YASM], [test x$has_yasm = xtrue])
@@ -176,6 +176,7 @@ AC_CONFIG_FILES([
 	compat/Makefile
 	compat/jansson/Makefile
 	x86_64/Makefile
+	x86_32/Makefile
 	ccan/Makefile
 	lib/Makefile
 	])

+ 32 - 5
main.c

@@ -106,6 +106,7 @@ enum sha256_algos {
 	ALGO_VIA,		/* VIA padlock */
 	ALGO_CRYPTOPP,		/* Crypto++ (C) */
 	ALGO_CRYPTOPP_ASM32,	/* Crypto++ 32-bit assembly */
+	ALGO_SSE2_32,		/* SSE2 for x86_32 */
 	ALGO_SSE2_64,		/* SSE2 for x86_64 */
 	ALGO_SSE4_64,		/* SSE4 for x86_64 */
 };
@@ -142,6 +143,9 @@ static const char *algo_names[] = {
 #ifdef WANT_CRYPTOPP_ASM32
 	[ALGO_CRYPTOPP_ASM32]	= "cryptopp_asm32",
 #endif
+#ifdef WANT_X8632_SSE2
+	[ALGO_SSE2_32]		= "sse2_32",
+#endif
 #ifdef WANT_X8664_SSE2
 	[ALGO_SSE2_64]		= "sse2_64",
 #endif
@@ -163,6 +167,9 @@ static const sha256_func sha256_funcs[] = {
 #ifdef WANT_CRYPTOPP_ASM32
 	[ALGO_CRYPTOPP_ASM32]	= (sha256_func)scanhash_asm32,
 #endif
+#ifdef WANT_X8632_SSE2
+	[ALGO_SSE2_32]		= (sha256_func)scanhash_sse2_32,
+#endif
 #ifdef WANT_X8664_SSE2
 	[ALGO_SSE2_64]		= (sha256_func)scanhash_sse2_64,
 #endif
@@ -193,10 +200,10 @@ int opt_scantime = 60;
 int opt_bench_algo = -1;
 static const bool opt_time = true;
 static bool opt_restart = true;
-#if defined(WANT_X8664_SSE4) && defined(__SSE4_1__)
-static enum sha256_algos opt_algo = ALGO_SSE4_64;
-#elif defined(WANT_X8664_SSE2) && defined(__SSE2__)
+#if defined(WANT_X8664_SSE2) && defined(__SSE2__)
 static enum sha256_algos opt_algo = ALGO_SSE2_64;
+#elif defined(WANT_X8632_SSE2) && defined(__SSE2__)
+static enum sha256_algos opt_algo = ALGO_SSE2_32;
 #else
 static enum sha256_algos opt_algo = ALGO_C;
 #endif
@@ -818,6 +825,10 @@ static enum sha256_algos pick_fastest_algo()
 		bench_algo(&best_rate, &best_algo, ALGO_CRYPTOPP_ASM32);
 	#endif
 
+	#if defined(WANT_X8632_SSE2)
+		bench_algo(&best_rate, &best_algo, ALGO_SSE2_32);
+	#endif
+
 	#if defined(WANT_X8664_SSE2)
 		bench_algo(&best_rate, &best_algo, ALGO_SSE2_64);
 	#endif
@@ -1042,11 +1053,14 @@ static struct opt_table opt_config_table[] = {
 #ifdef WANT_CRYPTOPP_ASM32
 		     "\n\tcryptopp_asm32\tCrypto++ 32-bit assembler implementation"
 #endif
+#ifdef WANT_X8632_SSE2
+		     "\n\tsse2_32\t\tSSE2 32 bit implementation for i386 machines"
+#endif
 #ifdef WANT_X8664_SSE2
-		     "\n\tsse2_64\t\tSSE2 implementation for x86_64 machines"
+		     "\n\tsse2_64\t\tSSE2 64 bit implementation for x86_64 machines"
 #endif
 #ifdef WANT_X8664_SSE4
-		     "\n\tsse4_64\t\tSSE4 implementation for x86_64 machines"
+		     "\n\tsse4_64\t\tSSE4.1 64 bit implementation for x86_64 machines"
 #endif
 		),
 	OPT_WITH_ARG("--bench-algo|-b",
@@ -3305,6 +3319,19 @@ static void *miner_thread(void *userdata)
 					work->blk.nonce);
 			break;
 
+#ifdef WANT_X8632_SSE2
+		case ALGO_SSE2_32: {
+			unsigned int rc5 =
+			        scanhash_sse2_32(thr_id, work->midstate, work->data + 64,
+						 work->hash1, work->hash,
+						 work->target,
+					         max_nonce, &hashes_done,
+						 work->blk.nonce);
+			rc = (rc5 == -1) ? false : true;
+			}
+			break;
+#endif
+
 #ifdef WANT_X8664_SSE2
 		case ALGO_SSE2_64: {
 			unsigned int rc5 =

+ 10 - 0
miner.h

@@ -55,6 +55,10 @@ void *alloca (size_t);
 #define WANT_SSE2_4WAY 1
 #endif
 
+#if defined(__i386__) && defined(HAS_YASM)
+#define WANT_X8632_SSE2 1
+#endif
+
 #if defined(__i386__) || defined(__x86_64__)
 #define WANT_VIA_PADLOCK 1
 #endif
@@ -297,6 +301,12 @@ extern int scanhash_sse4_64(int, const unsigned char *pmidstate, unsigned char *
 	uint32_t max_nonce, unsigned long *nHashesDone,
 	uint32_t nonce);
 
+extern int scanhash_sse2_32(int, const unsigned char *pmidstate, unsigned char *pdata,
+	unsigned char *phash1, unsigned char *phash,
+	const unsigned char *ptarget,
+	uint32_t max_nonce, unsigned long *nHashesDone,
+	uint32_t nonce);
+
 extern int
 timeval_subtract (struct timeval *result, struct timeval *x, struct timeval *y);
 

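For context, a minimal sketch of how this prototype is meant to be driven, modelled on the ALGO_SSE2_32 case added to miner_thread() above. The wrapper name scan_once_sse2_32 is hypothetical and not part of the commit; per the diff, scanhash_sse2_32() returns -1 once max_nonce is reached or a thread restart is flagged, and otherwise returns the winning nonce (which it has also written back into the data block):

#include <stdbool.h>
#include <stdint.h>
#include "miner.h"

/* Hypothetical wrapper (not in this commit) showing the calling
 * convention: returns true if a nonce passing the target was found. */
static bool scan_once_sse2_32(int thr_id, struct work *work,
			      uint32_t max_nonce, unsigned long *hashes_done)
{
	int rc5 = scanhash_sse2_32(thr_id, work->midstate,
				   work->data + 64,	/* 2nd half of the 128-byte header */
				   work->hash1, work->hash,
				   work->target,
				   max_nonce, hashes_done,
				   work->blk.nonce);

	return rc5 != -1;	/* -1: nonce range exhausted or restarted */
}
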
+ 136 - 0
sha256_sse2_i386.c

@@ -0,0 +1,136 @@
+/*
+ * SHA-256 driver for ASM routine for x86 (32-bit) on Linux
+ * Copyright (c) Mark Crichton <crichton@gimp.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include "config.h"
+
+#include "miner.h"
+
+#ifdef WANT_X8632_SSE2
+
+#include <string.h>
+#include <assert.h>
+
+#include <xmmintrin.h>
+#include <stdint.h>
+#include <stdio.h>
+
+extern void CalcSha256_x86 (__m128i *res, __m128i *data, const uint32_t init[8])__attribute__((fastcall));
+
+static uint32_t g_sha256_k[]__attribute__((aligned(0x100))) = {
+    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, /*  0 */
+    0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, /*  8 */
+    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, /* 16 */
+    0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, /* 24 */
+    0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, /* 32 */
+    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, /* 40 */
+    0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, /* 48 */
+    0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, /* 56 */
+    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+};
+
+
+const uint32_t sha256_32init[8]__attribute__((aligned(0x100))) =
+{0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
+
+__m128i g_4sha256_k[64];
+__m128i sha256_consts_m128i[64]__attribute__((aligned(0x1000)));
+
+int scanhash_sse2_32(int thr_id, const unsigned char *pmidstate,
+	unsigned char *pdata,
+	unsigned char *phash1, unsigned char *phash,
+	const unsigned char *ptarget,
+	uint32_t max_nonce, unsigned long *nHashesDone,
+	uint32_t nonce)
+{
+    uint32_t *nNonce_p = (uint32_t *)(pdata + 12);
+    uint32_t m_midstate[8], m_w[16], m_w1[16];
+    __m128i m_4w[64] __attribute__ ((aligned (0x100)));
+    __m128i m_4hash[64] __attribute__ ((aligned (0x100)));
+    __m128i m_4hash1[64] __attribute__ ((aligned (0x100)));
+    __m128i offset;
+    int i;
+
+    work_restart[thr_id].restart = 0;
+
+    /* Lane view of an SSE register, used to extract per-nonce results */
+    union {
+        __m128i m;
+        uint32_t i[4];
+    } mi;
+
+    /* Message expansion */
+    memcpy(m_midstate, pmidstate, sizeof(m_midstate));
+    memcpy(m_w, pdata, sizeof(m_w)); /* The 2nd half of the data */
+    memcpy(m_w1, phash1, sizeof(m_w1));
+    memset(m_4hash, 0, sizeof(m_4hash));
+
+    /* Transmongrify */
+    for (i = 0; i < 16; i++)
+        m_4w[i] = _mm_set1_epi32(m_w[i]);
+
+    for (i = 0; i < 16; i++)
+        m_4hash1[i] = _mm_set1_epi32(m_w1[i]);
+
+    for (i = 0; i < 64; i++)
+	sha256_consts_m128i[i] = _mm_set1_epi32(g_sha256_k[i]);
+
+    offset = _mm_set_epi32(0x3, 0x2, 0x1, 0x0);
+
+    for (;;)
+    {
+	int j;
+
+	m_4w[3] = _mm_add_epi32(offset, _mm_set1_epi32(nonce));
+
+	/* Some optimization can be done here W.R.T. precalculating some hash */
+	CalcSha256_x86 (m_4hash1, m_4w, m_midstate);
+	CalcSha256_x86 (m_4hash, m_4hash1, sha256_32init);
+
+	for (j = 0; j < 4; j++) {
+	    mi.m = m_4hash[7];
+	    if (unlikely(mi.i[j] == 0))
+		break;
+        }
+
+	/* If j != 4, lane j is a possible hit...so check it */
+	/* Use the C version for a check... */
+	if (unlikely(j != 4)) {
+		for (i = 0; i < 8; i++) {
+		    mi.m = m_4hash[i];
+		    *(uint32_t *)&(phash)[i*4] = mi.i[j];
+		}
+
+		if (fulltest(phash, ptarget)) {
+		     *nHashesDone = nonce;
+		     *nNonce_p = nonce + j;
+		     return nonce + j;
+		}
+	}
+
+	nonce += 4;
+
+        if (unlikely((nonce >= max_nonce) || work_restart[thr_id].restart))
+        {
+            *nHashesDone = nonce;
+            return -1;
+	}
+   }
+}
+
+#endif /* WANT_X8632_SSE2 */
+
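The driver above hashes four candidate nonces per pass: every message word is broadcast to all four SSE lanes with _mm_set1_epi32(), and only word 3 of the second half (the nonce) differs per lane, via the {3,2,1,0} offset vector. A standalone sketch of that lane layout (illustrative only; build with -msse2 -std=gnu99):

#include <emmintrin.h>	/* SSE2 intrinsics */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nonce = 0x12345600;	/* arbitrary example value */

	/* Same construction as scanhash_sse2_32(): lane i ends up holding
	 * nonce + i, so one CalcSha256_x86() call tests nonces
	 * n, n+1, n+2 and n+3 in parallel. */
	__m128i offset = _mm_set_epi32(0x3, 0x2, 0x1, 0x0);
	__m128i lanes  = _mm_add_epi32(offset, _mm_set1_epi32(nonce));

	union { __m128i m; uint32_t i[4]; } mi;
	mi.m = lanes;
	for (int j = 0; j < 4; j++)
		printf("lane %d: nonce %08x\n", j, mi.i[j]);	/* n .. n+3 */
	return 0;
}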

+ 8 - 0
x86_32/Makefile.am

@@ -0,0 +1,8 @@
+noinst_LIBRARIES	= libx8632.a
+
+SUFFIXES = .asm
+
+libx8632_a_SOURCES	= sha256_xmm.asm
+
+.asm.o:
+	$(YASM) -f elf32 $<

+ 284 - 0
x86_32/sha256_xmm.asm

@@ -0,0 +1,284 @@
+;; SHA-256 for x86 (32-bit) on Linux, based on:
+
+; (c) Ufasoft 2011 http://ufasoft.com mailto:support@ufasoft.com
+; Version 2011
+; This software is Public Domain
+
+; SHA-256 CPU SSE cruncher for Bitcoin Miner
+
+ALIGN 32
+BITS 32
+
+%define hash ecx
+%define data edx
+%define init esi
+
+; 0 = (1024 - 256) (mod (LAB_CALC_UNROLL*LAB_CALC_PARA*16))
+%define LAB_CALC_PARA	2
+%define LAB_CALC_UNROLL	8
+
+%define LAB_LOOP_UNROLL 8
+
+extern sha256_consts_m128i
+
+global CalcSha256_x86
+;	CalcSha256	hash(ecx), data(edx), init([esp+4])
+CalcSha256_x86:
+	push	esi
+	push	edi
+	mov	init, [esp+12]
+
+	push	ebx
+
+LAB_NEXT_NONCE:
+
+	mov	eax, 64*4					; 256 - eax is # of SHA-2 rounds
+	mov	ebx, 16*4					; 64 - ebx is where we expand to
+
+LAB_SHA:
+	push	eax
+	lea	eax, qword [data+eax*4]				; + 1024
+	lea	edi, qword [data+ebx*4]				; + 256
+
+LAB_CALC:
+%macro	lab_calc_blk 1
+	movdqa	xmm0, [edi-(15-%1)*16]				; xmm0 = W[I-15]
+	movdqa	xmm4, [edi-(15-(%1+1))*16]			; xmm4 = W[I-15+1]
+	movdqa	xmm2, xmm0					; xmm2 = W[I-15]
+	movdqa	xmm6, xmm4					; xmm6 = W[I-15+1]
+	psrld	xmm0, 3						; xmm0 = W[I-15] >> 3
+	psrld	xmm4, 3						; xmm4 = W[I-15+1] >> 3
+	movdqa	xmm1, xmm0					; xmm1 = W[I-15] >> 3
+	movdqa	xmm5, xmm4					; xmm5 = W[I-15+1] >> 3
+	pslld	xmm2, 14					; xmm2 = W[I-15] << 14
+	pslld	xmm6, 14					; xmm6 = W[I-15+1] << 14
+	psrld	xmm1, 4						; xmm1 = W[I-15] >> 7
+	psrld	xmm5, 4						; xmm5 = W[I-15+1] >> 7
+	pxor	xmm0, xmm1					; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7)
+	pxor	xmm4, xmm5					; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7)
+	psrld	xmm1, 11					; xmm1 = W[I-15] >> 18
+	psrld	xmm5, 11					; xmm5 = W[I-15+1] >> 18
+	pxor	xmm0, xmm2					; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14)
+	pxor	xmm4, xmm6					; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14)
+	pslld	xmm2, 11					; xmm2 = W[I-15] << 25
+	pslld	xmm6, 11					; xmm6 = W[I-15+1] << 25
+	pxor	xmm0, xmm1					; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18)
+	pxor	xmm4, xmm5					; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18)
+	pxor	xmm0, xmm2					; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) ^ (W[I-15] << 25)
+	pxor	xmm4, xmm6					; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) ^ (W[I-15+1] << 25)
+
+	movdqa	xmm3, [edi-(2-%1)*16]				; xmm3 = W[I-2]
+	movdqa	xmm7, [edi-(2-(%1+1))*16]			; xmm7 = W[I-2+1]
+
+	paddd	xmm0, [edi-(16-%1)*16]				; xmm0 = s0(W[I-15]) + W[I-16]
+	paddd	xmm4, [edi-(16-(%1+1))*16]			; xmm4 = s0(W[I-15+1]) + W[I-16+1]
+
+;;;;;;;;;;;;;;;;;;
+
+	movdqa	xmm2, xmm3					; xmm2 = W[I-2]
+	movdqa	xmm6, xmm7					; xmm6 = W[I-2+1]
+	psrld	xmm3, 10					; xmm3 = W[I-2] >> 10
+	psrld	xmm7, 10					; xmm7 = W[I-2+1] >> 10
+	movdqa	xmm1, xmm3					; xmm1 = W[I-2] >> 10
+	movdqa	xmm5, xmm7					; xmm5 = W[I-2+1] >> 10
+
+	paddd	xmm0, [edi-(7-%1)*16]				; xmm0 = s0(W[I-15]) + W[I-16] + W[I-7]
+
+	pslld	xmm2, 13					; xmm2 = W[I-2] << 13
+	pslld	xmm6, 13					; xmm6 = W[I-2+1] << 13
+	psrld	xmm1, 7						; xmm1 = W[I-2] >> 17
+	psrld	xmm5, 7						; xmm5 = W[I-2+1] >> 17
+
+	paddd	xmm4, [edi-(7-(%1+1))*16]			; xmm4 = s0(W[I-15+1]) + W[I-16+1] + W[I-7+1]
+
+	pxor	xmm3, xmm1					; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17)
+	pxor	xmm7, xmm5					; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17)
+	psrld	xmm1, 2						; xmm1 = W[I-2] >> 19
+	psrld	xmm5, 2						; xmm5 = W[I-2+1] >> 19
+	pxor	xmm3, xmm2					; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13)
+	pxor	xmm7, xmm6					; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13)
+	pslld	xmm2, 2						; xmm2 = W[I-2] << 15
+	pslld	xmm6, 2						; xmm6 = W[I-2+1] << 15
+	pxor	xmm3, xmm1					; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19)
+	pxor	xmm7, xmm5					; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19)
+	pxor	xmm3, xmm2					; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) ^ (W[I-2] << 15)
+	pxor	xmm7, xmm6					; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) ^ (W[I-2+1] << 15)
+
+	paddd	xmm0, xmm3					; xmm0 = s0(W[I-15]) + W[I-16] + s1(W[I-2]) + W[I-7]
+	paddd	xmm4, xmm7					; xmm4 = s0(W[I-15+1]) + W[I-16+1] + s1(W[I-2+1]) + W[I-7+1]
+	movdqa	[edi+(%1*16)], xmm0
+	movdqa	[edi+((%1+1)*16)], xmm4
+%endmacro
+
+%assign i 0
+%rep    LAB_CALC_UNROLL
+        lab_calc_blk i
+%assign i i+LAB_CALC_PARA
+%endrep
+
+	add	edi, LAB_CALC_UNROLL*LAB_CALC_PARA*16
+	cmp	edi, eax
+	jb	LAB_CALC
+
+	pop	eax
+	mov	ebx, 0
+
+; Load the init values of the message into the hash.
+
+	movdqa	xmm7, [init]
+	pshufd	xmm5, xmm7, 0x55		; xmm5 == b
+	pshufd	xmm4, xmm7, 0xAA		; xmm4 == c
+	pshufd	xmm3, xmm7, 0xFF		; xmm3 == d
+	pshufd	xmm7, xmm7, 0			; xmm7 == a
+
+	movdqa	xmm0, [init+4*4]
+	pshufd	xmm1, xmm0, 0x55		; [hash+0*16] == f
+	movdqa	[hash+0*16], xmm1
+
+	pshufd	xmm1, xmm0, 0xAA		; [hash+1*16] == g
+	movdqa	[hash+1*16], xmm1
+
+	pshufd	xmm1, xmm0, 0xFF		; [hash+2*16] == h
+	movdqa	[hash+2*16], xmm1
+
+	pshufd	xmm0, xmm0, 0			; xmm0 == e
+
+LAB_LOOP:
+
+;; T t1 = h + (Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)) + ((e & f) ^ AndNot(e, g)) + Expand32<T>(g_sha256_k[j]) + w[j]
+
+%macro	lab_loop_blk 0
+	movdqa	xmm6, [data+ebx*4]
+	paddd	xmm6, [sha256_consts_m128i+ebx*4]	; w[i] + k[i]
+	add	ebx, 4
+
+	paddd	xmm6, [hash+2*16]		; +h
+
+	movdqa	xmm1, xmm0
+	movdqa	xmm2, [hash+1*16]
+	pandn	xmm1, xmm2	; ~e & g
+
+	movdqa	[hash+2*16], xmm2		; h = g
+	movdqa	xmm2, [hash+0*16]		; f
+	movdqa	[hash+1*16], xmm2		; g = f
+
+
+	pand	xmm2, xmm0	; e & f
+	pxor	xmm1, xmm2	; (e & f) ^ (~e & g)
+	movdqa	[hash+0*16], xmm0		; f = e
+
+	paddd	xmm6, xmm1	; Ch + h + w[i] + k[i]
+
+	movdqa	xmm1, xmm0
+	psrld	xmm0, 6
+	movdqa	xmm2, xmm0
+	pslld	xmm1, 7
+	psrld	xmm2, 5
+	pxor	xmm0, xmm1
+	pxor	xmm0, xmm2
+	pslld	xmm1, 14
+	psrld	xmm2, 14
+	pxor	xmm0, xmm1
+	pxor	xmm0, xmm2
+	pslld	xmm1, 5
+	pxor	xmm0, xmm1	; Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)
+	paddd	xmm6, xmm0	; xmm6 = t1
+
+	movdqa	xmm0, xmm3	; d
+	paddd	xmm0, xmm6	; e = d+t1
+
+	movdqa	xmm1, xmm5	; =b
+	movdqa	xmm3, xmm4	; d = c
+	movdqa	xmm2, xmm4	; c
+	pand	xmm2, xmm5	; b & c
+	pand	xmm4, xmm7	; a & c
+	pand	xmm1, xmm7	; a & b
+	pxor	xmm1, xmm4
+	movdqa	xmm4, xmm5	; c = b
+	movdqa	xmm5, xmm7	; b = a
+	pxor	xmm1, xmm2	; Maj = (a & b) ^ (a & c) ^ (b & c)
+	paddd	xmm6, xmm1	; t1 + ((a & b) ^ (a & c) ^ (b & c))
+
+	movdqa	xmm2, xmm7
+	psrld	xmm7, 2
+	movdqa	xmm1, xmm7
+	pslld	xmm2, 10
+	psrld	xmm1, 11
+	pxor	xmm7, xmm2
+	pxor	xmm7, xmm1
+	pslld	xmm2, 9
+	psrld	xmm1, 9
+	pxor	xmm7, xmm2
+	pxor	xmm7, xmm1
+	pslld	xmm2, 11
+	pxor	xmm7, xmm2
+	paddd	xmm7, xmm6	; a = t1 + (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & b) ^ (a & c) ^ (b & c));
+%endmacro
+
+%assign i 0
+%rep    LAB_LOOP_UNROLL
+        lab_loop_blk
+%assign i i+1
+%endrep
+
+	cmp	ebx, eax
+	jb	LAB_LOOP
+
+; Finished the 64 rounds, calculate hash and save
+
+	movdqa	xmm1, [init]
+	pshufd	xmm2, xmm1, 0x55
+	pshufd	xmm6, xmm1, 0xAA
+	movdqa	[hash+3*16], xmm6
+	pshufd	xmm6, xmm1, 0xFF
+	movdqa	[hash+4*16], xmm6
+	pshufd	xmm1, xmm1, 0
+
+	paddd	xmm5, xmm2
+	paddd	xmm4, [hash+3*16]
+	paddd	xmm3, [hash+4*16]
+	paddd	xmm7, xmm1
+
+	movdqa	xmm1, [init+4*4]
+	pshufd	xmm2, xmm1, 0x55
+	pshufd	xmm6, xmm1, 0xAA
+	movdqa	[hash+3*16], xmm6
+	pshufd	xmm6, xmm1, 0xFF
+	movdqa	[hash+4*16], xmm6
+	pshufd	xmm1, xmm1, 0
+
+	movdqa	xmm6, [hash+0*16]
+	paddd	xmm2, xmm6
+	movdqa	[hash+0*16], xmm2
+
+
+	movdqa	xmm2, [hash+3*16]
+	movdqa	xmm6, [hash+1*16]
+	paddd	xmm2, xmm6
+	movdqa	[hash+1*16], xmm2
+
+	movdqa	xmm2, [hash+4*16]
+	movdqa	xmm6, [hash+2*16]
+	paddd	xmm2, xmm6
+	movdqa	[hash+2*16], xmm2
+
+	paddd	xmm0, xmm1
+
+	movdqa	xmm1, [hash+0*16]
+	movdqa	xmm2, [hash+1*16]
+	movdqa	xmm6, [hash+2*16]
+
+	movdqa	[hash+0*16], xmm7
+	movdqa	[hash+1*16], xmm5
+	movdqa	[hash+2*16], xmm4
+	movdqa	[hash+3*16], xmm3
+	movdqa	[hash+4*16], xmm0
+	movdqa	[hash+5*16], xmm1
+	movdqa	[hash+6*16], xmm2
+	movdqa	[hash+7*16], xmm6
+
+LAB_RET:
+	pop	ebx
+	pop	edi
+	pop	esi
+	retn	4
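A note on the shift sequences above: SSE2 has no packed-rotate instruction, so each Rotr32 is synthesized from a right shift plus the complementary left shift, and the three rotations of each sigma are folded together by reusing the partially shifted xmm1/xmm2 registers. A scalar C sketch of the identity the LAB_LOOP code relies on (helper names are illustrative, not from the commit):

#include <assert.h>
#include <stdint.h>

/* Rotate right built from two shifts: the bit ranges are disjoint,
 * so OR and XOR give the same result. Valid for 0 < n < 32. */
static uint32_t rotr32(uint32_t x, unsigned n)
{
	return (x >> n) | (x << (32 - n));
}

int main(void)
{
	uint32_t e = 0x510e527f;	/* initial SHA-256 'e' */

	/* S1(e) = Rotr32(e,6) ^ Rotr32(e,11) ^ Rotr32(e,25), as the
	 * comment before LAB_LOOP states. */
	uint32_t s1 = rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);

	/* What the psrld/pslld/pxor sequence computes, shift by shift:
	 * e>>6 ^ e<<26 is Rotr(e,6); e>>11 ^ e<<21 is Rotr(e,11);
	 * e>>25 ^ e<<7 is Rotr(e,25). */
	uint32_t via_shifts = (e >> 6)  ^ (e << 26)
			    ^ (e >> 11) ^ (e << 21)
			    ^ (e >> 25) ^ (e << 7);

	assert(s1 == via_shifts);	/* identical results */
	return 0;
}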