
Diff of /sourceforge.net/trunk/rdesktop/crypto/md5_locl.h


revision 31 by matty, Tue Aug 15 10:32:09 2000 UTC → revision 32 by matty, Sat Sep 15 09:37:17 2001 UTC
# Line 1  Line 1
  /* crypto/md5/md5_locl.h */
- /* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com)
+ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
   * All rights reserved.
   *
   * This package is an SSL implementation written
# Line 56  Line 56
   * [including the GNU Public Licence.]
   */

- /* On sparc, this actually slows things down :-( */
- #if defined(sun)
- #undef B_ENDIAN
- #endif
-
  #include <stdlib.h>
  #include <string.h>
  #include "md5.h"

- #define ULONG   unsigned long
- #define UCHAR   unsigned char
- #define UINT    unsigned int
-
- #if defined(NOCONST)
- #define const
- #endif
+ #ifndef MD5_LONG_LOG2
+ #define MD5_LONG_LOG2 2 /* default to 32 bits */
+ #endif
+
+ #ifdef MD5_ASM
+ # if defined(__i386) || defined(_M_IX86) || defined(__INTEL__)
+ #  define md5_block_host_order md5_block_asm_host_order
+ # elif defined(__sparc) && defined(ULTRASPARC)
+    void md5_block_asm_data_order_aligned (MD5_CTX *c, const MD5_LONG *p,int num);
+ #  define HASH_BLOCK_DATA_ORDER_ALIGNED md5_block_asm_data_order_aligned
+ # endif
+ #endif
+
+ void md5_block_host_order (MD5_CTX *c, const void *p,int num);
+ void md5_block_data_order (MD5_CTX *c, const void *p,int num);
+
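For context: MD5_LONG_LOG2 is the base-2 logarithm of sizeof(MD5_LONG), so the default of 2 selects a 4-byte, i.e. 32-bit, word, matching the inline comment. A minimal sanity check of that relation, assuming a platform where MD5_LONG is typedef'd to a 32-bit unsigned int (the typedef below is a hypothetical, not taken from the patch):

    /* Illustration only: checks the relation the macro encodes.
     * The MD5_LONG typedef is an assumed 32-bit choice. */
    #include <assert.h>

    typedef unsigned int MD5_LONG;          /* assumed 32-bit type */
    #define MD5_LONG_LOG2 2                 /* log2 of bytes per word */

    int main(void)
    {
            assert(sizeof(MD5_LONG) == (1 << MD5_LONG_LOG2));
            return 0;
    }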
+ #if defined(__i386) || defined(_M_IX86) || defined(__INTEL__)
+ /*
+  * *_block_host_order is expected to handle aligned data, while
+  * *_block_data_order handles unaligned data. As the algorithm and
+  * the host (x86) are in this case of the same "endianness," the
+  * two are otherwise indistinguishable. But normally you don't want
+  * to call the same function, because unaligned access in places
+  * where alignment is expected is usually a "Bad Thing." Indeed,
+  * on RISCs you get punished with a BUS ERROR signal or *severe*
+  * performance degradation. Intel CPUs are in turn perfectly
+  * capable of loading unaligned data without such drastic side
+  * effects. Yes, they say it's slower than an aligned load, but no
+  * exception is generated, so the performance degradation is
+  * *incomparable* with RISCs. What we should weigh here is the
+  * cost of unaligned access against the cost of aligning data.
+  * According to my measurements, allowing unaligned access yields
+  * a ~9% performance improvement on a Pentium II running at
+  * 266MHz. I won't be surprised if the difference is higher
+  * on faster systems:-)
+  *
+  *                              <appro@fy.chalmers.se>
+  */
+ #define md5_block_data_order md5_block_host_order
+ #endif
+
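To make the distinction concrete, here is a sketch (illustrative stand-ins, not the real block functions) contrasting the byte-by-byte load that md5_block_data_order must use with the direct word load that md5_block_host_order may assume. On x86 the direct load also tolerates misalignment, which is why the header aliases one function to the other:

    /* Sketch under the comment's assumptions; not from the patch. */
    static unsigned long load_le32_any_alignment(const unsigned char *p)
    {
            /* Byte-by-byte assembly: safe on strict-alignment RISC
             * CPUs, where a misaligned word load raises SIGBUS. */
            return  (unsigned long)p[0]        |
                   ((unsigned long)p[1] <<  8) |
                   ((unsigned long)p[2] << 16) |
                   ((unsigned long)p[3] << 24);
    }

    static unsigned long load_le32_aligned(const unsigned int *p)
    {
            /* Direct word load: valid only for aligned p in portable
             * C, and little-endian-correct only on a little-endian
             * host -- exactly the x86 case discussed above. */
            return (unsigned long)*p & 0xffffffffUL;
    }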
- #undef c2l
- #define c2l(c,l)        (l = ((unsigned long)(*((c)++)))     , \
-                          l|=(((unsigned long)(*((c)++)))<< 8), \
-                          l|=(((unsigned long)(*((c)++)))<<16), \
-                          l|=(((unsigned long)(*((c)++)))<<24))
-
- #undef p_c2l
- #define p_c2l(c,l,n)    { \
-                         switch (n) { \
-                         case 0: l =((unsigned long)(*((c)++))); \
-                         case 1: l|=((unsigned long)(*((c)++)))<< 8; \
-                         case 2: l|=((unsigned long)(*((c)++)))<<16; \
-                         case 3: l|=((unsigned long)(*((c)++)))<<24; \
-                                 } \
-                         }
-
- /* NOTE the pointer is not incremented at the end of this */
- #undef c2l_p
- #define c2l_p(c,l,n)    { \
-                         l=0; \
-                         (c)+=n; \
-                         switch (n) { \
-                         case 3: l =((unsigned long)(*(--(c))))<<16; \
-                         case 2: l|=((unsigned long)(*(--(c))))<< 8; \
-                         case 1: l|=((unsigned long)(*(--(c))))    ; \
-                                 } \
-                         }
-
- #undef p_c2l_p
- #define p_c2l_p(c,l,sc,len) { \
-                         switch (sc) \
-                                 { \
-                         case 0: l =((unsigned long)(*((c)++))); \
-                                 if (--len == 0) break; \
-                         case 1: l|=((unsigned long)(*((c)++)))<< 8; \
-                                 if (--len == 0) break; \
-                         case 2: l|=((unsigned long)(*((c)++)))<<16; \
-                                 } \
-                         }
-
- #undef l2c
- #define l2c(l,c)        (*((c)++)=(unsigned char)(((l)    )&0xff), \
-                          *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
-                          *((c)++)=(unsigned char)(((l)>>16)&0xff), \
-                          *((c)++)=(unsigned char)(((l)>>24)&0xff))
-
- /* NOTE - c is not incremented as per l2c */
- #undef l2cn
- #define l2cn(l1,l2,c,n) { \
-                         c+=n; \
-                         switch (n) { \
-                         case 8: *(--(c))=(unsigned char)(((l2)>>24)&0xff); \
-                         case 7: *(--(c))=(unsigned char)(((l2)>>16)&0xff); \
-                         case 6: *(--(c))=(unsigned char)(((l2)>> 8)&0xff); \
-                         case 5: *(--(c))=(unsigned char)(((l2)    )&0xff); \
-                         case 4: *(--(c))=(unsigned char)(((l1)>>24)&0xff); \
-                         case 3: *(--(c))=(unsigned char)(((l1)>>16)&0xff); \
-                         case 2: *(--(c))=(unsigned char)(((l1)>> 8)&0xff); \
-                         case 1: *(--(c))=(unsigned char)(((l1)    )&0xff); \
-                                 } \
-                         }
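The whole c2l/l2c family becomes redundant because md32_common.h supplies equivalent HOST_c2l/HOST_l2c macros. For reference, a self-contained round trip through the two basic macros, copied verbatim from the removed lines; both read and write the least-significant byte first, i.e. little-endian:

    /* Round trip through the removed c2l/l2c macros. */
    #include <assert.h>

    #define c2l(c,l)        (l = ((unsigned long)(*((c)++)))     , \
                             l|=(((unsigned long)(*((c)++)))<< 8), \
                             l|=(((unsigned long)(*((c)++)))<<16), \
                             l|=(((unsigned long)(*((c)++)))<<24))

    #define l2c(l,c)        (*((c)++)=(unsigned char)(((l)    )&0xff), \
                             *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
                             *((c)++)=(unsigned char)(((l)>>16)&0xff), \
                             *((c)++)=(unsigned char)(((l)>>24)&0xff))

    int main(void)
    {
            unsigned char buf[4] = { 0x78, 0x56, 0x34, 0x12 };
            unsigned char out[4], *p = buf, *q = out;
            unsigned long l;

            c2l(p, l);                      /* l == 0x12345678 */
            assert(l == 0x12345678UL);
            l2c(l, q);                      /* out == buf again */
            assert(out[0] == 0x78 && out[3] == 0x12);
            return 0;
    }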
   
- /* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
- #if defined(WIN32)
- /* 5 instructions with rotate instruction, else 9 */
- #define Endian_Reverse32(a) \
-         { \
-         unsigned long l=(a); \
-         (a)=((ROTATE(l,8)&0x00FF00FF)|(ROTATE(l,24)&0xFF00FF00)); \
-         }
- #else
- /* 6 instructions with rotate instruction, else 8 */
- #define Endian_Reverse32(a) \
-         { \
-         unsigned long l=(a); \
-         l=(((l&0xFF00FF00)>>8L)|((l&0x00FF00FF)<<8L)); \
-         (a)=ROTATE(l,16L); \
-         }
- #endif
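The two removed Endian_Reverse32 variants perform the same byte swap; the rotate-based WIN32 form rewards a worked example. A standalone demonstration (the ROTATE definition is repeated so the snippet compiles on its own):

    /* Standalone demo of the rotate-based byte swap removed above. */
    #include <stdio.h>

    #define ROTATE(a,n) (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))

    int main(void)
    {
            unsigned long l = 0x11223344UL;
            /* Rotating by 8 and by 24 and masking alternate byte
             * lanes swaps all four bytes: two rotates, two ANDs,
             * one OR. */
            l = (ROTATE(l,8)&0x00FF00FF)|(ROTATE(l,24)&0xFF00FF00);
            printf("%08lx\n", l);           /* prints 44332211 */
            return 0;
    }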
+ #define DATA_ORDER_IS_LITTLE_ENDIAN
+
+ #define HASH_LONG               MD5_LONG
+ #define HASH_LONG_LOG2          MD5_LONG_LOG2
+ #define HASH_CTX                MD5_CTX
+ #define HASH_CBLOCK             MD5_CBLOCK
+ #define HASH_LBLOCK             MD5_LBLOCK
+ #define HASH_UPDATE             MD5_Update
+ #define HASH_TRANSFORM          MD5_Transform
+ #define HASH_FINAL              MD5_Final
+ #define HASH_MAKE_STRING(c,s)   do {    \
+         unsigned long ll;               \
+         ll=(c)->A; HOST_l2c(ll,(s));    \
+         ll=(c)->B; HOST_l2c(ll,(s));    \
+         ll=(c)->C; HOST_l2c(ll,(s));    \
+         ll=(c)->D; HOST_l2c(ll,(s));    \
+         } while (0)
+ #define HASH_BLOCK_HOST_ORDER   md5_block_host_order
+ #if !defined(L_ENDIAN) || defined(md5_block_data_order)
+ #define HASH_BLOCK_DATA_ORDER   md5_block_data_order
+ /*
+  * Little-endians (Intel and Alpha) feel better without this.
+  * It looks like memcpy does a better job than the generic
+  * md5_block_data_order at copying and aligning input data.
+  * Frankly speaking, I didn't expect such a result on Alpha.
+  * On the other hand, I got this with egcs-1.0.2, and if the
+  * program is compiled with another (better?) compiler it
+  * might turn out the other way around.
+  *
+  *                              <appro@fy.chalmers.se>
+  */
+ #endif

+ #include "md32_common.h"
+
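These HASH_* names drive a template: md32_common.h expands one shared Merkle-Damgård implementation into the public MD5_Update/MD5_Transform/MD5_Final under whatever names the including header defines. A deliberately stripped-down sketch of the mechanism (the real md32_common.h is far more elaborate):

    /* Sketch of the md32_common.h "template" idea; simplified. */
    typedef struct { unsigned long A, B, C, D; } MD5_CTX;

    /* The digest-specific header picks the names... */
    #define HASH_CTX    MD5_CTX
    #define HASH_UPDATE MD5_Update

    /* ...and the shared header expands a generic body under them, so
     * this defines int MD5_Update(MD5_CTX *, const void *, ...). */
    int HASH_UPDATE(HASH_CTX *c, const void *data, unsigned long len)
    {
            /* generic buffering and block dispatch, shared by MD5,
             * SHA-1, RIPEMD-160, ... (omitted in this sketch) */
            (void)c; (void)data; (void)len;
            return 1;
    }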
  /*
  #define F(x,y,z)        (((x) & (y))  |  ((~(x)) & (z)))
  #define G(x,y,z)        (((x) & (z))  |  ((y) & (~(z))))
  */

  /* As pointed out by Wei Dai <weidai@eskimo.com>, the above can be
-  * simplified to the code below.  Wei attributes these optimisations
+  * simplified to the code below.  Wei attributes these optimizations
   * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel.
   */
  #define F(b,c,d)        ((((c) ^ (d)) & (b)) ^ (d))
# Line 166  Line 150 
  #define H(b,c,d)        ((b) ^ (c) ^ (d))
  #define I(b,c,d)        (((~(d)) | (b)) ^ (c))

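Wei Dai's rewrite can be checked mechanically: both forms are bitwise, so verifying all eight single-bit combinations of (b,c,d) proves equality for full 32-bit words. An illustrative check, not from the source:

    /* Verifies the xor form of F against the textbook select form. */
    #include <assert.h>

    #define F_CLASSIC(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
    #define F(b,c,d)         ((((c) ^ (d)) & (b)) ^ (d))

    int main(void)
    {
            unsigned b, c, d;
            for (b = 0; b < 2; b++)
                    for (c = 0; c < 2; c++)
                            for (d = 0; d < 2; d++)
                                    assert((F(b,c,d) & 1) ==
                                           (F_CLASSIC(b,c,d) & 1));
            return 0;
    }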
- #undef ROTATE
- #if defined(WIN32)
- #define ROTATE(a,n)     _lrotl(a,n)
- #else
- #define ROTATE(a,n)     (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
- #endif
   
   
  #define R0(a,b,c,d,k,s,t) { \
          a+=((k)+(t)+F((b),(c),(d))); \
          a=ROTATE(a,s); \
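The diff is cut off inside R0, but the shape of the step is the standard one from RFC 1321: a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). A hedged reconstruction of the complete macro under that definition, not a quote of the lines the viewer omits:

    /* Reconstruction per RFC 1321: after the rotate, the round
     * step finishes by adding b.  Assumes F and ROTATE as above. */
    #define R0(a,b,c,d,k,s,t) { \
            a+=((k)+(t)+F((b),(c),(d))); \
            a=ROTATE(a,s); \
            a+=b; }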

Legend: lines prefixed with '-' were removed from v.31, lines prefixed with '+' were added in v.32, and unmarked lines are unchanged context.
