Here is some code for converting between binary data and hexadecimal text:
#include <stdlib.h>

/* Windows-style type aliases; UCHAR and LPBYTE are assumed here so the
   snippet is self-contained (on Windows they come from <windows.h>). */
typedef unsigned char UCHAR;
typedef unsigned char *LPBYTE;
typedef unsigned char *LPSTR;

/* Lookup table mapping a 4-bit value (0-15) to its hex digit. */
UCHAR hextable[16] =
{ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D',
  'E', 'F'
};

/* Write the high and low nibbles of byte a as two hex characters through b. */
#define TOHEX(a, b) {*b++ = hextable[a >> 4]; *b++ = hextable[a & 0xf];}

LPSTR
BinToHex (LPBYTE p, int len)
{
  int i;
  /* Two hex digits per byte plus the terminator need len * 2 + 1 bytes;
     len * 3 + 1, as in the original, simply over-allocates a little. */
  LPSTR str = (LPSTR) malloc (len * 3 + 1);
  LPSTR basestr = str;
  for (i = 0; i < len; i++)
    {
      TOHEX (p[i], str);
    }
  *str = '\0';
  return basestr;
}

/* Map one hex character ('0'-'9', 'A'-'F', otherwise assumed 'a'-'f')
   back to its 4-bit value. */
#define HEXTOBIN(x) ( (x) >= '0' && (x) <= '9' ? ((x)-'0') : \
                      (x) >= 'A' && (x) <= 'F' ? ((x)-'A'+10) : ((x)-'a'+10))

LPBYTE
HexToBin (LPSTR p, int len)
{
  int i;
  LPBYTE out = (LPBYTE) malloc (len >> 1);   /* two hex chars per output byte */
  LPBYTE out_org = out;
  for (i = 0; i < len; i += 2)
    {
      *out++ = (HEXTOBIN (p[i]) << 4) | HEXTOBIN (p[i + 1]);
    }
  return out_org;
}
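
For reference, here is a minimal usage sketch (my own addition, not from the original post), assuming the definitions above are in the same file; both returned buffers are heap-allocated and must be freed by the caller:

#include <stdio.h>
#include <string.h>

int
main (void)
{
  UCHAR data[] = { 0x00, 0x7f, 0x80, 0xff };        /* includes bytes with the high bit set */
  LPSTR hex = BinToHex (data, (int) sizeof data);   /* yields "007F80FF" */
  LPBYTE back = HexToBin (hex, (int) strlen ((char *) hex));

  printf ("%s\n", hex);
  printf ("round trip %s\n",
          memcmp (data, back, sizeof data) == 0 ? "ok" : "failed");

  free (hex);
  free (back);
  return 0;
}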
My question: why does hextable[a >> 4] in #define TOHEX(a, b) {*b++ = hextable[a >> 4];*b++ = hextable[a&0xf];} work correctly? With such a plain right shift, when the high bit of a is 1 the vacated bits should be filled with 1s (sign extension), and then the index would no longer land on the right letter in the array. Yet when I tested it, the output was in fact correct. I can't figure this out.
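
Not part of the original post, but a minimal standalone sketch of the behavior being asked about: here a is p[i], an unsigned char, so before the shift it is promoted to a non-negative int whose upper bits are zero, which keeps a >> 4 in the range 0..15; only a signed type with the high bit set would risk the sign extension described above.

#include <stdio.h>

int
main (void)
{
  unsigned char u = 0xAB;               /* high bit is 1 */
  signed char s = (signed char) 0xAB;   /* same bit pattern, but negative */

  /* u is promoted to the int 0x000000AB, so the shift yields 10 (0x0A). */
  printf ("unsigned: %d\n", u >> 4);

  /* s is promoted to a negative int; shifting it right is
     implementation-defined and typically yields -6 here. */
  printf ("signed:   %d\n", s >> 4);

  return 0;
}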