分类目录归档:C

libcurl与CLOSE_WAIT

调用libcurl下载,然后使用netstat查看发现有大量的TCP连接保持在CLOSE_WAIT状态
查看libcurl的文档说明,有这样一个选项:

CURLOPT_FORBID_REUSE

Pass a long. Set to 1 to make the next transfer explicitly close the connection when done. Normally, libcurl keeps all connections alive when done with one transfer in case a succeeding one follows that can re-use them. This option should be used with caution and only if you understand what it does. Set to 0 to have libcurl keep the connection open for possible later re-use (default behavior).

也就是说,默认情况下libcurl完成一个任务以后,出于重用连接的考虑不会马上关闭
如果没有新的TCP请求来重用这个连接,那么只能等到CLOSE_WAIT超时,这个时间默认在7200秒甚至更高,太多的CLOSE_WAIT连接会导致性能问题

解决方法:

curl_easy_setopt(curl, CURLOPT_FORBID_REUSE, 1);

最好再修改一下TCP参数调低CLOSE_WAIT和TIME_WAIT的超时时间

url的encode和decode

相关RFC:http://curl.haxx.se/rfc/rfc2396.txt


/* ---------------------------------------------------------------------------
 * Encode URL by converting special characters to %XX (where XX are
 * hexadecimal digits). Alphanumerics and the RFC 2396 reserved/unreserved
 * marks (; / ? : @ & = + $ , - _ . ! ~ * ' ( )) are copied verbatim.
 * Returns a heap-allocated string, or NULL on allocation failure.
 * Don't forget to free the return value.
 */
char *urlencode(const char *url)
{
    size_t i, len = strlen(url);
    size_t outlen = len;        /* current capacity, excluding the '\0' */
    size_t pos = 0;             /* next write index in out */
    char *out = malloc(outlen + 1);

    if (out == NULL)
        return NULL;

    /* Append one byte, growing the buffer when needed.  The grow check
     * runs BEFORE the write so out[pos] (and the final '\0') always fit. */
#define COPY_TO_ENCODE_URL(c)                       \
    do {                                            \
        if (pos >= outlen) {                        \
            char *tmp;                              \
            outlen += 10;                           \
            tmp = realloc(out, outlen + 1);         \
            if (tmp == NULL) {                      \
                free(out);                          \
                return NULL;                        \
            }                                       \
            out = tmp;                              \
        }                                           \
        out[pos++] = (c);                           \
    } while (0)

    for (i = 0; i < len; i++) {
        if ((url[i] >= '0' && url[i] <= '9')        /* 0-9 */
            || (url[i] >= 'A' && url[i] <= 'Z')     /* A-Z */
            || (url[i] >= 'a' && url[i] <= 'z')     /* a-z */
            || url[i] == ';' || url[i] == '/' || url[i] == '?'
            || url[i] == ':' || url[i] == '@' || url[i] == '&'
            || url[i] == '=' || url[i] == '+' || url[i] == '$'
            || url[i] == ',' || url[i] == '-' || url[i] == '_'
            || url[i] == '.' || url[i] == '!' || url[i] == '~'
            || url[i] == '*' || url[i] == '\'' || url[i] == '('
            || url[i] == ')') {
            /* straight copy */
            COPY_TO_ENCODE_URL(url[i]);
        } else {
            /* %XX escape: split the byte into two hex nibbles.
             * Cast to unsigned char so a negative plain char can't
             * produce garbage nibbles. */
            char dig1 = ((unsigned char)url[i] & 0xF0) >> 4;
            char dig2 = (unsigned char)url[i] & 0x0F;
            dig1 += (dig1 <= 9) ? '0' : 'A' - 10;
            dig2 += (dig2 <= 9) ? '0' : 'A' - 10;
            COPY_TO_ENCODE_URL('%');
            COPY_TO_ENCODE_URL(dig1);
            COPY_TO_ENCODE_URL(dig2);
        }
    }
#undef COPY_TO_ENCODE_URL
    out[pos] = '\0';
    return out;
}

/* ---------------------------------------------------------------------------
 * Decode URL by converting %XX (where XX are hexadecimal digits) to the
 * character it represents.  A '%' not followed by two hex digits is copied
 * through unchanged.  The output is never longer than the input.
 * Returns a heap-allocated string, or NULL on allocation failure.
 * Don't forget to free the return value.
 */
char *urldecode(const char *url)
{
    size_t i, len = strlen(url);
    size_t pos = 0;
    char *out = malloc(len + 1);

    if (out == NULL)
        return NULL;

    /* Map one ASCII hex digit to its value 0-15.  Callers must have
     * validated the digit with isxdigit() first. */
#define HEX_TO_DIGIT(hex) (                                     \
    ((hex) >= 'A' && (hex) <= 'F') ? ((hex) - 'A' + 10) :       \
    ((hex) >= 'a' && (hex) <= 'f') ? ((hex) - 'a' + 10) :       \
    ((hex) - '0'))

    for (i = 0; i < len; i++) {
        if (url[i] == '%' && i + 2 < len
            && isxdigit((unsigned char)url[i + 1])
            && isxdigit((unsigned char)url[i + 2])) {
            /* decode %XX */
            out[pos++] = (char)(HEX_TO_DIGIT(url[i + 1]) * 16
                                + HEX_TO_DIGIT(url[i + 2]));
            i += 2;
        } else {
            /* straight copy */
            out[pos++] = url[i];
        }
    }
#undef HEX_TO_DIGIT
    out[pos] = '\0';
    return out;
}

[Socket]获取客户端IP

有两种方法,一种是在accept的时候获取,一种是通过getpeername获取


#include <sys/socket.h>

int accept(int socket, struct sockaddr *restrict address,
socklen_t *restrict address_len);

int getpeername(int socket, struct sockaddr *restrict address,
socklen_t *restrict address_len);

两者区别主要在取IP的fd不一样,前者是在监听的fd,后者是在连接建立的fd

struct sockaddr_in addr;
socklen_t addr_len;
int32_t listen_fd, sock_fd;

/* create listening port */

addr_len = sizeof(addr);

memset(&addr, 0, addr_len);
sock_fd = accept(listen_fd, (struct sockaddr *)&addr, &addr_len);
printf("%u\n", addr.sin_addr.s_addr);

memset(&addr, 0, addr_len);
getpeername(sock_fd, (struct sockaddr *)&addr, &addr_len);
printf("%u\n", addr.sin_addr.s_addr);

当然这种长整型格式的IP不一定是我们想要的,可以通过inet_ntoa转换


#include <arpa/inet.h>

in_addr_t inet_addr(const char *cp);
char *inet_ntoa(struct in_addr in);


printf("%s\n", inet_ntoa(addr.sin_addr));