/* util.c */
1 /* 2 * Copyright 2011-2018 Con Kolivas 3 * Copyright 2011-2015 Andrew Smith 4 * Copyright 2010 Jeff Garzik 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the Free 8 * Software Foundation; either version 3 of the License, or (at your option) 9 * any later version. See COPYING for more details. 10 */ 11 12 #include "config.h" 13 14 #include <stdio.h> 15 #include <stdlib.h> 16 #include <ctype.h> 17 #include <stdarg.h> 18 #include <string.h> 19 #include <jansson.h> 20 #ifdef HAVE_LIBCURL 21 #include <curl/curl.h> 22 #endif 23 #include <time.h> 24 #include <errno.h> 25 #include <unistd.h> 26 #include <sys/types.h> 27 #ifndef WIN32 28 #include <fcntl.h> 29 # ifdef __linux 30 # include <sys/prctl.h> 31 # endif 32 # include <sys/socket.h> 33 # include <netinet/in.h> 34 # include <netinet/tcp.h> 35 # include <netdb.h> 36 #else 37 # include <winsock2.h> 38 # include <ws2tcpip.h> 39 # include <mmsystem.h> 40 #endif 41 #include <sched.h> 42 43 #include "miner.h" 44 #include "elist.h" 45 #include "compat.h" 46 #include "util.h" 47 48 #define DEFAULT_SOCKWAIT 60 49 #ifndef STRATUM_USER_AGENT 50 #define STRATUM_USER_AGENT 51 #endif 52 53 bool successful_connect = false; 54 55 int no_yield(void) 56 { 57 return 0; 58 } 59 60 int (*selective_yield)(void) = &no_yield; 61 62 static void keep_sockalive(SOCKETTYPE fd) 63 { 64 const int tcp_one = 1; 65 #ifndef WIN32 66 const int tcp_keepidle = 45; 67 const int tcp_keepintvl = 30; 68 int flags = fcntl(fd, F_GETFL, 0); 69 70 fcntl(fd, F_SETFL, O_NONBLOCK | flags); 71 #else 72 u_long flags = 1; 73 74 ioctlsocket(fd, FIONBIO, &flags); 75 #endif 76 77 setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const void *)&tcp_one, sizeof(tcp_one)); 78 if (!opt_delaynet) 79 #ifndef __linux 80 setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); 81 #else /* __linux */ 82 fcntl(fd, F_SETFD, FD_CLOEXEC); 83 setsockopt(fd, 
SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); 84 setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one)); 85 setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle)); 86 setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl)); 87 #endif /* __linux */ 88 89 #ifdef __APPLE_CC__ 90 setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl)); 91 #endif /* __APPLE_CC__ */ 92 93 } 94 95 #ifdef WIN32 96 /* Generic versions of inet_pton for windows, using different names in case 97 * it is implemented in ming in the future. */ 98 #define W32NS_INADDRSZ 4 99 #define W32NS_IN6ADDRSZ 16 100 #define W32NS_INT16SZ 2 101 102 static int Inet_Pton4(const char *src, char *dst) 103 { 104 uint8_t tmp[W32NS_INADDRSZ], *tp; 105 106 int saw_digit = 0; 107 int octets = 0; 108 *(tp = tmp) = 0; 109 110 int ch; 111 while ((ch = *src++) != '\0') 112 { 113 if (ch >= '0' && ch <= '9') 114 { 115 uint32_t n = *tp * 10 + (ch - '0'); 116 117 if (saw_digit && *tp == 0) 118 return 0; 119 120 if (n > 255) 121 return 0; 122 123 *tp = n; 124 if (!saw_digit) 125 { 126 if (++octets > 4) 127 return 0; 128 saw_digit = 1; 129 } 130 } 131 else if (ch == '.' && saw_digit) 132 { 133 if (octets == 4) 134 return 0; 135 *++tp = 0; 136 saw_digit = 0; 137 } 138 else 139 return 0; 140 } 141 if (octets < 4) 142 return 0; 143 144 cg_memcpy(dst, tmp, W32NS_INADDRSZ); 145 146 return 1; 147 } 148 149 static int Inet_Pton6(const char *src, char *dst) 150 { 151 static const char xdigits[] = "0123456789abcdef"; 152 uint8_t tmp[W32NS_IN6ADDRSZ]; 153 154 uint8_t *tp = (uint8_t*) memset(tmp, '\0', W32NS_IN6ADDRSZ); 155 uint8_t *endp = tp + W32NS_IN6ADDRSZ; 156 uint8_t *colonp = NULL; 157 158 /* Leading :: requires some special handling. 
*/ 159 if (*src == ':') 160 { 161 if (*++src != ':') 162 return 0; 163 } 164 165 const char *curtok = src; 166 int saw_xdigit = 0; 167 uint32_t val = 0; 168 int ch; 169 while ((ch = tolower(*src++)) != '\0') 170 { 171 const char *pch = strchr(xdigits, ch); 172 if (pch != NULL) 173 { 174 val <<= 4; 175 val |= (pch - xdigits); 176 if (val > 0xffff) 177 return 0; 178 saw_xdigit = 1; 179 continue; 180 } 181 if (ch == ':') 182 { 183 curtok = src; 184 if (!saw_xdigit) 185 { 186 if (colonp) 187 return 0; 188 colonp = tp; 189 continue; 190 } 191 else if (*src == '\0') 192 { 193 return 0; 194 } 195 if (tp + W32NS_INT16SZ > endp) 196 return 0; 197 *tp++ = (uint8_t) (val >> 8) & 0xff; 198 *tp++ = (uint8_t) val & 0xff; 199 saw_xdigit = 0; 200 val = 0; 201 continue; 202 } 203 if (ch == '.' && ((tp + W32NS_INADDRSZ) <= endp) && 204 Inet_Pton4(curtok, (char*) tp) > 0) 205 { 206 tp += W32NS_INADDRSZ; 207 saw_xdigit = 0; 208 break; /* '\0' was seen by inet_pton4(). */ 209 } 210 return 0; 211 } 212 if (saw_xdigit) 213 { 214 if (tp + W32NS_INT16SZ > endp) 215 return 0; 216 *tp++ = (uint8_t) (val >> 8) & 0xff; 217 *tp++ = (uint8_t) val & 0xff; 218 } 219 if (colonp != NULL) 220 { 221 int i; 222 /* 223 * Since some memmove()'s erroneously fail to handle 224 * overlapping regions, we'll do the shift by hand. 
225 */ 226 const int n = tp - colonp; 227 228 if (tp == endp) 229 return 0; 230 231 for (i = 1; i <= n; i++) 232 { 233 endp[-i] = colonp[n - i]; 234 colonp[n - i] = 0; 235 } 236 tp = endp; 237 } 238 if (tp != endp) 239 return 0; 240 241 cg_memcpy(dst, tmp, W32NS_IN6ADDRSZ); 242 243 return 1; 244 } 245 246 int Inet_Pton(int af, const char *src, void *dst) 247 { 248 switch (af) 249 { 250 case AF_INET: 251 return Inet_Pton4(src, dst); 252 case AF_INET6: 253 return Inet_Pton6(src, dst); 254 default: 255 return -1; 256 } 257 } 258 #endif 259 260 /* Align a size_t to 4 byte boundaries for fussy arches */ 261 static inline void align_len(size_t *len) 262 { 263 if (*len % 4) 264 *len += 4 - (*len % 4); 265 } 266 267 void *_cgmalloc(size_t size, const char *file, const char *func, const int line) 268 { 269 void *ret; 270 271 align_len(&size); 272 ret = malloc(size); 273 if (unlikely(!ret)) 274 quit(1, "Failed to malloc size %d from %s %s:%d", (int)size, file, func, line); 275 return ret; 276 } 277 278 void *_cgcalloc(const size_t memb, size_t size, const char *file, const char *func, const int line) 279 { 280 void *ret; 281 282 align_len(&size); 283 ret = calloc(memb, size); 284 if (unlikely(!ret)) 285 quit(1, "Failed to calloc memb %d size %d from %s %s:%d", (int)memb, (int)size, file, func, line); 286 return ret; 287 } 288 289 void *_cgrealloc(void *ptr, size_t size, const char *file, const char *func, const int line) 290 { 291 void *ret; 292 293 align_len(&size); 294 ret = realloc(ptr, size); 295 if (unlikely(!ret)) 296 quit(1, "Failed to realloc size %d from %s %s:%d", (int)size, file, func, line); 297 return ret; 298 } 299 300 struct tq_ent { 301 void *data; 302 struct list_head q_node; 303 }; 304 305 #ifdef HAVE_LIBCURL 306 struct timeval nettime; 307 308 struct data_buffer { 309 void *buf; 310 size_t len; 311 }; 312 313 struct upload_buffer { 314 const void *buf; 315 size_t len; 316 }; 317 318 struct header_info { 319 char *lp_path; 320 int rolltime; 321 char 
*reason; 322 char *stratum_url; 323 bool hadrolltime; 324 bool canroll; 325 bool hadexpire; 326 }; 327 328 static void databuf_free(struct data_buffer *db) 329 { 330 if (!db) 331 return; 332 333 free(db->buf); 334 335 memset(db, 0, sizeof(*db)); 336 } 337 338 static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb, 339 void *user_data) 340 { 341 struct data_buffer *db = user_data; 342 size_t len = size * nmemb; 343 size_t oldlen, newlen; 344 void *newmem; 345 static const unsigned char zero = 0; 346 347 oldlen = db->len; 348 newlen = oldlen + len; 349 350 newmem = cgrealloc(db->buf, newlen + 1); 351 db->buf = newmem; 352 db->len = newlen; 353 cg_memcpy(db->buf + oldlen, ptr, len); 354 cg_memcpy(db->buf + newlen, &zero, 1); /* null terminate */ 355 356 return len; 357 } 358 359 static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb, 360 void *user_data) 361 { 362 struct upload_buffer *ub = user_data; 363 unsigned int len = size * nmemb; 364 365 if (len > ub->len) 366 len = ub->len; 367 368 if (len) { 369 cg_memcpy(ptr, ub->buf, len); 370 ub->buf += len; 371 ub->len -= len; 372 } 373 374 return len; 375 } 376 377 static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data) 378 { 379 struct header_info *hi = user_data; 380 size_t remlen, slen, ptrlen = size * nmemb; 381 char *rem, *val = NULL, *key = NULL; 382 void *tmp; 383 384 val = cgcalloc(1, ptrlen); 385 key = cgcalloc(1, ptrlen); 386 387 tmp = memchr(ptr, ':', ptrlen); 388 if (!tmp || (tmp == ptr)) /* skip empty keys / blanks */ 389 goto out; 390 slen = tmp - ptr; 391 if ((slen + 1) == ptrlen) /* skip key w/ no value */ 392 goto out; 393 cg_memcpy(key, ptr, slen); /* store & nul term key */ 394 key[slen] = 0; 395 396 rem = ptr + slen + 1; /* trim value's leading whitespace */ 397 remlen = ptrlen - slen - 1; 398 while ((remlen > 0) && (isspace(*rem))) { 399 remlen--; 400 rem++; 401 } 402 403 cg_memcpy(val, rem, remlen); /* store value, trim trailing ws */ 404 
val[remlen] = 0; 405 while ((*val) && (isspace(val[strlen(val) - 1]))) 406 val[strlen(val) - 1] = 0; 407 408 if (!*val) /* skip blank value */ 409 goto out; 410 411 if (opt_protocol) 412 applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val); 413 414 if (!strcasecmp("X-Roll-Ntime", key)) { 415 hi->hadrolltime = true; 416 if (!strncasecmp("N", val, 1)) 417 applog(LOG_DEBUG, "X-Roll-Ntime: N found"); 418 else { 419 hi->canroll = true; 420 421 /* Check to see if expire= is supported and if not, set 422 * the rolltime to the default scantime */ 423 if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) { 424 sscanf(val + 7, "%d", &hi->rolltime); 425 hi->hadexpire = true; 426 } else 427 hi->rolltime = max_scantime; 428 applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime); 429 } 430 } 431 432 if (!strcasecmp("X-Long-Polling", key)) { 433 hi->lp_path = val; /* steal memory reference */ 434 val = NULL; 435 } 436 437 if (!strcasecmp("X-Reject-Reason", key)) { 438 hi->reason = val; /* steal memory reference */ 439 val = NULL; 440 } 441 442 if (!strcasecmp("X-Stratum", key)) { 443 hi->stratum_url = val; 444 val = NULL; 445 } 446 447 out: 448 free(key); 449 free(val); 450 return ptrlen; 451 } 452 453 static void last_nettime(struct timeval *last) 454 { 455 rd_lock(&netacc_lock); 456 last->tv_sec = nettime.tv_sec; 457 last->tv_usec = nettime.tv_usec; 458 rd_unlock(&netacc_lock); 459 } 460 461 static void set_nettime(void) 462 { 463 wr_lock(&netacc_lock); 464 cgtime(&nettime); 465 wr_unlock(&netacc_lock); 466 } 467 468 #if CURL_HAS_KEEPALIVE 469 static void keep_curlalive(CURL *curl) 470 { 471 const int tcp_keepidle = 45; 472 const int tcp_keepintvl = 30; 473 const long int keepalive = 1; 474 475 curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, keepalive); 476 curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, tcp_keepidle); 477 curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, tcp_keepintvl); 478 } 479 #else 480 static void keep_curlalive(CURL *curl) 481 { 482 SOCKETTYPE sock; 483 484 
curl_easy_getinfo(curl, CURLINFO_LASTSOCKET, (long *)&sock); 485 keep_sockalive(sock); 486 } 487 #endif 488 489 static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type, 490 __maybe_unused char *data, size_t size, void *userdata) 491 { 492 struct pool *pool = (struct pool *)userdata; 493 494 switch(type) { 495 case CURLINFO_HEADER_IN: 496 case CURLINFO_DATA_IN: 497 case CURLINFO_SSL_DATA_IN: 498 pool->cgminer_pool_stats.net_bytes_received += size; 499 break; 500 case CURLINFO_HEADER_OUT: 501 case CURLINFO_DATA_OUT: 502 case CURLINFO_SSL_DATA_OUT: 503 pool->cgminer_pool_stats.net_bytes_sent += size; 504 break; 505 case CURLINFO_TEXT: 506 default: 507 break; 508 } 509 return 0; 510 } 511 512 json_t *json_web_config(const char *url) 513 { 514 struct data_buffer all_data = {NULL, 0}; 515 char curl_err_str[CURL_ERROR_SIZE]; 516 long timeout = 60; 517 json_error_t err; 518 json_t *val; 519 CURL *curl; 520 int rc; 521 522 memset(&err, 0, sizeof(err)); 523 524 curl = curl_easy_init(); 525 if (unlikely(!curl)) 526 quithere(1, "CURL initialisation failed"); 527 528 curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); 529 530 curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); 531 curl_easy_setopt(curl, CURLOPT_URL, url); 532 curl_easy_setopt(curl, CURLOPT_ENCODING, ""); 533 curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); 534 535 curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); 536 curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data); 537 curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); 538 curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); 539 curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); 540 541 val = NULL; 542 rc = curl_easy_perform(curl); 543 curl_easy_cleanup(curl); 544 if (rc) { 545 applog(LOG_ERR, "HTTP config request of '%s' failed: %s", url, curl_err_str); 546 goto c_out; 547 } 548 549 if (!all_data.buf) { 550 applog(LOG_ERR, "Empty config data received from '%s'", url); 551 goto c_out; 552 } 553 554 val = 
JSON_LOADS(all_data.buf, &err); 555 if (!val) { 556 applog(LOG_ERR, "JSON config decode of '%s' failed(%d): %s", url, 557 err.line, err.text); 558 } 559 databuf_free(&all_data); 560 561 c_out: 562 return val; 563 } 564 565 json_t *json_rpc_call(CURL *curl, const char *url, 566 const char *userpass, const char *rpc_req, 567 bool probe, bool longpoll, int *rolltime, 568 struct pool *pool, bool share) 569 { 570 long timeout = longpoll ? (60 * 60) : 60; 571 struct data_buffer all_data = {NULL, 0}; 572 struct header_info hi = {NULL, 0, NULL, NULL, false, false, false}; 573 char len_hdr[64], user_agent_hdr[128]; 574 char curl_err_str[CURL_ERROR_SIZE]; 575 struct curl_slist *headers = NULL; 576 struct upload_buffer upload_data; 577 json_t *val, *err_val, *res_val; 578 bool probing = false; 579 double byte_count; 580 json_error_t err; 581 int rc; 582 583 memset(&err, 0, sizeof(err)); 584 585 /* it is assumed that 'curl' is freshly [re]initialized at this pt */ 586 587 if (probe) 588 probing = !pool->probed; 589 curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); 590 591 // CURLOPT_VERBOSE won't write to stderr if we use CURLOPT_DEBUGFUNCTION 592 curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb); 593 curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool); 594 curl_easy_setopt(curl, CURLOPT_VERBOSE, 1); 595 596 curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); 597 curl_easy_setopt(curl, CURLOPT_URL, url); 598 curl_easy_setopt(curl, CURLOPT_ENCODING, ""); 599 curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); 600 601 /* Shares are staggered already and delays in submission can be costly 602 * so do not delay them */ 603 if (!opt_delaynet || share) 604 curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1); 605 curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); 606 curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data); 607 curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb); 608 curl_easy_setopt(curl, CURLOPT_READDATA, &upload_data); 609 curl_easy_setopt(curl, 
CURLOPT_ERRORBUFFER, curl_err_str); 610 curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); 611 curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb); 612 curl_easy_setopt(curl, CURLOPT_HEADERDATA, &hi); 613 curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); 614 if (pool->rpc_proxy) { 615 curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy); 616 curl_easy_setopt(curl, CURLOPT_PROXYTYPE, pool->rpc_proxytype); 617 } else if (opt_socks_proxy) { 618 curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy); 619 curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); 620 } 621 if (userpass) { 622 curl_easy_setopt(curl, CURLOPT_USERPWD, userpass); 623 curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC); 624 } 625 if (longpoll) 626 keep_curlalive(curl); 627 curl_easy_setopt(curl, CURLOPT_POST, 1); 628 629 if (opt_protocol) 630 applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req); 631 632 upload_data.buf = rpc_req; 633 upload_data.len = strlen(rpc_req); 634 sprintf(len_hdr, "Content-Length: %lu", 635 (unsigned long) upload_data.len); 636 sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE_STRING); 637 638 headers = curl_slist_append(headers, 639 "Content-type: application/json"); 640 headers = curl_slist_append(headers, 641 "X-Mining-Extensions: longpoll midstate rollntime submitold"); 642 643 if (likely(global_hashrate)) { 644 char ghashrate[255]; 645 646 sprintf(ghashrate, "X-Mining-Hashrate: %"PRIu64, global_hashrate); 647 headers = curl_slist_append(headers, ghashrate); 648 } 649 650 headers = curl_slist_append(headers, len_hdr); 651 headers = curl_slist_append(headers, user_agent_hdr); 652 headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/ 653 654 curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); 655 656 if (opt_delaynet) { 657 /* Don't delay share submission, but still track the nettime */ 658 if (!share) { 659 long long now_msecs, last_msecs; 660 struct timeval now, last; 661 662 cgtime(&now); 663 last_nettime(&last); 
664 now_msecs = (long long)now.tv_sec * 1000; 665 now_msecs += now.tv_usec / 1000; 666 last_msecs = (long long)last.tv_sec * 1000; 667 last_msecs += last.tv_usec / 1000; 668 if (now_msecs > last_msecs && now_msecs - last_msecs < 250) { 669 struct timespec rgtp; 670 671 rgtp.tv_sec = 0; 672 rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000; 673 nanosleep(&rgtp, NULL); 674 } 675 } 676 set_nettime(); 677 } 678 679 rc = curl_easy_perform(curl); 680 if (rc) { 681 applog(LOG_INFO, "HTTP request failed: %s", curl_err_str); 682 goto err_out; 683 } 684 685 if (!all_data.buf) { 686 applog(LOG_DEBUG, "Empty data received in json_rpc_call."); 687 goto err_out; 688 } 689 690 pool->cgminer_pool_stats.times_sent++; 691 if (curl_easy_getinfo(curl, CURLINFO_SIZE_UPLOAD, &byte_count) == CURLE_OK) 692 pool->cgminer_pool_stats.bytes_sent += byte_count; 693 pool->cgminer_pool_stats.times_received++; 694 if (curl_easy_getinfo(curl, CURLINFO_SIZE_DOWNLOAD, &byte_count) == CURLE_OK) 695 pool->cgminer_pool_stats.bytes_received += byte_count; 696 697 if (probing) { 698 pool->probed = true; 699 /* If X-Long-Polling was found, activate long polling */ 700 if (hi.lp_path) { 701 if (pool->hdr_path != NULL) 702 free(pool->hdr_path); 703 pool->hdr_path = hi.lp_path; 704 } else 705 pool->hdr_path = NULL; 706 if (hi.stratum_url) { 707 pool->stratum_url = hi.stratum_url; 708 hi.stratum_url = NULL; 709 } 710 } else { 711 if (hi.lp_path) { 712 free(hi.lp_path); 713 hi.lp_path = NULL; 714 } 715 if (hi.stratum_url) { 716 free(hi.stratum_url); 717 hi.stratum_url = NULL; 718 } 719 } 720 721 *rolltime = hi.rolltime; 722 pool->cgminer_pool_stats.rolltime = hi.rolltime; 723 pool->cgminer_pool_stats.hadrolltime = hi.hadrolltime; 724 pool->cgminer_pool_stats.canroll = hi.canroll; 725 pool->cgminer_pool_stats.hadexpire = hi.hadexpire; 726 727 val = JSON_LOADS(all_data.buf, &err); 728 if (!val) { 729 applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); 730 731 if (opt_protocol) 732 
applog(LOG_DEBUG, "JSON protocol response:\n%s", (char *)(all_data.buf)); 733 734 goto err_out; 735 } 736 737 if (opt_protocol) { 738 char *s = json_dumps(val, JSON_INDENT(3)); 739 740 applog(LOG_DEBUG, "JSON protocol response:\n%s", s); 741 free(s); 742 } 743 744 /* JSON-RPC valid response returns a non-null 'result', 745 * and a null 'error'. 746 */ 747 res_val = json_object_get(val, "result"); 748 err_val = json_object_get(val, "error"); 749 750 if (!res_val ||(err_val && !json_is_null(err_val))) { 751 char *s; 752 753 if (err_val) 754 s = json_dumps(err_val, JSON_INDENT(3)); 755 else 756 s = strdup("(unknown reason)"); 757 758 applog(LOG_INFO, "JSON-RPC call failed: %s", s); 759 760 free(s); 761 762 goto err_out; 763 } 764 765 if (hi.reason) { 766 json_object_set_new(val, "reject-reason", json_string(hi.reason)); 767 free(hi.reason); 768 hi.reason = NULL; 769 } 770 successful_connect = true; 771 databuf_free(&all_data); 772 curl_slist_free_all(headers); 773 curl_easy_reset(curl); 774 return val; 775 776 err_out: 777 databuf_free(&all_data); 778 curl_slist_free_all(headers); 779 curl_easy_reset(curl); 780 if (!successful_connect) 781 applog(LOG_DEBUG, "Failed to connect in json_rpc_call"); 782 curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1); 783 return NULL; 784 } 785 #define PROXY_HTTP CURLPROXY_HTTP 786 #define PROXY_HTTP_1_0 CURLPROXY_HTTP_1_0 787 #define PROXY_SOCKS4 CURLPROXY_SOCKS4 788 #define PROXY_SOCKS5 CURLPROXY_SOCKS5 789 #define PROXY_SOCKS4A CURLPROXY_SOCKS4A 790 #define PROXY_SOCKS5H CURLPROXY_SOCKS5_HOSTNAME 791 #else /* HAVE_LIBCURL */ 792 #define PROXY_HTTP 0 793 #define PROXY_HTTP_1_0 1 794 #define PROXY_SOCKS4 2 795 #define PROXY_SOCKS5 3 796 #define PROXY_SOCKS4A 4 797 #define PROXY_SOCKS5H 5 798 #endif /* HAVE_LIBCURL */ 799 800 static struct { 801 const char *name; 802 proxytypes_t proxytype; 803 } proxynames[] = { 804 { "http:", PROXY_HTTP }, 805 { "http0:", PROXY_HTTP_1_0 }, 806 { "socks4:", PROXY_SOCKS4 }, 807 { "socks5:", PROXY_SOCKS5 
}, 808 { "socks4a:", PROXY_SOCKS4A }, 809 { "socks5h:", PROXY_SOCKS5H }, 810 { NULL, 0 } 811 }; 812 813 const char *proxytype(proxytypes_t proxytype) 814 { 815 int i; 816 817 for (i = 0; proxynames[i].name; i++) 818 if (proxynames[i].proxytype == proxytype) 819 return proxynames[i].name; 820 821 return "invalid"; 822 } 823 824 char *get_proxy(char *url, struct pool *pool) 825 { 826 pool->rpc_proxy = NULL; 827 828 char *split; 829 int plen, len, i; 830 831 for (i = 0; proxynames[i].name; i++) { 832 plen = strlen(proxynames[i].name); 833 if (strncmp(url, proxynames[i].name, plen) == 0) { 834 if (!(split = strchr(url, '|'))) 835 return url; 836 837 *split = '\0'; 838 len = split - url; 839 pool->rpc_proxy = cgmalloc(1 + len - plen); 840 strcpy(pool->rpc_proxy, url + plen); 841 extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port); 842 pool->rpc_proxytype = proxynames[i].proxytype; 843 url = split + 1; 844 break; 845 } 846 } 847 return url; 848 } 849 850 /* Adequate size s==len*2 + 1 must be alloced to use this variant */ 851 void __bin2hex(char *s, const unsigned char *p, size_t len) 852 { 853 int i; 854 static const char hex[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; 855 856 for (i = 0; i < (int)len; i++) { 857 *s++ = hex[p[i] >> 4]; 858 *s++ = hex[p[i] & 0xF]; 859 } 860 *s++ = '\0'; 861 } 862 863 /* Returns a malloced array string of a binary value of arbitrary length. 
The 864 * array is rounded up to a 4 byte size to appease architectures that need 865 * aligned array sizes */ 866 char *bin2hex(const unsigned char *p, size_t len) 867 { 868 ssize_t slen; 869 char *s; 870 871 slen = len * 2 + 1; 872 if (slen % 4) 873 slen += 4 - (slen % 4); 874 s = cgcalloc(slen, 1); 875 __bin2hex(s, p, len); 876 877 return s; 878 } 879 880 static const int hex2bin_tbl[256] = { 881 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 882 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 883 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 884 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, 885 -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, 886 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 887 -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, 888 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 889 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 890 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 891 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 892 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 893 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 894 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 895 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 896 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 897 }; 898 899 /* Does the reverse of bin2hex but does not allocate any ram */ 900 bool hex2bin(unsigned char *p, const char *hexstr, size_t len) 901 { 902 int nibble1, nibble2; 903 unsigned char idx; 904 bool ret = false; 905 906 while (*hexstr && len) { 907 if (unlikely(!hexstr[1])) { 908 applog(LOG_ERR, "hex2bin str truncated"); 909 return ret; 910 } 911 912 idx = *hexstr++; 913 nibble1 = hex2bin_tbl[idx]; 914 idx = *hexstr++; 915 nibble2 = hex2bin_tbl[idx]; 916 917 if (unlikely((nibble1 < 0) || (nibble2 < 0))) { 
918 applog(LOG_ERR, "hex2bin scan failed"); 919 return ret; 920 } 921 922 *p++ = (((unsigned char)nibble1) << 4) | ((unsigned char)nibble2); 923 --len; 924 } 925 926 if (likely(len == 0 && *hexstr == 0)) 927 ret = true; 928 return ret; 929 } 930 931 static bool _valid_hex(char *s, const char *file, const char *func, const int line) 932 { 933 bool ret = false; 934 int i, len; 935 936 if (unlikely(!s)) { 937 applog(LOG_ERR, "Null string passed to valid_hex from"IN_FMT_FFL, file, func, line); 938 return ret; 939 } 940 len = strlen(s); 941 for (i = 0; i < len; i++) { 942 unsigned char idx = s[i]; 943 944 if (unlikely(hex2bin_tbl[idx] < 0)) { 945 applog(LOG_ERR, "Invalid char 0x%x passed to valid_hex from"IN_FMT_FFL, idx, file, func, line); 946 return ret; 947 } 948 } 949 ret = true; 950 return ret; 951 } 952 953 #define valid_hex(s) _valid_hex(s, __FILE__, __func__, __LINE__) 954 955 static bool _valid_ascii(char *s, const char *file, const char *func, const int line) 956 { 957 bool ret = false; 958 int i, len; 959 960 if (unlikely(!s)) { 961 applog(LOG_ERR, "Null string passed to valid_ascii from"IN_FMT_FFL, file, func, line); 962 return ret; 963 } 964 len = strlen(s); 965 if (unlikely(!len)) { 966 applog(LOG_ERR, "Zero length string passed to valid_ascii from"IN_FMT_FFL, file, func, line); 967 return ret; 968 } 969 for (i = 0; i < len; i++) { 970 unsigned char idx = s[i]; 971 972 if (unlikely(idx < 32 || idx > 126)) { 973 applog(LOG_ERR, "Invalid char 0x%x passed to valid_ascii from"IN_FMT_FFL, idx, file, func, line); 974 return ret; 975 } 976 } 977 ret = true; 978 return ret; 979 } 980 981 #define valid_ascii(s) _valid_ascii(s, __FILE__, __func__, __LINE__) 982 983 static const int b58tobin_tbl[] = { 984 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 985 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 986 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 987 -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, 988 -1, 
9, 10, 11, 12, 13, 14, 15, 16, -1, 17, 18, 19, 20, 21, -1, 989 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, -1, -1, -1, -1, 990 -1, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, 991 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57 992 }; 993 994 /* b58bin should always be at least 25 bytes long and already checked to be 995 * valid. */ 996 void b58tobin(unsigned char *b58bin, const char *b58) 997 { 998 uint32_t c, bin32[7]; 999 int len, i, j; 1000 uint64_t t; 1001 1002 memset(bin32, 0, 7 * sizeof(uint32_t)); 1003 len = strlen(b58); 1004 for (i = 0; i < len; i++) { 1005 c = b58[i]; 1006 c = b58tobin_tbl[c]; 1007 for (j = 6; j >= 0; j--) { 1008 t = ((uint64_t)bin32[j]) * 58 + c; 1009 c = (t & 0x3f00000000ull) >> 32; 1010 bin32[j] = t & 0xffffffffull; 1011 } 1012 } 1013 *(b58bin++) = bin32[0] & 0xff; 1014 for (i = 1; i < 7; i++) { 1015 *((uint32_t *)b58bin) = htobe32(bin32[i]); 1016 b58bin += sizeof(uint32_t); 1017 } 1018 } 1019 1020 void address_to_pubkeyhash(unsigned char *pkh, const char *addr) 1021 { 1022 unsigned char b58bin[25]; 1023 1024 memset(b58bin, 0, 25); 1025 b58tobin(b58bin, addr); 1026 pkh[0] = 0x76; 1027 pkh[1] = 0xa9; 1028 pkh[2] = 0x14; 1029 cg_memcpy(&pkh[3], &b58bin[1], 20); 1030 pkh[23] = 0x88; 1031 pkh[24] = 0xac; 1032 } 1033 1034 /* For encoding nHeight into coinbase, return how many bytes were used */ 1035 int ser_number(unsigned char *s, int32_t val) 1036 { 1037 int32_t *i32 = (int32_t *)&s[1]; 1038 int len; 1039 1040 if (val < 17) { 1041 s[0] = 0x50 + val; 1042 return 1; 1043 } 1044 if (val < 128) 1045 len = 1; 1046 else if (val < 32768) 1047 len = 2; 1048 else if (val < 8388608) 1049 len = 3; 1050 else 1051 len = 4; 1052 *i32 = htole32(val); 1053 s[0] = len++; 1054 return len; 1055 } 1056 1057 /* For encoding variable length strings */ 1058 unsigned char *ser_string(char *s, int *slen) 1059 { 1060 size_t len = strlen(s); 1061 unsigned char *ret; 1062 1063 ret = cgmalloc(1 + len + 8); // Leave room for largest size 1064 if (len < 
253) { 1065 ret[0] = len; 1066 cg_memcpy(ret + 1, s, len); 1067 *slen = len + 1; 1068 } else if (len < 0x10000) { 1069 uint16_t *u16 = (uint16_t *)&ret[1]; 1070 1071 ret[0] = 253; 1072 *u16 = htobe16(len); 1073 cg_memcpy(ret + 3, s, len); 1074 *slen = len + 3; 1075 } else { 1076 /* size_t is only 32 bit on many platforms anyway */ 1077 uint32_t *u32 = (uint32_t *)&ret[1]; 1078 1079 ret[0] = 254; 1080 *u32 = htobe32(len); 1081 cg_memcpy(ret + 5, s, len); 1082 *slen = len + 5; 1083 } 1084 return ret; 1085 } 1086 1087 bool fulltest(const unsigned char *hash, const unsigned char *target) 1088 { 1089 uint32_t *hash32 = (uint32_t *)hash; 1090 uint32_t *target32 = (uint32_t *)target; 1091 bool rc = true; 1092 int i; 1093 1094 for (i = 28 / 4; i >= 0; i--) { 1095 uint32_t h32tmp = le32toh(hash32[i]); 1096 uint32_t t32tmp = le32toh(target32[i]); 1097 1098 if (h32tmp > t32tmp) { 1099 rc = false; 1100 break; 1101 } 1102 if (h32tmp < t32tmp) { 1103 rc = true; 1104 break; 1105 } 1106 } 1107 1108 if (opt_debug) { 1109 unsigned char hash_swap[32], target_swap[32]; 1110 char *hash_str, *target_str; 1111 1112 swab256(hash_swap, hash); 1113 swab256(target_swap, target); 1114 hash_str = bin2hex(hash_swap, 32); 1115 target_str = bin2hex(target_swap, 32); 1116 1117 applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s", 1118 hash_str, 1119 target_str, 1120 rc ? 
"YES (hash <= target)" : 1121 "no (false positive; hash > target)"); 1122 1123 free(hash_str); 1124 free(target_str); 1125 } 1126 1127 return rc; 1128 } 1129 1130 struct thread_q *tq_new(void) 1131 { 1132 struct thread_q *tq; 1133 1134 tq = cgcalloc(1, sizeof(*tq)); 1135 INIT_LIST_HEAD(&tq->q); 1136 pthread_mutex_init(&tq->mutex, NULL); 1137 pthread_cond_init(&tq->cond, NULL); 1138 1139 return tq; 1140 } 1141 1142 void tq_free(struct thread_q *tq) 1143 { 1144 struct tq_ent *ent, *iter; 1145 1146 if (!tq) 1147 return; 1148 1149 list_for_each_entry_safe(ent, iter, &tq->q, q_node) { 1150 list_del(&ent->q_node); 1151 free(ent); 1152 } 1153 1154 pthread_cond_destroy(&tq->cond); 1155 pthread_mutex_destroy(&tq->mutex); 1156 1157 memset(tq, 0, sizeof(*tq)); /* poison */ 1158 free(tq); 1159 } 1160 1161 static void tq_freezethaw(struct thread_q *tq, bool frozen) 1162 { 1163 mutex_lock(&tq->mutex); 1164 tq->frozen = frozen; 1165 pthread_cond_signal(&tq->cond); 1166 mutex_unlock(&tq->mutex); 1167 } 1168 1169 void tq_freeze(struct thread_q *tq) 1170 { 1171 tq_freezethaw(tq, true); 1172 } 1173 1174 void tq_thaw(struct thread_q *tq) 1175 { 1176 tq_freezethaw(tq, false); 1177 } 1178 1179 bool tq_push(struct thread_q *tq, void *data) 1180 { 1181 struct tq_ent *ent; 1182 bool rc = true; 1183 1184 ent = cgcalloc(1, sizeof(*ent)); 1185 ent->data = data; 1186 INIT_LIST_HEAD(&ent->q_node); 1187 1188 mutex_lock(&tq->mutex); 1189 if (!tq->frozen) { 1190 list_add_tail(&ent->q_node, &tq->q); 1191 } else { 1192 free(ent); 1193 rc = false; 1194 } 1195 pthread_cond_signal(&tq->cond); 1196 mutex_unlock(&tq->mutex); 1197 1198 return rc; 1199 } 1200 1201 void *tq_pop(struct thread_q *tq) 1202 { 1203 struct tq_ent *ent; 1204 void *rval = NULL; 1205 int rc; 1206 1207 mutex_lock(&tq->mutex); 1208 if (!list_empty(&tq->q)) 1209 goto pop; 1210 1211 rc = pthread_cond_wait(&tq->cond, &tq->mutex); 1212 if (rc) 1213 goto out; 1214 if (list_empty(&tq->q)) 1215 goto out; 1216 pop: 1217 ent = 
list_entry(tq->q.next, struct tq_ent, q_node); 1218 rval = ent->data; 1219 1220 list_del(&ent->q_node); 1221 free(ent); 1222 out: 1223 mutex_unlock(&tq->mutex); 1224 1225 return rval; 1226 } 1227 1228 int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg) 1229 { 1230 cgsem_init(&thr->sem); 1231 1232 return pthread_create(&thr->pth, attr, start, arg); 1233 } 1234 1235 void thr_info_cancel(struct thr_info *thr) 1236 { 1237 if (!thr) 1238 return; 1239 1240 if (PTH(thr) != 0L) { 1241 pthread_cancel(thr->pth); 1242 PTH(thr) = 0L; 1243 } 1244 cgsem_destroy(&thr->sem); 1245 } 1246 1247 void subtime(struct timeval *a, struct timeval *b) 1248 { 1249 timersub(a, b, b); 1250 } 1251 1252 void addtime(struct timeval *a, struct timeval *b) 1253 { 1254 timeradd(a, b, b); 1255 } 1256 1257 bool time_more(struct timeval *a, struct timeval *b) 1258 { 1259 return timercmp(a, b, >); 1260 } 1261 1262 bool time_less(struct timeval *a, struct timeval *b) 1263 { 1264 return timercmp(a, b, <); 1265 } 1266 1267 void copy_time(struct timeval *dest, const struct timeval *src) 1268 { 1269 cg_memcpy(dest, src, sizeof(struct timeval)); 1270 } 1271 1272 void timespec_to_val(struct timeval *val, const struct timespec *spec) 1273 { 1274 val->tv_sec = spec->tv_sec; 1275 val->tv_usec = spec->tv_nsec / 1000; 1276 } 1277 1278 void timeval_to_spec(struct timespec *spec, const struct timeval *val) 1279 { 1280 spec->tv_sec = val->tv_sec; 1281 spec->tv_nsec = val->tv_usec * 1000; 1282 } 1283 1284 void us_to_timeval(struct timeval *val, int64_t us) 1285 { 1286 lldiv_t tvdiv = lldiv(us, 1000000); 1287 1288 val->tv_sec = tvdiv.quot; 1289 val->tv_usec = tvdiv.rem; 1290 } 1291 1292 void us_to_timespec(struct timespec *spec, int64_t us) 1293 { 1294 lldiv_t tvdiv = lldiv(us, 1000000); 1295 1296 spec->tv_sec = tvdiv.quot; 1297 spec->tv_nsec = tvdiv.rem * 1000; 1298 } 1299 1300 void ms_to_timespec(struct timespec *spec, int64_t ms) 1301 { 1302 lldiv_t tvdiv = lldiv(ms, 
1000); 1303 1304 spec->tv_sec = tvdiv.quot; 1305 spec->tv_nsec = tvdiv.rem * 1000000; 1306 } 1307 1308 void ms_to_timeval(struct timeval *val, int64_t ms) 1309 { 1310 lldiv_t tvdiv = lldiv(ms, 1000); 1311 1312 val->tv_sec = tvdiv.quot; 1313 val->tv_usec = tvdiv.rem * 1000; 1314 } 1315 1316 static void spec_nscheck(struct timespec *ts) 1317 { 1318 while (ts->tv_nsec >= 1000000000) { 1319 ts->tv_nsec -= 1000000000; 1320 ts->tv_sec++; 1321 } 1322 while (ts->tv_nsec < 0) { 1323 ts->tv_nsec += 1000000000; 1324 ts->tv_sec--; 1325 } 1326 } 1327 1328 void timeraddspec(struct timespec *a, const struct timespec *b) 1329 { 1330 a->tv_sec += b->tv_sec; 1331 a->tv_nsec += b->tv_nsec; 1332 spec_nscheck(a); 1333 } 1334 1335 #ifdef USE_BITMAIN_SOC 1336 static int __maybe_unused timespec_to_ms(struct timespec *ts) 1337 { 1338 return ts->tv_sec * 1000 + ts->tv_nsec / 1000000; 1339 } 1340 1341 /* Subtract b from a */ 1342 static void __maybe_unused timersubspec(struct timespec *a, const struct timespec *b) 1343 { 1344 a->tv_sec -= b->tv_sec; 1345 a->tv_nsec -= b->tv_nsec; 1346 spec_nscheck(a); 1347 } 1348 #else /* USE_BITMAIN_SOC */ 1349 static int timespec_to_ms(struct timespec *ts) 1350 { 1351 return ts->tv_sec * 1000 + ts->tv_nsec / 1000000; 1352 } 1353 1354 static int64_t timespec_to_us(struct timespec *ts) 1355 { 1356 return (int64_t)ts->tv_sec * 1000000 + ts->tv_nsec / 1000; 1357 } 1358 1359 /* Subtract b from a */ 1360 static void timersubspec(struct timespec *a, const struct timespec *b) 1361 { 1362 a->tv_sec -= b->tv_sec; 1363 a->tv_nsec -= b->tv_nsec; 1364 spec_nscheck(a); 1365 } 1366 #endif /* USE_BITMAIN_SOC */ 1367 1368 char *Strcasestr(char *haystack, const char *needle) 1369 { 1370 char *lowhay, *lowneedle, *ret; 1371 int hlen, nlen, i, ofs; 1372 1373 if (unlikely(!haystack || !needle)) 1374 return NULL; 1375 hlen = strlen(haystack); 1376 nlen = strlen(needle); 1377 if (!hlen || !nlen) 1378 return NULL; 1379 lowhay = alloca(hlen); 1380 lowneedle = alloca(nlen); 1381 
for (i = 0; i < hlen; i++) 1382 lowhay[i] = tolower(haystack[i]); 1383 for (i = 0; i < nlen; i++) 1384 lowneedle[i] = tolower(needle[i]); 1385 ret = strstr(lowhay, lowneedle); 1386 if (!ret) 1387 return ret; 1388 ofs = ret - lowhay; 1389 return haystack + ofs; 1390 } 1391 1392 char *Strsep(char **stringp, const char *delim) 1393 { 1394 char *ret = *stringp; 1395 char *p; 1396 1397 p = (ret != NULL) ? strpbrk(ret, delim) : NULL; 1398 1399 if (p == NULL) 1400 *stringp = NULL; 1401 else { 1402 *p = '\0'; 1403 *stringp = p + 1; 1404 } 1405 1406 return ret; 1407 } 1408 1409 /* Get timespec specifically for use by cond_timedwait functions which use 1410 * CLOCK_REALTIME for expiry */ 1411 void cgcond_time(struct timespec *abstime) 1412 { 1413 clock_gettime(CLOCK_REALTIME, abstime); 1414 } 1415 1416 #ifdef WIN32 1417 /* Mingw32 has no strsep so create our own custom one */ 1418 1419 /* Windows start time is since 1601 LOL so convert it to unix epoch 1970. */ 1420 #define EPOCHFILETIME (116444736000000000LL) 1421 1422 /* These are cgminer specific sleep functions that use an absolute nanosecond 1423 * resolution timer to avoid poor usleep accuracy and overruns. */ 1424 1425 /* Return the system time as an lldiv_t in decimicroseconds. */ 1426 static void decius_time(lldiv_t *lidiv) 1427 { 1428 FILETIME ft; 1429 LARGE_INTEGER li; 1430 1431 GetSystemTimeAsFileTime(&ft); 1432 li.LowPart = ft.dwLowDateTime; 1433 li.HighPart = ft.dwHighDateTime; 1434 li.QuadPart -= EPOCHFILETIME; 1435 1436 /* SystemTime is in decimicroseconds so divide by an unusual number */ 1437 *lidiv = lldiv(li.QuadPart, 10000000); 1438 } 1439 1440 /* This is a cgminer gettimeofday wrapper. Since we always call gettimeofday 1441 * with tz set to NULL, and windows' default resolution is only 15ms, this 1442 * gives us higher resolution times on windows. 
 */
void cgtime(struct timeval *tv)
{
	lldiv_t lidiv;

	decius_time(&lidiv);
	tv->tv_sec = lidiv.quot;
	/* decimicroseconds remainder -> microseconds */
	tv->tv_usec = lidiv.rem / 10;
}

#else /* WIN32 */
/* Fill tv with the current monotonic-derived time. */
void cgtime(struct timeval *tv)
{
	cgtimer_t cgt;

	cgtimer_time(&cgt);
	timespec_to_val(tv, &cgt);
}

/* Convert a cgtimer (timespec on this platform) to milliseconds. */
int cgtimer_to_ms(cgtimer_t *cgt)
{
	return timespec_to_ms(cgt);
}

/* Subtracts b from a and stores it in res. */
void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res)
{
	res->tv_sec = a->tv_sec - b->tv_sec;
	res->tv_nsec = a->tv_nsec - b->tv_nsec;
	if (res->tv_nsec < 0) {
		res->tv_nsec += 1000000000;
		res->tv_sec--;
	}
}
#endif /* WIN32 */

#if defined(CLOCK_MONOTONIC) && !defined(__FreeBSD__) && !defined(__APPLE__) && !defined(WIN32) /* Essentially just linux */
//#ifdef CLOCK_MONOTONIC /* Essentially just linux */
/* Read the monotonic clock into ts_start. */
void cgtimer_time(cgtimer_t *ts_start)
{
	clock_gettime(CLOCK_MONOTONIC, ts_start);
}

/* Sleep until the absolute monotonic time ts_end, retrying on EINTR. */
static void nanosleep_abstime(struct timespec *ts_end)
{
	int ret;

	do {
		ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL);
	} while (ret == EINTR);
}

/* Reentrant version of cgsleep functions allow start time to be set separately
 * from the beginning of the actual sleep, allowing scheduling delays to be
 * counted in the sleep.
 */
#ifdef USE_BITMAIN_SOC
/* Sleep until ms milliseconds after ts_start. */
void cgsleep_ms_r(cgtimer_t *ts_start, int ms)
{
	struct timespec ts_end;

	ms_to_timespec(&ts_end, ms);
	timeraddspec(&ts_end, ts_start);
	nanosleep_abstime(&ts_end);
}

/* Sleep until us microseconds after ts_start. */
void cgsleep_us_r(cgtimer_t *ts_start, int64_t us)
{
	struct timespec ts_end;

	us_to_timespec(&ts_end, us);
	timeraddspec(&ts_end, ts_start);
	nanosleep_abstime(&ts_end);
}
#else /* USE_BITMAIN_SOC */
/* Sleep until ms milliseconds after ts_start. Returns the remaining sleep
 * in ms at the time of the call, or 0 if the deadline had already passed. */
int cgsleep_ms_r(cgtimer_t *ts_start, int ms)
{
	struct timespec ts_end, ts_diff;
	int msdiff;

	ms_to_timespec(&ts_end, ms);
	timeraddspec(&ts_end, ts_start);
	cgtimer_time(&ts_diff);
	/* Should be a negative value if we still have to sleep */
	timersubspec(&ts_diff, &ts_end);
	msdiff = -timespec_to_ms(&ts_diff);
	if (msdiff <= 0)
		return 0;

	nanosleep_abstime(&ts_end);
	return msdiff;
}

/* Sleep until us microseconds after ts_start. Returns the remaining sleep
 * in us at the time of the call, or 0 if the deadline had already passed. */
int64_t cgsleep_us_r(cgtimer_t *ts_start, int64_t us)
{
	struct timespec ts_end, ts_diff;
	int64_t usdiff;

	us_to_timespec(&ts_end, us);
	timeraddspec(&ts_end, ts_start);
	cgtimer_time(&ts_diff);
	usdiff = -timespec_to_us(&ts_diff);
	if (usdiff <= 0)
		return 0;

	nanosleep_abstime(&ts_end);
	return usdiff;
}
#endif /* USE_BITMAIN_SOC */
#else /* CLOCK_MONOTONIC */
#ifdef __MACH__
#include <mach/clock.h>
#include <mach/mach.h>
/* OSX: read the mach SYSTEM_CLOCK into ts_start. */
void cgtimer_time(cgtimer_t *ts_start)
{
	clock_serv_t cclock;
	mach_timespec_t mts;

	host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock);
	clock_get_time(cclock, &mts);
	mach_port_deallocate(mach_task_self(), cclock);
	ts_start->tv_sec = mts.tv_sec;
	ts_start->tv_nsec = mts.tv_nsec;
}
#elif !defined(WIN32) /* __MACH__ - Everything not linux/macosx/win32 */
/* Fallback: derive the timer from gettimeofday via cgtime(). */
void cgtimer_time(cgtimer_t *ts_start)
{
	struct timeval tv;

	cgtime(&tv);
	ts_start->tv_sec = tv.tv_sec;
	ts_start->tv_nsec = tv.tv_usec * 1000;
}
#endif /* __MACH__ */

#ifdef WIN32
/* For windows we use the SystemTime stored as a LARGE_INTEGER as the cgtimer_t
 * typedef, allowing us to have sub-microsecond resolution for times, do simple
 * arithmetic for timer calculations, and use windows' own hTimers to get
 * accurate absolute timeouts. */
int cgtimer_to_ms(cgtimer_t *cgt)
{
	/* QuadPart is in 100ns units, 10000 per millisecond */
	return (int)(cgt->QuadPart / 10000LL);
}

/* Subtracts b from a and stores it in res. */
void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res)
{
	res->QuadPart = a->QuadPart - b->QuadPart;
}

/* Note that cgtimer time is NOT offset by the unix epoch since we use absolute
 * timeouts with hTimers. */
void cgtimer_time(cgtimer_t *ts_start)
{
	FILETIME ft;

	GetSystemTimeAsFileTime(&ft);
	ts_start->LowPart = ft.dwLowDateTime;
	ts_start->HighPart = ft.dwHighDateTime;
}

/* Sleep until the absolute time in li using a waitable timer, with timeout
 * (in ms) as a sanity-check upper bound on the wait. */
static void liSleep(LARGE_INTEGER *li, int timeout)
{
	HANDLE hTimer;
	DWORD ret;

	if (unlikely(timeout <= 0))
		return;

	hTimer = CreateWaitableTimer(NULL, TRUE, NULL);
	if (unlikely(!hTimer))
		quit(1, "Failed to create hTimer in liSleep");
	ret = SetWaitableTimer(hTimer, li, 0, NULL, NULL, 0);
	if (unlikely(!ret))
		quit(1, "Failed to SetWaitableTimer in liSleep");
	/* We still use a timeout as a sanity check in case the system time
	 * is changed while we're running */
	ret = WaitForSingleObject(hTimer, timeout);
	if (unlikely(ret != WAIT_OBJECT_0 && ret != WAIT_TIMEOUT))
		quit(1, "Failed to WaitForSingleObject in liSleep");
	CloseHandle(hTimer);
}

/* Sleep until ms milliseconds after ts_start. */
void cgsleep_ms_r(cgtimer_t *ts_start, int ms)
{
	LARGE_INTEGER li;

	li.QuadPart = ts_start->QuadPart + (int64_t)ms * 10000LL;
	liSleep(&li, ms);
}

/* Sleep until us microseconds after ts_start. */
void cgsleep_us_r(cgtimer_t *ts_start, int64_t us)
{
	LARGE_INTEGER li;
	int ms;

	li.QuadPart = ts_start->QuadPart + us * 10LL;
	ms = us / 1000;
	if (!ms)
		ms = 1;
	liSleep(&li, ms);
}
#else /* WIN32 */
/* Sleep for the duration in ts_diff measured from ts_start, deducting any
 * time that has already elapsed since ts_start. */
static void cgsleep_spec(struct timespec *ts_diff, const struct timespec *ts_start)
{
	struct timespec now;

	timeraddspec(ts_diff, ts_start);
	cgtimer_time(&now);
	timersubspec(ts_diff, &now);
	if (unlikely(ts_diff->tv_sec < 0))
		return;
	nanosleep(ts_diff, NULL);
}

void cgsleep_ms_r(cgtimer_t *ts_start, int ms)
{
	struct timespec ts_diff;

	ms_to_timespec(&ts_diff, ms);
	cgsleep_spec(&ts_diff, ts_start);
}

void cgsleep_us_r(cgtimer_t *ts_start, int64_t us)
{
	struct timespec ts_diff;

	us_to_timespec(&ts_diff, us);
	cgsleep_spec(&ts_diff, ts_start);
}
#endif /* WIN32 */
#endif /* CLOCK_MONOTONIC */

/* Sleep for ms milliseconds measured from now. */
void cgsleep_ms(int ms)
{
	cgtimer_t ts_start;

	cgsleep_prepare_r(&ts_start);
	cgsleep_ms_r(&ts_start, ms);
}

/* Busy-wait (yielding the CPU each iteration) until us microseconds from
 * now have passed. */
static void busywait_us(int64_t us)
{
	struct timeval diff, end, now;

	cgtime(&end);
	us_to_timeval(&diff, us);
	addtime(&diff, &end);
	do {
		sched_yield();
		cgtime(&now);
	} while (time_less(&now, &end));
}

void cgsleep_us(int64_t us)
{
	cgtimer_t ts_start;

	/* Most timer resolution is unlikely to be able to sleep accurately
	 * for less than 1ms so busywait instead. */
	if (us < 1000)
		return busywait_us(us);
	cgsleep_prepare_r(&ts_start);
	cgsleep_us_r(&ts_start, us);
}

/* Returns the microseconds difference between end and start times as a double */
double us_tdiff(struct timeval *end, struct timeval *start)
{
	/* Sanity check. We should only be using this for small differences so
	 * limit the max to 60 seconds.
	 */
	if (unlikely(end->tv_sec - start->tv_sec > 60))
		return 60000000;
	return (end->tv_sec - start->tv_sec) * 1000000 + (end->tv_usec - start->tv_usec);
}

/* Returns the milliseconds difference between end and start times */
int ms_tdiff(struct timeval *end, struct timeval *start)
{
	/* Like us_tdiff, limit to 1 hour. */
	if (unlikely(end->tv_sec - start->tv_sec > 3600))
		return 3600000;
	return (end->tv_sec - start->tv_sec) * 1000 + (end->tv_usec - start->tv_usec) / 1000;
}

/* Returns the seconds difference between end and start times as a double */
double tdiff(struct timeval *end, struct timeval *start)
{
	return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0;
}

/* Split url into freshly-malloced host and port strings, handling an
 * optional "//" scheme prefix, bracketed IPv6 literals, and an optional
 * "/path" following the port. The port defaults to "80" when absent.
 * Returns false on an empty host or port component. */
bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
{
	char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
	char url_address[256], port[6];
	int url_len, port_len = 0;

	*sockaddr_url = url;
	url_begin = strstr(url, "//");
	if (!url_begin)
		url_begin = url;
	else
		url_begin += 2;

	/* Look for numeric ipv6 entries */
	ipv6_begin = strstr(url_begin, "[");
	ipv6_end = strstr(url_begin, "]");
	if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
		url_end = strstr(ipv6_end, ":");
	else
		url_end = strstr(url_begin, ":");
	if (url_end) {
		url_len = url_end - url_begin;
		port_len = strlen(url_begin) - url_len - 1;
		if (port_len < 1)
			return false;
		port_start = url_end + 1;
	} else
		url_len = strlen(url_begin);

	if (url_len < 1)
		return false;

	/* Get rid of the [] */
	if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin) {
		url_len -= 2;
		url_begin++;
	}

	snprintf(url_address, 254, "%.*s", url_len, url_begin);

	if (port_len) {
		char *slash;

		snprintf(port, 6, "%.*s", port_len, port_start);
		/* Strip any trailing /path from the port component */
		slash = strchr(port, '/');
		if (slash)
			*slash = '\0';
	} else
		strcpy(port, "80");

	*sockaddr_port = strdup(port);
	*sockaddr_url = strdup(url_address);

	return true;
}

enum send_ret {
	SEND_OK,
	SEND_SELECTFAIL,
	SEND_SENDFAIL,
	SEND_INACTIVE
};

/* Send a single command across a socket, appending \n to it. This should all
 * be done under stratum lock except when first establishing the socket */
static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len)
{
	SOCKETTYPE sock = pool->sock;
	ssize_t ssent = 0;

	/* s must have room for the appended newline */
	strcat(s, "\n");
	len++;

	while (len > 0 ) {
		struct timeval timeout = {1, 0};
		ssize_t sent;
		fd_set wd;
retry:
		FD_ZERO(&wd);
		FD_SET(sock, &wd);
		if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1) {
			if (interrupted())
				goto retry;
			return SEND_SELECTFAIL;
		}
#ifdef __APPLE__
		sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE);
#elif WIN32
		sent = send(pool->sock, s + ssent, len, 0);
#else
		sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL);
#endif
		if (sent < 0) {
			if (!sock_blocks())
				return SEND_SENDFAIL;
			sent = 0;
		}
		ssent += sent;
		len -= sent;
	}

	pool->cgminer_pool_stats.times_sent++;
	pool->cgminer_pool_stats.bytes_sent += ssent;
	pool->cgminer_pool_stats.net_bytes_sent += ssent;
	return SEND_OK;
}

/* Send s to the pool if its stratum is active, taking the stratum lock for
 * the send and logging any failure outside the lock. Returns true on
 * SEND_OK. */
bool stratum_send(struct pool *pool, char *s, ssize_t len)
{
	enum send_ret ret = SEND_INACTIVE;

	if (opt_protocol)
		applog(LOG_DEBUG, "SEND: %s", s);

	mutex_lock(&pool->stratum_lock);
	if (pool->stratum_active)
		ret = __stratum_send(pool, s, len);
	mutex_unlock(&pool->stratum_lock);

	/* This is to avoid doing applog under stratum_lock */
	switch (ret) {
	default:
	case
	SEND_OK:
		break;
	case SEND_SELECTFAIL:
		applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no);
		suspend_stratum(pool);
		break;
	case SEND_SENDFAIL:
		applog(LOG_DEBUG, "Failed to send in stratum_send");
		suspend_stratum(pool);
		break;
	case SEND_INACTIVE:
		applog(LOG_DEBUG, "Stratum send failed due to no pool stratum_active");
		break;
	}
	return (ret == SEND_OK);
}

/* Wait up to wait seconds for the pool socket to become readable. */
static bool socket_full(struct pool *pool, int wait)
{
	SOCKETTYPE sock = pool->sock;
	struct timeval timeout;
	fd_set rd;

	if (unlikely(wait < 0))
		wait = 0;
	FD_ZERO(&rd);
	FD_SET(sock, &rd);
	timeout.tv_usec = 0;
	timeout.tv_sec = wait;
	if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0)
		return true;
	return false;
}

/* Check to see if Santa's been good to you */
bool sock_full(struct pool *pool)
{
	/* Buffered data counts as readable */
	if (strlen(pool->sockbuf))
		return true;

	return (socket_full(pool, 0));
}

/* Empty the receive buffer without touching the socket. */
static void clear_sockbuf(struct pool *pool)
{
	if (likely(pool->sockbuf))
		strcpy(pool->sockbuf, "");
}

/* Drain any pending data off the pool socket (under the stratum lock) and
 * empty the receive buffer. */
static void clear_sock(struct pool *pool)
{
	ssize_t n;

	mutex_lock(&pool->stratum_lock);
	do {
		if (pool->sock)
			n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0);
		else
			n = 0;
	} while (n > 0);
	mutex_unlock(&pool->stratum_lock);

	clear_sockbuf(pool);
}

/* Realloc memory to new size and zero any extra memory added */
void ckrecalloc(void **ptr, size_t old, size_t new, const char *file, const char *func, const int line)
{
	if (new == old)
		return;
	*ptr = _cgrealloc(*ptr, new, file, func, line);
	if (new > old)
		memset(*ptr + old, 0, new - old);
}

/* Make sure the pool sockbuf is large enough to cope with any coinbase size
 * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE
 * and zeroing the new memory */
static void recalloc_sock(struct pool *pool, size_t len)
{
	size_t old, new;

	old = strlen(pool->sockbuf);
	new = old + len + 1;
	if (new < pool->sockbuf_size)
		return;
	new = new + (RBUFSIZE - (new % RBUFSIZE));
	// Avoid potentially recursive locking
	// applog(LOG_DEBUG, "Recallocing pool sockbuf to %d", new);
	pool->sockbuf = cgrealloc(pool->sockbuf, new);
	memset(pool->sockbuf + old, 0, new - old);
	pool->sockbuf_size = new;
}

/* Peeks at a socket to find the first end of line and then reads just that
 * from the socket and returns that as a malloced char */
char *recv_line(struct pool *pool)
{
	char *tok, *sret = NULL;
	ssize_t len, buflen;
	int waited = 0;

	if (!strstr(pool->sockbuf, "\n")) {
		struct timeval rstart, now;

		cgtime(&rstart);
		if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
			applog(LOG_DEBUG, "Timed out waiting for data on socket_full");
			goto out;
		}

		/* Accumulate data into sockbuf until a newline arrives or
		 * DEFAULT_SOCKWAIT seconds have elapsed. */
		do {
			char s[RBUFSIZE];
			size_t slen;
			ssize_t n;

			memset(s, 0, RBUFSIZE);
			n = recv(pool->sock, s, RECVSIZE, 0);
			if (!n) {
				applog(LOG_DEBUG, "Socket closed waiting in recv_line");
				suspend_stratum(pool);
				break;
			}
			cgtime(&now);
			waited = tdiff(&now, &rstart);
			if (n < 0) {
				if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) {
					applog(LOG_DEBUG, "Failed to recv sock in recv_line");
					suspend_stratum(pool);
					break;
				}
			} else {
				slen = strlen(s);
				recalloc_sock(pool, slen);
				strcat(pool->sockbuf, s);
			}
		} while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n"));
	}

	buflen = strlen(pool->sockbuf);
	tok = strtok(pool->sockbuf, "\n");
	if (!tok) {
		applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line");
		goto out;
	}
sret = strdup(tok); 2001 len = strlen(sret); 2002 2003 /* Copy what's left in the buffer after the \n, including the 2004 * terminating \0 */ 2005 if (buflen > len + 1) 2006 memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1); 2007 else 2008 strcpy(pool->sockbuf, ""); 2009 2010 pool->cgminer_pool_stats.times_received++; 2011 pool->cgminer_pool_stats.bytes_received += len; 2012 pool->cgminer_pool_stats.net_bytes_received += len; 2013 out: 2014 if (!sret) 2015 clear_sock(pool); 2016 else if (opt_protocol) 2017 applog(LOG_DEBUG, "RECVD: %s", sret); 2018 return sret; 2019 } 2020 2021 /* Extracts a string value from a json array with error checking. To be used 2022 * when the value of the string returned is only examined and not to be stored. 2023 * See json_array_string below */ 2024 static char *__json_array_string(json_t *val, unsigned int entry) 2025 { 2026 json_t *arr_entry; 2027 2028 if (json_is_null(val)) 2029 return NULL; 2030 if (!json_is_array(val)) 2031 return NULL; 2032 if (entry > json_array_size(val)) 2033 return NULL; 2034 arr_entry = json_array_get(val, entry); 2035 if (!json_is_string(arr_entry)) 2036 return NULL; 2037 2038 return (char *)json_string_value(arr_entry); 2039 } 2040 2041 /* Creates a freshly malloced dup of __json_array_string */ 2042 static char *json_array_string(json_t *val, unsigned int entry) 2043 { 2044 char *buf = __json_array_string(val, entry); 2045 2046 if (buf) 2047 return strdup(buf); 2048 return NULL; 2049 } 2050 2051 static char *blank_merkle = "0000000000000000000000000000000000000000000000000000000000000000"; 2052 2053 #ifdef HAVE_LIBCURL 2054 static void decode_exit(struct pool *pool, char *cb) 2055 { 2056 CURL *curl = curl_easy_init(); 2057 char *decreq, *s; 2058 json_t *val; 2059 int dummy; 2060 2061 if (!opt_btcd && !sleep(3) && !opt_btcd) { 2062 applog(LOG_ERR, "No bitcoind specified, unable to decode coinbase."); 2063 exit(1); 2064 } 2065 decreq = cgmalloc(strlen(cb) + 256); 2066 2067 sprintf(decreq, 
		"{\"id\": 0, \"method\": \"decoderawtransaction\", \"params\": [\"%s\"]}\n",
		cb);
	val = json_rpc_call(curl, opt_btcd->rpc_url, opt_btcd->rpc_userpass, decreq,
			    false, false, &dummy, opt_btcd, false);
	free(decreq);
	if (!val) {
		applog(LOG_ERR, "Failed json_rpc_call to btcd %s", opt_btcd->rpc_url);
		exit(1);
	}
	s = json_dumps(val, JSON_INDENT(4));
	printf("Pool %s:\n%s\n", pool->rpc_url, s);
	free(s);
	exit(0);
}
#else
static void decode_exit(struct pool __maybe_unused *pool, char __maybe_unused *b)
{
}
#endif

/* Return the number of hex digits needed to represent num (0 for 0). */
static int calculate_num_bits(int num)
{
	int ret=0;
	while(num != 0)
	{
		ret++;
		num /= 16;
	}
	return ret;
}

/* Populate the pool's version mask tables - vmask_001 (byte-swapped version
 * words) and vmask_002 (zero-padded hex strings) - from the block version
 * string bbversion and the vmask_003 masks derived in set_vmask(). */
static void get_vmask(struct pool *pool, char *bbversion)
{
	char defaultStr[9]= "00000000";
	int bversion, num_bits, i, j;
	uint8_t buffer[4] = {};
	uint32_t uiMagicNum;
	char *tmpstr;
	uint32_t *p1;

	p1 = (uint32_t *)buffer;
	bversion = strtol(bbversion, NULL, 16);

	for (i = 0; i < 4; i++) {
		uiMagicNum = bversion | pool->vmask_003[i];
		//printf("[ccx]uiMagicNum:0x%x. \n", uiMagicNum);
		*p1 = bswap_32(uiMagicNum);

		//printf("[ccx]*p1:0x%x. \n", *p1);
		/* Slots 8/4/2/0 correspond to masks 0..3 respectively */
		switch(i) {
			case 0:
				pool->vmask_001[8] = *p1;
				break;
			case 1:
				pool->vmask_001[4] = *p1;
				break;
			case 2:
				pool->vmask_001[2] = *p1;
				break;
			case 3:
				pool->vmask_001[0] = *p1;
				break;
			default:
				break;
		}
	}

	/* All remaining slots default to the full-mask value */
	for (i = 0; i < 16; i++) {
		if ((i!= 2) && (i!=4) && (i!=8))
			pool->vmask_001[i] = pool->vmask_001[0];
	}

	for (i = 0; i < 16; i++)
		memcpy(pool->vmask_002[i], defaultStr, 9);

	for (i = 0; i < 3; i++) {
		char cMask[12];

		tmpstr = (char *)cgcalloc(9, 1);
		num_bits = calculate_num_bits(pool->vmask_003[i]);
		/* Left-pad the hex representation with zeroes to 8 digits */
		for (j = 0; j < (8-num_bits); j++)
			tmpstr[j] = '0';

		snprintf(cMask, 9, "%x", pool->vmask_003[i]);
		memcpy(tmpstr + 8 - num_bits, cMask, num_bits);
		tmpstr[8] = '\0';

		//printf("[ccx]tmpstr:%s. \n", tmpstr);
		switch(i) {
			case 0:
				memcpy(pool->vmask_002[8], tmpstr, 9);
				break;
			case 1:
				memcpy(pool->vmask_002[4], tmpstr, 9);
				break;
			case 2:
				memcpy(pool->vmask_002[2], tmpstr, 9);
				break;
			default:
				break;
		}
		free(tmpstr);
	}
}

/* Parse the pool's version-rolling.mask hex string, storing the full mask
 * in vmask_003[0], its lowest non-zero nibble (shifted back into position)
 * in vmask_003[2], and the remainder in vmask_003[1]. Returns false on a
 * zero or unparseable mask. */
static bool set_vmask(struct pool *pool, json_t *val)
{
	int mask, tmpMask = 0, cnt = 0, i, rem;
	const char *version_mask;

	version_mask = json_string_value(val);
	applog(LOG_INFO, "Pool %d version_mask:%s.", pool->pool_no, version_mask);

	mask = strtol(version_mask, NULL, 16);
	if (!mask)
		return false;

	pool->vmask_003[0] = mask;

	/* Strip trailing zero nibbles, counting them so the sub-mask can be
	 * shifted back into place below. */
	while (mask % 16 == 0) {
		cnt++;
		mask /= 16;
	}

	/* NOTE(review): after the loop above mask % 16 is always non-zero,
	 * so the first branch always fires and the %8/%4/%2 fallbacks are
	 * dead code - presumably leftover from an earlier approach. */
	if ((rem = mask % 16))
		tmpMask = rem;
	else if ((rem = mask % 8))
		tmpMask = rem;
	else if ((rem = mask % 4))
		tmpMask = rem;
	else if ((rem = mask % 2))
		tmpMask = rem;

	for (i = 0; i < cnt; i++)
		tmpMask *= 16;
	pool->vmask_003[2] = tmpMask;
	pool->vmask_003[1] = pool->vmask_003[0] - tmpMask;

	return true;
}

#ifdef USE_VMASK

#define STRATUM_VERSION_ROLLING "version-rolling"
#define STRATUM_VERSION_ROLLING_LEN (sizeof(STRATUM_VERSION_ROLLING) - 1)

/**
 * Configures stratum mining based on connected hardware capabilities
 * (version rolling etc.)
 *
 * Sample communication
 * Request:
 * {"id": 1, "method": "mining.configure", "params": [ ["version-rolling"], "version-rolling.mask": "ffffffff" }]}\n
 * Response:
 * {"id": 1, "result": { "version-rolling": True, "version-rolling.mask": "00003000" }, "error": null}\n
 *
 * @param pool
 *
 *
 * @return true only when the pool enables version rolling AND supplies a
 *         valid mask.
 */
static bool configure_stratum_mining(struct pool *pool)
{
	char s[RBUFSIZE];
	char *response_str = NULL;
	bool config_status = false;
	bool version_rolling_status = false;
	bool version_mask_valid = false;
	const char *key;
	json_t *response, *value, *res_val, *err_val;
	json_error_t err;

	snprintf(s, RBUFSIZE,
		 "{\"id\": %d, \"method\": \"mining.configure\", \"params\": "
		 "[[\""STRATUM_VERSION_ROLLING"\"], "
		 "{\""STRATUM_VERSION_ROLLING".mask\": \"%x\""
		 "}]}",
		 swork_id++, 0xffffffff);

	if (__stratum_send(pool, s, strlen(s)) != SEND_OK) {
		applog(LOG_DEBUG, "Failed to send mining.configure");
		goto out;
	}
	if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
		applog(LOG_DEBUG, "Timed out waiting for response in %s", __FUNCTION__);
		goto out;
	}
	response_str = recv_line(pool);
	if (!response_str)
		goto out;

	response = JSON_LOADS(response_str, &err);
	free(response_str);

	res_val = json_object_get(response, "result");
	err_val = json_object_get(response, "error");

	if (!res_val || json_is_null(res_val) ||
	    (err_val && !json_is_null(err_val))) {
		char *ss;

		if (err_val)
			ss = json_dumps(err_val,
					JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");

		applog(LOG_INFO, "JSON-RPC decode failed: %s", ss);

		free(ss);

		goto json_response_error;
	}

	json_object_foreach(res_val, key, value) {
		if (!strcasecmp(key, STRATUM_VERSION_ROLLING) &&
		    strlen(key) == STRATUM_VERSION_ROLLING_LEN)
			version_rolling_status = json_boolean_value(value);
		else if (!strcasecmp(key, STRATUM_VERSION_ROLLING ".mask"))
			pool->vmask = version_mask_valid = set_vmask(pool, value);
		else
			applog(LOG_ERR, "JSON-RPC unexpected mining.configure value: %s", key);
	}

	/* Valid configuration for now only requires enabled version rolling and valid bit mask */
	config_status = version_rolling_status && version_mask_valid;

json_response_error:
	json_decref(response);

out:
	return config_status;
}
#else
static inline bool configure_stratum_mining(struct pool __maybe_unused *pool)
{
	return true;
}
#endif

/* Parse a stratum mining.notify message, updating the pool's work template:
 * job id, previous hash, coinbase halves, merkle branches and the header
 * fields. Returns true on success. */
static bool parse_notify(struct pool *pool, json_t *val)
{
	char *job_id, *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit,
	     *ntime, header[260];
	unsigned char *cb1 = NULL, *cb2 = NULL;
	size_t cb1_len, cb2_len, alloc_len;
	bool clean, ret = false;
	int merkles, i;
	json_t *arr;

	arr = json_array_get(val, 4);
	if (!arr || !json_is_array(arr))
		goto out;

	merkles = json_array_size(arr);

	job_id = json_array_string(val, 0);
	prev_hash = __json_array_string(val, 1);
	coinbase1 = json_array_string(val, 2);
	coinbase2 = json_array_string(val, 3);
	bbversion = __json_array_string(val, 5);
	nbit = __json_array_string(val, 6);
	ntime = __json_array_string(val, 7);
	clean = json_is_true(json_array_get(val, 8));

	/* NOTE(review): bbversion may be NULL here if entry 5 is missing or
	 * not a string; get_vmask() passes it straight to strtol() before
	 * the validity checks below - confirm notify messages always carry
	 * it. */
	get_vmask(pool, bbversion);

	if (!valid_ascii(job_id) || !valid_hex(prev_hash) || !valid_hex(coinbase1) ||
	    !valid_hex(coinbase2) || !valid_hex(bbversion) || !valid_hex(nbit) ||
	    !valid_hex(ntime)) {
		/* Annoying but we must not leak memory */
		free(job_id);
		free(coinbase1);
		free(coinbase2);
		goto out;
	}

	cg_wlock(&pool->data_lock);
	free(pool->swork.job_id);
	pool->swork.job_id = job_id;
	/* A changed previous hash always forces clean work */
	if (memcmp(pool->prev_hash, prev_hash, 64)) {
		pool->swork.clean = true;
	} else {
		pool->swork.clean = clean;
	}
	snprintf(pool->prev_hash, 65, "%s", prev_hash);
	cb1_len = strlen(coinbase1) / 2;
	cb2_len = strlen(coinbase2) / 2;
	snprintf(pool->bbversion, 9, "%s", bbversion);
	snprintf(pool->nbit, 9, "%s", nbit);
	snprintf(pool->ntime, 9, "%s", ntime);
	if (pool->next_diff > 0) {
		pool->sdiff = pool->next_diff;
		pool->next_diff = pool->diff_after;
		pool->diff_after = 0;
	}
	alloc_len = pool->coinbase_len = cb1_len + pool->n1_len + pool->n2size + cb2_len;
	pool->nonce2_offset = cb1_len + pool->n1_len;

	for (i = 0; i < pool->merkles; i++)
		free(pool->swork.merkle_bin[i]);
	if (merkles) {
		/* NOTE(review): "sizeof(char *) * merkles + 1" allocates
		 * merkles pointers plus a single byte; presumably
		 * "(merkles + 1)" was intended. Harmless as only indices
		 * 0..merkles-1 are used. */
		pool->swork.merkle_bin = cgrealloc(pool->swork.merkle_bin,
						   sizeof(char *) * merkles + 1);
		for (i = 0; i < merkles; i++) {
			char *merkle = json_array_string(arr, i);

			pool->swork.merkle_bin[i] = cgmalloc(32);
			if (opt_protocol)
				applog(LOG_DEBUG, "merkle %d: %s", i, merkle);
			ret = hex2bin(pool->swork.merkle_bin[i], merkle, 32);
			free(merkle);
			if (unlikely(!ret)) {
				applog(LOG_ERR, "Failed to convert merkle to merkle_bin in parse_notify");
				goto out_unlock;
			}
		}
	}
	pool->merkles = merkles;
	if (pool->merkles < 2)
		pool->bad_work++;
	if (clean)
		pool->nonce2 = 0;
#if 0
	header_len = strlen(pool->bbversion) +
		     strlen(pool->prev_hash);
	/* merkle_hash */ 32 +
		     strlen(pool->ntime) +
		     strlen(pool->nbit) +
	/* nonce */ 8 +
	/* workpadding */ 96;
#endif
	snprintf(header, 257,
		 "%s%s%s%s%s%s%s",
		 pool->bbversion,
		 pool->prev_hash,
		 blank_merkle,
		 pool->ntime,
		 pool->nbit,
		 "00000000", /* nonce */
		 workpadding);

	ret = hex2bin(pool->header_bin, header, 128);
	if (unlikely(!ret)) {
		applog(LOG_ERR, "Failed to convert header to header_bin in parse_notify");
		goto out_unlock;
	}

	cb1 = alloca(cb1_len);
	ret = hex2bin(cb1, coinbase1, cb1_len);
	if (unlikely(!ret)) {
		applog(LOG_ERR, "Failed to convert cb1 to cb1_bin in parse_notify");
		goto out_unlock;
	}
	cb2 = alloca(cb2_len);
	ret = hex2bin(cb2, coinbase2, cb2_len);
	if (unlikely(!ret)) {
		applog(LOG_ERR, "Failed to convert cb2 to cb2_bin in parse_notify");
		goto out_unlock;
	}
	free(pool->coinbase);
	pool->coinbase = cgcalloc(alloc_len, 1);
	/* Assemble coinbase as cb1 | nonce1 | (nonce2 space) | cb2 */
	cg_memcpy(pool->coinbase, cb1, cb1_len);
	if (pool->n1_len)
		cg_memcpy(pool->coinbase + cb1_len, pool->nonce1bin, pool->n1_len);
	cg_memcpy(pool->coinbase + cb1_len + pool->n1_len + pool->n2size, cb2, cb2_len);
	if (opt_debug || opt_decode) {
		char *cb = bin2hex(pool->coinbase, pool->coinbase_len);

		if (opt_decode)
			decode_exit(pool, cb);
		applog(LOG_DEBUG, "Pool %d coinbase %s", pool->pool_no, cb);
		free(cb);
	}
out_unlock:
	cg_wunlock(&pool->data_lock);

	if (opt_protocol) {
		applog(LOG_DEBUG, "job_id: %s", job_id);
		applog(LOG_DEBUG, "prev_hash: %s", prev_hash);
		applog(LOG_DEBUG, "coinbase1: %s", coinbase1);
		applog(LOG_DEBUG, "coinbase2: %s", coinbase2);
		applog(LOG_DEBUG, "bbversion: %s", bbversion);
		applog(LOG_DEBUG, "nbit: %s", nbit);
		applog(LOG_DEBUG, "ntime: %s", ntime);
		applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no");
	}
	free(coinbase1);
	free(coinbase2);

	/* A notify message is the closest stratum gets to a getwork */
	pool->getwork_requested++;
	total_getworks++;
	if (pool == current_pool())
		opt_work_update = true;
out:
	return ret;
}

/* Parse a stratum mining.set_difficulty message. Only one diff change can
 * take effect per notify, so diffs are stacked via next_diff/diff_after. */
static bool parse_diff(struct pool *pool, json_t *val)
{
	double old_diff, diff;

	diff = json_number_value(json_array_get(val, 0));
	if (diff <= 0)
		return false;

	/* We can only change one diff per notify so assume diffs are being
	 * stacked for successive notifies. */
	cg_wlock(&pool->data_lock);
	if (pool->next_diff)
		pool->diff_after = diff;
	else
		pool->next_diff = diff;
	old_diff = pool->sdiff;
	cg_wunlock(&pool->data_lock);

	if (old_diff != diff) {
		int idiff = diff;

		if ((double)idiff == diff)
			applog(LOG_NOTICE, "Pool %d difficulty changed to %d",
			       pool->pool_no, idiff);
		else
			applog(LOG_NOTICE, "Pool %d difficulty changed to %.1f",
			       pool->pool_no, diff);
	} else
		applog(LOG_DEBUG, "Pool %d difficulty set to %f", pool->pool_no,
		       diff);

	return true;
}

/* Close the pool socket and mark its stratum inactive. Callers hold the
 * stratum lock (see suspend_stratum wrappers). */
static void __suspend_stratum(struct pool *pool)
{
	clear_sockbuf(pool);
	pool->stratum_active = pool->stratum_notify = false;
	if (pool->sock)
		CLOSESOCKET(pool->sock);
	pool->sock = 0;
}

/* Handle a stratum client.reconnect message, validating the requested URL
 * against the pool's existing domain before switching. */
static bool parse_reconnect(struct pool *pool, json_t *val)
{
	char *sockaddr_url, *stratum_port, *tmp;
	char *url, *port, address[256];
	int port_no;

	memset(address, 0, 255);
	url = (char *)json_string_value(json_array_get(val, 0));
	if (!url)
		url = pool->sockaddr_url;
	else {
		char *dot_pool, *dot_reconnect;

		dot_pool = strchr(pool->sockaddr_url, '.');
		if (!dot_pool) {
			applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'",
			       pool->sockaddr_url);
return false; 2525 } 2526 dot_reconnect = strchr(url, '.'); 2527 if (!dot_reconnect) { 2528 applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'", 2529 url); 2530 return false; 2531 } 2532 if (strcmp(dot_pool, dot_reconnect)) { 2533 applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'", 2534 pool->sockaddr_url); 2535 return false; 2536 } 2537 } 2538 2539 port_no = json_integer_value(json_array_get(val, 1)); 2540 if (port_no) { 2541 port = alloca(256); 2542 sprintf(port, "%d", port_no); 2543 } else { 2544 port = (char *)json_string_value(json_array_get(val, 1)); 2545 if (!port) 2546 port = pool->stratum_port; 2547 } 2548 2549 snprintf(address, 254, "%s:%s", url, port); 2550 2551 if (!extract_sockaddr(address, &sockaddr_url, &stratum_port)) 2552 return false; 2553 2554 applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address); 2555 2556 clear_pool_work(pool); 2557 2558 mutex_lock(&pool->stratum_lock); 2559 __suspend_stratum(pool); 2560 tmp = pool->sockaddr_url; 2561 pool->sockaddr_url = sockaddr_url; 2562 pool->stratum_url = pool->sockaddr_url; 2563 free(tmp); 2564 tmp = pool->stratum_port; 2565 pool->stratum_port = stratum_port; 2566 free(tmp); 2567 mutex_unlock(&pool->stratum_lock); 2568 2569 return restart_stratum(pool); 2570 } 2571 2572 static bool send_version(struct pool *pool, json_t *val) 2573 { 2574 json_t *id_val = json_object_get(val, "id"); 2575 char s[RBUFSIZE]; 2576 int id; 2577 2578 if (!id_val) 2579 return false; 2580 id = json_integer_value(json_object_get(val, "id")); 2581 2582 sprintf(s, "{\"id\": %d, \"result\": \""PACKAGE"/"VERSION""STRATUM_USER_AGENT"\", \"error\": null}", id); 2583 if (!stratum_send(pool, s, strlen(s))) 2584 return false; 2585 2586 return true; 2587 } 2588 2589 static bool send_pong(struct pool *pool, json_t *val) 2590 { 2591 json_t *id_val = json_object_get(val, "id"); 2592 char s[RBUFSIZE]; 2593 int id; 2594 2595 if (!id_val) 2596 return 
false;
	id = json_integer_value(json_object_get(val, "id"));

	/* Echo the request id back with a literal "pong" result. */
	sprintf(s, "{\"id\": %d, \"result\": \"pong\", \"error\": null}", id);
	if (!stratum_send(pool, s, strlen(s)))
		return false;

	return true;
}

/* Log a client.show_message notification from the pool at NOTICE level.
 * Returns false if params is not an array or has no string first element. */
static bool show_message(struct pool *pool, json_t *val)
{
	char *msg;

	if (!json_is_array(val))
		return false;
	msg = (char *)json_string_value(json_array_get(val, 0));
	if (!msg)
		return false;
	applog(LOG_NOTICE, "Pool %d message: %s", pool->pool_no, msg);
	return true;
}

/* Handle mining.set_version_mask: extract the mask string (either bare or as
 * the first array element) and store the parsed mask in pool->vmask.
 * Returns true only if a non-empty string param was found. */
static bool parse_vmask(struct pool *pool, json_t *params)
{
	bool ret = false;

	if (!params) {
		applog(LOG_INFO, "No params with parse_vmask given for pool %d",
		       pool->pool_no);
		goto out;
	}
	/* Pools may wrap the mask in a one-element array. */
	if (json_is_array(params))
		params = json_array_get(params, 0);
	if (!json_is_string(params) || !json_string_length(params)) {
		applog(LOG_INFO, "Params invalid string for parse_vmask for pool %d",
		       pool->pool_no);
		goto out;
	}
	pool->vmask = set_vmask(pool, params);
	ret = true;
out:
	return ret;
}

/* Parse one line of stratum JSON-RPC and dispatch any recognised "method"
 * notification to its handler. Returns true if the line was a method message
 * that was handled (even unsuccessfully for mining.notify, which also updates
 * pool->stratum_notify); false for non-method responses or decode failures,
 * letting the caller treat the line as an RPC result instead. */
bool parse_method(struct pool *pool, char *s)
{
	json_t *val = NULL, *method, *err_val, *params;
	json_error_t err;
	bool ret = false;
	char *buf;

	if (!s)
		goto out;

	val = JSON_LOADS(s, &err);
	if (!val) {
		applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
		goto out;
	}

	method = json_object_get(val, "method");
	if (!method)
		goto out_decref;
	err_val = json_object_get(val, "error");
	params = json_object_get(val, "params");

	if (err_val && !json_is_null(err_val)) {
		char *ss;

		if (err_val)
			ss = json_dumps(err_val, JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");

		applog(LOG_INFO, "JSON-RPC method decode of %s failed: %s", s, ss);
		free(ss);
		goto
out_decref; 2674 } 2675 2676 buf = (char *)json_string_value(method); 2677 if (!buf) 2678 goto out_decref; 2679 2680 if (!strncasecmp(buf, "mining.notify", 13)) { 2681 if (parse_notify(pool, params)) 2682 pool->stratum_notify = ret = true; 2683 else 2684 pool->stratum_notify = ret = false; 2685 goto out_decref; 2686 } 2687 2688 if (!strncasecmp(buf, "mining.set_difficulty", 21)) { 2689 ret = parse_diff(pool, params); 2690 goto out_decref; 2691 } 2692 2693 if (!strncasecmp(buf, "client.reconnect", 16)) { 2694 ret = parse_reconnect(pool, params); 2695 goto out_decref; 2696 } 2697 2698 if (!strncasecmp(buf, "client.get_version", 18)) { 2699 ret = send_version(pool, val); 2700 goto out_decref; 2701 } 2702 2703 if (!strncasecmp(buf, "client.show_message", 19)) { 2704 ret = show_message(pool, params); 2705 goto out_decref; 2706 } 2707 2708 if (!strncasecmp(buf, "mining.ping", 11)) { 2709 applog(LOG_INFO, "Pool %d ping", pool->pool_no); 2710 ret = send_pong(pool, val); 2711 goto out_decref; 2712 } 2713 2714 if (!strncasecmp(buf, "mining.set_version_mask", 23)) { 2715 ret = parse_vmask(pool, params); 2716 goto out_decref; 2717 } 2718 applog(LOG_INFO, "Unknown JSON-RPC from pool %d: %s", pool->pool_no, s); 2719 out_decref: 2720 json_decref(val); 2721 out: 2722 return ret; 2723 } 2724 2725 bool auth_stratum(struct pool *pool) 2726 { 2727 json_t *val = NULL, *res_val, *err_val; 2728 char s[RBUFSIZE], *sret = NULL; 2729 json_error_t err; 2730 bool ret = false; 2731 2732 sprintf(s, "{\"id\": %d, \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}", 2733 swork_id++, pool->rpc_user, pool->rpc_pass); 2734 2735 if (!stratum_send(pool, s, strlen(s))) 2736 return ret; 2737 2738 /* Parse all data in the queue and anything left should be auth */ 2739 while (42) { 2740 sret = recv_line(pool); 2741 if (!sret) 2742 return ret; 2743 if (parse_method(pool, sret)) 2744 free(sret); 2745 else 2746 break; 2747 } 2748 2749 val = JSON_LOADS(sret, &err); 2750 free(sret); 2751 res_val = 
json_object_get(val, "result"); 2752 err_val = json_object_get(val, "error"); 2753 2754 if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val))) { 2755 char *ss; 2756 2757 if (err_val) 2758 ss = json_dumps(err_val, JSON_INDENT(3)); 2759 else 2760 ss = strdup("(unknown reason)"); 2761 applog(LOG_INFO, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss); 2762 free(ss); 2763 2764 suspend_stratum(pool); 2765 2766 goto out; 2767 } 2768 2769 ret = true; 2770 applog(LOG_INFO, "Stratum authorisation success for pool %d", pool->pool_no); 2771 pool->probed = true; 2772 successful_connect = true; 2773 2774 if (opt_suggest_diff) { 2775 sprintf(s, "{\"id\": %d, \"method\": \"mining.suggest_difficulty\", \"params\": [%d]}", 2776 swork_id++, opt_suggest_diff); 2777 stratum_send(pool, s, strlen(s)); 2778 } 2779 out: 2780 json_decref(val); 2781 return ret; 2782 } 2783 2784 static int recv_byte(int sockd) 2785 { 2786 char c; 2787 2788 if (recv(sockd, &c, 1, 0) != -1) 2789 return c; 2790 2791 return -1; 2792 } 2793 2794 static bool http_negotiate(struct pool *pool, int sockd, bool http0) 2795 { 2796 char buf[1024]; 2797 int i, len; 2798 2799 if (http0) { 2800 snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.0\r\n\r\n", 2801 pool->sockaddr_url, pool->stratum_port); 2802 } else { 2803 snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.1\r\nHost: %s:%s\r\n\r\n", 2804 pool->sockaddr_url, pool->stratum_port, pool->sockaddr_url, 2805 pool->stratum_port); 2806 } 2807 applog(LOG_DEBUG, "Sending proxy %s:%s - %s", 2808 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf); 2809 send(sockd, buf, strlen(buf), 0); 2810 len = recv(sockd, buf, 12, 0); 2811 if (len <= 0) { 2812 applog(LOG_WARNING, "Couldn't read from proxy %s:%s after sending CONNECT", 2813 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); 2814 return false; 2815 } 2816 buf[len] = '\0'; 2817 applog(LOG_DEBUG, "Received from proxy %s:%s - %s", 2818 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf); 2819 
if (strcmp(buf, "HTTP/1.1 200") && strcmp(buf, "HTTP/1.0 200")) { 2820 applog(LOG_WARNING, "HTTP Error from proxy %s:%s - %s", 2821 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf); 2822 return false; 2823 } 2824 2825 /* Ignore unwanted headers till we get desired response */ 2826 for (i = 0; i < 4; i++) { 2827 buf[i] = recv_byte(sockd); 2828 if (buf[i] == (char)-1) { 2829 applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s", 2830 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); 2831 return false; 2832 } 2833 } 2834 while (strncmp(buf, "\r\n\r\n", 4)) { 2835 for (i = 0; i < 3; i++) 2836 buf[i] = buf[i + 1]; 2837 buf[3] = recv_byte(sockd); 2838 if (buf[3] == (char)-1) { 2839 applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s", 2840 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); 2841 return false; 2842 } 2843 } 2844 2845 applog(LOG_DEBUG, "Success negotiating with %s:%s HTTP proxy", 2846 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); 2847 return true; 2848 } 2849 2850 static bool socks5_negotiate(struct pool *pool, int sockd) 2851 { 2852 unsigned char atyp, uclen; 2853 unsigned short port; 2854 char buf[515]; 2855 int i, len; 2856 2857 buf[0] = 0x05; 2858 buf[1] = 0x01; 2859 buf[2] = 0x00; 2860 applog(LOG_DEBUG, "Attempting to negotiate with %s:%s SOCKS5 proxy", 2861 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); 2862 send(sockd, buf, 3, 0); 2863 if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != buf[2]) { 2864 applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server", 2865 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); 2866 return false; 2867 } 2868 2869 buf[0] = 0x05; 2870 buf[1] = 0x01; 2871 buf[2] = 0x00; 2872 buf[3] = 0x03; 2873 len = (strlen(pool->sockaddr_url)); 2874 if (len > 255) 2875 len = 255; 2876 uclen = len; 2877 buf[4] = (uclen & 0xff); 2878 cg_memcpy(buf + 5, pool->sockaddr_url, len); 2879 port = atoi(pool->stratum_port); 2880 buf[5 + len] = (port >> 8); 2881 buf[6 + len] = (port & 
0xff); 2882 send(sockd, buf, (7 + len), 0); 2883 if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != 0x00) { 2884 applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server", 2885 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); 2886 return false; 2887 } 2888 2889 recv_byte(sockd); 2890 atyp = recv_byte(sockd); 2891 if (atyp == 0x01) { 2892 for (i = 0; i < 4; i++) 2893 recv_byte(sockd); 2894 } else if (atyp == 0x03) { 2895 len = recv_byte(sockd); 2896 for (i = 0; i < len; i++) 2897 recv_byte(sockd); 2898 } else { 2899 applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server", 2900 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); 2901 return false; 2902 } 2903 for (i = 0; i < 2; i++) 2904 recv_byte(sockd); 2905 2906 applog(LOG_DEBUG, "Success negotiating with %s:%s SOCKS5 proxy", 2907 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); 2908 return true; 2909 } 2910 2911 static bool socks4_negotiate(struct pool *pool, int sockd, bool socks4a) 2912 { 2913 unsigned short port; 2914 in_addr_t inp; 2915 char buf[515]; 2916 int i, len; 2917 2918 buf[0] = 0x04; 2919 buf[1] = 0x01; 2920 port = atoi(pool->stratum_port); 2921 buf[2] = port >> 8; 2922 buf[3] = port & 0xff; 2923 sprintf(&buf[8], "CGMINER"); 2924 2925 /* See if we've been given an IP address directly to avoid needing to 2926 * resolve it. 
*/ 2927 inp = inet_addr(pool->sockaddr_url); 2928 inp = ntohl(inp); 2929 if ((int)inp != -1) 2930 socks4a = false; 2931 else { 2932 /* Try to extract the IP address ourselves first */ 2933 struct addrinfo servinfobase, *servinfo, hints; 2934 2935 servinfo = &servinfobase; 2936 memset(&hints, 0, sizeof(struct addrinfo)); 2937 hints.ai_family = AF_INET; /* IPV4 only */ 2938 if (!getaddrinfo(pool->sockaddr_url, NULL, &hints, &servinfo)) { 2939 struct sockaddr_in *saddr_in = (struct sockaddr_in *)servinfo->ai_addr; 2940 2941 inp = ntohl(saddr_in->sin_addr.s_addr); 2942 socks4a = false; 2943 freeaddrinfo(servinfo); 2944 } 2945 } 2946 2947 if (!socks4a) { 2948 if ((int)inp == -1) { 2949 applog(LOG_WARNING, "Invalid IP address specified for socks4 proxy: %s", 2950 pool->sockaddr_url); 2951 return false; 2952 } 2953 buf[4] = (inp >> 24) & 0xFF; 2954 buf[5] = (inp >> 16) & 0xFF; 2955 buf[6] = (inp >> 8) & 0xFF; 2956 buf[7] = (inp >> 0) & 0xFF; 2957 send(sockd, buf, 16, 0); 2958 } else { 2959 /* This appears to not be working but hopefully most will be 2960 * able to resolve IP addresses themselves. 
*/ 2961 buf[4] = 0; 2962 buf[5] = 0; 2963 buf[6] = 0; 2964 buf[7] = 1; 2965 len = strlen(pool->sockaddr_url); 2966 if (len > 255) 2967 len = 255; 2968 cg_memcpy(&buf[16], pool->sockaddr_url, len); 2969 len += 16; 2970 buf[len++] = '\0'; 2971 send(sockd, buf, len, 0); 2972 } 2973 2974 if (recv_byte(sockd) != 0x00 || recv_byte(sockd) != 0x5a) { 2975 applog(LOG_WARNING, "Bad response from %s:%s SOCKS4 server", 2976 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); 2977 return false; 2978 } 2979 2980 for (i = 0; i < 6; i++) 2981 recv_byte(sockd); 2982 2983 return true; 2984 } 2985 2986 static void noblock_socket(SOCKETTYPE fd) 2987 { 2988 #ifndef WIN32 2989 int flags = fcntl(fd, F_GETFL, 0); 2990 2991 fcntl(fd, F_SETFL, O_NONBLOCK | flags); 2992 #else 2993 u_long flags = 1; 2994 2995 ioctlsocket(fd, FIONBIO, &flags); 2996 #endif 2997 } 2998 2999 static void block_socket(SOCKETTYPE fd) 3000 { 3001 #ifndef WIN32 3002 int flags = fcntl(fd, F_GETFL, 0); 3003 3004 fcntl(fd, F_SETFL, flags & ~O_NONBLOCK); 3005 #else 3006 u_long flags = 0; 3007 3008 ioctlsocket(fd, FIONBIO, &flags); 3009 #endif 3010 } 3011 3012 static bool sock_connecting(void) 3013 { 3014 #ifndef WIN32 3015 return errno == EINPROGRESS; 3016 #else 3017 return WSAGetLastError() == WSAEWOULDBLOCK; 3018 #endif 3019 } 3020 static bool setup_stratum_socket(struct pool *pool) 3021 { 3022 struct addrinfo *servinfo, hints, *p; 3023 char *sockaddr_url, *sockaddr_port; 3024 int sockd; 3025 3026 mutex_lock(&pool->stratum_lock); 3027 pool->stratum_active = false; 3028 if (pool->sock) 3029 CLOSESOCKET(pool->sock); 3030 pool->sock = 0; 3031 mutex_unlock(&pool->stratum_lock); 3032 3033 memset(&hints, 0, sizeof(struct addrinfo)); 3034 hints.ai_family = AF_UNSPEC; 3035 hints.ai_socktype = SOCK_STREAM; 3036 3037 if (!pool->rpc_proxy && opt_socks_proxy) { 3038 pool->rpc_proxy = opt_socks_proxy; 3039 extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port); 3040 pool->rpc_proxytype = 
PROXY_SOCKS5; 3041 } 3042 3043 if (pool->rpc_proxy) { 3044 sockaddr_url = pool->sockaddr_proxy_url; 3045 sockaddr_port = pool->sockaddr_proxy_port; 3046 } else { 3047 sockaddr_url = pool->sockaddr_url; 3048 sockaddr_port = pool->stratum_port; 3049 } 3050 if (getaddrinfo(sockaddr_url, sockaddr_port, &hints, &servinfo) != 0) { 3051 if (!pool->probed) { 3052 applog(LOG_WARNING, "Failed to resolve (?wrong URL) %s:%s", 3053 sockaddr_url, sockaddr_port); 3054 pool->probed = true; 3055 } else { 3056 applog(LOG_INFO, "Failed to getaddrinfo for %s:%s", 3057 sockaddr_url, sockaddr_port); 3058 } 3059 return false; 3060 } 3061 3062 for (p = servinfo; p != NULL; p = p->ai_next) { 3063 sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); 3064 if (sockd == -1) { 3065 applog(LOG_DEBUG, "Failed socket"); 3066 continue; 3067 } 3068 3069 /* Iterate non blocking over entries returned by getaddrinfo 3070 * to cope with round robin DNS entries, finding the first one 3071 * we can connect to quickly. 
*/ 3072 noblock_socket(sockd); 3073 if (connect(sockd, p->ai_addr, p->ai_addrlen) == -1) { 3074 struct timeval tv_timeout = {1, 0}; 3075 int selret; 3076 fd_set rw; 3077 3078 if (!sock_connecting()) { 3079 CLOSESOCKET(sockd); 3080 applog(LOG_DEBUG, "Failed sock connect"); 3081 continue; 3082 } 3083 retry: 3084 FD_ZERO(&rw); 3085 FD_SET(sockd, &rw); 3086 selret = select(sockd + 1, NULL, &rw, NULL, &tv_timeout); 3087 if (selret > 0 && FD_ISSET(sockd, &rw)) { 3088 socklen_t len; 3089 int err, n; 3090 3091 len = sizeof(err); 3092 n = getsockopt(sockd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); 3093 if (!n && !err) { 3094 applog(LOG_DEBUG, "Succeeded delayed connect"); 3095 block_socket(sockd); 3096 break; 3097 } 3098 } 3099 if (selret < 0 && interrupted()) 3100 goto retry; 3101 CLOSESOCKET(sockd); 3102 applog(LOG_DEBUG, "Select timeout/failed connect"); 3103 continue; 3104 } 3105 applog(LOG_WARNING, "Succeeded immediate connect"); 3106 block_socket(sockd); 3107 3108 break; 3109 } 3110 if (p == NULL) { 3111 applog(LOG_INFO, "Failed to connect to stratum on %s:%s", 3112 sockaddr_url, sockaddr_port); 3113 freeaddrinfo(servinfo); 3114 return false; 3115 } 3116 freeaddrinfo(servinfo); 3117 3118 if (pool->rpc_proxy) { 3119 switch (pool->rpc_proxytype) { 3120 case PROXY_HTTP_1_0: 3121 if (!http_negotiate(pool, sockd, true)) 3122 return false; 3123 break; 3124 case PROXY_HTTP: 3125 if (!http_negotiate(pool, sockd, false)) 3126 return false; 3127 break; 3128 case PROXY_SOCKS5: 3129 case PROXY_SOCKS5H: 3130 if (!socks5_negotiate(pool, sockd)) 3131 return false; 3132 break; 3133 case PROXY_SOCKS4: 3134 if (!socks4_negotiate(pool, sockd, false)) 3135 return false; 3136 break; 3137 case PROXY_SOCKS4A: 3138 if (!socks4_negotiate(pool, sockd, true)) 3139 return false; 3140 break; 3141 default: 3142 applog(LOG_WARNING, "Unsupported proxy type for %s:%s", 3143 pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); 3144 return false; 3145 break; 3146 } 3147 } 3148 3149 if 
(!pool->sockbuf) { 3150 pool->sockbuf = cgcalloc(RBUFSIZE, 1); 3151 pool->sockbuf_size = RBUFSIZE; 3152 } 3153 3154 pool->sock = sockd; 3155 keep_sockalive(sockd); 3156 return true; 3157 } 3158 3159 static char *get_sessionid(json_t *val) 3160 { 3161 char *ret = NULL; 3162 json_t *arr_val; 3163 int arrsize, i; 3164 3165 arr_val = json_array_get(val, 0); 3166 if (!arr_val || !json_is_array(arr_val)) 3167 goto out; 3168 arrsize = json_array_size(arr_val); 3169 for (i = 0; i < arrsize; i++) { 3170 json_t *arr = json_array_get(arr_val, i); 3171 char *notify; 3172 3173 if (!arr | !json_is_array(arr)) 3174 break; 3175 notify = __json_array_string(arr, 0); 3176 if (!notify) 3177 continue; 3178 if (!strncasecmp(notify, "mining.notify", 13)) { 3179 ret = json_array_string(arr, 1); 3180 break; 3181 } 3182 } 3183 out: 3184 return ret; 3185 } 3186 3187 void suspend_stratum(struct pool *pool) 3188 { 3189 applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no); 3190 3191 mutex_lock(&pool->stratum_lock); 3192 __suspend_stratum(pool); 3193 mutex_unlock(&pool->stratum_lock); 3194 } 3195 3196 bool initiate_stratum(struct pool *pool) 3197 { 3198 bool ret = false, recvd = false, noresume = false, sockd = false; 3199 char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid, *tmp; 3200 json_t *val = NULL, *res_val, *err_val; 3201 json_error_t err; 3202 int n2size; 3203 3204 resend: 3205 if (!setup_stratum_socket(pool)) { 3206 sockd = false; 3207 goto out; 3208 } 3209 3210 sockd = true; 3211 3212 if (recvd) { 3213 /* Get rid of any crap lying around if we're resending */ 3214 clear_sock(pool); 3215 } 3216 3217 /* Attempt to configure stratum protocol feature set first. 
*/ 3218 if (!configure_stratum_mining(pool)) 3219 goto out; 3220 3221 if (recvd) { 3222 sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++); 3223 } else { 3224 if (pool->sessionid) 3225 sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION""STRATUM_USER_AGENT"\", \"%s\"]}", swork_id++, pool->sessionid); 3226 else 3227 sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION""STRATUM_USER_AGENT"\"]}", swork_id++); 3228 } 3229 3230 if (__stratum_send(pool, s, strlen(s)) != SEND_OK) { 3231 applog(LOG_DEBUG, "Failed to send s in initiate_stratum"); 3232 goto out; 3233 } 3234 3235 if (!socket_full(pool, DEFAULT_SOCKWAIT)) { 3236 applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum"); 3237 goto out; 3238 } 3239 rereceive: 3240 sret = recv_line(pool); 3241 if (!sret) 3242 goto out; 3243 3244 recvd = true; 3245 3246 val = JSON_LOADS(sret, &err); 3247 if (!val) { 3248 applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); 3249 goto out; 3250 } 3251 3252 res_val = json_object_get(val, "result"); 3253 err_val = json_object_get(val, "error"); 3254 3255 if (!res_val) { 3256 /* Check for a method just in case */ 3257 json_t *method_val = json_object_get(val, "method"); 3258 3259 if (method_val && parse_method(pool, sret)) { 3260 free(sret); 3261 sret = NULL; 3262 goto rereceive; 3263 } 3264 } 3265 3266 if (!res_val || json_is_null(res_val) || 3267 (err_val && !json_is_null(err_val))) { 3268 char *ss; 3269 3270 if (err_val) 3271 ss = json_dumps(err_val, JSON_INDENT(3)); 3272 else 3273 ss = strdup("(unknown reason)"); 3274 3275 applog(LOG_INFO, "JSON-RPC decode of message %s failed: %s", sret, ss); 3276 3277 free(ss); 3278 3279 goto out; 3280 } 3281 3282 sessionid = get_sessionid(res_val); 3283 if (!sessionid) 3284 applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum"); 3285 nonce1 = json_array_string(res_val, 1); 3286 if 
(!valid_hex(nonce1)) { 3287 applog(LOG_INFO, "Failed to get valid nonce1 in initiate_stratum"); 3288 free(sessionid); 3289 free(nonce1); 3290 goto out; 3291 } 3292 n2size = json_integer_value(json_array_get(res_val, 2)); 3293 if (n2size < 2 || n2size > 16) { 3294 applog(LOG_INFO, "Failed to get valid n2size in initiate_stratum"); 3295 free(sessionid); 3296 free(nonce1); 3297 goto out; 3298 } 3299 3300 if (sessionid && pool->sessionid && !strcmp(sessionid, pool->sessionid)) { 3301 applog(LOG_NOTICE, "Pool %d successfully negotiated resume with the same session ID", 3302 pool->pool_no); 3303 } 3304 3305 cg_wlock(&pool->data_lock); 3306 tmp = pool->sessionid; 3307 pool->sessionid = sessionid; 3308 free(tmp); 3309 tmp = pool->nonce1; 3310 pool->nonce1 = nonce1; 3311 free(tmp); 3312 pool->n1_len = strlen(nonce1) / 2; 3313 free(pool->nonce1bin); 3314 pool->nonce1bin = cgcalloc(pool->n1_len, 1); 3315 hex2bin(pool->nonce1bin, pool->nonce1, pool->n1_len); 3316 pool->n2size = n2size; 3317 cg_wunlock(&pool->data_lock); 3318 3319 if (sessionid) 3320 applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid); 3321 3322 ret = true; 3323 out: 3324 if (ret) { 3325 if (!pool->stratum_url) 3326 pool->stratum_url = pool->sockaddr_url; 3327 pool->stratum_active = true; 3328 pool->next_diff = pool->diff_after = 0; 3329 pool->sdiff = 1; 3330 if (opt_protocol) { 3331 applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d", 3332 pool->pool_no, pool->nonce1, pool->n2size); 3333 } 3334 } else { 3335 if (recvd && !noresume) { 3336 /* Reset the sessionid used for stratum resuming in case the pool 3337 * does not support it, or does not know how to respond to the 3338 * presence of the sessionid parameter. 
*/ 3339 cg_wlock(&pool->data_lock); 3340 free(pool->sessionid); 3341 free(pool->nonce1); 3342 pool->sessionid = pool->nonce1 = NULL; 3343 cg_wunlock(&pool->data_lock); 3344 3345 applog(LOG_DEBUG, "Failed to resume stratum, trying afresh"); 3346 noresume = true; 3347 json_decref(val); 3348 goto resend; 3349 } 3350 applog(LOG_DEBUG, "Initiate stratum failed"); 3351 if (sockd) 3352 suspend_stratum(pool); 3353 } 3354 3355 json_decref(val); 3356 free(sret); 3357 return ret; 3358 } 3359 3360 bool restart_stratum(struct pool *pool) 3361 { 3362 bool ret = false; 3363 3364 if (pool->stratum_active) 3365 suspend_stratum(pool); 3366 if (!initiate_stratum(pool)) 3367 goto out; 3368 if (!auth_stratum(pool)) 3369 goto out; 3370 ret = true; 3371 out: 3372 if (!ret) 3373 pool_died(pool); 3374 else 3375 stratum_resumed(pool); 3376 return ret; 3377 } 3378 3379 void dev_error(struct cgpu_info *dev, enum dev_reason reason) 3380 { 3381 dev->device_last_not_well = time(NULL); 3382 dev->device_not_well_reason = reason; 3383 3384 switch (reason) { 3385 case REASON_THREAD_FAIL_INIT: 3386 dev->thread_fail_init_count++; 3387 break; 3388 case REASON_THREAD_ZERO_HASH: 3389 dev->thread_zero_hash_count++; 3390 break; 3391 case REASON_THREAD_FAIL_QUEUE: 3392 dev->thread_fail_queue_count++; 3393 break; 3394 case REASON_DEV_SICK_IDLE_60: 3395 dev->dev_sick_idle_60_count++; 3396 break; 3397 case REASON_DEV_DEAD_IDLE_600: 3398 dev->dev_dead_idle_600_count++; 3399 break; 3400 case REASON_DEV_NOSTART: 3401 dev->dev_nostart_count++; 3402 break; 3403 case REASON_DEV_OVER_HEAT: 3404 dev->dev_over_heat_count++; 3405 break; 3406 case REASON_DEV_THERMAL_CUTOFF: 3407 dev->dev_thermal_cutoff_count++; 3408 break; 3409 case REASON_DEV_COMMS_ERROR: 3410 dev->dev_comms_error_count++; 3411 break; 3412 case REASON_DEV_THROTTLE: 3413 dev->dev_throttle_count++; 3414 break; 3415 } 3416 } 3417 3418 /* Realloc an existing string to fit an extra string s, appending s to it. 
*/ 3419 void *realloc_strcat(char *ptr, char *s) 3420 { 3421 size_t old = 0, len = strlen(s); 3422 char *ret; 3423 3424 if (!len) 3425 return ptr; 3426 if (ptr) 3427 old = strlen(ptr); 3428 3429 len += old + 1; 3430 ret = cgmalloc(len); 3431 3432 if (ptr) { 3433 sprintf(ret, "%s%s", ptr, s); 3434 free(ptr); 3435 } else 3436 sprintf(ret, "%s", s); 3437 return ret; 3438 } 3439 3440 /* Make a text readable version of a string using 0xNN for < ' ' or > '~' 3441 * Including 0x00 at the end 3442 * You must free the result yourself */ 3443 void *str_text(char *ptr) 3444 { 3445 unsigned char *uptr; 3446 char *ret, *txt; 3447 3448 if (ptr == NULL) { 3449 ret = strdup("(null)"); 3450 3451 if (unlikely(!ret)) 3452 quithere(1, "Failed to malloc null"); 3453 } 3454 3455 uptr = (unsigned char *)ptr; 3456 3457 ret = txt = cgmalloc(strlen(ptr) * 4 + 5); // Guaranteed >= needed 3458 3459 do { 3460 if (*uptr < ' ' || *uptr > '~') { 3461 sprintf(txt, "0x%02x", *uptr); 3462 txt += 4; 3463 } else 3464 *(txt++) = *uptr; 3465 } while (*(uptr++)); 3466 3467 *txt = '\0'; 3468 3469 return ret; 3470 } 3471 3472 void RenameThread(const char* name) 3473 { 3474 char buf[16]; 3475 3476 snprintf(buf, sizeof(buf), "cg@%s", name); 3477 #if defined(PR_SET_NAME) 3478 // Only the first 15 characters are used (16 - NUL terminator) 3479 prctl(PR_SET_NAME, buf, 0, 0, 0); 3480 #elif (defined(__FreeBSD__) || defined(__OpenBSD__)) 3481 pthread_set_name_np(pthread_self(), buf); 3482 #elif defined(MAC_OSX) 3483 pthread_setname_np(buf); 3484 #else 3485 // Prevent warnings 3486 (void)buf; 3487 #endif 3488 } 3489 3490 /* cgminer specific wrappers for true unnamed semaphore usage on platforms 3491 * that support them and for apple which does not. We use a single byte across 3492 * a pipe to emulate semaphore behaviour there. 
*/ 3493 #ifdef __APPLE__ 3494 void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, const int line) 3495 { 3496 int flags, fd, i; 3497 3498 if (pipe(cgsem->pipefd) == -1) 3499 quitfrom(1, file, func, line, "Failed pipe errno=%d", errno); 3500 3501 /* Make the pipes FD_CLOEXEC to allow them to close should we call 3502 * execv on restart. */ 3503 for (i = 0; i < 2; i++) { 3504 fd = cgsem->pipefd[i]; 3505 flags = fcntl(fd, F_GETFD, 0); 3506 flags |= FD_CLOEXEC; 3507 if (fcntl(fd, F_SETFD, flags) == -1) 3508 quitfrom(1, file, func, line, "Failed to fcntl errno=%d", errno); 3509 } 3510 } 3511 3512 void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) 3513 { 3514 const char buf = 1; 3515 int ret; 3516 3517 retry: 3518 ret = write(cgsem->pipefd[1], &buf, 1); 3519 if (unlikely(ret == 0)) 3520 applog(LOG_WARNING, "Failed to write errno=%d" IN_FMT_FFL, errno, file, func, line); 3521 else if (unlikely(ret < 0 && interrupted)) 3522 goto retry; 3523 } 3524 3525 void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) 3526 { 3527 char buf; 3528 int ret; 3529 retry: 3530 ret = read(cgsem->pipefd[0], &buf, 1); 3531 if (unlikely(ret == 0)) 3532 applog(LOG_WARNING, "Failed to read errno=%d" IN_FMT_FFL, errno, file, func, line); 3533 else if (unlikely(ret < 0 && interrupted)) 3534 goto retry; 3535 } 3536 3537 void cgsem_destroy(cgsem_t *cgsem) 3538 { 3539 close(cgsem->pipefd[1]); 3540 close(cgsem->pipefd[0]); 3541 } 3542 3543 /* This is similar to sem_timedwait but takes a millisecond value */ 3544 int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) 3545 { 3546 struct timeval timeout; 3547 int ret, fd; 3548 fd_set rd; 3549 char buf; 3550 3551 retry: 3552 fd = cgsem->pipefd[0]; 3553 FD_ZERO(&rd); 3554 FD_SET(fd, &rd); 3555 ms_to_timeval(&timeout, ms); 3556 ret = select(fd + 1, &rd, NULL, NULL, &timeout); 3557 3558 if (ret > 0) { 3559 ret = read(fd, &buf, 1); 3560 return 
0; 3561 } 3562 if (likely(!ret)) 3563 return ETIMEDOUT; 3564 if (interrupted()) 3565 goto retry; 3566 quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); 3567 /* We don't reach here */ 3568 return 0; 3569 } 3570 3571 /* Reset semaphore count back to zero */ 3572 void cgsem_reset(cgsem_t *cgsem) 3573 { 3574 int ret, fd; 3575 fd_set rd; 3576 char buf; 3577 3578 fd = cgsem->pipefd[0]; 3579 FD_ZERO(&rd); 3580 FD_SET(fd, &rd); 3581 do { 3582 struct timeval timeout = {0, 0}; 3583 3584 ret = select(fd + 1, &rd, NULL, NULL, &timeout); 3585 if (ret > 0) 3586 ret = read(fd, &buf, 1); 3587 else if (unlikely(ret < 0 && interrupted())) 3588 ret = 1; 3589 } while (ret > 0); 3590 } 3591 #else 3592 void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, const int line) 3593 { 3594 int ret; 3595 if ((ret = sem_init(cgsem, 0, 0))) 3596 quitfrom(1, file, func, line, "Failed to sem_init ret=%d errno=%d", ret, errno); 3597 } 3598 3599 void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) 3600 { 3601 if (unlikely(sem_post(cgsem))) 3602 quitfrom(1, file, func, line, "Failed to sem_post errno=%d cgsem=0x%p", errno, cgsem); 3603 } 3604 3605 void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) 3606 { 3607 retry: 3608 if (unlikely(sem_wait(cgsem))) { 3609 if (interrupted()) 3610 goto retry; 3611 quitfrom(1, file, func, line, "Failed to sem_wait errno=%d cgsem=0x%p", errno, cgsem); 3612 } 3613 } 3614 3615 int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) 3616 { 3617 struct timespec abs_timeout, tdiff; 3618 int ret; 3619 3620 cgcond_time(&abs_timeout); 3621 ms_to_timespec(&tdiff, ms); 3622 timeraddspec(&abs_timeout, &tdiff); 3623 retry: 3624 ret = sem_timedwait(cgsem, &abs_timeout); 3625 3626 if (ret) { 3627 if (likely(sock_timeout())) 3628 return ETIMEDOUT; 3629 if (interrupted()) 3630 goto retry; 3631 quitfrom(1, file, func, line, 
"Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); 3632 } 3633 return 0; 3634 } 3635 3636 void cgsem_reset(cgsem_t *cgsem) 3637 { 3638 int ret; 3639 3640 do { 3641 ret = sem_trywait(cgsem); 3642 if (unlikely(ret < 0 && interrupted())) 3643 ret = 0; 3644 } while (!ret); 3645 } 3646 3647 void cgsem_destroy(cgsem_t *cgsem) 3648 { 3649 sem_destroy(cgsem); 3650 } 3651 #endif 3652 3653 /* Provide a completion_timeout helper function for unreliable functions that 3654 * may die due to driver issues etc that time out if the function fails and 3655 * can then reliably return. */ 3656 struct cg_completion { 3657 cgsem_t cgsem; 3658 void (*fn)(void *fnarg); 3659 void *fnarg; 3660 }; 3661 3662 void *completion_thread(void *arg) 3663 { 3664 struct cg_completion *cgc = (struct cg_completion *)arg; 3665 3666 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); 3667 cgc->fn(cgc->fnarg); 3668 cgsem_post(&cgc->cgsem); 3669 3670 return NULL; 3671 } 3672 3673 bool cg_completion_timeout(void *fn, void *fnarg, int timeout) 3674 { 3675 struct cg_completion *cgc; 3676 pthread_t pthread; 3677 bool ret = false; 3678 3679 cgc = cgmalloc(sizeof(struct cg_completion)); 3680 cgsem_init(&cgc->cgsem); 3681 cgc->fn = fn; 3682 cgc->fnarg = fnarg; 3683 3684 pthread_create(&pthread, NULL, completion_thread, (void *)cgc); 3685 3686 ret = cgsem_mswait(&cgc->cgsem, timeout); 3687 if (!ret) { 3688 pthread_join(pthread, NULL); 3689 free(cgc); 3690 } else 3691 pthread_cancel(pthread); 3692 return !ret; 3693 } 3694 3695 void _cg_memcpy(void *dest, const void *src, unsigned int n, const char *file, const char *func, const int line) 3696 { 3697 if (unlikely(n < 1 || n > (1ul << 31))) { 3698 applog(LOG_ERR, "ERR: Asked to memcpy %u bytes from %s %s():%d", 3699 n, file, func, line); 3700 return; 3701 } 3702 if (unlikely(!dest)) { 3703 applog(LOG_ERR, "ERR: Asked to memcpy %u bytes to NULL from %s %s():%d", 3704 n, file, func, line); 3705 return; 3706 } 3707 if (unlikely(!src)) { 3708 
applog(LOG_ERR, "ERR: Asked to memcpy %u bytes from NULL from %s %s():%d", 3709 n, file, func, line); 3710 return; 3711 } 3712 memcpy(dest, src, n); 3713 }