crash_generation_server.cc
1 // Copyright 2008 Google LLC 2 // 3 // Redistribution and use in source and binary forms, with or without 4 // modification, are permitted provided that the following conditions are 5 // met: 6 // 7 // * Redistributions of source code must retain the above copyright 8 // notice, this list of conditions and the following disclaimer. 9 // * Redistributions in binary form must reproduce the above 10 // copyright notice, this list of conditions and the following disclaimer 11 // in the documentation and/or other materials provided with the 12 // distribution. 13 // * Neither the name of Google LLC nor the names of its 14 // contributors may be used to endorse or promote products derived from 15 // this software without specific prior written permission. 16 // 17 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifdef HAVE_CONFIG_H
#include <config.h>  // Must come first
#endif

#include "client/windows/crash_generation/crash_generation_server.h"
#include <windows.h>
#include <cassert>
#include <list>
#include "client/windows/common/auto_critical_section.h"
#include "common/scoped_ptr.h"

#include "client/windows/crash_generation/client_info.h"

namespace google_breakpad {

// Output buffer size for the named pipe, in bytes.
static const size_t kOutBufferSize = 64;

// Input buffer size for the named pipe, in bytes.
static const size_t kInBufferSize = 64;

// Access flags for the client on the dump request event.
static const DWORD kDumpRequestEventAccess = EVENT_MODIFY_STATE;

// Access flags for the client on the dump generated event.
static const DWORD kDumpGeneratedEventAccess = EVENT_MODIFY_STATE |
                                               SYNCHRONIZE;

// Access flags for the client on the server-alive mutex.
static const DWORD kMutexAccess = SYNCHRONIZE;

// Attribute flags for the pipe. FILE_FLAG_FIRST_PIPE_INSTANCE guarantees
// this server is the only owner of the pipe name; FILE_FLAG_OVERLAPPED is
// required for the asynchronous I/O used by the state machine below.
static const DWORD kPipeAttr = FILE_FLAG_FIRST_PIPE_INSTANCE |
                               PIPE_ACCESS_DUPLEX |
                               FILE_FLAG_OVERLAPPED;

// Mode for the pipe: message-oriented in both directions.
static const DWORD kPipeMode = PIPE_TYPE_MESSAGE |
                               PIPE_READMODE_MESSAGE |
                               PIPE_WAIT;

// For pipe I/O, execute the callback in the wait thread itself,
// since the callback does very little work. The callback executes
// the code for one of the states of the server state machine and
// the code for all of the states perform async I/O and hence
// finish very quickly.
static const ULONG kPipeIOThreadFlags = WT_EXECUTEINWAITTHREAD;

// Dump request threads will, most likely, generate dumps. That may
// take some time to finish, so specify WT_EXECUTELONGFUNCTION flag.
static const ULONG kDumpRequestThreadFlags = WT_EXECUTEINWAITTHREAD |
                                             WT_EXECUTELONGFUNCTION;

// Returns true if the message is a well-formed client request: an upload
// request needs only the right tag; a registration request must also carry
// a non-zero client id and non-NULL pointers (which refer to the client's
// address space) for the thread id, exception info and assert info.
static bool IsClientRequestValid(const ProtocolMessage& msg) {
  return msg.tag == MESSAGE_TAG_UPLOAD_REQUEST ||
         (msg.tag == MESSAGE_TAG_REGISTRATION_REQUEST &&
          msg.id != 0 &&
          msg.thread_id != NULL &&
          msg.exception_pointers != NULL &&
          msg.assert_info != NULL);
}

#ifndef NDEBUG
// Debug-only helper used in asserts after GetOverlappedResult calls.
// We should never get an I/O incomplete since we should not execute this
// unless the operation has finished and the overlapped event is signaled. If
// we do get INCOMPLETE, we have a bug in our code.
static bool CheckForIOIncomplete(bool success) {
  return success ? false : (GetLastError() == ERROR_IO_INCOMPLETE);
}
#endif

// Constructs the server. Only the critical section is initialized here;
// the pipe, events and thread-pool waits are created in Start().
CrashGenerationServer::CrashGenerationServer(
    const std::wstring& pipe_name,
    SECURITY_ATTRIBUTES* pipe_sec_attrs,
    OnClientConnectedCallback connect_callback,
    void* connect_context,
    OnClientDumpRequestCallback dump_callback,
    void* dump_context,
    OnClientExitedCallback exit_callback,
    void* exit_context,
    OnClientUploadRequestCallback upload_request_callback,
    void* upload_context,
    bool generate_dumps,
    const std::wstring* dump_path)
    : pipe_name_(pipe_name),
      pipe_sec_attrs_(pipe_sec_attrs),
      pipe_(NULL),
      pipe_wait_handle_(NULL),
      server_alive_handle_(NULL),
      connect_callback_(connect_callback),
      connect_context_(connect_context),
      dump_callback_(dump_callback),
      dump_context_(dump_context),
      exit_callback_(exit_callback),
      exit_context_(exit_context),
      upload_request_callback_(upload_request_callback),
      upload_context_(upload_context),
      generate_dumps_(generate_dumps),
      pre_fetch_custom_info_(true),
      dump_path_(dump_path ? *dump_path : L""),  // Empty if dumps not wanted.
      server_state_(IPC_SERVER_STATE_UNINITIALIZED),
      shutting_down_(false),
      overlapped_(),
      client_info_(NULL) {
  InitializeCriticalSection(&sync_);
}

// This should never be called from the OnPipeConnected callback.
// Otherwise the UnregisterWaitEx call below will cause a deadlock.
CrashGenerationServer::~CrashGenerationServer() {
  // New scope to release the lock automatically.
  {
    // Make sure no clients are added or removed beyond this point.
    // Before adding or removing any clients, the critical section
    // must be entered and the shutting_down_ flag checked. The
    // critical section is then exited only after the clients_ list
    // modifications are done and the list is in a consistent state.
    AutoCriticalSection lock(&sync_);

    // Indicate to existing threads that server is shutting down.
    shutting_down_ = true;
  }
  // No one will modify the clients_ list beyond this point -
  // not even from another thread.

  // Even if there are no current worker threads running, it is possible that
  // an I/O request is pending on the pipe right now but not yet done.
  // In fact, it's very likely this is the case unless we are in an ERROR
  // state. If we don't wait for the pending I/O to be done, then when the I/O
  // completes, it may write to invalid memory. AppVerifier will flag this
  // problem too. So we disconnect from the pipe and then wait for the server
  // to get into error state so that the pending I/O will fail and get
  // cleared.
  DisconnectNamedPipe(pipe_);
  // Bounded wait (100 * 10ms = 1s max) for the state machine to observe the
  // failed I/O and reach the ERROR state.
  int num_tries = 100;
  while (num_tries-- && server_state_ != IPC_SERVER_STATE_ERROR) {
    Sleep(10);
  }

  // Unregister wait on the pipe.
  if (pipe_wait_handle_) {
    // Wait for already executing callbacks to finish.
    UnregisterWaitEx(pipe_wait_handle_, INVALID_HANDLE_VALUE);
  }

  // Close the pipe to avoid further client connections.
  if (pipe_) {
    CloseHandle(pipe_);
  }

  // Request all ClientInfo objects to unregister all waits.
  // No need to enter the critical section because no one is allowed to modify
  // the clients_ list once the shutting_down_ flag is set.
  std::list<ClientInfo*>::iterator iter;
  for (iter = clients_.begin(); iter != clients_.end(); ++iter) {
    ClientInfo* client_info = *iter;
    // Unregister waits. Wait for already executing callbacks to finish.
    // Unregister the client process exit wait first and only then unregister
    // the dump request wait. The reason is that the OnClientExit callback
    // also unregisters the dump request wait and such a race (doing the same
    // unregistration from two threads) is undesirable.
    client_info->UnregisterProcessExitWait(true);
    client_info->UnregisterDumpRequestWaitAndBlockUntilNoPending();

    // Destroying the ClientInfo here is safe because all wait operations for
    // this ClientInfo were unregistered and no pending or running callbacks
    // for this ClientInfo can possible exist (block_until_no_pending option
    // was used).
    delete client_info;
  }

  if (server_alive_handle_) {
    // Release the mutex before closing the handle. Clients synchronize on
    // this mutex to detect whether the server is alive.
    // NOTE(review): the original comment read "so that clients requesting
    // dumps wait for a long time for the server to generate a dump", which
    // appears to be missing a negation ("don't wait") — confirm intent.
    ReleaseMutex(server_alive_handle_);
    CloseHandle(server_alive_handle_);
  }

  if (overlapped_.hEvent) {
    CloseHandle(overlapped_.hEvent);
  }

  DeleteCriticalSection(&sync_);
}

// Creates the server-alive mutex, the overlapped event, the thread-pool wait
// on that event and the named pipe, then kicks the state machine by signaling
// the event. Returns false on any failure; may only be called once, from the
// UNINITIALIZED state.
bool CrashGenerationServer::Start() {
  if (server_state_ != IPC_SERVER_STATE_UNINITIALIZED) {
    return false;
  }

  server_state_ = IPC_SERVER_STATE_INITIAL;

  // Owned (initially held) mutex: clients can tell the server is alive as
  // long as this mutex is held.
  server_alive_handle_ = CreateMutex(NULL, TRUE, NULL);
  if (!server_alive_handle_) {
    return false;
  }

  // Event to signal the client connection and pipe reads and writes.
  overlapped_.hEvent = CreateEvent(NULL,   // Security descriptor.
                                   TRUE,   // Manual reset.
                                   FALSE,  // Initially nonsignaled.
                                   NULL);  // Name.
  if (!overlapped_.hEvent) {
    return false;
  }

  // Register a callback with the thread pool for the client connection.
  if (!RegisterWaitForSingleObject(&pipe_wait_handle_,
                                   overlapped_.hEvent,
                                   OnPipeConnected,
                                   this,
                                   INFINITE,
                                   kPipeIOThreadFlags)) {
    return false;
  }

  pipe_ = CreateNamedPipe(pipe_name_.c_str(),
                          kPipeAttr,
                          kPipeMode,
                          1,                // Single instance only.
                          kOutBufferSize,
                          kInBufferSize,
                          0,
                          pipe_sec_attrs_);
  if (pipe_ == INVALID_HANDLE_VALUE) {
    return false;
  }

  // Kick-start the state machine. This will initiate an asynchronous wait
  // for client connections.
  if (!SetEvent(overlapped_.hEvent)) {
    server_state_ = IPC_SERVER_STATE_ERROR;
    return false;
  }

  // If we are in error state, it's because we failed to start listening.
  return true;
}

// If the server thread serving clients ever gets into the
// ERROR state, reset the event, close the pipe and remain
// in the error state forever. Error state means something
// that we didn't account for has happened, and it's dangerous
// to do anything unknowingly.
void CrashGenerationServer::HandleErrorState() {
  assert(server_state_ == IPC_SERVER_STATE_ERROR);

  // If the server is shutting down anyway, don't clean up
  // here since shut down process will clean up.
  if (shutting_down_) {
    return;
  }

  if (pipe_wait_handle_) {
    UnregisterWait(pipe_wait_handle_);
    pipe_wait_handle_ = NULL;
  }

  if (pipe_) {
    CloseHandle(pipe_);
    pipe_ = NULL;
  }

  if (overlapped_.hEvent) {
    CloseHandle(overlapped_.hEvent);
    overlapped_.hEvent = NULL;
  }
}
If the connection 300 // finishes synchronously, directly go into the CONNECTED state; 301 // otherwise go into the CONNECTING state. For any problems, go 302 // into the ERROR state. 303 void CrashGenerationServer::HandleInitialState() { 304 assert(server_state_ == IPC_SERVER_STATE_INITIAL); 305 306 if (!ResetEvent(overlapped_.hEvent)) { 307 EnterErrorState(); 308 return; 309 } 310 311 bool success = ConnectNamedPipe(pipe_, &overlapped_) != FALSE; 312 DWORD error_code = success ? ERROR_SUCCESS : GetLastError(); 313 314 // From MSDN, it is not clear that when ConnectNamedPipe is used 315 // in an overlapped mode, will it ever return non-zero value, and 316 // if so, in what cases. 317 assert(!success); 318 319 switch (error_code) { 320 case ERROR_IO_PENDING: 321 EnterStateWhenSignaled(IPC_SERVER_STATE_CONNECTING); 322 break; 323 324 case ERROR_PIPE_CONNECTED: 325 EnterStateImmediately(IPC_SERVER_STATE_CONNECTED); 326 break; 327 328 default: 329 EnterErrorState(); 330 break; 331 } 332 } 333 334 // When the server thread serving the clients is in the CONNECTING state, 335 // try to get the result of the asynchronous connection request using 336 // the OVERLAPPED object. If the result indicates the connection is done, 337 // go into the CONNECTED state. If the result indicates I/O is still 338 // INCOMPLETE, remain in the CONNECTING state. For any problems, 339 // go into the DISCONNECTING state. 340 void CrashGenerationServer::HandleConnectingState() { 341 assert(server_state_ == IPC_SERVER_STATE_CONNECTING); 342 343 DWORD bytes_count = 0; 344 bool success = GetOverlappedResult(pipe_, 345 &overlapped_, 346 &bytes_count, 347 FALSE) != FALSE; 348 DWORD error_code = success ? 
ERROR_SUCCESS : GetLastError(); 349 350 if (success) { 351 EnterStateImmediately(IPC_SERVER_STATE_CONNECTED); 352 } else if (error_code != ERROR_IO_INCOMPLETE) { 353 EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING); 354 } else { 355 // remain in CONNECTING state 356 } 357 } 358 359 // When the server thread serving the clients is in the CONNECTED state, 360 // try to issue an asynchronous read from the pipe. If read completes 361 // synchronously or if I/O is pending then go into the READING state. 362 // For any problems, go into the DISCONNECTING state. 363 void CrashGenerationServer::HandleConnectedState() { 364 assert(server_state_ == IPC_SERVER_STATE_CONNECTED); 365 366 DWORD bytes_count = 0; 367 memset(&msg_, 0, sizeof(msg_)); 368 bool success = ReadFile(pipe_, 369 &msg_, 370 sizeof(msg_), 371 &bytes_count, 372 &overlapped_) != FALSE; 373 DWORD error_code = success ? ERROR_SUCCESS : GetLastError(); 374 375 // Note that the asynchronous read issued above can finish before the 376 // code below executes. But, it is okay to change state after issuing 377 // the asynchronous read. This is because even if the asynchronous read 378 // is done, the callback for it would not be executed until the current 379 // thread finishes its execution. 380 if (success || error_code == ERROR_IO_PENDING) { 381 EnterStateWhenSignaled(IPC_SERVER_STATE_READING); 382 } else { 383 EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING); 384 } 385 } 386 387 // When the server thread serving the clients is in the READING state, 388 // try to get the result of the async read. If async read is done, 389 // go into the READ_DONE state. For any problems, go into the 390 // DISCONNECTING state. 
void CrashGenerationServer::HandleReadingState() {
  assert(server_state_ == IPC_SERVER_STATE_READING);

  DWORD bytes_count = 0;
  bool success = GetOverlappedResult(pipe_,
                                     &overlapped_,
                                     &bytes_count,
                                     FALSE) != FALSE;
  // A short read (anything other than a whole ProtocolMessage) is treated
  // as a failure and drops the connection.
  if (success && bytes_count == sizeof(ProtocolMessage)) {
    EnterStateImmediately(IPC_SERVER_STATE_READ_DONE);
    return;
  }

  assert(!CheckForIOIncomplete(success));
  EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
}

// When the server thread serving the client is in the READ_DONE state,
// validate the client's request message, register the client by
// creating appropriate objects and prepare the response. Then try to
// write the response to the pipe asynchronously. If that succeeds,
// go into the WRITING state. For any problems, go into the DISCONNECTING
// state.
void CrashGenerationServer::HandleReadDoneState() {
  assert(server_state_ == IPC_SERVER_STATE_READ_DONE);

  if (!IsClientRequestValid(msg_)) {
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
    return;
  }

  // Upload requests carry no registration payload; invoke the callback
  // and disconnect without creating a ClientInfo.
  if (msg_.tag == MESSAGE_TAG_UPLOAD_REQUEST) {
    if (upload_request_callback_)
      upload_request_callback_(upload_context_, msg_.id);
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
    return;
  }

  scoped_ptr<ClientInfo> client_info(
      new ClientInfo(this,
                     msg_.id,
                     msg_.dump_type,
                     msg_.thread_id,
                     msg_.exception_pointers,
                     msg_.assert_info,
                     msg_.custom_client_info));

  if (!client_info->Initialize()) {
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
    return;
  }

  // Issues an asynchronous WriteFile call if successful.
  // Iff successful, assigns ownership of the client_info pointer to the server
  // instance, in which case we must be sure not to free it in this function.
  if (!RespondToClient(client_info.get())) {
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
    return;
  }

  // This is only valid as long as it can be found in the clients_ list
  client_info_ = client_info.release();

  // Note that the asynchronous write issued by RespondToClient function
  // can finish before the code below executes. But it is okay to change
  // state after issuing the asynchronous write. This is because even if
  // the asynchronous write is done, the callback for it would not be
  // executed until the current thread finishes its execution.
  EnterStateWhenSignaled(IPC_SERVER_STATE_WRITING);
}

// When the server thread serving the clients is in the WRITING state,
// try to get the result of the async write. If the async write is done,
// go into the WRITE_DONE state. For any problems, go into the
// DISCONNECTING state.
void CrashGenerationServer::HandleWritingState() {
  assert(server_state_ == IPC_SERVER_STATE_WRITING);

  DWORD bytes_count = 0;
  bool success = GetOverlappedResult(pipe_,
                                     &overlapped_,
                                     &bytes_count,
                                     FALSE) != FALSE;
  if (success) {
    EnterStateImmediately(IPC_SERVER_STATE_WRITE_DONE);
    return;
  }

  assert(!CheckForIOIncomplete(success));
  EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
}

// When the server thread serving the clients is in the WRITE_DONE state,
// try to issue an async read on the pipe. If the read completes synchronously
// or if I/O is still pending then go into the READING_ACK state. For any
// issues, go into the DISCONNECTING state.
void CrashGenerationServer::HandleWriteDoneState() {
  assert(server_state_ == IPC_SERVER_STATE_WRITE_DONE);

  DWORD bytes_count = 0;
  bool success = ReadFile(pipe_,
                          &msg_,
                          sizeof(msg_),
                          &bytes_count,
                          &overlapped_) != FALSE;
  DWORD error_code = success ? ERROR_SUCCESS : GetLastError();

  if (success) {
    EnterStateImmediately(IPC_SERVER_STATE_READING_ACK);
  } else if (error_code == ERROR_IO_PENDING) {
    EnterStateWhenSignaled(IPC_SERVER_STATE_READING_ACK);
  } else {
    EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
  }
}

// When the server thread serving the clients is in the READING_ACK state,
// try to get result of async read. Go into the DISCONNECTING state.
void CrashGenerationServer::HandleReadingAckState() {
  assert(server_state_ == IPC_SERVER_STATE_READING_ACK);

  DWORD bytes_count = 0;
  bool success = GetOverlappedResult(pipe_,
                                     &overlapped_,
                                     &bytes_count,
                                     FALSE) != FALSE;
  if (success) {
    // The connection handshake with the client is now complete; perform
    // the callback.
    if (connect_callback_) {
      // Note that there is only a single copy of the ClientInfo of the
      // currently connected client. However it is being referenced from
      // two different places:
      //  - the client_info_ member
      //  - the clients_ list
      // The lifetime of this ClientInfo depends on the lifetime of the
      // client process - basically it can go away at any time.
      // However, as long as it is referenced by the clients_ list it
      // is guaranteed to be valid. Enter the critical section and check
      // to see whether the client_info_ can be found in the list.
      // If found, execute the callback and only then leave the critical
      // section.
      AutoCriticalSection lock(&sync_);

      bool client_is_still_alive = false;
      std::list<ClientInfo*>::iterator iter;
      for (iter = clients_.begin(); iter != clients_.end(); ++iter) {
        if (client_info_ == *iter) {
          client_is_still_alive = true;
          break;
        }
      }

      if (client_is_still_alive) {
        connect_callback_(connect_context_, client_info_);
      }
    }
  } else {
    assert(!CheckForIOIncomplete(success));
  }

  // Regardless of ack success, this exchange with the client is over.
  EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
}

// When the server thread serving the client is in the DISCONNECTING state,
// disconnect from the pipe and reset the event. If anything fails, go into
// the ERROR state. If it goes well, go into the INITIAL state and set the
// event to start all over again.
void CrashGenerationServer::HandleDisconnectingState() {
  assert(server_state_ == IPC_SERVER_STATE_DISCONNECTING);

  // Done serving the client.
  client_info_ = NULL;

  // Clear the OVERLAPPED bookkeeping fields (but keep hEvent) so the next
  // client connection starts from a clean slate.
  overlapped_.Internal = NULL;
  overlapped_.InternalHigh = NULL;
  overlapped_.Offset = 0;
  overlapped_.OffsetHigh = 0;
  overlapped_.Pointer = NULL;

  if (!ResetEvent(overlapped_.hEvent)) {
    EnterErrorState();
    return;
  }

  if (!DisconnectNamedPipe(pipe_)) {
    EnterErrorState();
    return;
  }

  // If the server is shutting down do not connect to the
  // next client.
  if (shutting_down_) {
    return;
  }

  EnterStateImmediately(IPC_SERVER_STATE_INITIAL);
}

// Enters the terminal ERROR state. The event is signaled so that the wait
// callback runs once more and HandleErrorState can perform cleanup.
void CrashGenerationServer::EnterErrorState() {
  SetEvent(overlapped_.hEvent);
  server_state_ = IPC_SERVER_STATE_ERROR;
}

// Enters the given state without signaling the event: the next state-machine
// step runs only when pending overlapped I/O signals overlapped_.hEvent.
void CrashGenerationServer::EnterStateWhenSignaled(IPCServerState state) {
  server_state_ = state;
}

// Enters the given state and signals the event so the next state-machine
// step runs immediately; falls into ERROR if the event cannot be set.
void CrashGenerationServer::EnterStateImmediately(IPCServerState state) {
  server_state_ = state;

  if (!SetEvent(overlapped_.hEvent)) {
    server_state_ = IPC_SERVER_STATE_ERROR;
  }
}

// Fills in the registration reply, duplicating the event/mutex handles into
// the client process. On failure, any handles already duplicated into the
// client are closed remotely (via DUPLICATE_CLOSE_SOURCE) before returning
// false so nothing leaks in the client.
bool CrashGenerationServer::PrepareReply(const ClientInfo& client_info,
                                         ProtocolMessage* reply) const {
  reply->tag = MESSAGE_TAG_REGISTRATION_RESPONSE;
  reply->id = GetCurrentProcessId();

  if (CreateClientHandles(client_info, reply)) {
    return true;
  }

  // Closing of remote handles (belonging to a different process) can
  // only be done through DuplicateHandle.
  if (reply->dump_request_handle) {
    DuplicateHandle(client_info.process_handle(),  // hSourceProcessHandle
                    reply->dump_request_handle,    // hSourceHandle
                    NULL,                          // hTargetProcessHandle
                    0,                             // lpTargetHandle
                    0,                             // dwDesiredAccess
                    FALSE,                         // bInheritHandle
                    DUPLICATE_CLOSE_SOURCE);       // dwOptions
    reply->dump_request_handle = NULL;
  }

  if (reply->dump_generated_handle) {
    DuplicateHandle(client_info.process_handle(),  // hSourceProcessHandle
                    reply->dump_generated_handle,  // hSourceHandle
                    NULL,                          // hTargetProcessHandle
                    0,                             // lpTargetHandle
                    0,                             // dwDesiredAccess
                    FALSE,                         // bInheritHandle
                    DUPLICATE_CLOSE_SOURCE);       // dwOptions
    reply->dump_generated_handle = NULL;
  }

  if (reply->server_alive_handle) {
    DuplicateHandle(client_info.process_handle(),  // hSourceProcessHandle
                    reply->server_alive_handle,    // hSourceHandle
                    NULL,                          // hTargetProcessHandle
                    0,                             // lpTargetHandle
                    0,                             // dwDesiredAccess
                    FALSE,                         // bInheritHandle
                    DUPLICATE_CLOSE_SOURCE);       // dwOptions
    reply->server_alive_handle = NULL;
  }

  return false;
}

// Duplicates the dump-request event, dump-generated event and server-alive
// mutex into the client process with the minimal access each needs. Returns
// false as soon as any duplication fails (cleanup is PrepareReply's job).
bool CrashGenerationServer::CreateClientHandles(const ClientInfo& client_info,
                                                ProtocolMessage* reply) const {
  HANDLE current_process = GetCurrentProcess();
  if (!DuplicateHandle(current_process,
                       client_info.dump_requested_handle(),
                       client_info.process_handle(),
                       &reply->dump_request_handle,
                       kDumpRequestEventAccess,
                       FALSE,
                       0)) {
    return false;
  }

  if (!DuplicateHandle(current_process,
                       client_info.dump_generated_handle(),
                       client_info.process_handle(),
                       &reply->dump_generated_handle,
                       kDumpGeneratedEventAccess,
                       FALSE,
                       0)) {
    return false;
  }

  if (!DuplicateHandle(current_process,
                       server_alive_handle_,
                       client_info.process_handle(),
                       &reply->server_alive_handle,
                       kMutexAccess,
                       FALSE,
                       0)) {
    return false;
  }

  return true;
}

// Prepares and asynchronously writes the registration reply to the client,
// then registers the client. On success, ownership of client_info passes
// to the server (via AddClient).
bool CrashGenerationServer::RespondToClient(ClientInfo* client_info) {
  ProtocolMessage reply;
  if (!PrepareReply(*client_info, &reply)) {
    return false;
  }

  DWORD bytes_count = 0;
  bool success = WriteFile(pipe_,
                           &reply,
                           sizeof(reply),
                           &bytes_count,
                           &overlapped_) != FALSE;
  DWORD error_code = success ? ERROR_SUCCESS : GetLastError();

  if (!success && error_code != ERROR_IO_PENDING) {
    return false;
  }

  // Takes over ownership of client_info. We MUST return true if AddClient
  // succeeds.
  return AddClient(client_info);
}

// The server thread servicing the clients runs this method. The method
// implements the state machine described in ReadMe.txt along with the
// helper methods HandleXXXState.
void CrashGenerationServer::HandleConnectionRequest() {
  // If the server is shutting down, get into ERROR state, reset the event so
  // more workers don't run and return immediately.
  if (shutting_down_) {
    server_state_ = IPC_SERVER_STATE_ERROR;
    ResetEvent(overlapped_.hEvent);
    return;
  }

  switch (server_state_) {
    case IPC_SERVER_STATE_ERROR:
      HandleErrorState();
      break;

    case IPC_SERVER_STATE_INITIAL:
      HandleInitialState();
      break;

    case IPC_SERVER_STATE_CONNECTING:
      HandleConnectingState();
      break;

    case IPC_SERVER_STATE_CONNECTED:
      HandleConnectedState();
      break;

    case IPC_SERVER_STATE_READING:
      HandleReadingState();
      break;

    case IPC_SERVER_STATE_READ_DONE:
      HandleReadDoneState();
      break;

    case IPC_SERVER_STATE_WRITING:
      HandleWritingState();
      break;

    case IPC_SERVER_STATE_WRITE_DONE:
      HandleWriteDoneState();
      break;

    case IPC_SERVER_STATE_READING_ACK:
      HandleReadingAckState();
      break;

    case IPC_SERVER_STATE_DISCONNECTING:
      HandleDisconnectingState();
      break;

    default:
      assert(false);
      // This indicates that we added one more state without
      // adding handling code.
      server_state_ = IPC_SERVER_STATE_ERROR;
      break;
  }
}

// Registers thread-pool waits for the client's dump requests and for its
// process exit, then adds the client to clients_. Takes ownership of
// client_info only when returning true.
bool CrashGenerationServer::AddClient(ClientInfo* client_info) {
  HANDLE request_wait_handle = NULL;
  if (!RegisterWaitForSingleObject(&request_wait_handle,
                                   client_info->dump_requested_handle(),
                                   OnDumpRequest,
                                   client_info,
                                   INFINITE,
                                   kDumpRequestThreadFlags)) {
    return false;
  }

  client_info->set_dump_request_wait_handle(request_wait_handle);

  // OnClientEnd will be called when the client process terminates.
  HANDLE process_wait_handle = NULL;
  if (!RegisterWaitForSingleObject(&process_wait_handle,
                                   client_info->process_handle(),
                                   OnClientEnd,
                                   client_info,
                                   INFINITE,
                                   WT_EXECUTEONLYONCE)) {
    return false;
  }

  client_info->set_process_exit_wait_handle(process_wait_handle);

  // New scope to hold the lock for the shortest time.
  {
    AutoCriticalSection lock(&sync_);
    if (shutting_down_) {
      // If server is shutting down, don't add new clients
      return false;
    }
    clients_.push_back(client_info);
  }

  return true;
}

// static
// Thread-pool callback: fired whenever overlapped_.hEvent is signaled;
// drives one step of the server state machine.
void CALLBACK CrashGenerationServer::OnPipeConnected(void* context, BOOLEAN) {
  assert(context);

  CrashGenerationServer* obj =
      reinterpret_cast<CrashGenerationServer*>(context);
  obj->HandleConnectionRequest();
}

// static
// Thread-pool callback: fired when a registered client signals its dump
// request event. Generates the dump and resets the event when done.
void CALLBACK CrashGenerationServer::OnDumpRequest(void* context, BOOLEAN) {
  assert(context);
  ClientInfo* client_info = reinterpret_cast<ClientInfo*>(context);

  CrashGenerationServer* crash_server = client_info->crash_server();
  assert(crash_server);
  if (crash_server->pre_fetch_custom_info_) {
    client_info->PopulateCustomInfo();
  }
  crash_server->HandleDumpRequest(*client_info);

  ResetEvent(client_info->dump_requested_handle());
}

// static
// Thread-pool callback: fired once when a registered client process exits.
void CALLBACK CrashGenerationServer::OnClientEnd(void* context, BOOLEAN) {
  assert(context);
  ClientInfo* client_info = reinterpret_cast<ClientInfo*>(context);

  CrashGenerationServer* crash_server = client_info->crash_server();
  assert(crash_server);

  crash_server->HandleClientProcessExit(client_info);
}

// Tears down a client whose process has exited: unregisters its waits,
// invokes the exit callback, removes it from clients_ and deletes it
// (unless the server shutdown path will do the deletion instead).
void CrashGenerationServer::HandleClientProcessExit(ClientInfo* client_info) {
  assert(client_info);

  // Must unregister the dump request wait operation and wait for any
  // dump requests that might be pending to finish before proceeding
  // with the client_info cleanup.
  client_info->UnregisterDumpRequestWaitAndBlockUntilNoPending();

  if (exit_callback_) {
    exit_callback_(exit_context_, client_info);
  }

  // Start a new scope to release lock automatically.
  {
    AutoCriticalSection lock(&sync_);
    if (shutting_down_) {
      // The crash generation server is shutting down and as part of the
      // shutdown process it will delete all clients from the clients_ list.
      return;
    }
    clients_.remove(client_info);
  }

  // Explicitly unregister the process exit wait using the non-blocking method.
  // Otherwise, the destructor will attempt to unregister it using the blocking
  // method which will lead to a deadlock because it is being called from the
  // callback of the same wait operation
  client_info->UnregisterProcessExitWait(false);

  delete client_info;
}

// Services one dump request: optionally generates the dump, invokes the dump
// callback, and finally signals the client's dump-generated event so the
// crashed client can proceed.
void CrashGenerationServer::HandleDumpRequest(const ClientInfo& client_info) {
  bool execute_callback = true;
  // Generate the dump only if it's explicitly requested by the
  // server application; otherwise the server might want to generate
  // dump in the callback.
  std::wstring dump_path;
  if (generate_dumps_) {
    if (!GenerateDump(client_info, &dump_path)) {
      // client proccess terminated or some other error
      execute_callback = false;
    }
  }

  if (dump_callback_ && execute_callback) {
    std::wstring* ptr_dump_path = (dump_path == L"") ? NULL : &dump_path;
    dump_callback_(dump_context_, &client_info, ptr_dump_path);
  }

  SetEvent(client_info.dump_generated_handle());
}

// Reads the exception and thread information out of the client process and
// writes a minidump (plus a full-memory dump if requested) to dump_path_.
// Returns false if the client state cannot be read or dump writing fails.
bool CrashGenerationServer::GenerateDump(const ClientInfo& client,
                                         std::wstring* dump_path) {
  assert(client.pid() != 0);
  assert(client.process_handle());

  // We have to get the address of EXCEPTION_INFORMATION from
  // the client process address space.
  EXCEPTION_POINTERS* client_ex_info = NULL;
  if (!client.GetClientExceptionInfo(&client_ex_info)) {
    return false;
  }

  DWORD client_thread_id = 0;
  if (!client.GetClientThreadId(&client_thread_id)) {
    return false;
  }

  MinidumpGenerator dump_generator(dump_path_,
                                   client.process_handle(),
                                   client.pid(),
                                   client_thread_id,
                                   GetCurrentThreadId(),
                                   client_ex_info,
                                   client.assert_info(),
                                   client.dump_type(),
                                   true);

  if (!dump_generator.GenerateDumpFile(dump_path)) {
    return false;
  }

  // If the client requests a full memory dump, we will write a normal mini
  // dump and a full memory dump. Both dump files use the same uuid as file
  // name prefix.
  if (client.dump_type() & MiniDumpWithFullMemory) {
    std::wstring full_dump_path;
    if (!dump_generator.GenerateFullDumpFile(&full_dump_path)) {
      return false;
    }
  }

  return dump_generator.WriteMinidump();
}

}  // namespace google_breakpad