rpcserver.go
package lnd

import (
	"bytes"
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"image/color"
	"io"
	"maps"
	"math"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/btcec/v2"
	"github.com/btcsuite/btcd/btcec/v2/ecdsa"
	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/btcutil/psbt"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcwallet/waddrmgr"
	"github.com/btcsuite/btcwallet/wallet"
	"github.com/btcsuite/btcwallet/wallet/txauthor"
	proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/lightningnetwork/lnd/autopilot"
	"github.com/lightningnetwork/lnd/build"
	"github.com/lightningnetwork/lnd/chainreg"
	"github.com/lightningnetwork/lnd/chanacceptor"
	"github.com/lightningnetwork/lnd/chanbackup"
	"github.com/lightningnetwork/lnd/chanfitness"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/channelnotifier"
	"github.com/lightningnetwork/lnd/clock"
	"github.com/lightningnetwork/lnd/contractcourt"
	"github.com/lightningnetwork/lnd/discovery"
	"github.com/lightningnetwork/lnd/feature"
	"github.com/lightningnetwork/lnd/fn/v2"
	"github.com/lightningnetwork/lnd/funding"
	graphdb "github.com/lightningnetwork/lnd/graph/db"
	"github.com/lightningnetwork/lnd/graph/db/models"
	"github.com/lightningnetwork/lnd/htlcswitch"
	"github.com/lightningnetwork/lnd/htlcswitch/hop"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/invoices"
	"github.com/lightningnetwork/lnd/keychain"
	"github.com/lightningnetwork/lnd/labels"
	"github.com/lightningnetwork/lnd/lncfg"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnutils"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwallet/btcwallet"
	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
	"github.com/lightningnetwork/lnd/lnwallet/chancloser"
	"github.com/lightningnetwork/lnd/lnwallet/chanfunding"
	"github.com/lightningnetwork/lnd/lnwallet/types"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/macaroons"
	"github.com/lightningnetwork/lnd/onionmessage"
	paymentsdb "github.com/lightningnetwork/lnd/payments/db"
	"github.com/lightningnetwork/lnd/peer"
	"github.com/lightningnetwork/lnd/peernotifier"
	"github.com/lightningnetwork/lnd/record"
	"github.com/lightningnetwork/lnd/routing"
	"github.com/lightningnetwork/lnd/routing/blindedpath"
	"github.com/lightningnetwork/lnd/routing/route"
	"github.com/lightningnetwork/lnd/rpcperms"
	"github.com/lightningnetwork/lnd/signal"
	"github.com/lightningnetwork/lnd/sweep"
	"github.com/lightningnetwork/lnd/tlv"
	"github.com/lightningnetwork/lnd/watchtower"
	"github.com/lightningnetwork/lnd/zpay32"
	"github.com/tv42/zbase32"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"
	"gopkg.in/macaroon-bakery.v2/bakery"
)
const (
	// defaultNumBlocksEstimate is the number of blocks that we fall back
	// to issuing an estimate for if a fee preference doesn't specify an
	// explicit conf target or fee rate.
	defaultNumBlocksEstimate = 6
)

var (
	// readPermissions is a slice of all entities that allow read
	// permissions for authorization purposes, all lowercase.
	readPermissions = []bakery.Op{
		{
			Entity: "onchain",
			Action: "read",
		},
		{
			Entity: "offchain",
			Action: "read",
		},
		{
			Entity: "address",
			Action: "read",
		},
		{
			Entity: "message",
			Action: "read",
		},
		{
			Entity: "peers",
			Action: "read",
		},
		{
			Entity: "info",
			Action: "read",
		},
		{
			Entity: "invoices",
			Action: "read",
		},
		{
			Entity: "signer",
			Action: "read",
		},
		{
			Entity: "macaroon",
			Action: "read",
		},
	}

	// writePermissions is a slice of all entities that allow write
	// permissions for authorization purposes, all lowercase.
	writePermissions = []bakery.Op{
		{
			Entity: "onchain",
			Action: "write",
		},
		{
			Entity: "offchain",
			Action: "write",
		},
		{
			Entity: "address",
			Action: "write",
		},
		{
			Entity: "message",
			Action: "write",
		},
		{
			Entity: "peers",
			Action: "write",
		},
		{
			Entity: "info",
			Action: "write",
		},
		{
			Entity: "invoices",
			Action: "write",
		},
		{
			Entity: "signer",
			Action: "generate",
		},
		{
			Entity: "macaroon",
			Action: "generate",
		},
		{
			Entity: "macaroon",
			Action: "write",
		},
	}

	// invoicePermissions is a slice of all the entities that allow a user
	// to only access calls that are related to invoices, so: streaming
	// RPCs, generating, and listing invoices.
	invoicePermissions = []bakery.Op{
		{
			Entity: "invoices",
			Action: "read",
		},
		{
			Entity: "invoices",
			Action: "write",
		},
		{
			Entity: "address",
			Action: "read",
		},
		{
			Entity: "address",
			Action: "write",
		},
		{
			Entity: "onchain",
			Action: "read",
		},
	}

	// TODO(guggero): Refactor into constants that are used for all
	// permissions in this file. Also expose the list of possible
	// permissions in an RPC when per RPC permissions are implemented.
	validActions  = []string{"read", "write", "generate"}
	validEntities = []string{
		"onchain", "offchain", "address", "message",
		"peers", "info", "invoices", "signer", "macaroon",
		macaroons.PermissionEntityCustomURI,
	}

	// If the --no-macaroons flag is used to start lnd, the macaroon service
	// is not initialized. errMacaroonDisabled is then returned when
	// macaroon related services are used.
	errMacaroonDisabled = fmt.Errorf("macaroon authentication disabled, " +
		"remove --no-macaroons flag to enable")
)

// stringInSlice returns true if a string is contained in the given slice.
func stringInSlice(a string, slice []string) bool {
	for _, b := range slice {
		if b == a {
			return true
		}
	}
	return false
}

// GetAllPermissions returns all the permissions required to interact with lnd.
func GetAllPermissions() []bakery.Op {
	allPerms := make([]bakery.Op, 0)

	// The map will help keep track of which specific permission pairs have
	// already been added to the slice.
	allPermsMap := make(map[string]map[string]struct{})

	for _, perms := range MainRPCServerPermissions() {
		for _, perm := range perms {
			entity := perm.Entity
			action := perm.Action

			// If this specific entity-action permission pair isn't
			// in the map yet, add it to the map and to the
			// permission slice.
			if acts, ok := allPermsMap[entity]; ok {
				if _, ok := acts[action]; !ok {
					allPermsMap[entity][action] = struct{}{}

					allPerms = append(
						allPerms, perm,
					)
				}
			} else {
				allPermsMap[entity] = make(map[string]struct{})
				allPermsMap[entity][action] = struct{}{}
				allPerms = append(allPerms, perm)
			}
		}
	}

	return allPerms
}
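// A minimal usage sketch: baking a macaroon that carries every permission
// above, assuming a *macaroons.Service named svc is already set up (the
// variable names here are hypothetical; this mirrors what the BakeMacaroon
// RPC does internally for an all-permission macaroon):
//
//	perms := GetAllPermissions()
//	mac, err := svc.NewMacaroon(
//		ctx, macaroons.DefaultRootKeyID, perms...,
//	)
//	if err != nil {
//		return err
//	}
//	macBytes, err := mac.M().MarshalBinary()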
// MainRPCServerPermissions returns a mapping of the main RPC server calls to
// the permissions they require.
func MainRPCServerPermissions() map[string][]bakery.Op {
	return map[string][]bakery.Op{
		"/lnrpc.Lightning/SendCoins": {{
			Entity: "onchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/ListUnspent": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/SendMany": {{
			Entity: "onchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/NewAddress": {{
			Entity: "address",
			Action: "write",
		}},
		"/lnrpc.Lightning/SignMessage": {{
			Entity: "message",
			Action: "write",
		}},
		"/lnrpc.Lightning/VerifyMessage": {{
			Entity: "message",
			Action: "read",
		}},
		"/lnrpc.Lightning/ConnectPeer": {{
			Entity: "peers",
			Action: "write",
		}},
		"/lnrpc.Lightning/DisconnectPeer": {{
			Entity: "peers",
			Action: "write",
		}},
		"/lnrpc.Lightning/OpenChannel": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/BatchOpenChannel": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/OpenChannelSync": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/CloseChannel": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/AbandonChannel": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/GetInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetDebugInfo": {{
			Entity: "info",
			Action: "read",
		}, {
			Entity: "offchain",
			Action: "read",
		}, {
			Entity: "onchain",
			Action: "read",
		}, {
			Entity: "peers",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetRecoveryInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListPeers": {{
			Entity: "peers",
			Action: "read",
		}},
		"/lnrpc.Lightning/WalletBalance": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/EstimateFee": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ChannelBalance": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/PendingChannels": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListChannels": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/SubscribeChannelEvents": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ClosedChannels": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/SendPayment": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SendPaymentSync": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SendToRoute": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SendToRouteSync": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/AddInvoice": {{
			Entity: "invoices",
			Action: "write",
		}},
		"/lnrpc.Lightning/LookupInvoice": {{
			Entity: "invoices",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListInvoices": {{
			Entity: "invoices",
			Action: "read",
		}},
		"/lnrpc.Lightning/SubscribeInvoices": {{
			Entity: "invoices",
			Action: "read",
		}},
		"/lnrpc.Lightning/SubscribeTransactions": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetTransactions": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/DescribeGraph": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetNodeMetrics": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetChanInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetNodeInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/QueryRoutes": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetNetworkInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/StopDaemon": {{
			Entity: "info",
			Action: "write",
		}},
		"/lnrpc.Lightning/SubscribeChannelGraph": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/DeleteCanceledInvoice": {{
			Entity: "invoices",
			Action: "write",
		}},
		"/lnrpc.Lightning/ListPayments": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/DeletePayment": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/DeleteAllPayments": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/DebugLevel": {{
			Entity: "info",
			Action: "write",
		}},
		"/lnrpc.Lightning/DecodePayReq": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/FeeReport": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/UpdateChannelPolicy": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/ForwardingHistory": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/RestoreChannelBackups": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/ExportChannelBackup": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/VerifyChanBackup": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ExportAllChannelBackups": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/SubscribeChannelBackups": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ChannelAcceptor": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/BakeMacaroon": {{
			Entity: "macaroon",
			Action: "generate",
		}},
		"/lnrpc.Lightning/ListMacaroonIDs": {{
			Entity: "macaroon",
			Action: "read",
		}},
		"/lnrpc.Lightning/DeleteMacaroonID": {{
			Entity: "macaroon",
			Action: "write",
		}},
		"/lnrpc.Lightning/ListPermissions": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/CheckMacaroonPermissions": {{
			Entity: "macaroon",
			Action: "read",
		}},
		"/lnrpc.Lightning/SubscribePeerEvents": {{
			Entity: "peers",
			Action: "read",
		}},
		"/lnrpc.Lightning/FundingStateStep": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		lnrpc.RegisterRPCMiddlewareURI: {{
			Entity: "macaroon",
			Action: "write",
		}},
		"/lnrpc.Lightning/SendCustomMessage": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SubscribeCustomMessages": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/SendOnionMessage": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SubscribeOnionMessages": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/LookupHtlcResolution": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListAliases": {{
			Entity: "offchain",
			Action: "read",
		}},
	}
}

// AuxDataParser is an interface that is used to parse auxiliary custom data
// within RPC messages. This is used to transform binary blobs to human-readable
// JSON representations.
type AuxDataParser interface {
	// InlineParseCustomData replaces any custom data binary blob in the
	// given RPC message with its corresponding JSON formatted data. This
	// transforms the binary (likely TLV encoded) data to a human-readable
	// JSON representation (still as byte slice).
	InlineParseCustomData(msg proto.Message) error
}
// rpcServer is a gRPC, RPC front end to the lnd daemon.
// TODO(roasbeef): pagination support for the list-style calls
type rpcServer struct {
	started  int32 // To be used atomically.
	shutdown int32 // To be used atomically.

	// Required by the grpc-gateway/v2 library for forward compatibility.
	// Must be after the atomically used variables to not break struct
	// alignment.
	lnrpc.UnimplementedLightningServer

	server *server

	cfg *Config

	// subServers are a set of sub-RPC servers that use the same gRPC and
	// listening sockets as the main RPC server, but which maintain their
	// own independent service. This allows us to expose a set of
	// micro-service like abstractions to the outside world for users to
	// consume.
	subServers      []lnrpc.SubServer
	subGrpcHandlers []lnrpc.GrpcHandler

	// routerBackend contains the backend implementation of the router
	// rpc sub server.
	routerBackend *routerrpc.RouterBackend

	// chanPredicate is used in the bidirectional ChannelAcceptor streaming
	// method.
	chanPredicate chanacceptor.MultiplexAcceptor

	quit chan struct{}

	// macService is the macaroon service that we need to mint new
	// macaroons.
	macService *macaroons.Service

	// selfNode is our own pubkey.
	selfNode route.Vertex

	// interceptorChain is the interceptor added to our gRPC server.
	interceptorChain *rpcperms.InterceptorChain

	// implCfg is the configuration for some of the interfaces that can be
	// provided externally.
	implCfg *ImplementationCfg

	// interceptor is used to be able to request a shutdown.
	interceptor signal.Interceptor

	graphCache        sync.RWMutex
	describeGraphResp *lnrpc.ChannelGraph
	graphCacheEvictor *time.Timer
}

// A compile time check to ensure that rpcServer fully implements the
// LightningServer gRPC service.
var _ lnrpc.LightningServer = (*rpcServer)(nil)

// newRPCServer creates and returns a new instance of the rpcServer. Before
// dependencies are added, this will be a non-functioning RPC server only to
// be used to register the LightningService with the gRPC server.
func newRPCServer(cfg *Config, interceptorChain *rpcperms.InterceptorChain,
	implCfg *ImplementationCfg, interceptor signal.Interceptor) *rpcServer {

	// We go through the list of registered sub-servers, and create a gRPC
	// handler for each. These are used to register with the gRPC server
	// before all dependencies are available.
	registeredSubServers := lnrpc.RegisteredSubServers()

	var subServerHandlers []lnrpc.GrpcHandler
	for _, subServer := range registeredSubServers {
		subServerHandlers = append(
			subServerHandlers, subServer.NewGrpcHandler(),
		)
	}

	return &rpcServer{
		cfg:              cfg,
		subGrpcHandlers:  subServerHandlers,
		interceptorChain: interceptorChain,
		implCfg:          implCfg,
		quit:             make(chan struct{}, 1),
		interceptor:      interceptor,
	}
}

// addDeps populates all dependencies needed by the RPC server, and any
// of the sub-servers that it maintains. When this is done, the RPC server can
// be started, and start accepting RPC calls.
func (r *rpcServer) addDeps(ctx context.Context, s *server,
	macService *macaroons.Service,
	subServerCgs *subRPCServerConfigs, atpl *autopilot.Manager,
	invoiceRegistry *invoices.InvoiceRegistry, tower *watchtower.Standalone,
	chanPredicate chanacceptor.MultiplexAcceptor,
	invoiceHtlcModifier *invoices.HtlcModificationInterceptor) error {

	// Set up router rpc backend.
	selfNode, err := s.v1Graph.SourceNode(ctx)
	if err != nil {
		return err
	}
	graph := s.graphDB

	routerBackend := &routerrpc.RouterBackend{
		SelfNode: selfNode.PubKeyBytes,
		Clock:    clock.NewDefaultClock(),
		FetchChannelCapacity: func(chanID uint64) (btcutil.Amount,
			error) {

			info, _, _, err := graph.FetchChannelEdgesByID(
				ctx, chanID,
			)
			if err != nil {
				return 0, err
			}
			return info.Capacity, nil
		},
		FetchAmountPairCapacity: func(nodeFrom, nodeTo route.Vertex,
			amount lnwire.MilliSatoshi) (btcutil.Amount, error) {

			return routing.FetchAmountPairCapacity(
				graph, selfNode.PubKeyBytes, nodeFrom, nodeTo,
				amount,
			)
		},
		FetchChannelEndpoints: func(chanID uint64) (route.Vertex,
			route.Vertex, error) {

			info, _, _, err := graph.FetchChannelEdgesByID(
				ctx, chanID,
			)
			if err != nil {
				return route.Vertex{}, route.Vertex{},
					fmt.Errorf("unable to fetch channel "+
						"edges by channel ID %d: %v",
						chanID, err)
			}

			return info.NodeKey1Bytes, info.NodeKey2Bytes, nil
		},
		HasNode: func(nodePub route.Vertex) (bool, error) {
			exists, err := s.v1Graph.HasNode(ctx, nodePub)

			return exists, err
		},
		FindRoute:              s.chanRouter.FindRoute,
		MissionControl:         s.defaultMC,
		ActiveNetParams:        r.cfg.ActiveNetParams.Params,
		Tower:                  s.controlTower,
		MaxTotalTimelock:       r.cfg.MaxOutgoingCltvExpiry,
		DefaultFinalCltvDelta:  uint16(r.cfg.Bitcoin.TimeLockDelta),
		SubscribeHtlcEvents:    s.htlcNotifier.SubscribeHtlcEvents,
		InterceptableForwarder: s.interceptableSwitch,
		SetChannelEnabled: func(outpoint wire.OutPoint) error {
			return s.chanStatusMgr.RequestEnable(outpoint, true)
		},
		SetChannelDisabled: func(outpoint wire.OutPoint) error {
			return s.chanStatusMgr.RequestDisable(outpoint, true)
		},
		SetChannelAuto:     s.chanStatusMgr.RequestAuto,
		UseStatusInitiated: subServerCgs.RouterRPC.UseStatusInitiated,
		ParseCustomChannelData: func(msg proto.Message) error {
			err = fn.MapOptionZ(
				r.server.implCfg.AuxDataParser,
				func(parser AuxDataParser) error {
					return parser.InlineParseCustomData(msg)
				},
			)
			if err != nil {
				return fmt.Errorf("error parsing custom data: "+
					"%w", err)
			}

			return nil
		},
		ShouldSetExpAccountability: func() bool {
			return !s.cfg.ProtocolOptions.NoExpAccountability()
		},
	}

	genInvoiceFeatures := func() *lnwire.FeatureVector {
		return s.featureMgr.Get(feature.SetInvoice)
	}
	genAmpInvoiceFeatures := func() *lnwire.FeatureVector {
		return s.featureMgr.Get(feature.SetInvoiceAmp)
	}

	parseAddr := func(addr string) (net.Addr, error) {
		return parseAddr(addr, r.cfg.net)
	}

	var (
		subServers     []lnrpc.SubServer
		subServerPerms []lnrpc.MacaroonPerms
	)

	// Before we create any of the sub-servers, we need to ensure that all
	// the dependencies they need are properly populated within each sub
	// server configuration struct.
	//
	// TODO(roasbeef): extend sub-server config to have both (local vs
	// remote) DB
	err = subServerCgs.PopulateDependencies(
		r.cfg, s.cc, r.cfg.networkDir, macService, atpl, invoiceRegistry,
		s.htlcSwitch, r.cfg.ActiveNetParams.Params, s.chanRouter,
		routerBackend, s.nodeSigner, s.graphDB, s.chanStateDB,
		s.sweeper, tower, s.towerClientMgr, r.cfg.net.ResolveTCPAddr,
		genInvoiceFeatures, genAmpInvoiceFeatures,
		s.getNodeAnnouncement, s.updateAndBroadcastSelfNode, parseAddr,
		rpcsLog, s.aliasMgr, r.implCfg.AuxDataParser,
		invoiceHtlcModifier,
	)
	if err != nil {
		return err
	}

	// Now that the sub-servers have all their dependencies in place, we
	// can create each sub-server!
	for _, subServerInstance := range r.subGrpcHandlers {
		subServer, macPerms, err := subServerInstance.CreateSubServer(
			subServerCgs,
		)
		if err != nil {
			return err
		}

		// We'll collect the sub-server, and also the set of
		// permissions it needs for macaroons so we can apply the
		// interceptors below.
		subServers = append(subServers, subServer)
		subServerPerms = append(subServerPerms, macPerms)
	}

	// Next, we need to merge the set of sub server macaroon permissions
	// with the main RPC server permissions so we can unite them under a
	// single set of interceptors.
	for m, ops := range MainRPCServerPermissions() {
		err := r.interceptorChain.AddPermission(m, ops)
		if err != nil {
			return err
		}
	}

	for _, subServerPerm := range subServerPerms {
		for method, ops := range subServerPerm {
			err := r.interceptorChain.AddPermission(method, ops)
			if err != nil {
				return err
			}
		}
	}

	// External subservers may need to register their own permissions and
	// macaroon validator.
	for method, ops := range r.implCfg.ExternalValidator.Permissions() {
		err := r.interceptorChain.AddPermission(method, ops)
		if err != nil {
			return err
		}

		// Give the external subservers the possibility to also use
		// their own validator to check any macaroons attached to calls
		// to this method. This allows them to have their own root key
		// ID database and permission entities.
		err = macService.RegisterExternalValidator(
			method, r.implCfg.ExternalValidator,
		)
		if err != nil {
			return fmt.Errorf("could not register external "+
				"macaroon validator: %v", err)
		}
	}

	// Finally, with all the set up complete, add the last dependencies to
	// the rpc server.
	r.server = s
	r.subServers = subServers
	r.routerBackend = routerBackend
	r.chanPredicate = chanPredicate
	r.macService = macService
	r.selfNode = selfNode.PubKeyBytes

	graphCacheDuration := r.cfg.Caches.RPCGraphCacheDuration
	if graphCacheDuration != 0 {
		r.graphCacheEvictor = time.NewTimer(graphCacheDuration)

		go func() {
			for {
				select {
				// The timer fired, so we'll purge the graph
				// cache.
				case <-r.graphCacheEvictor.C:
					r.graphCache.Lock()
					r.describeGraphResp = nil
					r.graphCache.Unlock()

					// Reset the timer so we'll fire
					// again after the specified
					// duration.
					r.graphCacheEvictor.Reset(
						graphCacheDuration,
					)

				// The server is quitting, so we'll stop the
				// timer and exit.
				case <-r.quit:
					if !r.graphCacheEvictor.Stop() {
						// Drain the channel if Stop()
						// returns false, meaning the
						// timer has already fired.
						<-r.graphCacheEvictor.C
					}

					return
				}
			}
		}()
	}

	return nil
}

// RegisterWithGrpcServer registers the rpcServer and any subservers with the
// root gRPC server.
func (r *rpcServer) RegisterWithGrpcServer(grpcServer *grpc.Server) error {
	// Register the main RPC server.
	lnrpc.RegisterLightningServer(grpcServer, r)

	// Now that the main RPC server has been registered, we'll iterate
	// through all the sub-RPC servers and register them to ensure that
	// requests are properly routed towards them.
	for _, subServer := range r.subGrpcHandlers {
		err := subServer.RegisterWithRootServer(grpcServer)
		if err != nil {
			return fmt.Errorf("unable to register "+
				"sub-server with root: %v", err)
		}
	}

	// Before actually listening on the gRPC listener, give external
	// subservers the chance to register to our gRPC server. Those external
	// subservers (think GrUB) are responsible for starting/stopping on
	// their own, we just let them register their services to the same
	// server instance so all of them can be exposed on the same
	// port/listener.
	err := r.implCfg.RegisterGrpcSubserver(grpcServer)
	if err != nil {
		rpcsLog.Errorf("error registering external gRPC "+
			"subserver: %v", err)
	}

	return nil
}

// Start launches any helper goroutines required for the rpcServer to function.
func (r *rpcServer) Start() error {
	if atomic.AddInt32(&r.started, 1) != 1 {
		return nil
	}

	// First, we'll start all the sub-servers to ensure that they're ready
	// to take new requests in.
	//
	// TODO(roasbeef): some may require that the entire daemon be started
	// at that point
	for _, subServer := range r.subServers {
		rpcsLog.Debugf("Starting sub RPC server: %v", subServer.Name())

		if err := subServer.Start(); err != nil {
			return err
		}
	}

	return nil
}

// RegisterWithRestProxy registers the RPC server and any subservers with the
// given REST proxy.
func (r *rpcServer) RegisterWithRestProxy(restCtx context.Context,
	restMux *proxy.ServeMux, restDialOpts []grpc.DialOption,
	restProxyDest string) error {

	// With our custom REST proxy mux created, register our main RPC and
	// give all subservers a chance to register as well.
	err := lnrpc.RegisterLightningHandlerFromEndpoint(
		restCtx, restMux, restProxyDest, restDialOpts,
	)
	if err != nil {
		return err
	}

	// Register our State service with the REST proxy.
	err = lnrpc.RegisterStateHandlerFromEndpoint(
		restCtx, restMux, restProxyDest, restDialOpts,
	)
	if err != nil {
		return err
	}

	// Register all the subservers with the REST proxy.
	for _, subServer := range r.subGrpcHandlers {
		err := subServer.RegisterWithRestServer(
			restCtx, restMux, restProxyDest, restDialOpts,
		)
		if err != nil {
			return fmt.Errorf("unable to register REST sub-server "+
				"with root: %v", err)
		}
	}

	// Before listening on any of the interfaces, we also want to give the
	// external subservers a chance to register their own REST proxy stub
	// with our mux instance.
	err = r.implCfg.RegisterRestSubserver(
		restCtx, restMux, restProxyDest, restDialOpts,
	)
	if err != nil {
		rpcsLog.Errorf("error registering external REST subserver: %v",
			err)
	}
	return nil
}

// Stop signals any active goroutines for a graceful closure.
func (r *rpcServer) Stop() error {
	if atomic.AddInt32(&r.shutdown, 1) != 1 {
		return nil
	}

	rpcsLog.Infof("Stopping RPC Server")

	close(r.quit)

	// After we've signalled all of our active goroutines to exit, we'll
	// then do the same to signal a graceful shutdown of all the sub
	// servers.
	for _, subServer := range r.subServers {
		rpcsLog.Infof("Stopping %v Sub-RPC Server",
			subServer.Name())

		if err := subServer.Stop(); err != nil {
			rpcsLog.Errorf("unable to stop sub-server %v: %v",
				subServer.Name(), err)
			continue
		}
	}

	return nil
}

// addrPairsToOutputs converts a map describing a set of outputs to be created
// to the outputs themselves. The passed map pairs up an address with a desired
// output value amount. Each address is converted to its corresponding pkScript
// to be used within the constructed output(s).
func addrPairsToOutputs(addrPairs map[string]int64,
	params *chaincfg.Params) ([]*wire.TxOut, error) {

	outputs := make([]*wire.TxOut, 0, len(addrPairs))
	for addr, amt := range addrPairs {
		addr, err := btcutil.DecodeAddress(addr, params)
		if err != nil {
			return nil, err
		}

		if !addr.IsForNet(params) {
			return nil, fmt.Errorf("address is not for %s",
				params.Name)
		}

		pkscript, err := txscript.PayToAddrScript(addr)
		if err != nil {
			return nil, err
		}

		outputs = append(outputs, wire.NewTxOut(amt, pkscript))
	}

	return outputs, nil
}
// allowCORS wraps the given http.Handler with a function that adds the
// Access-Control-Allow-Origin header to the response.
func allowCORS(handler http.Handler, origins []string) http.Handler {
	allowHeaders := "Access-Control-Allow-Headers"
	allowMethods := "Access-Control-Allow-Methods"
	allowOrigin := "Access-Control-Allow-Origin"

	// If the user didn't supply any origins that means CORS is disabled
	// and we should return the original handler.
	if len(origins) == 0 {
		return handler
	}

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		origin := r.Header.Get("Origin")

		// Skip everything if the browser doesn't send the Origin
		// field.
		if origin == "" {
			handler.ServeHTTP(w, r)
			return
		}

		// Set the static header fields first.
		w.Header().Set(
			allowHeaders,
			"Content-Type, Accept, Grpc-Metadata-Macaroon",
		)
		w.Header().Set(allowMethods, "GET, POST, DELETE")

		// Either we allow all origins or the incoming request matches
		// a specific origin in our list of allowed origins.
		for _, allowedOrigin := range origins {
			if allowedOrigin == "*" || origin == allowedOrigin {
				// Only set allowed origin to requested origin.
				w.Header().Set(allowOrigin, origin)

				break
			}
		}

		// For a pre-flight request we only need to send the headers
		// back. No need to call the rest of the chain.
		if r.Method == "OPTIONS" {
			return
		}

		// Everything's prepared now, we can pass the request along the
		// chain of handlers.
		handler.ServeHTTP(w, r)
	})
}
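// A minimal usage sketch: wrapping a REST mux before serving it, assuming a
// mux and listener already exist (the names here are hypothetical):
//
//	restHandler := allowCORS(restMux, []string{"https://example.com"})
//	_ = http.Serve(lis, restHandler)
//
// With an empty origin list the wrapper returns the handler unchanged, so
// CORS stays disabled unless explicitly configured.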
// sendCoinsOnChain makes an on-chain transaction to send coins to one or
// more addresses specified in the passed payment map. The payment map maps an
// address to a specified output value to be sent to that address.
func (r *rpcServer) sendCoinsOnChain(paymentMap map[string]int64,
	feeRate chainfee.SatPerKWeight, minConfs int32, label string,
	strategy wallet.CoinSelectionStrategy,
	selectedUtxos fn.Set[wire.OutPoint]) (*chainhash.Hash, error) {

	outputs, err := addrPairsToOutputs(paymentMap, r.cfg.ActiveNetParams.Params)
	if err != nil {
		return nil, err
	}

	// We first do a dry run, to sanity check we won't spend our wallet
	// balance below the reserved amount.
	authoredTx, err := r.server.cc.Wallet.CreateSimpleTx(
		selectedUtxos, outputs, feeRate, minConfs, strategy, true,
	)
	if err != nil {
		return nil, err
	}

	// Check the authored transaction and use the explicitly set change
	// index to make sure that the wallet reserved balance is not
	// invalidated.
	_, err = r.server.cc.Wallet.CheckReservedValueTx(
		lnwallet.CheckReservedValueTxReq{
			Tx:          authoredTx.Tx,
			ChangeIndex: &authoredTx.ChangeIndex,
		},
	)
	if err != nil {
		return nil, err
	}

	// If that checks out, we're fairly confident that sending to these
	// outputs will keep the wallet balance above the reserve.
	tx, err := r.server.cc.Wallet.SendOutputs(
		selectedUtxos, outputs, feeRate, minConfs, label, strategy,
	)
	if err != nil {
		return nil, err
	}

	txHash := tx.TxHash()
	return &txHash, nil
}

// ListUnspent returns useful information about each unspent output owned by
// the wallet, as reported by the underlying `ListUnspentWitness`; the
// information returned is: outpoint, amount in satoshis, address, address
// type, scriptPubKey in hex and number of confirmations. The result is
// filtered to contain outputs whose number of confirmations is between a
// minimum and maximum number of confirmations specified by the user, with
// 0 meaning unconfirmed.
func (r *rpcServer) ListUnspent(ctx context.Context,
	in *lnrpc.ListUnspentRequest) (*lnrpc.ListUnspentResponse, error) {

	// Validate the confirmation arguments.
	minConfs, maxConfs, err := lnrpc.ParseConfs(in.MinConfs, in.MaxConfs)
	if err != nil {
		return nil, err
	}

	// With our arguments validated, we'll query the internal wallet for
	// the set of UTXOs that match our query.
	//
	// We'll acquire the global coin selection lock to ensure there aren't
	// any other concurrent processes attempting to lock any UTXOs which
	// may be shown available to us.
	var utxos []*lnwallet.Utxo
	err = r.server.cc.Wallet.WithCoinSelectLock(func() error {
		utxos, err = r.server.cc.Wallet.ListUnspentWitness(
			minConfs, maxConfs, in.Account,
		)
		return err
	})
	if err != nil {
		return nil, err
	}

	rpcUtxos, err := lnrpc.MarshalUtxos(utxos, r.cfg.ActiveNetParams.Params)
	if err != nil {
		return nil, err
	}

	maxStr := ""
	if maxConfs != math.MaxInt32 {
		maxStr = " max=" + fmt.Sprintf("%d", maxConfs)
	}

	rpcsLog.Debugf("[listunspent] min=%v%v, generated utxos: %v", minConfs,
		maxStr, utxos)

	return &lnrpc.ListUnspentResponse{
		Utxos: rpcUtxos,
	}, nil
}
// EstimateFee handles a request for estimating the fee for sending a
// transaction spending to multiple specified outputs in parallel.
func (r *rpcServer) EstimateFee(ctx context.Context,
	in *lnrpc.EstimateFeeRequest) (*lnrpc.EstimateFeeResponse, error) {

	// Create the list of outputs we are spending to.
	outputs, err := addrPairsToOutputs(in.AddrToAmount, r.cfg.ActiveNetParams.Params)
	if err != nil {
		return nil, err
	}

	// Query the fee estimator for the fee rate for the given confirmation
	// target.
	target := in.TargetConf
	feePref := sweep.FeeEstimateInfo{
		ConfTarget: uint32(target),
	}

	// Since we are providing a fee estimation as an RPC response, there's
	// no need to set a max feerate here, so we use 0.
	feePerKw, err := feePref.Estimate(r.server.cc.FeeEstimator, 0)
	if err != nil {
		return nil, err
	}

	// Then, we'll extract the minimum number of confirmations that each
	// output we use to fund the transaction should satisfy.
	minConfs, err := lnrpc.ExtractMinConfs(
		in.GetMinConfs(), in.GetSpendUnconfirmed(),
	)
	if err != nil {
		return nil, err
	}

	coinSelectionStrategy, err := lnrpc.UnmarshallCoinSelectionStrategy(
		in.CoinSelectionStrategy,
		r.server.cc.Wallet.Cfg.CoinSelectionStrategy,
	)
	if err != nil {
		return nil, err
	}

	var selectOutpoints fn.Set[wire.OutPoint]
	if len(in.Inputs) != 0 {
		wireOutpoints, err := toWireOutpoints(in.Inputs)
		if err != nil {
			return nil, fmt.Errorf("can't create outpoints %w", err)
		}

		if fn.HasDuplicates(wireOutpoints) {
			return nil, fmt.Errorf("selected outpoints contain " +
				"duplicate values")
		}

		selectOutpoints = fn.NewSet(wireOutpoints...)
	}

	// We will ask the wallet to create a tx using this fee rate. We set
	// dryRun=true to avoid inflating the change addresses in the db.
	var tx *txauthor.AuthoredTx
	wallet := r.server.cc.Wallet
	err = wallet.WithCoinSelectLock(func() error {
		tx, err = wallet.CreateSimpleTx(
			selectOutpoints, outputs, feePerKw, minConfs,
			coinSelectionStrategy, true,
		)
		return err
	})
	if err != nil {
		return nil, err
	}

	// Use the created tx to calculate the total fee.
	totalOutput := int64(0)
	for _, out := range tx.Tx.TxOut {
		totalOutput += out.Value
	}
	totalFee := int64(tx.TotalInput) - totalOutput

	// Return the inputs the estimate is for.
	outStr := make([]string, 0, len(tx.Tx.TxIn))
	for _, txIn := range tx.Tx.TxIn {
		outStr = append(
			outStr, txIn.PreviousOutPoint.String(),
		)
	}

	inputs, err := UtxosToOutpoints(outStr)
	if err != nil {
		return nil, fmt.Errorf("can't convert outpoints %w", err)
	}

	resp := &lnrpc.EstimateFeeResponse{
		FeeSat:      totalFee,
		SatPerVbyte: uint64(feePerKw.FeePerVByte()),

		// Deprecated field.
		FeerateSatPerByte: int64(feePerKw.FeePerVByte()),
		Inputs:            inputs,
	}

	rpcsLog.Debugf("[estimatefee] fee estimate for conf target %d: %v",
		target, resp)

	return resp, nil
}

// maybeUseDefaultConf makes sure that when the user doesn't set either the fee
// rate or conf target, the default conf target is used.
func maybeUseDefaultConf(satPerByte int64, satPerVByte uint64,
	targetConf uint32) uint32 {

	// If the fee rate is set, there's no need to use the default conf
	// target. In this case, we just return the targetConf from the
	// request.
	if satPerByte != 0 || satPerVByte != 0 {
		return targetConf
	}

	// Return the user specified conf target if set.
	if targetConf != 0 {
		return targetConf
	}

	// If the fee rate is not set, yet the conf target is zero, the default
	// 6 will be returned.
	rpcsLog.Warnf("Expected either 'sat_per_vbyte' or 'conf_target' to " +
		"be set, using default conf of 6 instead")

	return defaultNumBlocksEstimate
}
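// A quick illustration of the fallback rules above (arguments are
// satPerByte, satPerVByte, targetConf):
//
//	maybeUseDefaultConf(0, 25, 0) == 0  // fee rate set, keep request's 0
//	maybeUseDefaultConf(0, 0, 12) == 12 // conf target set, honor it
//	maybeUseDefaultConf(0, 0, 0)  == 6  // nothing set, default applies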
// SendCoins executes a request to send coins to a particular address. Unlike
// SendMany, this RPC call only allows creating a single output at a time.
func (r *rpcServer) SendCoins(ctx context.Context,
	in *lnrpc.SendCoinsRequest) (*lnrpc.SendCoinsResponse, error) {

	// Keep the old behavior prior to 0.18.0 - when the user doesn't set
	// fee rate or conf target, the default conf target of 6 is used.
	targetConf := maybeUseDefaultConf(
		in.SatPerByte, in.SatPerVbyte, uint32(in.TargetConf),
	)

	// Calculate an appropriate fee rate for this transaction.
	feePerKw, err := lnrpc.CalculateFeeRate(
		uint64(in.SatPerByte), in.SatPerVbyte, // nolint:staticcheck
		targetConf, r.server.cc.FeeEstimator,
	)
	if err != nil {
		return nil, err
	}

	// Then, we'll extract the minimum number of confirmations that each
	// output we use to fund the transaction should satisfy.
	minConfs, err := lnrpc.ExtractMinConfs(in.MinConfs, in.SpendUnconfirmed)
	if err != nil {
		return nil, err
	}

	rpcsLog.Infof("[sendcoins] addr=%v, amt=%v, sat/kw=%v, min_confs=%v, "+
		"send_all=%v, select_outpoints=%v",
		in.Addr, btcutil.Amount(in.Amount), int64(feePerKw), minConfs,
		in.SendAll, len(in.Outpoints))

	// Decode the address receiving the coins, we need to check whether the
	// address is valid for this network.
	targetAddr, err := btcutil.DecodeAddress(
		in.Addr, r.cfg.ActiveNetParams.Params,
	)
	if err != nil {
		return nil, err
	}

	// Make the check on the decoded address according to the active
	// network.
	if !targetAddr.IsForNet(r.cfg.ActiveNetParams.Params) {
		return nil, fmt.Errorf("address: %v is not valid for this "+
			"network: %v", targetAddr.String(),
			r.cfg.ActiveNetParams.Params.Name)
	}

	// If the destination address parses to a valid pubkey, we assume the
	// user accidentally tried to send funds to a bare pubkey address. This
	// check is here to prevent unintended transfers.
	decodedAddr, _ := hex.DecodeString(in.Addr)
	_, err = btcec.ParsePubKey(decodedAddr)
	if err == nil {
		return nil, fmt.Errorf("cannot send coins to pubkeys")
	}

	label, err := labels.ValidateAPI(in.Label)
	if err != nil {
		return nil, err
	}

	coinSelectionStrategy, err := lnrpc.UnmarshallCoinSelectionStrategy(
		in.CoinSelectionStrategy,
		r.server.cc.Wallet.Cfg.CoinSelectionStrategy,
	)
	if err != nil {
		return nil, err
	}

	var txid *chainhash.Hash

	wallet := r.server.cc.Wallet
	maxFeeRate := r.cfg.Sweeper.MaxFeeRate.FeePerKWeight()

	var selectOutpoints fn.Set[wire.OutPoint]
	if len(in.Outpoints) != 0 {
		wireOutpoints, err := toWireOutpoints(in.Outpoints)
		if err != nil {
			return nil, fmt.Errorf("can't create outpoints "+
				"%w", err)
		}

		if fn.HasDuplicates(wireOutpoints) {
			return nil, fmt.Errorf("selected outpoints contain " +
				"duplicate values")
		}

		selectOutpoints = fn.NewSet(wireOutpoints...)
	}

	// If the send all flag is active, then we'll attempt to sweep all the
	// coins in the wallet in a single transaction (if possible),
	// otherwise, we'll respect the amount, and attempt a regular 2-output
	// send.
	if in.SendAll {
		// At this point, the amount shouldn't be set since we've been
		// instructed to sweep all the coins from the wallet.
		if in.Amount != 0 {
			return nil, fmt.Errorf("amount set while SendAll is " +
				"active")
		}

		_, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
		if err != nil {
			return nil, err
		}

		// With the sweeper instance created, we can now generate a
		// transaction that will sweep ALL outputs from the wallet in a
		// single transaction.
		// This will be generated in a concurrent safe manner, so no
		// need to worry about locking. The tx will pay to the change
		// address created above if we needed to reserve any value, the
		// rest will go to targetAddr.
		sweepTxPkg, err := sweep.CraftSweepAllTx(
			feePerKw, maxFeeRate, uint32(bestHeight), nil,
			targetAddr, wallet, wallet, wallet.WalletController,
			r.server.cc.Signer, minConfs, selectOutpoints,
		)
		if err != nil {
			return nil, err
		}

		// Before we publish the transaction we make sure it won't
		// violate our reserved wallet value.
		var reservedVal btcutil.Amount
		err = wallet.WithCoinSelectLock(func() error {
			var err error
			reservedVal, err = wallet.CheckReservedValueTx(
				lnwallet.CheckReservedValueTxReq{
					Tx: sweepTxPkg.SweepTx,
				},
			)
			return err
		})

		// If sending everything to this address would invalidate our
		// reserved wallet balance, we create a new sweep tx, where
		// we'll send the reserved value back to our wallet.
		if err == lnwallet.ErrReservedValueInvalidated {
			sweepTxPkg.CancelSweepAttempt()

			rpcsLog.Debugf("Reserved value %v not satisfied after "+
				"send_all, trying with change output",
				reservedVal)

			// We'll request a change address from the wallet,
			// where we'll send this reserved value back to. This
			// ensures this is an address the wallet knows about,
			// allowing us to pass the reserved value check.
			changeAddr, err := r.server.cc.Wallet.NewAddress(
				lnwallet.TaprootPubkey, true,
				lnwallet.DefaultAccountName,
			)
			if err != nil {
				return nil, err
			}

			// Send the reserved value to this change address, the
			// remaining funds will go to the targetAddr.
			outputs := []sweep.DeliveryAddr{
				{
					Addr: changeAddr,
					Amt:  reservedVal,
				},
			}

			sweepTxPkg, err = sweep.CraftSweepAllTx(
				feePerKw, maxFeeRate, uint32(bestHeight),
				outputs, targetAddr, wallet, wallet,
				wallet.WalletController,
				r.server.cc.Signer, minConfs, selectOutpoints,
			)
			if err != nil {
				return nil, err
			}

			// Sanity check the new tx by re-doing the check.
			err = wallet.WithCoinSelectLock(func() error {
				_, err := wallet.CheckReservedValueTx(
					lnwallet.CheckReservedValueTxReq{
						Tx: sweepTxPkg.SweepTx,
					},
				)
				return err
			})
			if err != nil {
				sweepTxPkg.CancelSweepAttempt()

				return nil, err
			}
		} else if err != nil {
			sweepTxPkg.CancelSweepAttempt()

			return nil, err
		}

		rpcsLog.Debugf("Sweeping coins from wallet to addr=%v, "+
			"with tx=%v", in.Addr,
			lnutils.SpewLogClosure(sweepTxPkg.SweepTx))

		// As our sweep transaction was created successfully, we'll
		// now attempt to publish it, cancelling the sweep pkg to
		// return all outputs if it fails.
		err = wallet.PublishTransaction(sweepTxPkg.SweepTx, label)
		if err != nil {
			sweepTxPkg.CancelSweepAttempt()

			return nil, fmt.Errorf("unable to broadcast sweep "+
				"transaction: %v", err)
		}

		sweepTXID := sweepTxPkg.SweepTx.TxHash()
		txid = &sweepTXID
	} else {

		// We'll now construct our payment map, and use the wallet's
		// coin selection synchronization method to ensure that no coin
		// selection (funding, sweep alls, other sends) can proceed
		// while we instruct the wallet to send this transaction.
		paymentMap := map[string]int64{targetAddr.String(): in.Amount}
		err := wallet.WithCoinSelectLock(func() error {
			newTXID, err := r.sendCoinsOnChain(
				paymentMap, feePerKw, minConfs, label,
				coinSelectionStrategy, selectOutpoints,
			)
			if err != nil {
				return err
			}

			txid = newTXID

			return nil
		})
		if err != nil {
			return nil, err
		}
	}

	rpcsLog.Infof("[sendcoins] spend generated txid: %v", txid.String())

	return &lnrpc.SendCoinsResponse{Txid: txid.String()}, nil
}

// SendMany handles a request for a transaction that creates multiple specified
// outputs in parallel.
func (r *rpcServer) SendMany(ctx context.Context,
	in *lnrpc.SendManyRequest) (*lnrpc.SendManyResponse, error) {

	// Keep the old behavior prior to 0.18.0 - when the user doesn't set
	// fee rate or conf target, the default conf target of 6 is used.
	targetConf := maybeUseDefaultConf(
		in.SatPerByte, in.SatPerVbyte, uint32(in.TargetConf),
	)

	// Calculate an appropriate fee rate for this transaction.
	feePerKw, err := lnrpc.CalculateFeeRate(
		uint64(in.SatPerByte), in.SatPerVbyte, // nolint:staticcheck
		targetConf, r.server.cc.FeeEstimator,
	)
	if err != nil {
		return nil, err
	}

	// Then, we'll extract the minimum number of confirmations that each
	// output we use to fund the transaction should satisfy.
	minConfs, err := lnrpc.ExtractMinConfs(in.MinConfs, in.SpendUnconfirmed)
	if err != nil {
		return nil, err
	}

	label, err := labels.ValidateAPI(in.Label)
	if err != nil {
		return nil, err
	}

	coinSelectionStrategy, err := lnrpc.UnmarshallCoinSelectionStrategy(
		in.CoinSelectionStrategy,
		r.server.cc.Wallet.Cfg.CoinSelectionStrategy,
	)
	if err != nil {
		return nil, err
	}

	rpcsLog.Infof("[sendmany] outputs=%v, sat/kw=%v",
		lnutils.SpewLogClosure(in.AddrToAmount), int64(feePerKw))

	var txid *chainhash.Hash

	// We'll attempt to send to the target set of outputs, ensuring that we
	// synchronize with any other ongoing coin selection attempts which
	// happen to also be concurrently executing.
	wallet := r.server.cc.Wallet
	err = wallet.WithCoinSelectLock(func() error {
		sendManyTXID, err := r.sendCoinsOnChain(
			in.AddrToAmount, feePerKw, minConfs, label,
			coinSelectionStrategy, nil,
		)
		if err != nil {
			return err
		}

		txid = sendManyTXID

		return nil
	})
	if err != nil {
		return nil, err
	}

	rpcsLog.Infof("[sendmany] spend generated txid: %v", txid.String())

	return &lnrpc.SendManyResponse{Txid: txid.String()}, nil
}
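// A minimal client-side sketch of calling SendMany over gRPC, assuming an
// lnrpc.LightningClient named client is already connected (the names and
// addresses here are hypothetical):
//
//	resp, err := client.SendMany(ctx, &lnrpc.SendManyRequest{
//		AddrToAmount: map[string]int64{
//			"bcrt1q...": 100_000,
//			"bcrt1p...": 250_000,
//		},
//		TargetConf: 6,
//	})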
// NewAddress creates a new address under control of the local wallet.
func (r *rpcServer) NewAddress(ctx context.Context,
	in *lnrpc.NewAddressRequest) (*lnrpc.NewAddressResponse, error) {

	// Always use the default wallet account unless one was specified.
	account := lnwallet.DefaultAccountName
	if in.Account != "" {
		account = in.Account
	}

	// Translate the gRPC proto address type to the wallet controller's
	// available address types.
	var (
		addr btcutil.Address
		err  error
	)
	switch in.Type {
	case lnrpc.AddressType_WITNESS_PUBKEY_HASH:
		addr, err = r.server.cc.Wallet.NewAddress(
			lnwallet.WitnessPubKey, false, account,
		)
		if err != nil {
			return nil, err
		}

	case lnrpc.AddressType_NESTED_PUBKEY_HASH:
		addr, err = r.server.cc.Wallet.NewAddress(
			lnwallet.NestedWitnessPubKey, false, account,
		)
		if err != nil {
			return nil, err
		}

	case lnrpc.AddressType_TAPROOT_PUBKEY:
		addr, err = r.server.cc.Wallet.NewAddress(
			lnwallet.TaprootPubkey, false, account,
		)
		if err != nil {
			return nil, err
		}

	case lnrpc.AddressType_UNUSED_WITNESS_PUBKEY_HASH:
		addr, err = r.server.cc.Wallet.LastUnusedAddress(
			lnwallet.WitnessPubKey, account,
		)
		if err != nil {
			return nil, err
		}

	case lnrpc.AddressType_UNUSED_NESTED_PUBKEY_HASH:
		addr, err = r.server.cc.Wallet.LastUnusedAddress(
			lnwallet.NestedWitnessPubKey, account,
		)
		if err != nil {
			return nil, err
		}

	case lnrpc.AddressType_UNUSED_TAPROOT_PUBKEY:
		addr, err = r.server.cc.Wallet.LastUnusedAddress(
			lnwallet.TaprootPubkey, account,
		)
		if err != nil {
			return nil, err
		}

	default:
		return nil, fmt.Errorf("unknown address type: %v", in.Type)
	}

	rpcsLog.Debugf("[newaddress] account=%v type=%v addr=%v", account,
		in.Type, addr.String())
	return &lnrpc.NewAddressResponse{Address: addr.String()}, nil
}

var (
	// signedMsgPrefix is a special prefix that we'll prepend to any
	// messages we sign/verify. We do this to ensure that we don't
	// accidentally sign a sighash, or other sensitive material. By
	// prepending this fragment, we bind message signing to our particular
	// context.
	signedMsgPrefix = []byte("Lightning Signed Message:")
)

// SignMessage signs a message with the resident node's private key. The
// returned signature string is zbase32 encoded and pubkey recoverable, meaning
// that only the message digest and signature are needed for verification.
func (r *rpcServer) SignMessage(_ context.Context,
	in *lnrpc.SignMessageRequest) (*lnrpc.SignMessageResponse, error) {

	if in.Msg == nil {
		return nil, fmt.Errorf("need a message to sign")
	}

	in.Msg = append(signedMsgPrefix, in.Msg...)
	sigBytes, err := r.server.nodeSigner.SignMessageCompact(
		in.Msg, !in.SingleHash,
	)
	if err != nil {
		return nil, err
	}

	sig := zbase32.EncodeToString(sigBytes)
	return &lnrpc.SignMessageResponse{Signature: sig}, nil
}
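// A minimal client-side sketch of the sign/verify round trip, assuming a
// connected lnrpc.LightningClient named client (hypothetical usage; the same
// prefix and double-SHA256 digest are applied on both sides internally):
//
//	signResp, err := client.SignMessage(ctx, &lnrpc.SignMessageRequest{
//		Msg: []byte("hello"),
//	})
//	verifyResp, err := client.VerifyMessage(ctx, &lnrpc.VerifyMessageRequest{
//		Msg:       []byte("hello"),
//		Signature: signResp.Signature,
//	})
//	// verifyResp.Valid is only true if the recovered pubkey belongs to
//	// a node found in the channel graph.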
// VerifyMessage verifies a signature over a msg. The signature must be zbase32
// encoded and signed by an active node in the resident node's channel
// database. In addition to returning the validity of the signature,
// VerifyMessage also returns the recovered pubkey from the signature.
func (r *rpcServer) VerifyMessage(ctx context.Context,
	in *lnrpc.VerifyMessageRequest) (*lnrpc.VerifyMessageResponse, error) {

	if in.Msg == nil {
		return nil, fmt.Errorf("need a message to verify")
	}

	// The signature should be zbase32 encoded.
	sig, err := zbase32.DecodeString(in.Signature)
	if err != nil {
		return nil, fmt.Errorf("failed to decode signature: %w", err)
	}

	// The signature is over the double-sha256 hash of the message.
	in.Msg = append(signedMsgPrefix, in.Msg...)
	digest := chainhash.DoubleHashB(in.Msg)

	// RecoverCompact both recovers the pubkey and validates the signature.
	pubKey, _, err := ecdsa.RecoverCompact(sig, digest)
	if err != nil {
		return &lnrpc.VerifyMessageResponse{Valid: false}, nil
	}
	pubKeyHex := hex.EncodeToString(pubKey.SerializeCompressed())

	var pub [33]byte
	copy(pub[:], pubKey.SerializeCompressed())

	// Query the channel graph to ensure a node in the network with active
	// channels signed the message.
	//
	// TODO(phlip9): Require valid nodes to have capital in active channels.
	graph := r.server.v1Graph
	active, err := graph.HasNode(ctx, pub)
	if err != nil {
		return nil, fmt.Errorf("failed to query graph: %w", err)
	}

	return &lnrpc.VerifyMessageResponse{
		Valid:  active,
		Pubkey: pubKeyHex,
	}, nil
}

// ConnectPeer attempts to establish a connection to a remote peer.
func (r *rpcServer) ConnectPeer(ctx context.Context,
	in *lnrpc.ConnectPeerRequest) (*lnrpc.ConnectPeerResponse, error) {

	// The server hasn't yet started, so it won't be able to service any of
	// our requests, so we'll bail early here.
	if !r.server.Started() {
		return nil, ErrServerNotActive
	}

	if in.Addr == nil {
		return nil, fmt.Errorf("need: lnc pubkeyhash@hostname")
	}

	pubkeyHex, err := hex.DecodeString(in.Addr.Pubkey)
	if err != nil {
		return nil, err
	}
	pubKey, err := btcec.ParsePubKey(pubkeyHex)
	if err != nil {
		return nil, err
	}

	// Connections to ourselves are disallowed for obvious reasons.
	if pubKey.IsEqual(r.server.identityECDH.PubKey()) {
		return nil, fmt.Errorf("cannot make connection to self")
	}

	addr, err := parseAddr(in.Addr.Host, r.cfg.net)
	if err != nil {
		return nil, err
	}

	peerAddr := &lnwire.NetAddress{
		IdentityKey: pubKey,
		Address:     addr,
		ChainNet:    r.cfg.ActiveNetParams.Net,
	}

	rpcsLog.Debugf("[connectpeer] requested connection to %x@%s",
		peerAddr.IdentityKey.SerializeCompressed(), peerAddr.Address)

	// By default, we will use the global connection timeout value.
	timeout := r.cfg.ConnectionTimeout

	// Check if the connection timeout is set. If set, we will use it in
	// our request.
1885 if in.Timeout != 0 { 1886 timeout = time.Duration(in.Timeout) * time.Second 1887 rpcsLog.Debugf("[connectpeer] connection timeout is set to %v", 1888 timeout) 1889 } 1890 1891 if err := r.server.ConnectToPeer( 1892 peerAddr, in.Perm, timeout, 1893 ); err != nil { 1894 rpcsLog.Errorf("[connectpeer]: error connecting to peer: %v", 1895 err) 1896 return nil, err 1897 } 1898 1899 rpcsLog.Debugf("Connected to peer: %v", peerAddr.String()) 1900 1901 return &lnrpc.ConnectPeerResponse{ 1902 Status: fmt.Sprintf("connection to %v initiated", 1903 peerAddr.String()), 1904 }, nil 1905 } 1906 1907 // DisconnectPeer attempts to disconnect one peer from another identified by a 1908 // given pubKey. In the case that we currently have a pending or active channel 1909 // with the target peer, this action will be disallowed. 1910 func (r *rpcServer) DisconnectPeer(ctx context.Context, 1911 in *lnrpc.DisconnectPeerRequest) (*lnrpc.DisconnectPeerResponse, error) { 1912 1913 rpcsLog.Debugf("[disconnectpeer] from peer(%s)", in.PubKey) 1914 1915 if !r.server.Started() { 1916 return nil, ErrServerNotActive 1917 } 1918 1919 // First we'll validate the string passed in within the request to 1920 // ensure that it's a valid hex-string, and also a valid compressed 1921 // public key. 1922 pubKeyBytes, err := hex.DecodeString(in.PubKey) 1923 if err != nil { 1924 return nil, fmt.Errorf("unable to decode pubkey bytes: %w", err) 1925 } 1926 peerPubKey, err := btcec.ParsePubKey(pubKeyBytes) 1927 if err != nil { 1928 return nil, fmt.Errorf("unable to parse pubkey: %w", err) 1929 } 1930 1931 // Next, we'll fetch the pending/active channels we have with a 1932 // particular peer. 1933 nodeChannels, err := r.server.chanStateDB.FetchOpenChannels(peerPubKey) 1934 if err != nil { 1935 return nil, fmt.Errorf("unable to fetch channels for peer: %w", 1936 err) 1937 } 1938 1939 // In order to avoid erroneously disconnecting from a peer that we have 1940 // an active channel with, if we have any channels active with this 1941 // peer, then we'll disallow disconnecting from them in certain 1942 // situations. 1943 if len(nodeChannels) != 0 { 1944 // If the configured dev value `unsafedisconnect` is false, we 1945 // return an error since there are active channels. For 1946 // production environments, we allow disconnecting from a peer 1947 // even if there are channels active with them. 1948 if !r.cfg.Dev.GetUnsafeDisconnect() { 1949 return nil, fmt.Errorf("cannot disconnect from "+ 1950 "peer(%x), still has %d active channels", 1951 pubKeyBytes, len(nodeChannels)) 1952 } 1953 1954 // We are in a dev environment, print a warning log and 1955 // disconnect. 1956 rpcsLog.Warnf("UnsafeDisconnect mode, disconnecting from "+ 1957 "peer(%x) while there are %d active channels", 1958 pubKeyBytes, len(nodeChannels)) 1959 } 1960 1961 // With all initial validation complete, we'll now request that the 1962 // server disconnects from the peer. 1963 err = r.server.DisconnectPeer(peerPubKey) 1964 if err != nil { 1965 return nil, fmt.Errorf("unable to disconnect peer: %w", err) 1966 } 1967 1968 return &lnrpc.DisconnectPeerResponse{ 1969 Status: "disconnect initiated", 1970 }, nil 1971 } 1972 1973 // newFundingShimAssembler returns a new fully populated 1974 // chanfunding.CannedAssembler using a FundingShim obtained from an RPC caller. 
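//
// The shim is expected to carry the remote party's funding key, our own full
// key descriptor (raw key bytes plus key locator), the channel point of the
// externally crafted funding transaction, and a 32-byte pending channel ID;
// the sanity checks at the top of the function reject anything incomplete.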
1975 func newFundingShimAssembler(chanPointShim *lnrpc.ChanPointShim, initiator bool, 1976 keyRing keychain.KeyRing) (chanfunding.Assembler, error) { 1977 1978 // Perform some basic sanity checks to ensure that all the expected 1979 // fields are populated. 1980 switch { 1981 case chanPointShim.RemoteKey == nil: 1982 return nil, fmt.Errorf("remote key not set") 1983 1984 case chanPointShim.LocalKey == nil: 1985 return nil, fmt.Errorf("local key desc not set") 1986 1987 case chanPointShim.LocalKey.RawKeyBytes == nil: 1988 return nil, fmt.Errorf("local raw key bytes not set") 1989 1990 case chanPointShim.LocalKey.KeyLoc == nil: 1991 return nil, fmt.Errorf("local key loc not set") 1992 1993 case chanPointShim.ChanPoint == nil: 1994 return nil, fmt.Errorf("chan point not set") 1995 1996 case len(chanPointShim.PendingChanId) != 32: 1997 return nil, fmt.Errorf("pending chan ID not set") 1998 } 1999 2000 // First, we'll map the RPC's channel point to one we can actually use. 2001 index := chanPointShim.ChanPoint.OutputIndex 2002 txid, err := lnrpc.GetChanPointFundingTxid(chanPointShim.ChanPoint) 2003 if err != nil { 2004 return nil, err 2005 } 2006 chanPoint := wire.NewOutPoint(txid, index) 2007 2008 // Next we'll parse out the remote party's funding key, as well as our 2009 // full key descriptor. 2010 remoteKey, err := btcec.ParsePubKey(chanPointShim.RemoteKey) 2011 if err != nil { 2012 return nil, err 2013 } 2014 2015 shimKeyDesc := chanPointShim.LocalKey 2016 localKey, err := btcec.ParsePubKey(shimKeyDesc.RawKeyBytes) 2017 if err != nil { 2018 return nil, err 2019 } 2020 localKeyDesc := keychain.KeyDescriptor{ 2021 PubKey: localKey, 2022 KeyLocator: keychain.KeyLocator{ 2023 Family: keychain.KeyFamily( 2024 shimKeyDesc.KeyLoc.KeyFamily, 2025 ), 2026 Index: uint32(shimKeyDesc.KeyLoc.KeyIndex), 2027 }, 2028 } 2029 2030 // Verify that if we re-derive this key according to the passed 2031 // KeyLocator, that we get the exact same key back. Otherwise, we may 2032 // end up in a situation where we aren't able to actually sign for this 2033 // newly created channel. 2034 derivedKey, err := keyRing.DeriveKey(localKeyDesc.KeyLocator) 2035 if err != nil { 2036 return nil, err 2037 } 2038 if !derivedKey.PubKey.IsEqual(localKey) { 2039 return nil, fmt.Errorf("KeyLocator does not match attached " + 2040 "raw pubkey") 2041 } 2042 2043 // With all the parts assembled, we can now make the canned assembler 2044 // to pass into the wallet. 2045 // 2046 // TODO(roasbeef): update to support musig2 2047 return chanfunding.NewCannedAssembler( 2048 chanPointShim.ThawHeight, *chanPoint, 2049 btcutil.Amount(chanPointShim.Amt), &localKeyDesc, 2050 remoteKey, initiator, chanPointShim.Musig2, 2051 ), nil 2052 } 2053 2054 // newPsbtAssembler returns a new fully populated 2055 // chanfunding.PsbtAssembler using a FundingShim obtained from an RPC caller. 2056 func newPsbtAssembler(req *lnrpc.OpenChannelRequest, 2057 psbtShim *lnrpc.PsbtShim, netParams *chaincfg.Params) ( 2058 chanfunding.Assembler, error) { 2059 2060 var ( 2061 packet *psbt.Packet 2062 err error 2063 ) 2064 2065 // Perform some basic sanity checks to ensure that all the expected 2066 // fields are populated and none of the incompatible fields are. 
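	// Note that fee estimation settings are rejected below: with PSBT
	// funding the effective fee is determined by the externally crafted
	// transaction itself, so there is nothing for lnd to estimate.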
	if len(psbtShim.PendingChanId) != 32 {
		return nil, fmt.Errorf("pending chan ID not set")
	}
	if req.SatPerByte != 0 || req.SatPerVbyte != 0 || req.TargetConf != 0 { // nolint:staticcheck
		return nil, fmt.Errorf("specifying fee estimation parameters " +
			"is not supported for PSBT funding")
	}

	// The base PSBT is optional. But if it's set, it has to be a valid,
	// binary serialized PSBT.
	if len(psbtShim.BasePsbt) > 0 {
		packet, err = psbt.NewFromRawBytes(
			bytes.NewReader(psbtShim.BasePsbt), false,
		)
		if err != nil {
			return nil, fmt.Errorf("error parsing base PSBT: %w",
				err)
		}
	}

	// With all the parts assembled, we can now make the canned assembler
	// to pass into the wallet.
	return chanfunding.NewPsbtAssembler(
		btcutil.Amount(req.LocalFundingAmount), packet, netParams,
		!psbtShim.NoPublish,
	), nil
}

// canOpenChannel returns an error if the necessary subsystems for channel
// funding are not ready.
func (r *rpcServer) canOpenChannel() error {
	// We can't open a channel until the main server has started.
	if !r.server.Started() {
		return ErrServerNotActive
	}

	// Creation of channels before the wallet syncs up is currently
	// disallowed.
	isSynced, _, err := r.server.cc.Wallet.IsSynced()
	if err != nil {
		return err
	}
	if !isSynced {
		return errors.New("channels cannot be created before the " +
			"wallet is fully synced")
	}

	return nil
}

// parseOpenChannelReq parses an OpenChannelRequest message into an
// InitFundingMsg struct. The logic is abstracted so that it can be shared
// between OpenChannel and OpenChannelSync.
func (r *rpcServer) parseOpenChannelReq(in *lnrpc.OpenChannelRequest,
	isSync bool) (*funding.InitFundingMsg, error) {

	rpcsLog.Debugf("[openchannel] request to NodeKey(%x) "+
		"allocation(us=%v, them=%v)", in.NodePubkey,
		in.LocalFundingAmount, in.PushSat)

	localFundingAmt := btcutil.Amount(in.LocalFundingAmount)
	remoteInitialBalance := btcutil.Amount(in.PushSat)

	// If we are not committing the maximum viable balance towards the
	// channel, then the local funding amount must be specified. If FundMax
	// is set, the funding amount is instead determined downstream, within
	// the interval between the minimum funding amount and the configured
	// maximum channel size.
	if !in.FundMax && localFundingAmt == 0 {
		return nil, fmt.Errorf("local funding amount must be non-zero")
	}

	// Ensure that the initial balance of the remote party (if pushing
	// satoshis) does not exceed the amount the local party has requested
	// for funding. This is only checked if we are not committing the
	// maximum viable amount towards the channel balance. If we do commit
	// the maximum then the remote balance is checked in a dedicated
	// FundMax check.
	if !in.FundMax && remoteInitialBalance >= localFundingAmt {
		return nil, fmt.Errorf("amount pushed to remote peer for " +
			"initial state must be below the local funding amount")
	}

	// We allow either the fundmax flow or the psbt flow, hence we return
	// an error if both are set.
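	// (FundMax asks the wallet to pick the inputs and the amount itself,
	// while a funding shim supplies the funding transaction externally, so
	// the two cannot be combined.)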
	if in.FundingShim != nil && in.FundMax {
		return nil, fmt.Errorf("cannot provide a psbt funding shim " +
			"while committing the maximum wallet balance towards " +
			"the channel opening")
	}

	// Fetch our own feature set and determine wumbo support early, as it's
	// needed for both FundMax and explicit amount validation.
	globalFeatureSet := r.server.featureMgr.Get(feature.SetNodeAnn)
	wumboEnabled := globalFeatureSet.HasFeature(
		lnwire.WumboChannelsOptional,
	)

	// If the FundMax flag is set, ensure that the acceptable minimum local
	// amount adheres to the amount to be pushed to the remote, and to
	// current rules, while also respecting the protocol-level maximum
	// channel size.
	var minFundAmt, fundUpToMaxAmt btcutil.Amount
	if in.FundMax {
		// Use the protocol-level maximum as the upper bound for our
		// funding attempt.
		if wumboEnabled {
			fundUpToMaxAmt = funding.MaxBtcFundingAmountWumbo
		} else {
			fundUpToMaxAmt = MaxFundingAmount
		}

		// Since the standard non-fundmax flow requires the minimum
		// funding amount to be at least in the amount of the initial
		// remote balance (push amount), we need to adjust the minimum
		// funding amount accordingly. We initially assume the minimum
		// allowed channel size as minimum funding amount.
		minFundAmt = funding.MinChanFundingSize

		// If minFundAmt is less than the initial remote balance we
		// simply assign the initial remote balance to minFundAmt in
		// order to fulfill the criterion. Whether or not this so
		// determined minimum amount is actually available is
		// ascertained downstream in the lnwallet's reservation
		// workflow.
		if remoteInitialBalance >= minFundAmt {
			minFundAmt = remoteInitialBalance
		}
	}

	minHtlcIn := lnwire.MilliSatoshi(in.MinHtlcMsat)
	remoteCsvDelay := uint16(in.RemoteCsvDelay)
	maxValue := lnwire.MilliSatoshi(in.RemoteMaxValueInFlightMsat)
	maxHtlcs := uint16(in.RemoteMaxHtlcs)
	remoteChanReserve := btcutil.Amount(in.RemoteChanReserveSat)

	// Determine if the user provided channel fees and, if so, pass them on
	// to the funding workflow.
	var channelBaseFee, channelFeeRate *uint64
	if in.UseBaseFee {
		channelBaseFee = &in.BaseFee
	}
	if in.UseFeeRate {
		channelFeeRate = &in.FeeRate
	}

	// Ensure that the remote channel reserve does not exceed 20% of the
	// channel capacity.
	if !in.FundMax && remoteChanReserve >= localFundingAmt/5 {
		return nil, fmt.Errorf("remote channel reserve must be less " +
			"than 20%% of the channel capacity")
	}

	// Ensure that the user doesn't exceed the current soft-limit for
	// channel size. If the funding amount is above the soft-limit, then
	// we'll reject the request.
	// If the FundMax flag is set the local amount is determined downstream
	// in the wallet, hence we do not check it here against the maximum
	// funding amount. Only if localFundingAmt is specified can we check
	// whether it exceeds the maximum funding amount.
	if !in.FundMax && !wumboEnabled && localFundingAmt > MaxFundingAmount {
		return nil, fmt.Errorf("funding amount is too large, the max "+
			"channel size is: %v", MaxFundingAmount)
	}

	// Restrict the size of the channel we'll actually open. At a later
	// level, we'll ensure that the output we create, after accounting for
	// fees, does not leave a dust output. In case of the FundMax flow,
	// dedicated checks ensure that the lower boundary of the channel size
	// is at least in the amount of MinChanFundingSize, or potentially
	// higher if a remote balance is specified.
	if !in.FundMax && localFundingAmt < funding.MinChanFundingSize {
		return nil, fmt.Errorf("channel is too small, the minimum "+
			"channel size is: %v SAT",
			int64(funding.MinChanFundingSize))
	}

	// Prevent users from submitting a max-htlc value that would exceed the
	// protocol maximum.
	if maxHtlcs > input.MaxHTLCNumber/2 {
		return nil, fmt.Errorf("remote-max-htlcs (%v) cannot be "+
			"greater than %v", maxHtlcs, input.MaxHTLCNumber/2)
	}

	// Then, we'll extract the minimum number of confirmations that each
	// output we use to fund the channel's funding transaction should
	// satisfy.
	minConfs, err := lnrpc.ExtractMinConfs(in.MinConfs, in.SpendUnconfirmed)
	if err != nil {
		return nil, err
	}

	// TODO(roasbeef): also return channel ID?

	var nodePubKey *btcec.PublicKey

	// Parse the remote pubkey from the NodePubkey field of the request. If
	// it's not present, we'll fall back to the deprecated hex-string field
	// that REST clients use for backwards compatibility.
	switch {
	// Parse the raw bytes of the node key into a pubkey object so we can
	// easily manipulate it.
	case len(in.NodePubkey) > 0:
		nodePubKey, err = btcec.ParsePubKey(in.NodePubkey)
		if err != nil {
			return nil, err
		}

	// Decode the provided target node's public key, parsing it into a pub
	// key object. For all sync calls, byte slices are expected to be
	// encoded as hex strings.
	case isSync:
		keyBytes, err := hex.DecodeString(in.NodePubkeyString) // nolint:staticcheck
		if err != nil {
			return nil, err
		}

		nodePubKey, err = btcec.ParsePubKey(keyBytes)
		if err != nil {
			return nil, err
		}

	default:
		return nil, fmt.Errorf("NodePubkey is not set")
	}

	// Making a channel to ourselves wouldn't be of any use, so we
	// explicitly disallow them.
	if nodePubKey.IsEqual(r.server.identityECDH.PubKey()) {
		return nil, fmt.Errorf("cannot open channel to self")
	}

	// NOTE: We also need to do the fee rate calculation for the psbt
	// funding flow because the `batchfund` depends on it.
	targetConf := maybeUseDefaultConf(
		in.SatPerByte, in.SatPerVbyte, uint32(in.TargetConf),
	)

	// Calculate an appropriate fee rate for this transaction.
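	// The estimator works in sat/kw. As a quick sanity reference: one
	// vbyte corresponds to four weight units, so a rate of 5 sat/vByte
	// equals 5000 sat/kvB, or 1250 sat/kw.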
2304 feeRate, err := lnrpc.CalculateFeeRate( 2305 uint64(in.SatPerByte), in.SatPerVbyte, 2306 targetConf, r.server.cc.FeeEstimator, 2307 ) 2308 if err != nil { 2309 return nil, err 2310 } 2311 2312 rpcsLog.Debugf("[openchannel]: using fee of %v sat/kw for "+ 2313 "funding tx", int64(feeRate)) 2314 2315 script, err := chancloser.ParseUpfrontShutdownAddress( 2316 in.CloseAddress, r.cfg.ActiveNetParams.Params, 2317 ) 2318 if err != nil { 2319 return nil, fmt.Errorf("error parsing upfront shutdown: %w", 2320 err) 2321 } 2322 2323 var channelType *lnwire.ChannelType 2324 switch in.CommitmentType { 2325 case lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE: 2326 if in.ZeroConf { 2327 return nil, fmt.Errorf("use anchors for zero-conf") 2328 } 2329 2330 case lnrpc.CommitmentType_LEGACY: 2331 channelType = new(lnwire.ChannelType) 2332 *channelType = lnwire.ChannelType(*lnwire.NewRawFeatureVector()) 2333 2334 case lnrpc.CommitmentType_STATIC_REMOTE_KEY: 2335 channelType = new(lnwire.ChannelType) 2336 *channelType = lnwire.ChannelType(*lnwire.NewRawFeatureVector( 2337 lnwire.StaticRemoteKeyRequired, 2338 )) 2339 2340 case lnrpc.CommitmentType_ANCHORS: 2341 channelType = new(lnwire.ChannelType) 2342 fv := lnwire.NewRawFeatureVector( 2343 lnwire.StaticRemoteKeyRequired, 2344 lnwire.AnchorsZeroFeeHtlcTxRequired, 2345 ) 2346 2347 if in.ZeroConf { 2348 fv.Set(lnwire.ZeroConfRequired) 2349 } 2350 2351 if in.ScidAlias { 2352 fv.Set(lnwire.ScidAliasRequired) 2353 } 2354 2355 *channelType = lnwire.ChannelType(*fv) 2356 2357 case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE: 2358 channelType = new(lnwire.ChannelType) 2359 fv := lnwire.NewRawFeatureVector( 2360 lnwire.StaticRemoteKeyRequired, 2361 lnwire.AnchorsZeroFeeHtlcTxRequired, 2362 lnwire.ScriptEnforcedLeaseRequired, 2363 ) 2364 2365 if in.ZeroConf { 2366 fv.Set(lnwire.ZeroConfRequired) 2367 } 2368 2369 if in.ScidAlias { 2370 fv.Set(lnwire.ScidAliasRequired) 2371 } 2372 2373 *channelType = lnwire.ChannelType(*fv) 2374 2375 case lnrpc.CommitmentType_SIMPLE_TAPROOT: 2376 // If the taproot channel type is being set, then the channel 2377 // MUST be private (unadvertised) for now. 2378 if !in.Private { 2379 return nil, fmt.Errorf("taproot channels must be " + 2380 "private") 2381 } 2382 2383 channelType = new(lnwire.ChannelType) 2384 fv := lnwire.NewRawFeatureVector( 2385 lnwire.SimpleTaprootChannelsRequiredStaging, 2386 ) 2387 2388 // TODO(roasbeef): no need for the rest as they're now 2389 // implicit? 2390 2391 if in.ZeroConf { 2392 fv.Set(lnwire.ZeroConfRequired) 2393 } 2394 2395 if in.ScidAlias { 2396 fv.Set(lnwire.ScidAliasRequired) 2397 } 2398 2399 *channelType = lnwire.ChannelType(*fv) 2400 2401 case lnrpc.CommitmentType_SIMPLE_TAPROOT_OVERLAY: 2402 // If the taproot overlay channel type is being set, then the 2403 // channel MUST be private. 2404 if !in.Private { 2405 return nil, fmt.Errorf("taproot overlay channels " + 2406 "must be private") 2407 } 2408 2409 channelType = new(lnwire.ChannelType) 2410 fv := lnwire.NewRawFeatureVector( 2411 lnwire.SimpleTaprootOverlayChansRequired, 2412 ) 2413 2414 if in.ZeroConf { 2415 fv.Set(lnwire.ZeroConfRequired) 2416 } 2417 2418 if in.ScidAlias { 2419 fv.Set(lnwire.ScidAliasRequired) 2420 } 2421 2422 *channelType = lnwire.ChannelType(*fv) 2423 2424 default: 2425 return nil, fmt.Errorf("unhandled request channel type %v", 2426 in.CommitmentType) 2427 } 2428 2429 // We limit the channel memo to be 500 characters long. This enforces 2430 // a reasonable upper bound on storage consumption. 
	// This also mimics the length limit for the label of a TX.
	const maxMemoLength = 500
	if len(in.Memo) > maxMemoLength {
		return nil, fmt.Errorf("provided memo (%s) is of length %d, "+
			"exceeds %d", in.Memo, len(in.Memo), maxMemoLength)
	}

	// Check if manually selected outpoints are present to fund a channel.
	var outpoints []wire.OutPoint
	if len(in.Outpoints) > 0 {
		outpoints, err = toWireOutpoints(in.Outpoints)
		if err != nil {
			return nil, fmt.Errorf("can't create outpoints: %w",
				err)
		}
	}

	// Instruct the server to trigger the necessary events to attempt to
	// open a new channel. A stream is returned in place; this stream will
	// be used to consume updates of the state of the pending channel.
	return &funding.InitFundingMsg{
		TargetPubkey:    nodePubKey,
		ChainHash:       *r.cfg.ActiveNetParams.GenesisHash,
		LocalFundingAmt: localFundingAmt,
		BaseFee:         channelBaseFee,
		FeeRate:         channelFeeRate,
		PushAmt: lnwire.NewMSatFromSatoshis(
			remoteInitialBalance,
		),
		MinHtlcIn:         minHtlcIn,
		FundingFeePerKw:   feeRate,
		Private:           in.Private,
		RemoteCsvDelay:    remoteCsvDelay,
		RemoteChanReserve: remoteChanReserve,
		MinConfs:          minConfs,
		ShutdownScript:    script,
		MaxValueInFlight:  maxValue,
		MaxHtlcs:          maxHtlcs,
		MaxLocalCsv:       uint16(in.MaxLocalCsv),
		ChannelType:       channelType,
		FundUpToMaxAmt:    fundUpToMaxAmt,
		MinFundAmt:        minFundAmt,
		Memo:              []byte(in.Memo),
		Outpoints:         outpoints,
	}, nil
}

// toWireOutpoints converts a list of outpoints from the RPC format to the wire
// format.
func toWireOutpoints(outpoints []*lnrpc.OutPoint) ([]wire.OutPoint, error) {
	var wireOutpoints []wire.OutPoint
	for _, outpoint := range outpoints {
		hash, err := chainhash.NewHashFromStr(outpoint.TxidStr)
		if err != nil {
			return nil, fmt.Errorf("cannot create chainhash")
		}

		wireOutpoint := wire.NewOutPoint(
			hash, outpoint.OutputIndex,
		)
		wireOutpoints = append(wireOutpoints, *wireOutpoint)
	}

	return wireOutpoints, nil
}

// OpenChannel attempts to open a singly funded channel specified in the
// request to a remote peer.
func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
	updateStream lnrpc.Lightning_OpenChannelServer) error {

	if err := r.canOpenChannel(); err != nil {
		return err
	}

	req, err := r.parseOpenChannelReq(in, false)
	if err != nil {
		return err
	}

	// If the user has provided a shim, then we'll now augment the base
	// open channel request with this additional logic.
	if in.FundingShim != nil {
		switch {
		// If we have a chan point shim, then this means the funding
		// transaction was crafted externally. In this case we only
		// need to hand a channel point down into the wallet.
		case in.FundingShim.GetChanPointShim() != nil:
			chanPointShim := in.FundingShim.GetChanPointShim()

			// Map the channel point shim into a new
			// chanfunding.CannedAssembler that the wallet will use
			// to obtain the channel point details.
			copy(req.PendingChanID[:], chanPointShim.PendingChanId)
			req.ChanFunder, err = newFundingShimAssembler(
				chanPointShim, true, r.server.cc.KeyRing,
			)
			if err != nil {
				return err
			}

		// If we have a PSBT shim, then this means the funding
		// transaction will be crafted outside of the wallet, once the
		// funding multisig output script is known. We'll create an
		// intent that will supervise the multi-step process.
		case in.FundingShim.GetPsbtShim() != nil:
			psbtShim := in.FundingShim.GetPsbtShim()

			// Instruct the wallet to use the new
			// chanfunding.PsbtAssembler to construct the funding
			// transaction.
			copy(req.PendingChanID[:], psbtShim.PendingChanId)

			// NOTE: For the PSBT case we do also allow unconfirmed
			// utxos to fund the psbt transaction because we make
			// sure we only use stable utxos.
			req.ChanFunder, err = newPsbtAssembler(
				in, psbtShim,
				&r.server.cc.Wallet.Cfg.NetParams,
			)
			if err != nil {
				return err
			}
		}
	}

	updateChan, errChan := r.server.OpenChannel(req)

	var outpoint wire.OutPoint
out:
	for {
		select {
		case err := <-errChan:
			rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
				req.TargetPubkey.SerializeCompressed(), err)
			return err
		case fundingUpdate := <-updateChan:
			rpcsLog.Tracef("[openchannel] sending update: %v",
				fundingUpdate)
			if err := updateStream.Send(fundingUpdate); err != nil {
				return err
			}

			// If a final channel open update is being sent, then
			// we can break out of our recv loop as we no longer
			// need to process any further updates.
			update, ok := fundingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanOpen)
			if ok {
				chanPoint := update.ChanOpen.ChannelPoint
				txid, err := lnrpc.GetChanPointFundingTxid(chanPoint)
				if err != nil {
					return err
				}
				outpoint = wire.OutPoint{
					Hash:  *txid,
					Index: chanPoint.OutputIndex,
				}

				break out
			}
		case <-r.quit:
			return nil
		}
	}

	rpcsLog.Tracef("[openchannel] success NodeKey(%x), ChannelPoint(%v)",
		req.TargetPubkey.SerializeCompressed(), outpoint)
	return nil
}

// OpenChannelSync is a synchronous version of the OpenChannel RPC call. This
// call is meant to be consumed by clients of the REST proxy. As with all other
// sync calls, all byte slices are instead populated as hex-encoded strings.
func (r *rpcServer) OpenChannelSync(ctx context.Context,
	in *lnrpc.OpenChannelRequest) (*lnrpc.ChannelPoint, error) {

	if err := r.canOpenChannel(); err != nil {
		return nil, err
	}

	req, err := r.parseOpenChannelReq(in, true)
	if err != nil {
		return nil, err
	}

	updateChan, errChan := r.server.OpenChannel(req)
	select {
	// If an error occurs, then immediately return the error to the client.
	case err := <-errChan:
		rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
			req.TargetPubkey.SerializeCompressed(), err)
		return nil, err

	// Otherwise, wait for the first channel update. The first update sent
	// is when the funding transaction is broadcast to the network.
	case fundingUpdate := <-updateChan:
		rpcsLog.Tracef("[openchannel] sending update: %v",
			fundingUpdate)

		// Parse out the txid of the pending funding transaction.
The 2631 // sync client can use this to poll against the list of 2632 // PendingChannels. 2633 openUpdate := fundingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending) 2634 chanUpdate := openUpdate.ChanPending 2635 2636 return &lnrpc.ChannelPoint{ 2637 FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ 2638 FundingTxidBytes: chanUpdate.Txid, 2639 }, 2640 OutputIndex: chanUpdate.OutputIndex, 2641 }, nil 2642 case <-r.quit: 2643 return nil, nil 2644 } 2645 } 2646 2647 // BatchOpenChannel attempts to open multiple single-funded channels in a 2648 // single transaction in an atomic way. This means either all channel open 2649 // requests succeed at once or all attempts are aborted if any of them fail. 2650 // This is the safer variant of using PSBTs to manually fund a batch of 2651 // channels through the OpenChannel RPC. 2652 func (r *rpcServer) BatchOpenChannel(ctx context.Context, 2653 in *lnrpc.BatchOpenChannelRequest) (*lnrpc.BatchOpenChannelResponse, 2654 error) { 2655 2656 if err := r.canOpenChannel(); err != nil { 2657 return nil, err 2658 } 2659 2660 // We need the wallet kit server to do the heavy lifting on the PSBT 2661 // part. If we didn't rely on re-using the wallet kit server's logic we 2662 // would need to re-implement everything here. Since we deliver lnd with 2663 // the wallet kit server enabled by default we can assume it's okay to 2664 // make this functionality dependent on that server being active. 2665 var walletKitServer walletrpc.WalletKitServer 2666 for _, subServer := range r.subServers { 2667 if subServer.Name() == walletrpc.SubServerName { 2668 walletKitServer = subServer.(walletrpc.WalletKitServer) 2669 } 2670 } 2671 if walletKitServer == nil { 2672 return nil, fmt.Errorf("batch channel open is only possible " + 2673 "if walletrpc subserver is active") 2674 } 2675 2676 rpcsLog.Debugf("[batchopenchannel] request to open batch of %d "+ 2677 "channels", len(in.Channels)) 2678 2679 // Make sure there is at least one channel to open. We could say we want 2680 // at least two channels for a batch. But maybe it's nice if developers 2681 // can use the same API for a single channel as well as a batch of 2682 // channels. 2683 if len(in.Channels) == 0 { 2684 return nil, fmt.Errorf("specify at least one channel") 2685 } 2686 2687 // In case we remove a pending channel from the database, we need to set 2688 // a close height, so we'll just use the current best known height. 2689 _, bestHeight, err := r.server.cc.ChainIO.GetBestBlock() 2690 if err != nil { 2691 return nil, fmt.Errorf("error fetching best block: %w", err) 2692 } 2693 2694 // So far everything looks good and we can now start the heavy lifting 2695 // that's done in the funding package. 
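	// In rough terms, the batcher below parses every request into an
	// InitFundingMsg, drives all funding flows against a single PSBT via
	// the walletrpc machinery, and uses the abandoner to roll back any
	// pending channels if one of the openings fails, which is what keeps
	// the batch atomic.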
	requestParser := func(req *lnrpc.OpenChannelRequest) (
		*funding.InitFundingMsg, error) {

		return r.parseOpenChannelReq(req, false)
	}
	channelAbandoner := func(point *wire.OutPoint) error {
		return r.abandonChan(point, uint32(bestHeight))
	}
	batcher := funding.NewBatcher(&funding.BatchConfig{
		RequestParser:    requestParser,
		ChannelAbandoner: channelAbandoner,
		ChannelOpener:    r.server.OpenChannel,
		WalletKitServer:  walletKitServer,
		Wallet:           r.server.cc.Wallet,
		NetParams:        &r.server.cc.Wallet.Cfg.NetParams,
		Quit:             r.quit,
	})
	rpcPoints, err := batcher.BatchFund(ctx, in)
	if err != nil {
		return nil, fmt.Errorf("batch funding failed: %w", err)
	}

	// Now all that's left to do is send back the response with the channel
	// points we created.
	return &lnrpc.BatchOpenChannelResponse{
		PendingChannels: rpcPoints,
	}, nil
}

// CloseChannel attempts to close an active channel identified by its channel
// point. The actions of this method can additionally be augmented to attempt
// a force close after a timeout period in the case of an inactive peer.
func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
	updateStream lnrpc.Lightning_CloseChannelServer) error {

	if !r.server.Started() {
		return ErrServerNotActive
	}

	// If the user didn't specify a channel point, then we'll reject this
	// request altogether.
	if in.GetChannelPoint() == nil {
		return fmt.Errorf("must specify channel point in close channel")
	}

	// If force closing a channel, the fee set in the commitment transaction
	// is used.
	if in.Force && (in.SatPerByte != 0 || in.SatPerVbyte != 0 || // nolint:staticcheck
		in.TargetConf != 0) {

		return fmt.Errorf("force closing a channel uses a pre-defined fee")
	}

	force := in.Force
	index := in.ChannelPoint.OutputIndex
	txid, err := lnrpc.GetChanPointFundingTxid(in.GetChannelPoint())
	if err != nil {
		rpcsLog.Errorf("[closechannel] unable to get funding txid: %v", err)
		return err
	}
	chanPoint := wire.NewOutPoint(txid, index)

	rpcsLog.Tracef("[closechannel] request for ChannelPoint(%v), force=%v",
		chanPoint, force)

	var (
		updateChan chan interface{}
		errChan    chan error
	)

	// TODO(roasbeef): if force and peer online then don't force?

	// First, we'll fetch the channel as is, as we'll need to examine it
	// regardless of if this is a force close or not.
	channel, err := r.server.chanStateDB.FetchChannel(*chanPoint)
	if err != nil {
		return err
	}

	// We can't coop or force close restored channels or channels that have
	// experienced local data loss. Normally we would detect this in the
	// channel arbitrator if the channel has the status
	// ChanStatusLocalDataLoss after connecting to its peer. But if no
	// connection can be established, the channel arbitrator doesn't know it
	// can't be force closed yet.
	if channel.HasChanStatus(channeldb.ChanStatusRestored) ||
		channel.HasChanStatus(channeldb.ChanStatusLocalDataLoss) {

		return fmt.Errorf("cannot close channel with state: %v",
			channel.ChanStatus())
	}

	// Retrieve the best height of the chain, which we'll use to complete
	// either closing flow.
	_, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
	if err != nil {
		return err
	}

	// Retrieve the number of active HTLCs on the channel.
	activeHtlcs := channel.ActiveHtlcs()

	// If a force closure was requested, then we'll handle all the details
	// around the creation and broadcast of the unilateral closure
	// transaction here rather than going to the switch as we don't require
	// interaction from the peer.
	if force {
		// As we're force closing this channel, as a precaution, we'll
		// ensure that the switch doesn't continue to see this channel
		// as eligible for forwarding HTLC's. If the peer is online,
		// then we'll also purge all of its indexes.
		remotePub := channel.IdentityPub
		if peer, err := r.server.FindPeer(remotePub); err == nil {
			// TODO(roasbeef): actually get the active channel
			// instead too?
			//  * so only need to grab from database
			peer.WipeChannel(&channel.FundingOutpoint)
		} else {
			chanID := lnwire.NewChanIDFromOutPoint(
				channel.FundingOutpoint,
			)
			r.server.htlcSwitch.RemoveLink(chanID)
		}

		// With the necessary indexes cleaned up, we'll now force close
		// the channel.
		chainArbitrator := r.server.chainArb
		closingTx, err := chainArbitrator.ForceCloseContract(
			*chanPoint,
		)
		if err != nil {
			rpcsLog.Errorf("unable to force close transaction: %v", err)
			return err
		}

		// Safety check which should never happen.
		//
		// TODO(ziggie): remove pointer as return value from
		// ForceCloseContract.
		if closingTx == nil {
			return fmt.Errorf("force close transaction is nil")
		}

		closingTxid := closingTx.TxHash()

		// With the transaction broadcast, we send our first update to
		// the client.
		updateChan = make(chan interface{}, 2)
		updateChan <- &peer.PendingUpdate{
			Txid: closingTxid[:],
		}

		errChan = make(chan error, 1)
		notifier := r.server.cc.ChainNotifier

		// For force closes, we notify the RPC client immediately after
		// 1 confirmation. The actual security-critical confirmation
		// waiting is handled by the channel arbitrator.
		numConfs := uint32(1)

		go peer.WaitForChanToClose(
			uint32(bestHeight), notifier, errChan, chanPoint,
			&closingTxid, closingTx.TxOut[0].PkScript, numConfs,
			func() {
				// Respond to the local subsystem which
				// requested the channel closure.
				updateChan <- &peer.ChannelCloseUpdate{
					ClosingTxid: closingTxid[:],
					Success:     true,
					// Force closure transactions don't have
					// additional local/remote outputs.
				}
			},
		)
	} else {
		// If this is a frozen channel, then we only allow the co-op
		// close to proceed if we were the responder to this channel,
		// or, as the initiator, once the absolute thaw height has been
		// met.
2874 if channel.IsInitiator { 2875 absoluteThawHeight, err := channel.AbsoluteThawHeight() 2876 if err != nil { 2877 return err 2878 } 2879 if uint32(bestHeight) < absoluteThawHeight { 2880 return fmt.Errorf("cannot co-op close frozen "+ 2881 "channel as initiator until height=%v, "+ 2882 "(current_height=%v)", 2883 absoluteThawHeight, bestHeight) 2884 } 2885 } 2886 2887 var ( 2888 chanInSwitch = true 2889 chanHasRbfCloser = r.server.ChanHasRbfCoopCloser( 2890 channel.IdentityPub, *chanPoint, 2891 ) 2892 ) 2893 2894 // If the link is not known by the switch, we cannot gracefully close 2895 // the channel. 2896 channelID := lnwire.NewChanIDFromOutPoint(*chanPoint) 2897 2898 if _, err := r.server.htlcSwitch.GetLink(channelID); err != nil { 2899 chanInSwitch = false 2900 2901 // The channel isn't in the switch, but if there's an 2902 // active chan closer for the channel, and it's of the 2903 // RBF variant, then we can actually bypass the switch. 2904 // Otherwise, we'll return an error. 2905 if !chanHasRbfCloser { 2906 rpcsLog.Debugf("Trying to non-force close "+ 2907 "offline channel with chan_point=%v", 2908 chanPoint) 2909 2910 return fmt.Errorf("unable to gracefully close "+ 2911 "channel while peer is offline (try "+ 2912 "force closing it instead): %v", err) 2913 } 2914 } 2915 2916 // Keep the old behavior prior to 0.18.0 - when the user 2917 // doesn't set fee rate or conf target, the default conf target 2918 // of 6 is used. 2919 targetConf := maybeUseDefaultConf( 2920 in.SatPerByte, in.SatPerVbyte, uint32(in.TargetConf), 2921 ) 2922 2923 // Based on the passed fee related parameters, we'll determine 2924 // an appropriate fee rate for the cooperative closure 2925 // transaction. 2926 feeRate, err := lnrpc.CalculateFeeRate( 2927 uint64(in.SatPerByte), in.SatPerVbyte, // nolint:staticcheck 2928 targetConf, r.server.cc.FeeEstimator, 2929 ) 2930 if err != nil { 2931 return err 2932 } 2933 2934 rpcsLog.Debugf("Target sat/kw for closing transaction: %v", 2935 int64(feeRate)) 2936 2937 // If the user hasn't specified NoWait, then before we attempt 2938 // to close the channel we ensure there are no active HTLCs on 2939 // the link. 2940 if !in.NoWait && len(activeHtlcs) != 0 { 2941 return fmt.Errorf("cannot coop close channel with "+ 2942 "active htlcs (number of active htlcs: %d), "+ 2943 "bypass this check and initiate the coop "+ 2944 "close by setting no_wait=true", 2945 len(activeHtlcs)) 2946 } 2947 2948 // Otherwise, the caller has requested a regular interactive 2949 // cooperative channel closure. So we'll forward the request to 2950 // the htlc switch which will handle the negotiation and 2951 // broadcast details. 2952 2953 var deliveryScript lnwire.DeliveryAddress 2954 2955 // If a delivery address to close out to was specified, decode it. 2956 if len(in.DeliveryAddress) > 0 { 2957 // Decode the address provided. 2958 addr, err := btcutil.DecodeAddress( 2959 in.DeliveryAddress, r.cfg.ActiveNetParams.Params, 2960 ) 2961 if err != nil { 2962 return fmt.Errorf("invalid delivery address: "+ 2963 "%v", err) 2964 } 2965 2966 if !addr.IsForNet(r.cfg.ActiveNetParams.Params) { 2967 return fmt.Errorf("delivery address is not "+ 2968 "for %s", 2969 r.cfg.ActiveNetParams.Params.Name) 2970 } 2971 2972 // Create a script to pay out to the address provided. 
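			// For example, a P2WPKH address decodes to a 22-byte
			// version-0 witness program of the form
			// OP_0 <20-byte-pubkey-hash>.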
			deliveryScript, err = txscript.PayToAddrScript(addr)
			if err != nil {
				return err
			}
		}

		maxFee := chainfee.SatPerKVByte(
			in.MaxFeePerVbyte * 1000,
		).FeePerKWeight()

		// In case the max fee was specified, we check if it's less than
		// the initial fee rate and abort if it is.
		if maxFee != 0 && maxFee < feeRate {
			return fmt.Errorf("max_fee_per_vbyte (%v) is less "+
				"than the required fee rate (%v)", maxFee,
				feeRate)
		}

		if chanHasRbfCloser && !chanInSwitch {
			rpcsLog.Infof("Bypassing Switch to do fee bump "+
				"for ChannelPoint(%v)", chanPoint)

			closeUpdates, err := r.server.AttemptRBFCloseUpdate(
				updateStream.Context(), *chanPoint, feeRate,
				deliveryScript,
			)
			if err != nil {
				return fmt.Errorf("unable to do RBF close "+
					"update: %w", err)
			}

			updateChan = closeUpdates.UpdateChan
			errChan = closeUpdates.ErrChan
		} else {
			maxFee := chainfee.SatPerKVByte(
				in.MaxFeePerVbyte * 1000,
			).FeePerKWeight()
			updateChan, errChan = r.server.htlcSwitch.CloseLink(
				updateStream.Context(), chanPoint,
				contractcourt.CloseRegular, feeRate, maxFee,
				deliveryScript,
			)
		}
	}

	// If the user doesn't want to wait for the txid to come back then we
	// will send an empty update to kick off the stream. This is also used
	// when active htlcs are still on the channel to give the client
	// immediate feedback.
	if in.NoWait {
		rpcsLog.Trace("[closechannel] sending instant update")
		if err := updateStream.Send(
			//nolint:ll
			&lnrpc.CloseStatusUpdate{
				Update: &lnrpc.CloseStatusUpdate_CloseInstant{
					CloseInstant: &lnrpc.InstantUpdate{
						NumPendingHtlcs: int32(len(activeHtlcs)),
					},
				},
			},
		); err != nil {
			return err
		}
	}

out:
	for {
		select {
		case err := <-errChan:
			rpcsLog.Errorf("[closechannel] unable to close "+
				"ChannelPoint(%v): %v", chanPoint, err)

			return err

		case closingUpdate := <-updateChan:
			rpcClosingUpdate, err := createRPCCloseUpdate(
				closingUpdate,
			)
			if err != nil {
				return err
			}

			err = fn.MapOptionZ(
				r.server.implCfg.AuxDataParser,
				func(parser AuxDataParser) error {
					return parser.InlineParseCustomData(
						rpcClosingUpdate,
					)
				},
			)
			if err != nil {
				return fmt.Errorf("error parsing custom data: "+
					"%w", err)
			}

			rpcsLog.Tracef("[closechannel] sending update: %v",
				rpcClosingUpdate)

			if err := updateStream.Send(rpcClosingUpdate); err != nil {
				return err
			}

			// If a final channel closing update is being sent,
			// then we can break out of our dispatch loop as we no
			// longer need to process any further updates.
3078 switch closeUpdate := closingUpdate.(type) { 3079 case *peer.ChannelCloseUpdate: 3080 h, _ := chainhash.NewHash(closeUpdate.ClosingTxid) 3081 rpcsLog.Infof("[closechannel] close completed: "+ 3082 "txid(%v)", h) 3083 3084 break out 3085 } 3086 3087 case <-r.quit: 3088 return nil 3089 } 3090 } 3091 3092 return nil 3093 } 3094 3095 func createRPCCloseUpdate( 3096 update interface{}) (*lnrpc.CloseStatusUpdate, error) { 3097 3098 switch u := update.(type) { 3099 case *peer.ChannelCloseUpdate: 3100 ccu := &lnrpc.ChannelCloseUpdate{ 3101 ClosingTxid: u.ClosingTxid, 3102 Success: u.Success, 3103 } 3104 3105 err := fn.MapOptionZ( 3106 u.LocalCloseOutput, 3107 func(closeOut types.CloseOutput) error { 3108 cr, err := closeOut.ShutdownRecords.Serialize() 3109 if err != nil { 3110 return fmt.Errorf("error serializing "+ 3111 "local close out custom "+ 3112 "records: %w", err) 3113 } 3114 3115 rpcCloseOut := &lnrpc.CloseOutput{ 3116 AmountSat: int64(closeOut.Amt), 3117 PkScript: closeOut.PkScript, 3118 IsLocal: true, 3119 CustomChannelData: cr, 3120 } 3121 ccu.LocalCloseOutput = rpcCloseOut 3122 3123 return nil 3124 }, 3125 ) 3126 if err != nil { 3127 return nil, err 3128 } 3129 3130 err = fn.MapOptionZ( 3131 u.RemoteCloseOutput, 3132 func(closeOut types.CloseOutput) error { 3133 cr, err := closeOut.ShutdownRecords.Serialize() 3134 if err != nil { 3135 return fmt.Errorf("error serializing "+ 3136 "remote close out custom "+ 3137 "records: %w", err) 3138 } 3139 3140 rpcCloseOut := &lnrpc.CloseOutput{ 3141 AmountSat: int64(closeOut.Amt), 3142 PkScript: closeOut.PkScript, 3143 CustomChannelData: cr, 3144 } 3145 ccu.RemoteCloseOutput = rpcCloseOut 3146 3147 return nil 3148 }, 3149 ) 3150 if err != nil { 3151 return nil, err 3152 } 3153 3154 u.AuxOutputs.WhenSome(func(outs chancloser.AuxCloseOutputs) { 3155 for _, out := range outs.ExtraCloseOutputs { 3156 ccu.AdditionalOutputs = append( 3157 ccu.AdditionalOutputs, 3158 &lnrpc.CloseOutput{ 3159 AmountSat: out.Value, 3160 PkScript: out.PkScript, 3161 IsLocal: out.IsLocal, 3162 }, 3163 ) 3164 } 3165 }) 3166 3167 return &lnrpc.CloseStatusUpdate{ 3168 Update: &lnrpc.CloseStatusUpdate_ChanClose{ 3169 ChanClose: ccu, 3170 }, 3171 }, nil 3172 3173 case *peer.PendingUpdate: 3174 upd := &lnrpc.PendingUpdate{ 3175 Txid: u.Txid, 3176 OutputIndex: u.OutputIndex, 3177 } 3178 3179 // Potentially set the optional fields that are only set for 3180 // the new RBF close flow. 3181 u.IsLocalCloseTx.WhenSome(func(isLocal bool) { 3182 upd.LocalCloseTx = isLocal 3183 }) 3184 u.FeePerVbyte.WhenSome(func(feeRate chainfee.SatPerVByte) { 3185 upd.FeePerVbyte = int64(feeRate) 3186 }) 3187 3188 return &lnrpc.CloseStatusUpdate{ 3189 Update: &lnrpc.CloseStatusUpdate_ClosePending{ 3190 ClosePending: upd, 3191 }, 3192 }, nil 3193 } 3194 3195 return nil, errors.New("unknown close status update") 3196 } 3197 3198 // abandonChanFromGraph attempts to remove a channel from the channel graph. If 3199 // we can't find the chanID in the graph, then we assume it has already been 3200 // removed, and will return a nop. 3201 func abandonChanFromGraph(chanGraph *graphdb.VersionedGraph, 3202 chanPoint *wire.OutPoint) error { 3203 3204 // First, we'll obtain the channel ID. If we can't locate this, then 3205 // it's the case that the channel may have already been removed from 3206 // the graph, so we'll return a nil error. 
	chanID, err := chanGraph.ChannelID(context.TODO(), chanPoint)
	switch {
	case errors.Is(err, graphdb.ErrEdgeNotFound):
		return nil
	case err != nil:
		return err
	}

	// If the channel ID is still in the graph, then that means the channel
	// is still open, so we'll now move to purge it from the graph.
	return chanGraph.DeleteChannelEdges(context.TODO(), false, true, chanID)
}

// abandonChan removes a channel from the database, graph and contract court.
func (r *rpcServer) abandonChan(chanPoint *wire.OutPoint,
	bestHeight uint32) error {

	// Before we remove the channel we cancel the rebroadcasting of the
	// transaction. If this transaction does not exist in the rebroadcast
	// queue anymore it is a noop.
	txid, err := chainhash.NewHash(chanPoint.Hash[:])
	if err != nil {
		return err
	}
	r.server.cc.Wallet.CancelRebroadcast(*txid)

	// Abandoning a channel is a three-step process: remove from the open
	// channel state, remove from the graph, remove from the contract
	// court. Between any step it's possible that the user restarts the
	// process all over again. As a result, each of the steps below is
	// intended to be idempotent.
	err = r.server.chanStateDB.AbandonChannel(chanPoint, bestHeight)
	if err != nil {
		return err
	}
	// TODO: update to support deletions for v2 channels.
	err = abandonChanFromGraph(r.server.v1Graph, chanPoint)
	if err != nil {
		return err
	}
	err = r.server.chainArb.ResolveContract(*chanPoint)
	if err != nil {
		return err
	}

	// If this channel was in the process of being closed, but didn't fully
	// close, then it's possible that the nursery is hanging on to some
	// state. To err on the side of caution, we'll now attempt to wipe any
	// state for this channel from the nursery.
	err = r.server.utxoNursery.RemoveChannel(chanPoint)
	if err != nil && err != contractcourt.ErrContractNotFound {
		return err
	}

	// Finally, notify the backup listeners that the channel can be removed
	// from any channel backups.
	r.server.channelNotifier.NotifyClosedChannelEvent(*chanPoint)

	return nil
}

// AbandonChannel removes all channel state from the database except for a
// close summary. This method can be used to get rid of permanently unusable
// channels due to bugs fixed in newer versions of lnd.
func (r *rpcServer) AbandonChannel(_ context.Context,
	in *lnrpc.AbandonChannelRequest) (*lnrpc.AbandonChannelResponse, error) {

	// If this isn't the dev build, then we won't allow the RPC to be
	// executed, as it's an advanced feature and won't be activated in
	// regular production/release builds except for the explicit case of
	// externally funded channels that are still pending. Due to repeated
	// requests, we also allow this requirement to be overwritten by a new
	// flag that attests to the user knowing what they're doing and the risk
	// associated with the command/RPC.
	if !in.IKnowWhatIAmDoing && !in.PendingFundingShimOnly &&
		!build.IsDevBuild() {

		return nil, fmt.Errorf("AbandonChannel RPC call only " +
			"available in dev builds")
	}

	// We'll parse out the arguments so we can obtain the chanPoint of the
	// target channel.
3290 txid, err := lnrpc.GetChanPointFundingTxid(in.GetChannelPoint()) 3291 if err != nil { 3292 return nil, err 3293 } 3294 index := in.ChannelPoint.OutputIndex 3295 chanPoint := wire.NewOutPoint(txid, index) 3296 3297 // When we remove the channel from the database, we need to set a close 3298 // height, so we'll just use the current best known height. 3299 _, bestHeight, err := r.server.cc.ChainIO.GetBestBlock() 3300 if err != nil { 3301 return nil, err 3302 } 3303 3304 dbChan, err := r.server.chanStateDB.FetchChannel(*chanPoint) 3305 switch { 3306 // If the channel isn't found in the set of open channels, then we can 3307 // continue on as it can't be loaded into the link/peer. 3308 case err == channeldb.ErrChannelNotFound: 3309 break 3310 3311 // If the channel is still known to be open, then before we modify any 3312 // on-disk state, we'll remove the channel from the switch and peer 3313 // state if it's been loaded in. 3314 case err == nil: 3315 // If the user requested the more safe version that only allows 3316 // the removal of externally (shim) funded channels that are 3317 // still pending, we enforce this option now that we know the 3318 // state of the channel. 3319 // 3320 // TODO(guggero): Properly store the funding type (wallet, shim, 3321 // PSBT) on the channel so we don't need to use the thaw height. 3322 isShimFunded := dbChan.ThawHeight > 0 3323 isPendingShimFunded := isShimFunded && dbChan.IsPending 3324 if !in.IKnowWhatIAmDoing && in.PendingFundingShimOnly && 3325 !isPendingShimFunded { 3326 3327 return nil, fmt.Errorf("channel %v is not externally "+ 3328 "funded or not pending", chanPoint) 3329 } 3330 3331 // We'll mark the channel as borked before we remove the state 3332 // from the switch/peer so it won't be loaded back in if the 3333 // peer reconnects. 3334 if err := dbChan.MarkBorked(); err != nil { 3335 return nil, err 3336 } 3337 remotePub := dbChan.IdentityPub 3338 if peer, err := r.server.FindPeer(remotePub); err == nil { 3339 peer.WipeChannel(chanPoint) 3340 } 3341 3342 default: 3343 return nil, err 3344 } 3345 3346 // Remove the channel from the graph, database and contract court. 3347 if err := r.abandonChan(chanPoint, uint32(bestHeight)); err != nil { 3348 return nil, err 3349 } 3350 3351 return &lnrpc.AbandonChannelResponse{ 3352 Status: fmt.Sprintf("channel %v abandoned", chanPoint.String()), 3353 }, nil 3354 } 3355 3356 // GetInfo returns general information concerning the lightning node including 3357 // its identity pubkey, alias, the chains it is connected to, and information 3358 // concerning the number of open+pending channels. 
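//
// A minimal gRPC consumption sketch (illustrative only; it assumes an already
// connected lnrpc.LightningClient named "client"):
//
//	info, err := client.GetInfo(ctx, &lnrpc.GetInfoRequest{})
//	if err != nil {
//		return err
//	}
//	fmt.Printf("pubkey=%s synced_to_chain=%v block_height=%d\n",
//		info.IdentityPubkey, info.SyncedToChain, info.BlockHeight)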
func (r *rpcServer) GetInfo(_ context.Context,
	_ *lnrpc.GetInfoRequest) (*lnrpc.GetInfoResponse, error) {

	serverPeers := r.server.Peers()

	openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
	if err != nil {
		return nil, err
	}

	var activeChannels uint32
	for _, channel := range openChannels {
		chanID := lnwire.NewChanIDFromOutPoint(channel.FundingOutpoint)
		if r.server.htlcSwitch.HasActiveLink(chanID) {
			activeChannels++
		}
	}

	inactiveChannels := uint32(len(openChannels)) - activeChannels

	pendingChannels, err := r.server.chanStateDB.FetchPendingChannels()
	if err != nil {
		return nil, fmt.Errorf("unable to retrieve pending "+
			"channels: %v", err)
	}
	nPendingChannels := uint32(len(pendingChannels))

	idPub := r.server.identityECDH.PubKey().SerializeCompressed()
	encodedIDPub := hex.EncodeToString(idPub)

	// Get the system's chain sync info.
	syncInfo, err := r.getChainSyncInfo()
	if err != nil {
		return nil, err
	}

	network := lncfg.NormalizeNetwork(r.cfg.ActiveNetParams.Name)
	activeChains := []*lnrpc.Chain{
		{
			Chain:   BitcoinChainName,
			Network: network,
		},
	}

	// Check if external IP addresses were provided to lnd and use them
	// to set the URIs.
	nodeAnn := r.server.getNodeAnnouncement()

	addrs := nodeAnn.Addresses
	uris := make([]string, len(addrs))
	for i, addr := range addrs {
		uris[i] = fmt.Sprintf("%s@%s", encodedIDPub, addr.String())
	}

	isGraphSynced := r.server.authGossiper.SyncManager().IsGraphSynced()

	features := make(map[uint32]*lnrpc.Feature)
	sets := r.server.featureMgr.ListSets()

	for _, set := range sets {
		// Get a list of lnrpc features for each set we support.
		featureVector := r.server.featureMgr.Get(set)
		rpcFeatures := invoicesrpc.CreateRPCFeatures(featureVector)

		// Add the features to our map of features, allowing
		// overwriting of existing values because features in different
		// sets with the same bit are duplicated across sets.
		maps.Copy(features, rpcFeatures)
	}

	// TODO(roasbeef): add synced height n stuff

	isTestNet := chainreg.IsTestnet(&r.cfg.ActiveNetParams)
	nodeColor := graphdb.EncodeHexColor(nodeAnn.RGBColor)
	version := build.Version() + " commit=" + build.Commit

	return &lnrpc.GetInfoResponse{
		IdentityPubkey:            encodedIDPub,
		NumPendingChannels:        nPendingChannels,
		NumActiveChannels:         activeChannels,
		NumInactiveChannels:       inactiveChannels,
		NumPeers:                  uint32(len(serverPeers)),
		BlockHeight:               uint32(syncInfo.bestHeight),
		BlockHash:                 syncInfo.blockHash.String(),
		SyncedToChain:             syncInfo.isSynced,
		Testnet:                   isTestNet,
		Chains:                    activeChains,
		Uris:                      uris,
		Alias:                     nodeAnn.Alias.String(),
		Color:                     nodeColor,
		BestHeaderTimestamp:       syncInfo.timestamp,
		Version:                   version,
		CommitHash:                build.CommitHash,
		SyncedToGraph:             isGraphSynced,
		Features:                  features,
		RequireHtlcInterceptor:    r.cfg.RequireInterceptor,
		StoreFinalHtlcResolutions: r.cfg.StoreFinalHtlcResolutions,
		WalletSynced:              syncInfo.isWalletSynced,
	}, nil
}

// GetDebugInfo returns debug information concerning the state of the daemon
// and its subsystems. This includes the full configuration and the latest log
// entries from the log file.
3463 func (r *rpcServer) GetDebugInfo(_ context.Context, 3464 _ *lnrpc.GetDebugInfoRequest) (*lnrpc.GetDebugInfoResponse, error) { 3465 3466 flatConfig, _, err := configToFlatMap(*r.cfg) 3467 if err != nil { 3468 return nil, fmt.Errorf("error converting config to flat map: "+ 3469 "%w", err) 3470 } 3471 3472 logFileName := filepath.Join(r.cfg.LogDir, defaultLogFilename) 3473 logContent, err := os.ReadFile(logFileName) 3474 if err != nil { 3475 return nil, fmt.Errorf("error reading log file '%s': %w", 3476 logFileName, err) 3477 } 3478 3479 return &lnrpc.GetDebugInfoResponse{ 3480 Config: flatConfig, 3481 Log: strings.Split(string(logContent), "\n"), 3482 }, nil 3483 } 3484 3485 // GetRecoveryInfo returns a boolean indicating whether the wallet is started 3486 // in recovery mode, whether the recovery is finished, and the progress made 3487 // so far. 3488 func (r *rpcServer) GetRecoveryInfo(ctx context.Context, 3489 in *lnrpc.GetRecoveryInfoRequest) (*lnrpc.GetRecoveryInfoResponse, error) { 3490 3491 isRecoveryMode, progress, err := r.server.cc.Wallet.GetRecoveryInfo() 3492 if err != nil { 3493 return nil, fmt.Errorf("unable to get wallet recovery info: %w", 3494 err) 3495 } 3496 3497 rpcsLog.Debugf("[getrecoveryinfo] is recovery mode=%v, progress=%v", 3498 isRecoveryMode, progress) 3499 3500 return &lnrpc.GetRecoveryInfoResponse{ 3501 RecoveryMode: isRecoveryMode, 3502 RecoveryFinished: progress == 1, 3503 Progress: progress, 3504 }, nil 3505 } 3506 3507 // ListPeers returns a verbose listing of all currently active peers. 3508 func (r *rpcServer) ListPeers(ctx context.Context, 3509 in *lnrpc.ListPeersRequest) (*lnrpc.ListPeersResponse, error) { 3510 3511 serverPeers := r.server.Peers() 3512 resp := &lnrpc.ListPeersResponse{ 3513 Peers: make([]*lnrpc.Peer, 0, len(serverPeers)), 3514 } 3515 3516 for _, serverPeer := range serverPeers { 3517 var ( 3518 satSent int64 3519 satRecv int64 3520 ) 3521 3522 // In order to display the total number of satoshis of outbound 3523 // (sent) and inbound (recv'd) satoshis that have been 3524 // transported through this peer, we'll sum up the sent/recv'd 3525 // values for each of the active channels we have with the 3526 // peer. 3527 chans := serverPeer.ChannelSnapshots() 3528 for _, c := range chans { 3529 satSent += int64(c.TotalMSatSent.ToSatoshis()) 3530 satRecv += int64(c.TotalMSatReceived.ToSatoshis()) 3531 } 3532 3533 nodePub := serverPeer.PubKey() 3534 3535 // Retrieve the peer's sync type. If we don't currently have a 3536 // syncer for the peer, then we'll default to a passive sync. 3537 // This can happen if the RPC is called while a peer is 3538 // initializing. 
3539 syncer, ok := r.server.authGossiper.SyncManager().GossipSyncer(
3540 nodePub,
3541 )
3542
3543 var lnrpcSyncType lnrpc.Peer_SyncType
3544 if !ok {
3545 rpcsLog.Warnf("Gossip syncer for peer=%x not found",
3546 nodePub)
3547 lnrpcSyncType = lnrpc.Peer_UNKNOWN_SYNC
3548 } else {
3549 syncType := syncer.SyncType()
3550 switch syncType {
3551 case discovery.ActiveSync:
3552 lnrpcSyncType = lnrpc.Peer_ACTIVE_SYNC
3553 case discovery.PassiveSync:
3554 lnrpcSyncType = lnrpc.Peer_PASSIVE_SYNC
3555 case discovery.PinnedSync:
3556 lnrpcSyncType = lnrpc.Peer_PINNED_SYNC
3557 default:
3558 return nil, fmt.Errorf("unhandled sync type %v",
3559 syncType)
3560 }
3561 }
3562
3563 features := invoicesrpc.CreateRPCFeatures(
3564 serverPeer.RemoteFeatures(),
3565 )
3566
3567 rpcPeer := &lnrpc.Peer{
3568 PubKey: hex.EncodeToString(nodePub[:]),
3569 Address: serverPeer.Conn().RemoteAddr().String(),
3570 Inbound: serverPeer.Inbound(),
3571 BytesRecv: serverPeer.BytesReceived(),
3572 BytesSent: serverPeer.BytesSent(),
3573 SatSent: satSent,
3574 SatRecv: satRecv,
3575 PingTime: serverPeer.PingTime(),
3576 SyncType: lnrpcSyncType,
3577 Features: features,
3578 LastPingPayload: serverPeer.LastRemotePingPayload(),
3579 }
3580
3581 var peerErrors []interface{}
3582
3583 // If we only want the most recent error, get the most recent
3584 // error from the buffer and add it to our list of errors if
3585 // it is non-nil. If we want all the stored errors, simply
3586 // add the full list to our set of errors.
3587 if in.LatestError {
3588 latestErr := serverPeer.ErrorBuffer().Latest()
3589 if latestErr != nil {
3590 peerErrors = []interface{}{latestErr}
3591 }
3592 } else {
3593 peerErrors = serverPeer.ErrorBuffer().List()
3594 }
3595
3596 // Add the relevant peer errors to our response.
3597 for _, peerErr := range peerErrors {
3598 tsError := peerErr.(*peer.TimestampedError)
3599
3600 rpcErr := &lnrpc.TimestampedError{
3601 Timestamp: uint64(tsError.Timestamp.Unix()),
3602 Error: tsError.Error.Error(),
3603 }
3604
3605 rpcPeer.Errors = append(rpcPeer.Errors, rpcErr)
3606 }
3607
3608 // If the server has started, we can query the event store
3609 // for our peer's flap count. If we do so when the server has
3610 // not started, the request will block.
3611 if r.server.Started() {
3612 vertex, err := route.NewVertexFromBytes(nodePub[:])
3613 if err != nil {
3614 return nil, err
3615 }
3616
3617 flap, ts, err := r.server.chanEventStore.FlapCount(
3618 vertex,
3619 )
3620
3621 // Log the error if we cannot get the flap count instead
3622 // of failing this RPC call.
3623 if err != nil {
3624 rpcsLog.Debugf("Failed to get flap count for "+
3625 "peer %v: %v", vertex, err)
3626 }
3627
3628 // If our timestamp is non-nil, we have values for our
3629 // peer's flap count, so we set them.
3630 if ts != nil {
3631 rpcPeer.FlapCount = int32(flap)
3632 rpcPeer.LastFlapNs = ts.UnixNano()
3633 }
3634 }
3635
3636 resp.Peers = append(resp.Peers, rpcPeer)
3637 }
3638
3639 rpcsLog.Debugf("[listpeers] yielded %v peers", len(serverPeers))
3640
3641 return resp, nil
3642 }
3643
3644 // SubscribePeerEvents returns a uni-directional stream (server -> client)
3645 // for notifying the client of peer online and offline events.
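// A minimal client-side sketch (assuming a generated lnrpc.LightningClient
// named client; not part of this file) could consume the stream like this:
//
//	stream, err := client.SubscribePeerEvents(
//		ctx, &lnrpc.PeerEventSubscription{},
//	)
//	if err != nil {
//		return err
//	}
//	for {
//		ev, err := stream.Recv()
//		if err != nil {
//			return err
//		}
//		fmt.Println(ev.PubKey, ev.Type)
//	}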
3646 func (r *rpcServer) SubscribePeerEvents(req *lnrpc.PeerEventSubscription,
3647 eventStream lnrpc.Lightning_SubscribePeerEventsServer) error {
3648
3649 peerEventSub, err := r.server.peerNotifier.SubscribePeerEvents()
3650 if err != nil {
3651 return err
3652 }
3653 defer peerEventSub.Cancel()
3654
3655 for {
3656 select {
3657 // A new update has been sent by the peer notifier, we'll
3658 // marshal it into the form expected by the gRPC client, then
3659 // send it off to the client.
3660 case e := <-peerEventSub.Updates():
3661 var event *lnrpc.PeerEvent
3662
3663 switch peerEvent := e.(type) {
3664 case peernotifier.PeerOfflineEvent:
3665 event = &lnrpc.PeerEvent{
3666 PubKey: hex.EncodeToString(peerEvent.PubKey[:]),
3667 Type: lnrpc.PeerEvent_PEER_OFFLINE,
3668 }
3669
3670 case peernotifier.PeerOnlineEvent:
3671 event = &lnrpc.PeerEvent{
3672 PubKey: hex.EncodeToString(peerEvent.PubKey[:]),
3673 Type: lnrpc.PeerEvent_PEER_ONLINE,
3674 }
3675
3676 default:
3677 return fmt.Errorf("unexpected peer event: %v", peerEvent)
3678 }
3679
3680 if err := eventStream.Send(event); err != nil {
3681 return err
3682 }
3683
3684 // The response stream's context has been closed for some
3685 // reason. If it was closed because a deadline was exceeded, we
3686 // return an error.
3687 case <-eventStream.Context().Done():
3688 if errors.Is(eventStream.Context().Err(), context.Canceled) {
3689 return nil
3690 }
3691 return eventStream.Context().Err()
3692
3693 case <-r.quit:
3694 return nil
3695 }
3696 }
3697 }
3698
3699 // WalletBalance returns total unspent outputs (confirmed and unconfirmed),
3700 // all confirmed unspent outputs and all unconfirmed unspent outputs under
3701 // control of the wallet. This method can be modified by having the request
3702 // specify that only witness outputs should be factored into the final sum.
3703 // TODO(roasbeef): add async hooks into wallet balance changes.
3704 func (r *rpcServer) WalletBalance(ctx context.Context,
3705 in *lnrpc.WalletBalanceRequest) (*lnrpc.WalletBalanceResponse, error) {
3706
3707 // Retrieve all existing wallet accounts. We'll compute the confirmed
3708 // and unconfirmed balance for each and tally them up.
3709 accounts, err := r.server.cc.Wallet.ListAccounts(in.Account, nil)
3710 if err != nil {
3711 return nil, err
3712 }
3713
3714 var totalBalance, confirmedBalance, unconfirmedBalance btcutil.Amount
3715 rpcAccountBalances := make(
3716 map[string]*lnrpc.WalletAccountBalance, len(accounts),
3717 )
3718 for _, account := range accounts {
3719 // There are two default accounts, one for NP2WKH outputs and
3720 // another for P2WKH outputs. The balance will be computed for
3721 // both given one call to ConfirmedBalance with the default
3722 // wallet and imported account, so we'll skip the second
3723 // instance to avoid inflating the balance.
3724 switch account.AccountName {
3725 case waddrmgr.ImportedAddrAccountName:
3726 // Omit the imported account from the response unless we
3727 // actually have any keys imported.
3728 if account.ImportedKeyCount == 0 {
3729 continue
3730 }
3731
3732 fallthrough
3733
3734 case lnwallet.DefaultAccountName:
3735 if _, ok := rpcAccountBalances[account.AccountName]; ok {
3736 continue
3737 }
3738
3739 default:
3740 }
3741
3742 // There are now also accounts for the internal channel-related
3743 // keys. We skip those as they'll never have any direct
3744 // balance.
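// (These accounts are derived under lnd's own BIP 43 purpose rather than
// a standard wallet key scope, which is what the check below filters on.)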
3745 if account.KeyScope.Purpose == keychain.BIP0043Purpose {
3746 continue
3747 }
3748
3749 // Get total balance, from txs that have >= 0 confirmations.
3750 totalBal, err := r.server.cc.Wallet.ConfirmedBalance(
3751 0, account.AccountName,
3752 )
3753 if err != nil {
3754 return nil, err
3755 }
3756 totalBalance += totalBal
3757
3758 // Get confirmed balance, from txs that have >= 1 confirmations.
3759 // TODO(halseth): get both unconfirmed and confirmed balance in
3760 // one call, as this is racy.
3761 if in.MinConfs <= 0 {
3762 in.MinConfs = 1
3763 }
3764 confirmedBal, err := r.server.cc.Wallet.ConfirmedBalance(
3765 in.MinConfs, account.AccountName,
3766 )
3767 if err != nil {
3768 return nil, err
3769 }
3770 confirmedBalance += confirmedBal
3771
3772 // Get unconfirmed balance, from txs with 0 confirmations.
3773 unconfirmedBal := totalBal - confirmedBal
3774 unconfirmedBalance += unconfirmedBal
3775
3776 rpcAccountBalances[account.AccountName] = &lnrpc.WalletAccountBalance{
3777 ConfirmedBalance: int64(confirmedBal),
3778 UnconfirmedBalance: int64(unconfirmedBal),
3779 }
3780 }
3781
3782 // Now that we have the base balance accounted for with each account,
3783 // we'll look at the set of locked UTXOs to tally that as well. If we
3784 // don't display this, then anytime we attempt a funding reservation,
3785 // the outputs will show as being "gone" until they're confirmed on
3786 // chain.
3787 var lockedBalance btcutil.Amount
3788 leases, err := r.server.cc.Wallet.ListLeasedOutputs()
3789 if err != nil {
3790 return nil, err
3791 }
3792 for _, leasedOutput := range leases {
3793 lockedBalance += btcutil.Amount(leasedOutput.Value)
3794 }
3795
3796 // Get the current number of non-private anchor channels.
3797 currentNumAnchorChans, err := r.server.cc.Wallet.CurrentNumAnchorChans()
3798 if err != nil {
3799 return nil, err
3800 }
3801
3802 // Get the required reserve for the wallet.
3803 requiredReserve := r.server.cc.Wallet.RequiredReserve(
3804 uint32(currentNumAnchorChans),
3805 )
3806
3807 rpcsLog.Debugf("[walletbalance] Total balance=%v (confirmed=%v, "+
3808 "unconfirmed=%v, locked=%v)", totalBalance, confirmedBalance,
3809 unconfirmedBalance, lockedBalance)
3810
3811 return &lnrpc.WalletBalanceResponse{
3812 TotalBalance: int64(totalBalance),
3813 ConfirmedBalance: int64(confirmedBalance),
3814 UnconfirmedBalance: int64(unconfirmedBalance),
3815 LockedBalance: int64(lockedBalance),
3816 ReservedBalanceAnchorChan: int64(requiredReserve),
3817 AccountBalance: rpcAccountBalances,
3818 }, nil
3819 }
3820
3821 // ChannelBalance returns the total available channel flow across all open
3822 // channels in satoshis.
3823 func (r *rpcServer) ChannelBalance(ctx context.Context,
3824 in *lnrpc.ChannelBalanceRequest) (
3825 *lnrpc.ChannelBalanceResponse, error) {
3826
3827 var (
3828 localBalance lnwire.MilliSatoshi
3829 remoteBalance lnwire.MilliSatoshi
3830 unsettledLocalBalance lnwire.MilliSatoshi
3831 unsettledRemoteBalance lnwire.MilliSatoshi
3832 pendingOpenLocalBalance lnwire.MilliSatoshi
3833 pendingOpenRemoteBalance lnwire.MilliSatoshi
3834 customDataBuf bytes.Buffer
3835 )
3836
3837 openChannels, err := r.server.chanStateDB.FetchAllOpenChannels()
3838 if err != nil {
3839 return nil, err
3840 }
3841
3842 // Encode the number of open channels to the custom data buffer.
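// The buffer built here uses a simple length-prefixed layout: a varint
// count of open channels, followed by one var-bytes custom blob per
// channel, then the same pattern again for pending channels further down.
// Any configured aux data parser consumes exactly this layout.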
3843 err = wire.WriteVarInt(&customDataBuf, 0, uint64(len(openChannels))) 3844 if err != nil { 3845 return nil, err 3846 } 3847 3848 for _, channel := range openChannels { 3849 c := channel.LocalCommitment 3850 localBalance += c.LocalBalance 3851 remoteBalance += c.RemoteBalance 3852 3853 // Add pending htlc amount. 3854 for _, htlc := range c.Htlcs { 3855 if htlc.Incoming { 3856 unsettledLocalBalance += htlc.Amt 3857 } else { 3858 unsettledRemoteBalance += htlc.Amt 3859 } 3860 } 3861 3862 // Encode the custom data for this open channel. 3863 openChanData := channel.LocalCommitment.CustomBlob.UnwrapOr(nil) 3864 err = wire.WriteVarBytes(&customDataBuf, 0, openChanData) 3865 if err != nil { 3866 return nil, err 3867 } 3868 } 3869 3870 pendingChannels, err := r.server.chanStateDB.FetchPendingChannels() 3871 if err != nil { 3872 return nil, err 3873 } 3874 3875 // Encode the number of pending channels to the custom data buffer. 3876 err = wire.WriteVarInt(&customDataBuf, 0, uint64(len(pendingChannels))) 3877 if err != nil { 3878 return nil, err 3879 } 3880 3881 for _, channel := range pendingChannels { 3882 c := channel.LocalCommitment 3883 pendingOpenLocalBalance += c.LocalBalance 3884 pendingOpenRemoteBalance += c.RemoteBalance 3885 3886 // Encode the custom data for this pending channel. 3887 openChanData := channel.LocalCommitment.CustomBlob.UnwrapOr(nil) 3888 err = wire.WriteVarBytes(&customDataBuf, 0, openChanData) 3889 if err != nil { 3890 return nil, err 3891 } 3892 } 3893 3894 rpcsLog.Debugf("[channelbalance] local_balance=%v remote_balance=%v "+ 3895 "unsettled_local_balance=%v unsettled_remote_balance=%v "+ 3896 "pending_open_local_balance=%v pending_open_remote_balance=%v", 3897 localBalance, remoteBalance, unsettledLocalBalance, 3898 unsettledRemoteBalance, pendingOpenLocalBalance, 3899 pendingOpenRemoteBalance) 3900 3901 resp := &lnrpc.ChannelBalanceResponse{ 3902 LocalBalance: &lnrpc.Amount{ 3903 Sat: uint64(localBalance.ToSatoshis()), 3904 Msat: uint64(localBalance), 3905 }, 3906 RemoteBalance: &lnrpc.Amount{ 3907 Sat: uint64(remoteBalance.ToSatoshis()), 3908 Msat: uint64(remoteBalance), 3909 }, 3910 UnsettledLocalBalance: &lnrpc.Amount{ 3911 Sat: uint64(unsettledLocalBalance.ToSatoshis()), 3912 Msat: uint64(unsettledLocalBalance), 3913 }, 3914 UnsettledRemoteBalance: &lnrpc.Amount{ 3915 Sat: uint64(unsettledRemoteBalance.ToSatoshis()), 3916 Msat: uint64(unsettledRemoteBalance), 3917 }, 3918 PendingOpenLocalBalance: &lnrpc.Amount{ 3919 Sat: uint64(pendingOpenLocalBalance.ToSatoshis()), 3920 Msat: uint64(pendingOpenLocalBalance), 3921 }, 3922 PendingOpenRemoteBalance: &lnrpc.Amount{ 3923 Sat: uint64(pendingOpenRemoteBalance.ToSatoshis()), 3924 Msat: uint64(pendingOpenRemoteBalance), 3925 }, 3926 CustomChannelData: customDataBuf.Bytes(), 3927 3928 // Deprecated fields. 
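// Balance and PendingOpenBalance mirror the satoshi values of
// LocalBalance and PendingOpenLocalBalance above and are kept for
// backwards compatibility.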
3929 Balance: int64(localBalance.ToSatoshis()),
3930 PendingOpenBalance: int64(pendingOpenLocalBalance.ToSatoshis()),
3931 }
3932
3933 err = fn.MapOptionZ(
3934 r.server.implCfg.AuxDataParser,
3935 func(parser AuxDataParser) error {
3936 return parser.InlineParseCustomData(resp)
3937 },
3938 )
3939 if err != nil {
3940 return nil, fmt.Errorf("error parsing custom data: %w", err)
3941 }
3942
3943 return resp, nil
3944 }
3945
3946 type (
3947 pendingOpenChannels []*lnrpc.PendingChannelsResponse_PendingOpenChannel
3948 pendingForceClose []*lnrpc.PendingChannelsResponse_ForceClosedChannel
3949 waitingCloseChannels []*lnrpc.PendingChannelsResponse_WaitingCloseChannel
3950 )
3951
3952 // calcRemainingConfs calculates how many more confirmations are needed for a
3953 // pending channel to be fully confirmed. It takes into account:
3954 // 1. The current blockchain height
3955 // 2. The block height at which the funding transaction was first confirmed
3956 // 3. The total number of confirmations required for the channel.
3957 func calcRemainingConfs(pendingChan *channeldb.OpenChannel,
3958 currentHeight uint32) uint32 {
3959
3960 // If the funding transaction hasn't been confirmed yet,
3961 // we need all the required confirmations.
3962 if pendingChan.ConfirmationHeight == 0 {
3963 return uint32(pendingChan.NumConfsRequired)
3964 }
3965
3966 // Calculate the target height at which the channel will be fully
3967 // confirmed. The -1 is because the confirmation height of the first
3968 // confirmation has to be taken into account.
3969 targetConfirmationHeight := pendingChan.ConfirmationHeight +
3970 uint32(pendingChan.NumConfsRequired) - 1
3971
3972 // If the current height is already past the target, return 0. This
3973 // should never happen because the channel should already have moved
3974 // from the pending to the open state, but we handle it defensively to
3975 // guard against timing issues.
3976 if currentHeight >= targetConfirmationHeight {
3977 return 0
3978 }
3979
3980 return targetConfirmationHeight - currentHeight
3981 }
3982
3983 // fetchPendingOpenChannels queries the database for a list of channels that
3984 // have pending open state. The returned result is used in the response of the
3985 // PendingChannels RPC.
3986 func (r *rpcServer) fetchPendingOpenChannels() (pendingOpenChannels, error) {
3987 // First, we'll populate the response with all the channels that are
3988 // soon to be opened. We can easily fetch this data from the database
3989 // and map the db struct to the proto response.
3990 channels, err := r.server.chanStateDB.FetchPendingChannels()
3991 if err != nil {
3992 rpcsLog.Errorf("unable to fetch pending channels: %v", err)
3993 return nil, err
3994 }
3995
3996 _, currentHeight, err := r.server.cc.ChainIO.GetBestBlock()
3997 if err != nil {
3998 return nil, err
3999 }
4000
4001 result := make(pendingOpenChannels, len(channels))
4002 for i, pendingChan := range channels {
4003 pub := pendingChan.IdentityPub.SerializeCompressed()
4004
4005 // As this is required for display purposes, we'll calculate
4006 // the weight of the commitment transaction. We also add on the
4007 // estimated weight of the witness to calculate the weight of
4008 // the transaction if it were to be immediately unilaterally
4009 // broadcast.
4010 // TODO(roasbeef): query for funding tx from wallet, display
4011 // that also?
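// A taproot channel spends the funding output with a key-path witness,
// which is considerably smaller than the 2-of-2 multisig witness of
// non-taproot channels, hence the per-channel-type estimate below.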
4012 var witnessWeight int64 4013 if pendingChan.ChanType.IsTaproot() { 4014 witnessWeight = input.TaprootKeyPathWitnessSize 4015 } else { 4016 witnessWeight = input.WitnessCommitmentTxWeight 4017 } 4018 4019 localCommitment := pendingChan.LocalCommitment 4020 utx := btcutil.NewTx(localCommitment.CommitTx) 4021 commitBaseWeight := blockchain.GetTransactionWeight(utx) 4022 commitWeight := commitBaseWeight + witnessWeight 4023 4024 // The value of waitBlocksForFundingConf is adjusted in a 4025 // development environment to enhance test capabilities. 4026 // Otherwise, it is set to DefaultMaxWaitNumBlocksFundingConf. 4027 waitBlocksForFundingConf := uint32( 4028 lncfg.DefaultMaxWaitNumBlocksFundingConf, 4029 ) 4030 4031 if lncfg.IsDevBuild() { 4032 waitBlocksForFundingConf = 4033 r.cfg.Dev.GetMaxWaitNumBlocksFundingConf() 4034 } 4035 4036 // FundingExpiryBlocks is the distance from the current block 4037 // height to the broadcast height + waitBlocksForFundingConf. 4038 maxFundingHeight := waitBlocksForFundingConf + 4039 pendingChan.BroadcastHeight() 4040 fundingExpiryBlocks := int32(maxFundingHeight) - currentHeight 4041 4042 // Calculate remainingConfs, the number of blocks left until the 4043 // funding transaction reaches the required confirmation height. 4044 // 4045 // ZeroConf channels are marked OPEN immediately upon creation, 4046 // so they never enter the "pending" state. 4047 remainingConfs := calcRemainingConfs( 4048 pendingChan, uint32(currentHeight), 4049 ) 4050 4051 customChanBytes, err := encodeCustomChanData(pendingChan) 4052 if err != nil { 4053 return nil, fmt.Errorf("unable to encode open chan "+ 4054 "data: %w", err) 4055 } 4056 4057 result[i] = &lnrpc.PendingChannelsResponse_PendingOpenChannel{ 4058 Channel: &lnrpc.PendingChannelsResponse_PendingChannel{ 4059 RemoteNodePub: hex.EncodeToString(pub), 4060 ChannelPoint: pendingChan.FundingOutpoint.String(), 4061 Capacity: int64(pendingChan.Capacity), 4062 LocalBalance: int64(localCommitment.LocalBalance.ToSatoshis()), 4063 RemoteBalance: int64(localCommitment.RemoteBalance.ToSatoshis()), 4064 LocalChanReserveSat: int64(pendingChan.LocalChanCfg.ChanReserve), 4065 RemoteChanReserveSat: int64(pendingChan.RemoteChanCfg.ChanReserve), 4066 Initiator: rpcInitiator(pendingChan.IsInitiator), 4067 CommitmentType: rpcCommitmentType(pendingChan.ChanType), 4068 Private: isPrivate(pendingChan), 4069 Memo: string(pendingChan.Memo), 4070 CustomChannelData: customChanBytes, 4071 }, 4072 CommitWeight: commitWeight, 4073 CommitFee: int64(localCommitment.CommitFee), 4074 FeePerKw: int64(localCommitment. 4075 FeePerKw), 4076 FundingExpiryBlocks: fundingExpiryBlocks, 4077 ConfirmationsUntilActive: remainingConfs, 4078 ConfirmationHeight: pendingChan. 4079 ConfirmationHeight, 4080 } 4081 } 4082 4083 return result, nil 4084 } 4085 4086 // fetchPendingForceCloseChannels queries the database for a list of channels 4087 // that have their closing transactions confirmed but not fully resolved yet. 4088 // The returned result is used in the response of the PendingChannels RPC. 4089 func (r *rpcServer) fetchPendingForceCloseChannels() (pendingForceClose, 4090 int64, error) { 4091 4092 _, currentHeight, err := r.server.cc.ChainIO.GetBestBlock() 4093 if err != nil { 4094 return nil, 0, err 4095 } 4096 4097 // Next, we'll examine the channels that are soon to be closed so we 4098 // can populate these fields within the response. 
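// Passing true restricts the query to closed channels that are still
// pending full on-chain resolution.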
4099 channels, err := r.server.chanStateDB.FetchClosedChannels(true)
4100 if err != nil {
4101 rpcsLog.Errorf("unable to fetch closed channels: %v", err)
4102 return nil, 0, err
4103 }
4104
4105 result := make(pendingForceClose, 0)
4106 limboBalance := int64(0)
4107
4108 for _, pendingClose := range channels {
4109 // First construct the channel struct itself, this will be
4110 // needed regardless of how this channel was closed.
4111 pub := pendingClose.RemotePub.SerializeCompressed()
4112 chanPoint := pendingClose.ChanPoint
4113
4114 // Create the pending channel. If this channel was closed before
4115 // we started storing historical channel data, we will not know
4116 // who initiated the channel, so we set the initiator field to
4117 // unknown.
4118 channel := &lnrpc.PendingChannelsResponse_PendingChannel{
4119 RemoteNodePub: hex.EncodeToString(pub),
4120 ChannelPoint: chanPoint.String(),
4121 Capacity: int64(pendingClose.Capacity),
4122 LocalBalance: int64(pendingClose.SettledBalance),
4123 CommitmentType: lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE,
4124 Initiator: lnrpc.Initiator_INITIATOR_UNKNOWN,
4125 }
4126
4127 // Lookup the channel in the historical channel bucket to obtain
4128 // initiator information. If the historical channel bucket, or
4129 // the channel itself, was not found, this channel was closed
4130 // in a version before we started persisting historical
4131 // channels, so we silence the error.
4132 historical, err := r.server.chanStateDB.FetchHistoricalChannel(
4133 &pendingClose.ChanPoint,
4134 )
4135 switch err {
4136 // If the channel was closed in a version that did not record
4137 // historical channels, ignore the error.
4138 case channeldb.ErrNoHistoricalBucket:
4139 case channeldb.ErrChannelNotFound:
4140
4141 case nil:
4142 channel.Initiator = rpcInitiator(historical.IsInitiator)
4143 channel.CommitmentType = rpcCommitmentType(
4144 historical.ChanType,
4145 )
4146
4147 // Get the number of forwarding packages from the
4148 // historical channel.
4149 fwdPkgs, err := historical.LoadFwdPkgs()
4150 if err != nil {
4151 rpcsLog.Errorf("unable to load forwarding "+
4152 "packages for channel:%s, %v",
4153 historical.ShortChannelID, err)
4154 return nil, 0, err
4155 }
4156 channel.NumForwardingPackages = int64(len(fwdPkgs))
4157
4158 channel.RemoteBalance = int64(
4159 historical.LocalCommitment.RemoteBalance.ToSatoshis(),
4160 )
4161
4162 customChanBytes, err := encodeCustomChanData(historical)
4163 if err != nil {
4164 return nil, 0, fmt.Errorf("unable to encode "+
4165 "open chan data: %w", err)
4166 }
4167 channel.CustomChannelData = customChanBytes
4168
4169 channel.Private = isPrivate(historical)
4170 channel.Memo = string(historical.Memo)
4171
4172 // If the error is non-nil, and not due to older versions of lnd
4173 // not persisting historical channels, return it.
4174 default:
4175 return nil, 0, err
4176 }
4177
4178 closeTXID := pendingClose.ClosingTXID.String()
4179
4180 switch pendingClose.CloseType {
4181
4182 // A coop closed channel should never be in the "pending close"
4183 // state. If a node upgraded from an older lnd version in the
4184 // middle of their channel confirming, it will be in this
4185 // state. We log a warning that the channel will not be included
4186 // in the now deprecated pending close channels field.
4187 case channeldb.CooperativeClose:
4188 rpcsLog.Warnf("channel %v cooperatively closed and "+
4189 "in pending close state",
4190 pendingClose.ChanPoint)
4191
4192 // If the channel was force closed, then we'll need to query
4193 // the utxoNursery for additional information.
4194 // TODO(halseth): distinguish remote and local case?
4195 case channeldb.LocalForceClose, channeldb.RemoteForceClose:
4196 forceClose := &lnrpc.PendingChannelsResponse_ForceClosedChannel{
4197 Channel: channel,
4198 ClosingTxid: closeTXID,
4199 }
4200
4201 // Fetch reports from both nursery and resolvers. At the
4202 // moment this is not an atomic snapshot. This is
4203 // planned to be resolved when the nursery is removed
4204 // and the channel arbitrator becomes the single source
4205 // for these kinds of reports.
4206 err := r.nurseryPopulateForceCloseResp(
4207 &chanPoint, currentHeight, forceClose,
4208 )
4209 if err != nil {
4210 rpcsLog.Errorf("unable to populate nursery "+
4211 "force close resp:%s, %v",
4212 chanPoint, err)
4213 return nil, 0, err
4214 }
4215
4216 err = r.arbitratorPopulateForceCloseResp(
4217 &chanPoint, currentHeight, forceClose,
4218 )
4219 if err != nil {
4220 rpcsLog.Errorf("unable to populate arbitrator "+
4221 "force close resp:%s, %v",
4222 chanPoint, err)
4223 return nil, 0, err
4224 }
4225
4226 limboBalance += forceClose.LimboBalance
4227 result = append(result, forceClose)
4228 }
4229 }
4230
4231 return result, limboBalance, nil
4232 }
4233
4234 // fetchWaitingCloseChannels queries the database for a list of channels
4235 // that have their closing transactions broadcast but not confirmed yet.
4236 // The returned result is used in the response of the PendingChannels RPC.
4237 func (r *rpcServer) fetchWaitingCloseChannels(
4238 includeRawTx bool) (waitingCloseChannels, int64, error) {
4239
4240 // We'll also fetch all channels that are open, but have had their
4241 // commitment broadcasted, meaning they are waiting for the closing
4242 // transaction to confirm.
4243 channels, err := r.server.chanStateDB.FetchWaitingCloseChannels()
4244 if err != nil {
4245 rpcsLog.Errorf("unable to fetch channels waiting close: %v",
4246 err)
4247 return nil, 0, err
4248 }
4249
4250 result := make(waitingCloseChannels, 0)
4251 limboBalance := int64(0)
4252
4253 // getClosingTx is a helper closure that tries to find the closing tx of
4254 // a given waiting close channel. Notice that if the remote closes the
4255 // channel, we may not have the closing tx.
4256 getClosingTx := func(c *channeldb.OpenChannel) (*wire.MsgTx, error) {
4257 var (
4258 tx *wire.MsgTx
4259 err error
4260 )
4261
4262 // First, we try to locate the force closing tx. If not found,
4263 // we will then try to find its coop closing tx.
4264 tx, err = c.BroadcastedCommitment()
4265 if err == nil {
4266 return tx, nil
4267 }
4268
4269 // If the error returned is not ErrNoCloseTx, something
4270 // unexpected happened and we will return the error.
4271 if err != channeldb.ErrNoCloseTx {
4272 return nil, err
4273 }
4274
4275 // Otherwise, we continue to locate its coop closing tx.
4276 tx, err = c.BroadcastedCooperative()
4277 if err == nil {
4278 return tx, nil
4279 }
4280
4281 // Return the error if it's not ErrNoCloseTx.
4282 if err != channeldb.ErrNoCloseTx {
4283 return nil, err
4284 }
4285
4286 // Otherwise return an empty tx. This can happen if the remote
4287 // broadcast the closing tx and we haven't recorded it yet.
4288 return nil, nil 4289 } 4290 4291 for _, waitingClose := range channels { 4292 pub := waitingClose.IdentityPub.SerializeCompressed() 4293 chanPoint := waitingClose.FundingOutpoint 4294 4295 var commitments lnrpc.PendingChannelsResponse_Commitments 4296 4297 // Report local commit. May not be present when DLP is active. 4298 if waitingClose.LocalCommitment.CommitTx != nil { 4299 commitments.LocalTxid = 4300 waitingClose.LocalCommitment.CommitTx.TxHash(). 4301 String() 4302 4303 commitments.LocalCommitFeeSat = uint64( 4304 waitingClose.LocalCommitment.CommitFee, 4305 ) 4306 } 4307 4308 // Report remote commit. May not be present when DLP is active. 4309 if waitingClose.RemoteCommitment.CommitTx != nil { 4310 commitments.RemoteTxid = 4311 waitingClose.RemoteCommitment.CommitTx.TxHash(). 4312 String() 4313 4314 commitments.RemoteCommitFeeSat = uint64( 4315 waitingClose.RemoteCommitment.CommitFee, 4316 ) 4317 } 4318 4319 // Report the remote pending commit if any. 4320 remoteCommitDiff, err := waitingClose.RemoteCommitChainTip() 4321 4322 switch { 4323 // Don't set hash if there is no pending remote commit. 4324 case err == channeldb.ErrNoPendingCommit: 4325 4326 // An unexpected error occurred. 4327 case err != nil: 4328 return nil, 0, err 4329 4330 // There is a pending remote commit. Set its hash in the 4331 // response. 4332 default: 4333 hash := remoteCommitDiff.Commitment.CommitTx.TxHash() 4334 commitments.RemotePendingTxid = hash.String() 4335 commitments.RemoteCommitFeeSat = uint64( 4336 remoteCommitDiff.Commitment.CommitFee, 4337 ) 4338 } 4339 4340 fwdPkgs, err := waitingClose.LoadFwdPkgs() 4341 if err != nil { 4342 rpcsLog.Errorf("unable to load forwarding packages "+ 4343 "for channel:%s, %v", 4344 waitingClose.ShortChannelID, err) 4345 return nil, 0, err 4346 } 4347 4348 // Get the closing tx. 4349 // NOTE: the closing tx could be nil here if it's the remote 4350 // that broadcasted the closing tx. 
4351 closingTx, err := getClosingTx(waitingClose)
4352 if err != nil {
4353 rpcsLog.Errorf("unable to find closing tx for "+
4354 "channel:%s, %v",
4355 waitingClose.ShortChannelID, err)
4356 return nil, 0, err
4357 }
4358
4359 customChanBytes, err := encodeCustomChanData(waitingClose)
4360 if err != nil {
4361 return nil, 0, fmt.Errorf("unable to encode "+
4362 "open chan data: %w", err)
4363 }
4364
4365 localCommit := waitingClose.LocalCommitment
4366 chanStatus := waitingClose.ChanStatus()
4367 channel := &lnrpc.PendingChannelsResponse_PendingChannel{
4368 RemoteNodePub: hex.EncodeToString(pub),
4369 ChannelPoint: chanPoint.String(),
4370 Capacity: int64(waitingClose.Capacity),
4371 LocalBalance: int64(
4372 localCommit.LocalBalance.ToSatoshis(),
4373 ),
4374 RemoteBalance: int64(
4375 localCommit.RemoteBalance.ToSatoshis(),
4376 ),
4377 LocalChanReserveSat: int64(
4378 waitingClose.LocalChanCfg.ChanReserve,
4379 ),
4380 RemoteChanReserveSat: int64(
4381 waitingClose.RemoteChanCfg.ChanReserve,
4382 ),
4383 Initiator: rpcInitiator(
4384 waitingClose.IsInitiator,
4385 ),
4386 CommitmentType: rpcCommitmentType(
4387 waitingClose.ChanType,
4388 ),
4389 NumForwardingPackages: int64(len(fwdPkgs)),
4390 ChanStatusFlags: chanStatus.String(),
4391 Private: isPrivate(waitingClose),
4392 Memo: string(waitingClose.Memo),
4393 CustomChannelData: customChanBytes,
4394 }
4395
4396 var closingTxid, closingTxHex string
4397 if closingTx != nil {
4398 closingTxid = closingTx.TxHash().String()
4399 if includeRawTx {
4400 var txBuf bytes.Buffer
4401 err = closingTx.Serialize(&txBuf)
4402 if err != nil {
4403 return nil, 0, fmt.Errorf("failed to "+
4404 "serialize closing transaction"+
4405 ": %w", err)
4406 }
4407 closingTxHex = hex.EncodeToString(txBuf.Bytes())
4408 }
4409 }
4410
4411 waitingCloseResp := &lnrpc.PendingChannelsResponse_WaitingCloseChannel{
4412 Channel: channel,
4413 LimboBalance: channel.LocalBalance,
4414 Commitments: &commitments,
4415 ClosingTxid: closingTxid,
4416 ClosingTxHex: closingTxHex,
4417 }
4418
4419 // A close tx has been broadcast, so all our balance will be in
4420 // limbo until it confirms.
4421 result = append(result, waitingCloseResp)
4422 limboBalance += channel.LocalBalance
4423 }
4424
4425 return result, limboBalance, nil
4426 }
4427
4428 // PendingChannels returns a list of all the channels that are currently
4429 // considered "pending". A channel is pending if it has finished the funding
4430 // workflow and is waiting for confirmations for the funding txn, or is in the
4431 // process of closure, either initiated cooperatively or non-cooperatively.
4432 func (r *rpcServer) PendingChannels(ctx context.Context,
4433 in *lnrpc.PendingChannelsRequest) (
4434 *lnrpc.PendingChannelsResponse, error) {
4435
4436 resp := &lnrpc.PendingChannelsResponse{}
4437
4438 // First, we find all the channels that will soon be opened.
4439 pendingOpenChannels, err := r.fetchPendingOpenChannels()
4440 if err != nil {
4441 return nil, err
4442 }
4443 resp.PendingOpenChannels = pendingOpenChannels
4444
4445 // Second, we fetch all channels that are considered pending force
4446 // closing. This means the channels here have their closing
4447 // transactions confirmed but not considered fully resolved yet. For
4448 // instance, they may have second-level HTLCs to be resolved onchain.
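// The limbo balance aggregated here is combined below with the
// waiting-close limbo balance to form TotalLimboBalance.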
4449 pendingCloseChannels, limbo, err := r.fetchPendingForceCloseChannels() 4450 if err != nil { 4451 return nil, err 4452 } 4453 resp.PendingForceClosingChannels = pendingCloseChannels 4454 resp.TotalLimboBalance = limbo 4455 4456 // Third, we fetch all channels that are open, but have had their 4457 // commitment broadcasted, meaning they are waiting for the closing 4458 // transaction to confirm. 4459 waitingCloseChannels, limbo, err := r.fetchWaitingCloseChannels( 4460 in.IncludeRawTx, 4461 ) 4462 if err != nil { 4463 return nil, err 4464 } 4465 resp.WaitingCloseChannels = waitingCloseChannels 4466 resp.TotalLimboBalance += limbo 4467 4468 err = fn.MapOptionZ( 4469 r.server.implCfg.AuxDataParser, 4470 func(parser AuxDataParser) error { 4471 return parser.InlineParseCustomData(resp) 4472 }, 4473 ) 4474 if err != nil { 4475 return nil, fmt.Errorf("error parsing custom data: %w", err) 4476 } 4477 4478 return resp, nil 4479 } 4480 4481 // arbitratorPopulateForceCloseResp populates the pending channels response 4482 // message with channel resolution information from the contract resolvers. 4483 func (r *rpcServer) arbitratorPopulateForceCloseResp(chanPoint *wire.OutPoint, 4484 currentHeight int32, 4485 forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel) error { 4486 4487 // Query for contract resolvers state. 4488 arbitrator, err := r.server.chainArb.GetChannelArbitrator(*chanPoint) 4489 if err != nil { 4490 return err 4491 } 4492 reports := arbitrator.Report() 4493 4494 for _, report := range reports { 4495 switch report.Type { 4496 // For a direct output, populate/update the top level 4497 // response properties. 4498 case contractcourt.ReportOutputUnencumbered: 4499 // Populate the maturity height fields for the direct 4500 // commitment output to us. 4501 forceClose.MaturityHeight = report.MaturityHeight 4502 4503 // If the transaction has been confirmed, then we can 4504 // compute how many blocks it has left. 4505 if forceClose.MaturityHeight != 0 { 4506 forceClose.BlocksTilMaturity = 4507 int32(forceClose.MaturityHeight) - 4508 currentHeight 4509 } 4510 4511 // Add htlcs to the PendingHtlcs response property. 4512 case contractcourt.ReportOutputIncomingHtlc, 4513 contractcourt.ReportOutputOutgoingHtlc: 4514 4515 // Don't report details on htlcs that are no longer in 4516 // limbo. 4517 if report.LimboBalance == 0 { 4518 break 4519 } 4520 4521 incoming := report.Type == contractcourt.ReportOutputIncomingHtlc 4522 htlc := &lnrpc.PendingHTLC{ 4523 Incoming: incoming, 4524 Amount: int64(report.Amount), 4525 Outpoint: report.Outpoint.String(), 4526 MaturityHeight: report.MaturityHeight, 4527 Stage: report.Stage, 4528 } 4529 4530 if htlc.MaturityHeight != 0 { 4531 htlc.BlocksTilMaturity = 4532 int32(htlc.MaturityHeight) - currentHeight 4533 } 4534 4535 forceClose.PendingHtlcs = append(forceClose.PendingHtlcs, htlc) 4536 4537 case contractcourt.ReportOutputAnchor: 4538 // There are three resolution states for the anchor: 4539 // limbo, lost and recovered. Derive the current state 4540 // from the limbo and recovered balances. 
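// As a compact reference for the derivation below:
//
//	RecoveredBalance != 0 -> RECOVERED
//	LimboBalance != 0     -> LIMBO
//	both are zero         -> LOST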
4541 switch {
4542 case report.RecoveredBalance != 0:
4543 forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_RECOVERED
4544
4545 case report.LimboBalance != 0:
4546 forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_LIMBO
4547
4548 default:
4549 forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_LOST
4550 }
4551
4552 default:
4553 return fmt.Errorf("unknown report output type: %v",
4554 report.Type)
4555 }
4556
4557 forceClose.LimboBalance += int64(report.LimboBalance)
4558 forceClose.RecoveredBalance += int64(report.RecoveredBalance)
4559 }
4560
4561 return nil
4562 }
4563
4564 // nurseryPopulateForceCloseResp populates the pending channels response
4565 // message with contract resolution information from utxonursery.
4566 func (r *rpcServer) nurseryPopulateForceCloseResp(chanPoint *wire.OutPoint,
4567 currentHeight int32,
4568 forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel) error {
4569
4570 // Query for the maturity state for this force closed channel. If we
4571 // didn't have any time-locked outputs, then the nursery may not know of
4572 // the contract.
4573 nurseryInfo, err := r.server.utxoNursery.NurseryReport(chanPoint)
4574 if err == contractcourt.ErrContractNotFound {
4575 return nil
4576 }
4577 if err != nil {
4578 return fmt.Errorf("unable to obtain "+
4579 "nursery report for ChannelPoint(%v): %v",
4580 chanPoint, err)
4581 }
4582
4583 // If the nursery knows of this channel, then we can populate
4584 // information detailing exactly how much of the funds are time locked
4585 // and the height at which we can ultimately sweep the funds into the
4586 // wallet.
4587 forceClose.LimboBalance = int64(nurseryInfo.LimboBalance)
4588 forceClose.RecoveredBalance = int64(nurseryInfo.RecoveredBalance)
4589
4590 for _, htlcReport := range nurseryInfo.Htlcs {
4591 // TODO(conner) set incoming flag appropriately after handling
4592 // incoming incubation
4593 htlc := &lnrpc.PendingHTLC{
4594 Incoming: false,
4595 Amount: int64(htlcReport.Amount),
4596 Outpoint: htlcReport.Outpoint.String(),
4597 MaturityHeight: htlcReport.MaturityHeight,
4598 Stage: htlcReport.Stage,
4599 }
4600
4601 if htlc.MaturityHeight != 0 {
4602 htlc.BlocksTilMaturity =
4603 int32(htlc.MaturityHeight) -
4604 currentHeight
4605 }
4606
4607 forceClose.PendingHtlcs = append(forceClose.PendingHtlcs,
4608 htlc)
4609 }
4610
4611 return nil
4612 }
4613
4614 // ClosedChannels returns a list of all the channels that have been closed.
4615 // This does not include channels that are still in the process of closing.
4616 func (r *rpcServer) ClosedChannels(ctx context.Context,
4617 in *lnrpc.ClosedChannelsRequest) (*lnrpc.ClosedChannelsResponse,
4618 error) {
4619
4620 // Show all channels when no filter flags are set.
4621 filterResults := in.Cooperative || in.LocalForce ||
4622 in.RemoteForce || in.Breach || in.FundingCanceled ||
4623 in.Abandoned
4624
4625 resp := &lnrpc.ClosedChannelsResponse{}
4626
4627 dbChannels, err := r.server.chanStateDB.FetchClosedChannels(false)
4628 if err != nil {
4629 return nil, err
4630 }
4631
4632 // In order to make the response easier to parse for clients, we'll
4633 // sort the set of closed channels by their closing height before
4634 // serializing the proto response.
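// Note that sort.Slice is not guaranteed to be stable, so channels
// sharing a close height may appear in any order relative to each other.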
4635 sort.Slice(dbChannels, func(i, j int) bool { 4636 return dbChannels[i].CloseHeight < dbChannels[j].CloseHeight 4637 }) 4638 4639 for _, dbChannel := range dbChannels { 4640 if dbChannel.IsPending { 4641 continue 4642 } 4643 4644 switch dbChannel.CloseType { 4645 case channeldb.CooperativeClose: 4646 if filterResults && !in.Cooperative { 4647 continue 4648 } 4649 case channeldb.LocalForceClose: 4650 if filterResults && !in.LocalForce { 4651 continue 4652 } 4653 case channeldb.RemoteForceClose: 4654 if filterResults && !in.RemoteForce { 4655 continue 4656 } 4657 case channeldb.BreachClose: 4658 if filterResults && !in.Breach { 4659 continue 4660 } 4661 case channeldb.FundingCanceled: 4662 if filterResults && !in.FundingCanceled { 4663 continue 4664 } 4665 case channeldb.Abandoned: 4666 if filterResults && !in.Abandoned { 4667 continue 4668 } 4669 } 4670 4671 channel, err := r.createRPCClosedChannel(dbChannel) 4672 if err != nil { 4673 return nil, err 4674 } 4675 4676 resp.Channels = append(resp.Channels, channel) 4677 } 4678 4679 err = fn.MapOptionZ( 4680 r.server.implCfg.AuxDataParser, 4681 func(parser AuxDataParser) error { 4682 return parser.InlineParseCustomData(resp) 4683 }, 4684 ) 4685 if err != nil { 4686 return nil, fmt.Errorf("error parsing custom data: %w", err) 4687 } 4688 4689 return resp, nil 4690 } 4691 4692 // LookupHtlcResolution retrieves a final htlc resolution from the database. If 4693 // the htlc has no final resolution yet, a NotFound grpc status code is 4694 // returned. 4695 func (r *rpcServer) LookupHtlcResolution( 4696 _ context.Context, in *lnrpc.LookupHtlcResolutionRequest) ( 4697 *lnrpc.LookupHtlcResolutionResponse, error) { 4698 4699 if !r.cfg.StoreFinalHtlcResolutions { 4700 return nil, status.Error(codes.Unavailable, "cannot lookup "+ 4701 "with flag --store-final-htlc-resolutions=false") 4702 } 4703 4704 chanID := lnwire.NewShortChanIDFromInt(in.ChanId) 4705 4706 info, err := r.server.chanStateDB.LookupFinalHtlc(chanID, in.HtlcIndex) 4707 switch { 4708 case errors.Is(err, channeldb.ErrHtlcUnknown): 4709 return nil, status.Error(codes.NotFound, err.Error()) 4710 4711 case err != nil: 4712 return nil, err 4713 } 4714 4715 return &lnrpc.LookupHtlcResolutionResponse{ 4716 Settled: info.Settled, 4717 Offchain: info.Offchain, 4718 }, nil 4719 } 4720 4721 // ListChannels returns a description of all the open channels that this node 4722 // is a participant in. 
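// The active/inactive and public/private request flags form mutually
// exclusive pairs. As a rough CLI sketch (flag names may vary by
// release), filtering for active private channels with a given peer
// might look like:
//
//	lncli listchannels --active_only --private_only --peer <pubkey>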
4723 func (r *rpcServer) ListChannels(ctx context.Context, 4724 in *lnrpc.ListChannelsRequest) (*lnrpc.ListChannelsResponse, error) { 4725 4726 if in.ActiveOnly && in.InactiveOnly { 4727 return nil, fmt.Errorf("either `active_only` or " + 4728 "`inactive_only` can be set, but not both") 4729 } 4730 4731 if in.PublicOnly && in.PrivateOnly { 4732 return nil, fmt.Errorf("either `public_only` or " + 4733 "`private_only` can be set, but not both") 4734 } 4735 4736 if len(in.Peer) > 0 && len(in.Peer) != 33 { 4737 _, err := route.NewVertexFromBytes(in.Peer) 4738 return nil, fmt.Errorf("invalid `peer` key: %w", err) 4739 } 4740 4741 resp := &lnrpc.ListChannelsResponse{} 4742 4743 dbChannels, err := r.server.chanStateDB.FetchAllOpenChannels() 4744 if err != nil { 4745 return nil, err 4746 } 4747 4748 rpcsLog.Debugf("[listchannels] fetched %v channels from DB", 4749 len(dbChannels)) 4750 4751 for _, dbChannel := range dbChannels { 4752 nodePub := dbChannel.IdentityPub 4753 nodePubBytes := nodePub.SerializeCompressed() 4754 chanPoint := dbChannel.FundingOutpoint 4755 4756 // If the caller requested channels for a target node, skip any 4757 // that don't match the provided pubkey. 4758 if len(in.Peer) > 0 && !bytes.Equal(nodePubBytes, in.Peer) { 4759 continue 4760 } 4761 4762 var peerOnline bool 4763 if _, err := r.server.FindPeer(nodePub); err == nil { 4764 peerOnline = true 4765 } 4766 4767 channelID := lnwire.NewChanIDFromOutPoint(chanPoint) 4768 var linkActive bool 4769 if link, err := r.server.htlcSwitch.GetLink(channelID); err == nil { 4770 // A channel is only considered active if it is known 4771 // by the switch *and* able to forward 4772 // incoming/outgoing payments. 4773 linkActive = link.EligibleToForward() 4774 } 4775 4776 // Next, we'll determine whether we should add this channel to 4777 // our list depending on the type of channels requested to us. 4778 isActive := peerOnline && linkActive 4779 channel, err := createRPCOpenChannel( 4780 ctx, r, dbChannel, isActive, in.PeerAliasLookup, 4781 ) 4782 if err != nil { 4783 return nil, err 4784 } 4785 4786 // We'll only skip returning this channel if we were requested 4787 // for a specific kind and this channel doesn't satisfy it. 4788 switch { 4789 case in.ActiveOnly && !isActive: 4790 continue 4791 case in.InactiveOnly && isActive: 4792 continue 4793 case in.PublicOnly && channel.Private: 4794 continue 4795 case in.PrivateOnly && !channel.Private: 4796 continue 4797 } 4798 4799 resp.Channels = append(resp.Channels, channel) 4800 } 4801 4802 err = fn.MapOptionZ( 4803 r.server.implCfg.AuxDataParser, 4804 func(parser AuxDataParser) error { 4805 return parser.InlineParseCustomData(resp) 4806 }, 4807 ) 4808 if err != nil { 4809 return nil, fmt.Errorf("error parsing custom data: %w", err) 4810 } 4811 4812 return resp, nil 4813 } 4814 4815 // rpcCommitmentType takes the channel type and converts it to an rpc commitment 4816 // type value. 4817 func rpcCommitmentType(chanType channeldb.ChannelType) lnrpc.CommitmentType { 4818 // Extract the commitment type from the channel type flags. We must 4819 // first check whether it has anchors, since in that case it would also 4820 // be tweakless. 
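// The cases below therefore run from the most specific channel type to
// the most generic one: e.g. a tapscript-root channel is also taproot,
// and an anchor channel is also tweakless.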
4821 switch {
4822 case chanType.HasTapscriptRoot():
4823 return lnrpc.CommitmentType_SIMPLE_TAPROOT_OVERLAY
4824
4825 case chanType.IsTaproot():
4826 return lnrpc.CommitmentType_SIMPLE_TAPROOT
4827
4828 case chanType.HasLeaseExpiration():
4829 return lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE
4830
4831 case chanType.HasAnchors():
4832 return lnrpc.CommitmentType_ANCHORS
4833
4834 case chanType.IsTweakless():
4835 return lnrpc.CommitmentType_STATIC_REMOTE_KEY
4836
4837 default:
4838
4839 return lnrpc.CommitmentType_LEGACY
4840 }
4841 }
4842
4843 // createChannelConstraint creates a *lnrpc.ChannelConstraints using the
4844 // *channeldb.ChannelConfig.
4845 func createChannelConstraint(
4846 chanCfg *channeldb.ChannelConfig) *lnrpc.ChannelConstraints {
4847 return &lnrpc.ChannelConstraints{
4848 CsvDelay: uint32(chanCfg.CsvDelay),
4849 ChanReserveSat: uint64(chanCfg.ChanReserve),
4850 DustLimitSat: uint64(chanCfg.DustLimit),
4851 MaxPendingAmtMsat: uint64(chanCfg.MaxPendingAmount),
4852 MinHtlcMsat: uint64(chanCfg.MinHTLC),
4853 MaxAcceptedHtlcs: uint32(chanCfg.MaxAcceptedHtlcs),
4854 }
4855 }
4856
4857 // isPrivate evaluates the ChannelFlags of the db channel to determine if the
4858 // channel is private or not.
4859 func isPrivate(dbChannel *channeldb.OpenChannel) bool {
4860 if dbChannel == nil {
4861 return false
4862 }
4863 return dbChannel.ChannelFlags&lnwire.FFAnnounceChannel != 1
4864 }
4865
4866 // encodeCustomChanData encodes the custom channel data for the open channel.
4867 // It encodes that data as a pair of var bytes blobs.
4868 func encodeCustomChanData(lnChan *channeldb.OpenChannel) ([]byte, error) {
4869 customOpenChanData := lnChan.CustomBlob.UnwrapOr(nil)
4870 customLocalCommitData := lnChan.LocalCommitment.CustomBlob.UnwrapOr(nil)
4871
4872 // Don't write any custom data if both blobs are empty.
4873 if len(customOpenChanData) == 0 && len(customLocalCommitData) == 0 {
4874 return nil, nil
4875 }
4876
4877 // We'll encode our custom channel data as two blobs. The first is a
4878 // set of var bytes encoding of the open chan data, the second is an
4879 // encoding of the local commitment data.
4880 var customChanDataBuf bytes.Buffer
4881 err := wire.WriteVarBytes(&customChanDataBuf, 0, customOpenChanData)
4882 if err != nil {
4883 return nil, fmt.Errorf("unable to encode open chan "+
4884 "data: %w", err)
4885 }
4886 err = wire.WriteVarBytes(&customChanDataBuf, 0, customLocalCommitData)
4887 if err != nil {
4888 return nil, fmt.Errorf("unable to encode local commit "+
4889 "data: %w", err)
4890 }
4891
4892 return customChanDataBuf.Bytes(), nil
4893 }
4894
4895 // createRPCOpenChannel creates an *lnrpc.Channel from the *channeldb.OpenChannel.
4896 //
4897 //nolint:funlen
4898 func createRPCOpenChannel(ctx context.Context, r *rpcServer,
4899 dbChannel *channeldb.OpenChannel,
4900 isActive, peerAliasLookup bool) (*lnrpc.Channel, error) {
4901
4902 nodePub := dbChannel.IdentityPub
4903 nodeID := hex.EncodeToString(nodePub.SerializeCompressed())
4904 chanPoint := dbChannel.FundingOutpoint
4905 chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
4906
4907 // As this is required for display purposes, we'll calculate
4908 // the weight of the commitment transaction. We also add on the
4909 // estimated weight of the witness to calculate the weight of
4910 // the transaction if it were to be immediately unilaterally
4911 // broadcast.
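// This mirrors the weight estimation in fetchPendingOpenChannels; a
// shared helper, sketched here purely for illustration (not part of the
// current API), could look roughly like:
//
//	func estimateCommitWeight(c *channeldb.OpenChannel) int64 {
//		w := int64(input.WitnessCommitmentTxWeight)
//		if c.ChanType.IsTaproot() {
//			w = input.TaprootKeyPathWitnessSize
//		}
//		utx := btcutil.NewTx(c.LocalCommitment.CommitTx)
//		return blockchain.GetTransactionWeight(utx) + w
//	}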
4912 var witnessWeight int64 4913 if dbChannel.ChanType.IsTaproot() { 4914 witnessWeight = input.TaprootKeyPathWitnessSize 4915 } else { 4916 witnessWeight = input.WitnessCommitmentTxWeight 4917 } 4918 4919 localCommit := dbChannel.LocalCommitment 4920 utx := btcutil.NewTx(localCommit.CommitTx) 4921 commitBaseWeight := blockchain.GetTransactionWeight(utx) 4922 commitWeight := commitBaseWeight + witnessWeight 4923 4924 localBalance := localCommit.LocalBalance 4925 remoteBalance := localCommit.RemoteBalance 4926 4927 // As an artifact of our usage of mSAT internally, either party 4928 // may end up in a state where they're holding a fractional 4929 // amount of satoshis which can't be expressed within the 4930 // actual commitment output. Since we round down when going 4931 // from mSAT -> SAT, we may at any point be adding an 4932 // additional SAT to miners fees. As a result, we display a 4933 // commitment fee that accounts for this externally. 4934 var sumOutputs btcutil.Amount 4935 for _, txOut := range localCommit.CommitTx.TxOut { 4936 sumOutputs += btcutil.Amount(txOut.Value) 4937 } 4938 externalCommitFee := dbChannel.Capacity - sumOutputs 4939 4940 // Extract the commitment type from the channel type flags. 4941 commitmentType := rpcCommitmentType(dbChannel.ChanType) 4942 4943 dbScid := dbChannel.ShortChannelID 4944 4945 // Fetch the set of aliases for the channel. 4946 channelAliases := r.server.aliasMgr.GetAliases(dbScid) 4947 4948 // Fetch the peer alias. If one does not exist, errNoPeerAlias 4949 // is returned and peerScidAlias will be an empty ShortChannelID. 4950 peerScidAlias, _ := r.server.aliasMgr.GetPeerAlias(chanID) 4951 4952 // Finally we'll attempt to encode the custom channel data if any 4953 // exists. 4954 customChanBytes, err := encodeCustomChanData(dbChannel) 4955 if err != nil { 4956 return nil, fmt.Errorf("unable to encode open chan data: %w", 4957 err) 4958 } 4959 4960 channel := &lnrpc.Channel{ 4961 Active: isActive, 4962 Private: isPrivate(dbChannel), 4963 RemotePubkey: nodeID, 4964 ChannelPoint: chanPoint.String(), 4965 ChanId: dbScid.ToUint64(), 4966 Capacity: int64(dbChannel.Capacity), 4967 LocalBalance: int64(localBalance.ToSatoshis()), 4968 RemoteBalance: int64(remoteBalance.ToSatoshis()), 4969 CommitFee: int64(externalCommitFee), 4970 CommitWeight: commitWeight, 4971 FeePerKw: int64(localCommit.FeePerKw), 4972 TotalSatoshisSent: int64(dbChannel.TotalMSatSent.ToSatoshis()), 4973 TotalSatoshisReceived: int64(dbChannel.TotalMSatReceived.ToSatoshis()), 4974 NumUpdates: localCommit.CommitHeight, 4975 PendingHtlcs: make([]*lnrpc.HTLC, len(localCommit.Htlcs)), 4976 Initiator: dbChannel.IsInitiator, 4977 ChanStatusFlags: dbChannel.ChanStatus().String(), 4978 StaticRemoteKey: commitmentType == lnrpc.CommitmentType_STATIC_REMOTE_KEY, 4979 CommitmentType: commitmentType, 4980 ThawHeight: dbChannel.ThawHeight, 4981 LocalConstraints: createChannelConstraint( 4982 &dbChannel.LocalChanCfg, 4983 ), 4984 RemoteConstraints: createChannelConstraint( 4985 &dbChannel.RemoteChanCfg, 4986 ), 4987 AliasScids: make([]uint64, 0, len(channelAliases)), 4988 PeerScidAlias: peerScidAlias.ToUint64(), 4989 ZeroConf: dbChannel.IsZeroConf(), 4990 ZeroConfConfirmedScid: dbChannel.ZeroConfRealScid().ToUint64(), 4991 Memo: string(dbChannel.Memo), 4992 CustomChannelData: customChanBytes, 4993 // TODO: remove the following deprecated fields 4994 CsvDelay: uint32(dbChannel.LocalChanCfg.CsvDelay), 4995 LocalChanReserveSat: int64(dbChannel.LocalChanCfg.ChanReserve), 4996 RemoteChanReserveSat: 
int64(dbChannel.RemoteChanCfg.ChanReserve),
4997 }
4998
4999 // Look up our channel peer's node alias if the caller requests it.
5000 if peerAliasLookup {
5001 peerAlias, err := r.server.v1Graph.LookupAlias(ctx, nodePub)
5002 if err != nil {
5003 peerAlias = fmt.Sprintf("unable to lookup "+
5004 "peer alias: %v", err)
5005 }
5006 channel.PeerAlias = peerAlias
5007 }
5008
5009 // Populate the set of aliases.
5010 for _, chanAlias := range channelAliases {
5011 channel.AliasScids = append(
5012 channel.AliasScids, chanAlias.ToUint64(),
5013 )
5014 }
5015
5016 // Create two sets of the HTLCs found in the remote commitment, which
5017 // are used to decide whether the HTLCs from the local commitment have
5018 // been locked in or not.
5019 remoteIncomingHTLCs := fn.NewSet[uint64]()
5020 remoteOutgoingHTLCs := fn.NewSet[uint64]()
5021 for _, htlc := range dbChannel.RemoteCommitment.Htlcs {
5022 if htlc.Incoming {
5023 remoteIncomingHTLCs.Add(htlc.HtlcIndex)
5024 } else {
5025 remoteOutgoingHTLCs.Add(htlc.HtlcIndex)
5026 }
5027 }
5028
5029 for i, htlc := range localCommit.Htlcs {
5030 var rHash [32]byte
5031 copy(rHash[:], htlc.RHash[:])
5032
5033 circuitMap := r.server.htlcSwitch.CircuitLookup()
5034
5035 var (
5036 forwardingChannel, forwardingHtlcIndex uint64
5037 lockedIn bool
5038 )
5039 switch {
5040 case htlc.Incoming:
5041 circuit := circuitMap.LookupCircuit(
5042 htlcswitch.CircuitKey{
5043 ChanID: dbChannel.ShortChannelID,
5044 HtlcID: htlc.HtlcIndex,
5045 },
5046 )
5047 if circuit != nil && circuit.Outgoing != nil {
5048 forwardingChannel = circuit.Outgoing.ChanID.
5049 ToUint64()
5050
5051 forwardingHtlcIndex = circuit.Outgoing.HtlcID
5052 }
5053
5054 lockedIn = remoteIncomingHTLCs.Contains(htlc.HtlcIndex)
5055
5056 case !htlc.Incoming:
5057 circuit := circuitMap.LookupOpenCircuit(
5058 htlcswitch.CircuitKey{
5059 ChanID: dbChannel.ShortChannelID,
5060 HtlcID: htlc.HtlcIndex,
5061 },
5062 )
5063
5064 // If the incoming channel id is the special hop.Source
5065 // value, the htlc index is a local payment identifier.
5066 // In this case, report nothing.
5067 if circuit != nil &&
5068 circuit.Incoming.ChanID != hop.Source {
5069
5070 forwardingChannel = circuit.Incoming.ChanID.
5071 ToUint64()
5072
5073 forwardingHtlcIndex = circuit.Incoming.HtlcID
5074 }
5075
5076 lockedIn = remoteOutgoingHTLCs.Contains(htlc.HtlcIndex)
5077 }
5078
5079 channel.PendingHtlcs[i] = &lnrpc.HTLC{
5080 Incoming: htlc.Incoming,
5081 Amount: int64(htlc.Amt.ToSatoshis()),
5082 HashLock: rHash[:],
5083 ExpirationHeight: htlc.RefundTimeout,
5084 HtlcIndex: htlc.HtlcIndex,
5085 ForwardingChannel: forwardingChannel,
5086 ForwardingHtlcIndex: forwardingHtlcIndex,
5087 LockedIn: lockedIn,
5088 }
5089
5090 // Add the pending HTLC amount to the UnsettledBalance field.
5091 channel.UnsettledBalance += channel.PendingHtlcs[i].Amount
5092 }
5093
5094 // If we initiated opening the channel, the zero height remote balance
5095 // is the push amount. Otherwise, our starting balance is the push
5096 // amount. If there is no push amount, these values will simply be zero.
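// For example, if we opened the channel and pushed 10,000 sat to the
// peer, InitialRemoteBalance is 10,000 sat and is reported as the push
// amount.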
5097 if dbChannel.IsInitiator { 5098 amt := dbChannel.InitialRemoteBalance.ToSatoshis() 5099 channel.PushAmountSat = uint64(amt) 5100 } else { 5101 amt := dbChannel.InitialLocalBalance.ToSatoshis() 5102 channel.PushAmountSat = uint64(amt) 5103 } 5104 5105 if len(dbChannel.LocalShutdownScript) > 0 { 5106 _, addresses, _, err := txscript.ExtractPkScriptAddrs( 5107 dbChannel.LocalShutdownScript, r.cfg.ActiveNetParams.Params, 5108 ) 5109 if err != nil { 5110 return nil, err 5111 } 5112 5113 // We only expect one upfront shutdown address for a channel. If 5114 // LocalShutdownScript is non-zero, there should be one payout 5115 // address set. 5116 if len(addresses) != 1 { 5117 return nil, fmt.Errorf("expected one upfront shutdown "+ 5118 "address, got: %v", len(addresses)) 5119 } 5120 5121 channel.CloseAddress = addresses[0].String() 5122 } 5123 5124 // If the server hasn't fully started yet, it's possible that the 5125 // channel event store hasn't either, so it won't be able to consume any 5126 // requests until then. To prevent blocking, we'll just omit the uptime 5127 // related fields for now. 5128 if !r.server.Started() { 5129 return channel, nil 5130 } 5131 5132 peer, err := route.NewVertexFromBytes(nodePub.SerializeCompressed()) 5133 if err != nil { 5134 return nil, err 5135 } 5136 5137 // Query the event store for additional information about the channel. 5138 // Do not fail if it is not available, because there is a potential 5139 // race between a channel being added to our node and the event store 5140 // being notified of it. 5141 outpoint := dbChannel.FundingOutpoint 5142 info, err := r.server.chanEventStore.GetChanInfo(outpoint, peer) 5143 switch { 5144 // If the store does not know about the peer, we just log it. 5145 case errors.Is(err, chanfitness.ErrPeerNotFound): 5146 rpcsLog.Warnf("peer: %v not found by channel event store", 5147 peer) 5148 5149 // If the store does not know about the channel, we just log it. 5150 case errors.Is(err, chanfitness.ErrChannelNotFound): 5151 rpcsLog.Warnf("channel: %v not found by channel event store", 5152 outpoint) 5153 5154 // If we got our channel info, we further populate the channel. 5155 case err == nil: 5156 channel.Uptime = int64(info.Uptime.Seconds()) 5157 channel.Lifetime = int64(info.Lifetime.Seconds()) 5158 5159 // If we get an unexpected error, we return it. 5160 default: 5161 return nil, err 5162 } 5163 5164 return channel, nil 5165 } 5166 5167 // createRPCClosedChannel creates an *lnrpc.ClosedChannelSummary from a 5168 // *channeldb.ChannelCloseSummary. 5169 func (r *rpcServer) createRPCClosedChannel( 5170 dbChannel *channeldb.ChannelCloseSummary) (*lnrpc.ChannelCloseSummary, 5171 error) { 5172 5173 nodePub := dbChannel.RemotePub 5174 nodeID := hex.EncodeToString(nodePub.SerializeCompressed()) 5175 5176 var ( 5177 closeType lnrpc.ChannelCloseSummary_ClosureType 5178 openInit lnrpc.Initiator 5179 closeInitiator lnrpc.Initiator 5180 err error 5181 ) 5182 5183 // Lookup local and remote cooperative initiators. If these values 5184 // are not known they will just return unknown. 5185 openInit, closeInitiator, err = r.getInitiators(&dbChannel.ChanPoint) 5186 if err != nil { 5187 return nil, err 5188 } 5189 5190 // Convert the close type to rpc type. 
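// NOTE: if a new close type is ever added without extending this switch,
// closeType silently stays at its zero enum value.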
5191 switch dbChannel.CloseType { 5192 case channeldb.CooperativeClose: 5193 closeType = lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE 5194 case channeldb.LocalForceClose: 5195 closeType = lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE 5196 case channeldb.RemoteForceClose: 5197 closeType = lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE 5198 case channeldb.BreachClose: 5199 closeType = lnrpc.ChannelCloseSummary_BREACH_CLOSE 5200 case channeldb.FundingCanceled: 5201 closeType = lnrpc.ChannelCloseSummary_FUNDING_CANCELED 5202 case channeldb.Abandoned: 5203 closeType = lnrpc.ChannelCloseSummary_ABANDONED 5204 } 5205 5206 dbScid := dbChannel.ShortChanID 5207 5208 // Fetch the set of aliases for this channel. 5209 channelAliases := r.server.aliasMgr.GetAliases(dbScid) 5210 5211 channel := &lnrpc.ChannelCloseSummary{ 5212 Capacity: int64(dbChannel.Capacity), 5213 RemotePubkey: nodeID, 5214 CloseHeight: dbChannel.CloseHeight, 5215 CloseType: closeType, 5216 ChannelPoint: dbChannel.ChanPoint.String(), 5217 ChanId: dbChannel.ShortChanID.ToUint64(), 5218 SettledBalance: int64(dbChannel.SettledBalance), 5219 TimeLockedBalance: int64(dbChannel.TimeLockedBalance), 5220 ChainHash: dbChannel.ChainHash.String(), 5221 ClosingTxHash: dbChannel.ClosingTXID.String(), 5222 OpenInitiator: openInit, 5223 CloseInitiator: closeInitiator, 5224 AliasScids: make([]uint64, 0, len(channelAliases)), 5225 } 5226 5227 // Populate the set of aliases. 5228 for _, chanAlias := range channelAliases { 5229 channel.AliasScids = append( 5230 channel.AliasScids, chanAlias.ToUint64(), 5231 ) 5232 } 5233 5234 // Populate any historical data that the summary needs. 5235 histChan, err := r.server.chanStateDB.FetchHistoricalChannel( 5236 &dbChannel.ChanPoint, 5237 ) 5238 switch err { 5239 // The channel was closed in a pre-historic version of lnd. Ignore the 5240 // error. 5241 case channeldb.ErrNoHistoricalBucket: 5242 case channeldb.ErrChannelNotFound: 5243 5244 case nil: 5245 if histChan.IsZeroConf() && histChan.ZeroConfConfirmed() { 5246 // If the channel was zero-conf, it may have confirmed. 5247 // Populate the confirmed SCID if so. 5248 confirmedScid := histChan.ZeroConfRealScid().ToUint64() 5249 channel.ZeroConfConfirmedScid = confirmedScid 5250 } 5251 5252 // Finally we'll attempt to encode the custom channel data if 5253 // any exists. 5254 channel.CustomChannelData, err = encodeCustomChanData(histChan) 5255 if err != nil { 5256 return nil, fmt.Errorf("unable to encode open chan "+ 5257 "data: %w", err) 5258 } 5259 5260 // Non-nil error not due to older versions of lnd. 5261 default: 5262 return nil, err 5263 } 5264 5265 reports, err := r.server.miscDB.FetchChannelReports( 5266 *r.cfg.ActiveNetParams.GenesisHash, &dbChannel.ChanPoint, 5267 ) 5268 switch err { 5269 // If the channel does not have its resolver outcomes stored, 5270 // ignore it. 5271 case channeldb.ErrNoChainHashBucket: 5272 fallthrough 5273 case channeldb.ErrNoChannelSummaries: 5274 return channel, nil 5275 5276 // If there is no error, fallthrough the switch to process reports. 5277 case nil: 5278 5279 // If another error occurred, return it. 
5280 default:
5281 return nil, err
5282 }
5283
5284 for _, report := range reports {
5285 rpcResolution, err := rpcChannelResolution(report)
5286 if err != nil {
5287 return nil, err
5288 }
5289
5290 channel.Resolutions = append(channel.Resolutions, rpcResolution)
5291 }
5292
5293 return channel, nil
5294 }
5295
5296 func rpcChannelResolution(report *channeldb.ResolverReport) (*lnrpc.Resolution,
5297 error) {
5298
5299 res := &lnrpc.Resolution{
5300 AmountSat: uint64(report.Amount),
5301 Outpoint: lnrpc.MarshalOutPoint(&report.OutPoint),
5302 }
5303
5304 if report.SpendTxID != nil {
5305 res.SweepTxid = report.SpendTxID.String()
5306 }
5307
5308 switch report.ResolverType {
5309 case channeldb.ResolverTypeAnchor:
5310 res.ResolutionType = lnrpc.ResolutionType_ANCHOR
5311
5312 case channeldb.ResolverTypeIncomingHtlc:
5313 res.ResolutionType = lnrpc.ResolutionType_INCOMING_HTLC
5314
5315 case channeldb.ResolverTypeOutgoingHtlc:
5316 res.ResolutionType = lnrpc.ResolutionType_OUTGOING_HTLC
5317
5318 case channeldb.ResolverTypeCommit:
5319 res.ResolutionType = lnrpc.ResolutionType_COMMIT
5320
5321 default:
5322 return nil, fmt.Errorf("unknown resolver type: %v",
5323 report.ResolverType)
5324 }
5325
5326 switch report.ResolverOutcome {
5327 case channeldb.ResolverOutcomeClaimed:
5328 res.Outcome = lnrpc.ResolutionOutcome_CLAIMED
5329
5330 case channeldb.ResolverOutcomeUnclaimed:
5331 res.Outcome = lnrpc.ResolutionOutcome_UNCLAIMED
5332
5333 case channeldb.ResolverOutcomeAbandoned:
5334 res.Outcome = lnrpc.ResolutionOutcome_ABANDONED
5335
5336 case channeldb.ResolverOutcomeFirstStage:
5337 res.Outcome = lnrpc.ResolutionOutcome_FIRST_STAGE
5338
5339 case channeldb.ResolverOutcomeTimeout:
5340 res.Outcome = lnrpc.ResolutionOutcome_TIMEOUT
5341
5342 default:
5343 return nil, fmt.Errorf("unknown outcome: %v",
5344 report.ResolverOutcome)
5345 }
5346
5347 return res, nil
5348 }
5349
5350 // getInitiators returns an initiator enum that provides information about the
5351 // party that initiated the channel's open and close. This information is
5352 // obtained from the historical channel bucket, so unknown values are returned
5353 // when the channel is not present (which indicates that it was closed before
5354 // we started writing channels to the historical close bucket).
5355 func (r *rpcServer) getInitiators(chanPoint *wire.OutPoint) (
5356 lnrpc.Initiator,
5357 lnrpc.Initiator, error) {
5358
5359 var (
5360 openInitiator = lnrpc.Initiator_INITIATOR_UNKNOWN
5361 closeInitiator = lnrpc.Initiator_INITIATOR_UNKNOWN
5362 )
5363
5364 // To get the close initiator for cooperative closes, we need
5365 // to get the channel status from the historical channel bucket.
5366 histChan, err := r.server.chanStateDB.FetchHistoricalChannel(chanPoint)
5367 switch {
5368 // The node has upgraded from a version where we did not store
5369 // historical channels, and has not closed a channel since. Do
5370 // not return an error; initiator values are unknown.
5371 case err == channeldb.ErrNoHistoricalBucket:
5372 return openInitiator, closeInitiator, nil
5373
5374 // The channel was closed before we started storing historical
5375 // channels. Do not return an error; initiator values are unknown.
5376 case err == channeldb.ErrChannelNotFound:
5377 return openInitiator, closeInitiator, nil
5378
5379 case err != nil:
5380 return 0, 0, err
5381 }
5382
5383 // If we successfully looked up the channel, determine the initiators
5384 // based on the channel's status.
5385 if histChan.IsInitiator { 5386 openInitiator = lnrpc.Initiator_INITIATOR_LOCAL 5387 } else { 5388 openInitiator = lnrpc.Initiator_INITIATOR_REMOTE 5389 } 5390 5391 localInit := histChan.HasChanStatus( 5392 channeldb.ChanStatusLocalCloseInitiator, 5393 ) 5394 5395 remoteInit := histChan.HasChanStatus( 5396 channeldb.ChanStatusRemoteCloseInitiator, 5397 ) 5398 5399 switch { 5400 // There is a possible case where closes were attempted by both parties. 5401 // We return the initiator as both in this case to provide full 5402 // information about the close. 5403 case localInit && remoteInit: 5404 closeInitiator = lnrpc.Initiator_INITIATOR_BOTH 5405 5406 case localInit: 5407 closeInitiator = lnrpc.Initiator_INITIATOR_LOCAL 5408 5409 case remoteInit: 5410 closeInitiator = lnrpc.Initiator_INITIATOR_REMOTE 5411 } 5412 5413 return openInitiator, closeInitiator, nil 5414 } 5415 5416 // SubscribeChannelEvents returns a uni-directional stream (server -> client) 5417 // for notifying the client of newly active, inactive or closed channels. 5418 func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription, 5419 updateStream lnrpc.Lightning_SubscribeChannelEventsServer) error { 5420 5421 channelEventSub, err := r.server.channelNotifier.SubscribeChannelEvents() 5422 if err != nil { 5423 return err 5424 } 5425 5426 // Ensure that the resources for the client is cleaned up once either 5427 // the server, or client exits. 5428 defer channelEventSub.Cancel() 5429 5430 for { 5431 //nolint:ll 5432 select { 5433 // A new update has been sent by the channel router, we'll 5434 // marshal it into the form expected by the gRPC client, then 5435 // send it off to the client(s). 5436 case e := <-channelEventSub.Updates(): 5437 var update *lnrpc.ChannelEventUpdate 5438 switch event := e.(type) { 5439 case channelnotifier.PendingOpenChannelEvent: 5440 update = &lnrpc.ChannelEventUpdate{ 5441 Type: lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL, 5442 Channel: &lnrpc.ChannelEventUpdate_PendingOpenChannel{ 5443 PendingOpenChannel: &lnrpc.PendingUpdate{ 5444 Txid: event.ChannelPoint.Hash[:], 5445 OutputIndex: event.ChannelPoint.Index, 5446 }, 5447 }, 5448 } 5449 case channelnotifier.OpenChannelEvent: 5450 channel, err := createRPCOpenChannel( 5451 updateStream.Context(), r, 5452 event.Channel, true, false, 5453 ) 5454 if err != nil { 5455 return err 5456 } 5457 5458 update = &lnrpc.ChannelEventUpdate{ 5459 Type: lnrpc.ChannelEventUpdate_OPEN_CHANNEL, 5460 Channel: &lnrpc.ChannelEventUpdate_OpenChannel{ 5461 OpenChannel: channel, 5462 }, 5463 } 5464 5465 case channelnotifier.ClosedChannelEvent: 5466 closedChannel, err := r.createRPCClosedChannel( 5467 event.CloseSummary, 5468 ) 5469 if err != nil { 5470 return err 5471 } 5472 5473 update = &lnrpc.ChannelEventUpdate{ 5474 Type: lnrpc.ChannelEventUpdate_CLOSED_CHANNEL, 5475 Channel: &lnrpc.ChannelEventUpdate_ClosedChannel{ 5476 ClosedChannel: closedChannel, 5477 }, 5478 } 5479 5480 case channelnotifier.ActiveChannelEvent: 5481 update = &lnrpc.ChannelEventUpdate{ 5482 Type: lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL, 5483 Channel: &lnrpc.ChannelEventUpdate_ActiveChannel{ 5484 ActiveChannel: &lnrpc.ChannelPoint{ 5485 FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ 5486 FundingTxidBytes: event.ChannelPoint.Hash[:], 5487 }, 5488 OutputIndex: event.ChannelPoint.Index, 5489 }, 5490 }, 5491 } 5492 5493 case channelnotifier.ChannelUpdateEvent: 5494 channel, err := createRPCOpenChannel( 5495 updateStream.Context(), 5496 r, event.Channel, true, false, 5497 ) 5498 if 
err != nil { 5499 return err 5500 } 5501 5502 update = &lnrpc.ChannelEventUpdate{ 5503 Type: lnrpc.ChannelEventUpdate_CHANNEL_UPDATE, 5504 Channel: &lnrpc.ChannelEventUpdate_UpdatedChannel{ 5505 UpdatedChannel: &lnrpc.ChannelCommitUpdate{ 5506 Channel: channel, 5507 }, 5508 }, 5509 } 5510 5511 case channelnotifier.InactiveChannelEvent: 5512 update = &lnrpc.ChannelEventUpdate{ 5513 Type: lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL, 5514 Channel: &lnrpc.ChannelEventUpdate_InactiveChannel{ 5515 InactiveChannel: &lnrpc.ChannelPoint{ 5516 FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ 5517 FundingTxidBytes: event.ChannelPoint.Hash[:], 5518 }, 5519 OutputIndex: event.ChannelPoint.Index, 5520 }, 5521 }, 5522 } 5523 5524 // Completely ignore ActiveLinkEvent and 5525 // InactiveLinkEvent as this is explicitly not exposed 5526 // to the RPC. 5527 case channelnotifier.ActiveLinkEvent, 5528 channelnotifier.InactiveLinkEvent: 5529 5530 continue 5531 5532 case channelnotifier.FullyResolvedChannelEvent: 5533 update = &lnrpc.ChannelEventUpdate{ 5534 Type: lnrpc.ChannelEventUpdate_FULLY_RESOLVED_CHANNEL, 5535 Channel: &lnrpc.ChannelEventUpdate_FullyResolvedChannel{ 5536 FullyResolvedChannel: &lnrpc.ChannelPoint{ 5537 FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ 5538 FundingTxidBytes: event.ChannelPoint.Hash[:], 5539 }, 5540 OutputIndex: event.ChannelPoint.Index, 5541 }, 5542 }, 5543 } 5544 5545 case channelnotifier.FundingTimeoutEvent: 5546 update = &lnrpc.ChannelEventUpdate{ 5547 Type: lnrpc.ChannelEventUpdate_CHANNEL_FUNDING_TIMEOUT, 5548 Channel: &lnrpc.ChannelEventUpdate_ChannelFundingTimeout{ 5549 ChannelFundingTimeout: &lnrpc.ChannelPoint{ 5550 FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ 5551 FundingTxidBytes: event.ChannelPoint.Hash[:], 5552 }, 5553 OutputIndex: event.ChannelPoint.Index, 5554 }, 5555 }, 5556 } 5557 5558 default: 5559 return fmt.Errorf("unexpected channel event update: %v", event) 5560 } 5561 5562 if err := updateStream.Send(update); err != nil { 5563 return err 5564 } 5565 5566 // The response stream's context for whatever reason has been 5567 // closed. If context is closed by an exceeded deadline we will 5568 // return an error. 5569 case <-updateStream.Context().Done(): 5570 if errors.Is(updateStream.Context().Err(), context.Canceled) { 5571 return nil 5572 } 5573 return updateStream.Context().Err() 5574 5575 case <-r.quit: 5576 return nil 5577 } 5578 } 5579 } 5580 5581 // paymentStream enables different types of payment streams, such as: 5582 // lnrpc.Lightning_SendPaymentServer and lnrpc.Lightning_SendToRouteServer to 5583 // execute sendPayment. We use this struct as a sort of bridge to enable code 5584 // re-use between SendPayment and SendToRoute. 5585 type paymentStream struct { 5586 getCtx func() context.Context 5587 recv func() (*rpcPaymentRequest, error) 5588 send func(*lnrpc.SendResponse) error 5589 } 5590 5591 // rpcPaymentRequest wraps lnrpc.SendRequest so that routes from 5592 // lnrpc.SendToRouteRequest can be passed to sendPayment. 5593 type rpcPaymentRequest struct { 5594 *lnrpc.SendRequest 5595 route *route.Route 5596 } 5597 5598 // SendPayment dispatches a bi-directional streaming RPC for sending payments 5599 // through the Lightning Network. A single RPC invocation creates a persistent 5600 // bi-directional stream allowing clients to rapidly send payments through the 5601 // Lightning Network with a single persistent connection. 
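//
// A minimal client-side sketch (hypothetical: assumes a connected
// lnrpc.LightningClient named client and a BOLT 11 string named
// invoice; error handling elided for brevity):
//
//    stream, _ := client.SendPayment(ctx)
//    _ = stream.Send(&lnrpc.SendRequest{PaymentRequest: invoice})
//    resp, _ := stream.Recv()
//    if resp.PaymentError != "" {
//        // The payment failed; the stream itself stays usable.
//    }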
5602 func (r *rpcServer) SendPayment(
5603 stream lnrpc.Lightning_SendPaymentServer) error {
5604
5605 var lock sync.Mutex
5606
5607 return r.sendPayment(&paymentStream{
5608 getCtx: stream.Context,
5609 recv: func() (*rpcPaymentRequest, error) {
5610 req, err := stream.Recv()
5611 if err != nil {
5612 return nil, err
5613 }
5614
5615 return &rpcPaymentRequest{
5616 SendRequest: req,
5617 }, nil
5618 },
5619 send: func(r *lnrpc.SendResponse) error {
5620 // Calling stream.Send concurrently is not safe.
5621 lock.Lock()
5622 defer lock.Unlock()
5623 return stream.Send(r)
5624 },
5625 })
5626 }
5627
5628 // SendToRoute dispatches a bi-directional streaming RPC for sending payments
5629 // through the Lightning Network via predefined routes passed in. A single RPC
5630 // invocation creates a persistent bi-directional stream allowing clients to
5631 // rapidly send payments through the Lightning Network with a single persistent
5632 // connection.
5633 func (r *rpcServer) SendToRoute(
5634 stream lnrpc.Lightning_SendToRouteServer) error {
5635
5636 var lock sync.Mutex
5637
5638 return r.sendPayment(&paymentStream{
5639 getCtx: stream.Context,
5640 recv: func() (*rpcPaymentRequest, error) {
5641 req, err := stream.Recv()
5642 if err != nil {
5643 return nil, err
5644 }
5645
5646 return r.unmarshallSendToRouteRequest(req)
5647 },
5648 send: func(r *lnrpc.SendResponse) error {
5649 // Calling stream.Send concurrently is not safe.
5650 lock.Lock()
5651 defer lock.Unlock()
5652 return stream.Send(r)
5653 },
5654 })
5655 }
5656
5657 // unmarshallSendToRouteRequest unmarshals an RPC SendToRoute request.
5658 func (r *rpcServer) unmarshallSendToRouteRequest(
5659 req *lnrpc.SendToRouteRequest) (*rpcPaymentRequest, error) {
5660
5661 if req.Route == nil {
5662 return nil, fmt.Errorf("unable to send, no route provided")
5663 }
5664
5665 route, err := r.routerBackend.UnmarshallRoute(req.Route)
5666 if err != nil {
5667 return nil, err
5668 }
5669
5670 return &rpcPaymentRequest{
5671 SendRequest: &lnrpc.SendRequest{
5672 PaymentHash: req.PaymentHash,
5673 PaymentHashString: req.PaymentHashString,
5674 },
5675 route: route,
5676 }, nil
5677 }
5678
5679 // rpcPaymentIntent is a small wrapper struct around the set of values we can
5680 // receive from a client over RPC if they wish to send a payment. We'll either
5681 // extract these fields from a payment request (which may include routing
5682 // hints), or we'll get a fully populated route from the user that we'll pass
5683 // directly to the channel router for dispatching.
5684 type rpcPaymentIntent struct {
5685 msat lnwire.MilliSatoshi
5686 feeLimit lnwire.MilliSatoshi
5687 cltvLimit uint32
5688 dest route.Vertex
5689 rHash [32]byte
5690 cltvDelta uint16
5691 routeHints [][]zpay32.HopHint
5692 outgoingChannelIDs []uint64
5693 lastHop *route.Vertex
5694 destFeatures *lnwire.FeatureVector
5695 paymentAddr fn.Option[[32]byte]
5696 payReq []byte
5697 metadata []byte
5698 blindedPathSet *routing.BlindedPaymentPathSet
5699
5700 destCustomRecords record.CustomSet
5701
5702 route *route.Route
5703 }
5704
5705 // extractPaymentIntent attempts to parse the complete details required to
5706 // dispatch a payment from the information presented by an RPC client. There
5707 // are three ways a client can specify their payment details: a payment
5708 // request, via manual details, or via a complete route.
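// In terms of the proto fields handled below, that is: a non-empty
// PaymentRequest, a Dest/DestString plus an Amt/AmtMsat amount, or a
// pre-built route carried in rpcPaymentRequest.route.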
5709 // 5710 //nolint:funlen 5711 func (r *rpcServer) extractPaymentIntent( 5712 rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error) { 5713 5714 payIntent := rpcPaymentIntent{} 5715 5716 // If a route was specified, then we can use that directly. 5717 if rpcPayReq.route != nil { 5718 // If the user is using the REST interface, then they'll be 5719 // passing the payment hash as a hex encoded string. 5720 if rpcPayReq.PaymentHashString != "" { 5721 paymentHash, err := hex.DecodeString( 5722 rpcPayReq.PaymentHashString, 5723 ) 5724 if err != nil { 5725 return payIntent, err 5726 } 5727 5728 copy(payIntent.rHash[:], paymentHash) 5729 } else { 5730 copy(payIntent.rHash[:], rpcPayReq.PaymentHash) 5731 } 5732 5733 payIntent.route = rpcPayReq.route 5734 return payIntent, nil 5735 } 5736 5737 // If there are no routes specified, pass along a outgoing channel 5738 // restriction if specified. The main server rpc does not support 5739 // multiple channel restrictions. 5740 if rpcPayReq.OutgoingChanId != 0 { 5741 payIntent.outgoingChannelIDs = []uint64{ 5742 rpcPayReq.OutgoingChanId, 5743 } 5744 } 5745 5746 // Pass along a last hop restriction if specified. 5747 if len(rpcPayReq.LastHopPubkey) > 0 { 5748 lastHop, err := route.NewVertexFromBytes( 5749 rpcPayReq.LastHopPubkey, 5750 ) 5751 if err != nil { 5752 return payIntent, err 5753 } 5754 payIntent.lastHop = &lastHop 5755 } 5756 5757 // Take the CLTV limit from the request if set, otherwise use the max. 5758 cltvLimit, err := routerrpc.ValidateCLTVLimit( 5759 rpcPayReq.CltvLimit, r.cfg.MaxOutgoingCltvExpiry, 5760 ) 5761 if err != nil { 5762 return payIntent, err 5763 } 5764 payIntent.cltvLimit = cltvLimit 5765 5766 customRecords := record.CustomSet(rpcPayReq.DestCustomRecords) 5767 if err := customRecords.Validate(); err != nil { 5768 return payIntent, err 5769 } 5770 payIntent.destCustomRecords = customRecords 5771 5772 validateDest := func(dest route.Vertex) error { 5773 if rpcPayReq.AllowSelfPayment { 5774 return nil 5775 } 5776 5777 if dest == r.selfNode { 5778 return errors.New("self-payments not allowed") 5779 } 5780 5781 return nil 5782 } 5783 5784 // If the payment request field isn't blank, then the details of the 5785 // invoice are encoded entirely within the encoded payReq. So we'll 5786 // attempt to decode it, populating the payment accordingly. 5787 if rpcPayReq.PaymentRequest != "" { 5788 payReq, err := zpay32.Decode( 5789 rpcPayReq.PaymentRequest, r.cfg.ActiveNetParams.Params, 5790 zpay32.WithErrorOnUnknownFeatureBit(), 5791 ) 5792 if err != nil { 5793 return payIntent, err 5794 } 5795 5796 // Next, we'll ensure that this payreq hasn't already expired. 5797 err = routerrpc.ValidatePayReqExpiry( 5798 r.routerBackend.Clock, payReq, 5799 ) 5800 if err != nil { 5801 return payIntent, err 5802 } 5803 5804 // If the amount was not included in the invoice, then we let 5805 // the payer specify the amount of satoshis they wish to send. 5806 // We override the amount to pay with the amount provided from 5807 // the payment request. 5808 if payReq.MilliSat == nil { 5809 amt, err := lnrpc.UnmarshallAmt( 5810 rpcPayReq.Amt, rpcPayReq.AmtMsat, 5811 ) 5812 if err != nil { 5813 return payIntent, err 5814 } 5815 if amt == 0 { 5816 return payIntent, errors.New("amount must be " + 5817 "specified when paying a zero amount " + 5818 "invoice") 5819 } 5820 5821 payIntent.msat = amt 5822 } else { 5823 payIntent.msat = *payReq.MilliSat 5824 } 5825 5826 // Calculate the fee limit that should be used for this payment. 
5827 payIntent.feeLimit = lnrpc.CalculateFeeLimit( 5828 rpcPayReq.FeeLimit, payIntent.msat, 5829 ) 5830 5831 copy(payIntent.rHash[:], payReq.PaymentHash[:]) 5832 destKey := payReq.Destination.SerializeCompressed() 5833 copy(payIntent.dest[:], destKey) 5834 payIntent.cltvDelta = uint16(payReq.MinFinalCLTVExpiry()) 5835 payIntent.routeHints = payReq.RouteHints 5836 payIntent.payReq = []byte(rpcPayReq.PaymentRequest) 5837 payIntent.destFeatures = payReq.Features 5838 payIntent.paymentAddr = payReq.PaymentAddr 5839 payIntent.metadata = payReq.Metadata 5840 5841 if len(payReq.BlindedPaymentPaths) > 0 { 5842 pathSet, err := routerrpc.BuildBlindedPathSet( 5843 payReq.BlindedPaymentPaths, 5844 ) 5845 if err != nil { 5846 return payIntent, err 5847 } 5848 payIntent.blindedPathSet = pathSet 5849 5850 // Replace the destination node with the target public 5851 // key of the blinded path set. 5852 copy( 5853 payIntent.dest[:], 5854 pathSet.TargetPubKey().SerializeCompressed(), 5855 ) 5856 5857 pathFeatures := pathSet.Features() 5858 if !pathFeatures.IsEmpty() { 5859 payIntent.destFeatures = pathFeatures.Clone() 5860 } 5861 } 5862 5863 if err := validateDest(payIntent.dest); err != nil { 5864 return payIntent, err 5865 } 5866 5867 // Do bounds checking with the block padding. 5868 err = routing.ValidateCLTVLimit( 5869 payIntent.cltvLimit, payIntent.cltvDelta, true, 5870 ) 5871 if err != nil { 5872 return payIntent, err 5873 } 5874 5875 return payIntent, nil 5876 } 5877 5878 // At this point, a destination MUST be specified, so we'll convert it 5879 // into the proper representation now. The destination will either be 5880 // encoded as raw bytes, or via a hex string. 5881 var pubBytes []byte 5882 if len(rpcPayReq.Dest) != 0 { 5883 pubBytes = rpcPayReq.Dest 5884 } else { 5885 var err error 5886 pubBytes, err = hex.DecodeString(rpcPayReq.DestString) 5887 if err != nil { 5888 return payIntent, err 5889 } 5890 } 5891 if len(pubBytes) != 33 { 5892 return payIntent, errors.New("invalid key length") 5893 } 5894 copy(payIntent.dest[:], pubBytes) 5895 5896 if err := validateDest(payIntent.dest); err != nil { 5897 return payIntent, err 5898 } 5899 5900 // Payment address may not be needed by legacy invoices. 5901 if len(rpcPayReq.PaymentAddr) != 0 && len(rpcPayReq.PaymentAddr) != 32 { 5902 return payIntent, errors.New("invalid payment address length") 5903 } 5904 5905 // Set the payment address if it was explicitly defined with the 5906 // rpcPaymentRequest. 5907 // Note that the payment address for the payIntent should be nil if none 5908 // was provided with the rpcPaymentRequest. 5909 if len(rpcPayReq.PaymentAddr) != 0 { 5910 var addr [32]byte 5911 copy(addr[:], rpcPayReq.PaymentAddr) 5912 payIntent.paymentAddr = fn.Some(addr) 5913 } 5914 5915 // Otherwise, If the payment request field was not specified 5916 // (and a custom route wasn't specified), construct the payment 5917 // from the other fields. 5918 payIntent.msat, err = lnrpc.UnmarshallAmt( 5919 rpcPayReq.Amt, rpcPayReq.AmtMsat, 5920 ) 5921 if err != nil { 5922 return payIntent, err 5923 } 5924 5925 // Calculate the fee limit that should be used for this payment. 5926 payIntent.feeLimit = lnrpc.CalculateFeeLimit( 5927 rpcPayReq.FeeLimit, payIntent.msat, 5928 ) 5929 5930 if rpcPayReq.FinalCltvDelta != 0 { 5931 payIntent.cltvDelta = uint16(rpcPayReq.FinalCltvDelta) 5932 } else { 5933 // If no final cltv delta is given, assume the default that we 5934 // use when creating an invoice. 
We do not assume the default of 5935 // 9 blocks that is defined in BOLT-11, because this is never 5936 // enough for other lnd nodes. 5937 payIntent.cltvDelta = uint16(r.cfg.Bitcoin.TimeLockDelta) 5938 } 5939 5940 // Do bounds checking with the block padding so the router isn't left 5941 // with a zombie payment in case the user messes up. 5942 err = routing.ValidateCLTVLimit( 5943 payIntent.cltvLimit, payIntent.cltvDelta, true, 5944 ) 5945 if err != nil { 5946 return payIntent, err 5947 } 5948 5949 // If the user is manually specifying payment details, then the payment 5950 // hash may be encoded as a string. 5951 switch { 5952 case rpcPayReq.PaymentHashString != "": 5953 paymentHash, err := hex.DecodeString( 5954 rpcPayReq.PaymentHashString, 5955 ) 5956 if err != nil { 5957 return payIntent, err 5958 } 5959 5960 copy(payIntent.rHash[:], paymentHash) 5961 5962 default: 5963 copy(payIntent.rHash[:], rpcPayReq.PaymentHash) 5964 } 5965 5966 // Unmarshal any custom destination features. 5967 payIntent.destFeatures = routerrpc.UnmarshalFeatures( 5968 rpcPayReq.DestFeatures, 5969 ) 5970 5971 return payIntent, nil 5972 } 5973 5974 type paymentIntentResponse struct { 5975 Route *route.Route 5976 Preimage [32]byte 5977 Err error 5978 } 5979 5980 // dispatchPaymentIntent attempts to fully dispatch an RPC payment intent. 5981 // We'll either pass the payment as a whole to the channel router, or give it a 5982 // pre-built route. The first error this method returns denotes if we were 5983 // unable to save the payment. The second error returned denotes if the payment 5984 // didn't succeed. 5985 func (r *rpcServer) dispatchPaymentIntent(ctx context.Context, 5986 payIntent *rpcPaymentIntent) (*paymentIntentResponse, error) { 5987 5988 // Construct a payment request to send to the channel router. If the 5989 // payment is successful, the route chosen will be returned. Otherwise, 5990 // we'll get a non-nil error. 5991 var ( 5992 preImage [32]byte 5993 route *route.Route 5994 routerErr error 5995 ) 5996 5997 // If a route was specified, then we'll pass the route directly to the 5998 // router, otherwise we'll create a payment session to execute it. 5999 if payIntent.route == nil { 6000 payment := &routing.LightningPayment{ 6001 Target: payIntent.dest, 6002 Amount: payIntent.msat, 6003 FinalCLTVDelta: payIntent.cltvDelta, 6004 FeeLimit: payIntent.feeLimit, 6005 CltvLimit: payIntent.cltvLimit, 6006 RouteHints: payIntent.routeHints, 6007 OutgoingChannelIDs: payIntent.outgoingChannelIDs, 6008 LastHop: payIntent.lastHop, 6009 PaymentRequest: payIntent.payReq, 6010 PayAttemptTimeout: routing.DefaultPayAttemptTimeout, 6011 DestCustomRecords: payIntent.destCustomRecords, 6012 DestFeatures: payIntent.destFeatures, 6013 PaymentAddr: payIntent.paymentAddr, 6014 Metadata: payIntent.metadata, 6015 BlindedPathSet: payIntent.blindedPathSet, 6016 6017 // Don't enable multi-part payments on the main rpc. 6018 // Users need to use routerrpc for that. 
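// (For comparison, a hypothetical multi-part payment would be
// dispatched via routerrpc's SendPaymentV2 with max_parts > 1
// instead of this legacy RPC.)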
6019 MaxParts: 1,
6020 }
6021 err := payment.SetPaymentHash(payIntent.rHash)
6022 if err != nil {
6023 return nil, err
6024 }
6025
6026 preImage, route, routerErr = r.server.chanRouter.SendPayment(
6027 ctx, payment,
6028 )
6029 } else {
6030 var attempt *paymentsdb.HTLCAttempt
6031 attempt, routerErr = r.server.chanRouter.SendToRoute(
6032 ctx, payIntent.rHash, payIntent.route, nil,
6033 )
6034
6035 if routerErr == nil {
6036 preImage = attempt.Settle.Preimage
6037 }
6038
6039 route = payIntent.route
6040 }
6041
6042 // If the route failed, then we'll return a nil save err, but a non-nil
6043 // routing err.
6044 if routerErr != nil {
6045 rpcsLog.Warnf("Unable to send payment: %v", routerErr)
6046
6047 return &paymentIntentResponse{
6048 Err: routerErr,
6049 }, nil
6050 }
6051
6052 return &paymentIntentResponse{
6053 Route: route,
6054 Preimage: preImage,
6055 }, nil
6056 }
6057
6058 // sendPayment takes a paymentStream (a source of pre-built routes or payment
6059 // requests) and continually attempts to dispatch payment requests written to
6060 // the write end of the stream. Responses will also be streamed back to the
6061 // client via the write end of the stream. This method is used by both
6062 // SendToRoute and SendPayment as the logic is virtually identical.
6063 func (r *rpcServer) sendPayment(stream *paymentStream) error {
6064 payChan := make(chan *rpcPaymentIntent)
6065 errChan := make(chan error, 1)
6066
6067 // We don't allow payments to be sent while the daemon itself is still
6068 // syncing as we may be trying to send a payment over a "stale"
6069 // channel.
6070 if !r.server.Started() {
6071 return ErrServerNotActive
6072 }
6073
6074 // TODO(roasbeef): check payment filter to see if already used?
6075
6076 // In order to limit the level of concurrency and prevent a client from
6077 // attempting to OOM the server, we'll set up a semaphore to create an
6078 // upper ceiling on the number of outstanding payments.
6079 const numOutstandingPayments = 2000
6080 htlcSema := make(chan struct{}, numOutstandingPayments)
6081 for i := 0; i < numOutstandingPayments; i++ {
6082 htlcSema <- struct{}{}
6083 }
6084
6085 // We keep track of the running goroutines and set up a quit signal we
6086 // can use to request them to exit if the method returns because of an
6087 // encountered error.
6088 var wg sync.WaitGroup
6089 reqQuit := make(chan struct{})
6090 defer close(reqQuit)
6091
6092 // Launch a new goroutine to handle reading new payment requests from
6093 // the client. This way we can handle errors independently of blocking
6094 // and waiting for the next payment request to come through.
6095 // TODO(joostjager): Callers expect results to come in the same order
6096 // as the requests were sent, but this is far from guaranteed in the
6097 // code below.
6098 wg.Add(1)
6099 go func() {
6100 defer wg.Done()
6101
6102 for {
6103 select {
6104 case <-reqQuit:
6105 return
6106
6107 default:
6108 // Receive the next pending payment within the
6109 // stream sent by the client. If we read the
6110 // EOF sentinel, then the client has closed the
6111 // stream, and we can exit normally.
6112 nextPayment, err := stream.recv()
6113 if err == io.EOF {
6114 close(payChan)
6115 return
6116 } else if err != nil {
6117 rpcsLog.Errorf("Failed receiving from "+
6118 "stream: %v", err)
6119
6120 select {
6121 case errChan <- err:
6122 default:
6123 }
6124 return
6125 }
6126
6127 // Populate the next payment, either from the
6128 // payment request, or from the explicitly set
6129 // fields. If the payment proto wasn't well
6130 // formed, then we'll send an error reply and
6131 // wait for the next payment.
6132 payIntent, err := r.extractPaymentIntent(
6133 nextPayment,
6134 )
6135 if err != nil {
6136 if err := stream.send(&lnrpc.SendResponse{
6137 PaymentError: err.Error(),
6138 PaymentHash: payIntent.rHash[:],
6139 }); err != nil {
6140 rpcsLog.Errorf("Failed "+
6141 "sending on "+
6142 "stream: %v", err)
6143
6144 select {
6145 case errChan <- err:
6146 default:
6147 }
6148 return
6149 }
6150 continue
6151 }
6152
6153 // If the payment was well formed, then we'll
6154 // send to the dispatch goroutine, or exit,
6155 // whichever comes first.
6156 select {
6157 case payChan <- &payIntent:
6158 case <-reqQuit:
6159 return
6160 }
6161 }
6162 }
6163 }()
6164
6165 sendLoop:
6166 for {
6167 select {
6168
6169 // If we encounter an error either during sending or
6170 // receiving, we return directly, closing the stream.
6171 case err := <-errChan:
6172 return err
6173
6174 case <-r.quit:
6175 return errors.New("rpc server shutting down")
6176
6177 case payIntent, ok := <-payChan:
6178 // If the receive loop is done, we break the send loop
6179 // and wait for the ongoing payments to finish before
6180 // exiting.
6181 if !ok {
6182 break sendLoop
6183 }
6184
6185 // We launch a new goroutine to execute the current
6186 // payment so we can continue to serve requests while
6187 // this payment is being dispatched.
6188 wg.Add(1)
6189 go func(payIntent *rpcPaymentIntent) {
6190 defer wg.Done()
6191
6192 // Attempt to grab a free semaphore slot, using
6193 // a defer to eventually release the slot
6194 // regardless of payment success.
6195 select {
6196 case <-htlcSema:
6197 case <-reqQuit:
6198 return
6199 }
6200 defer func() {
6201 htlcSema <- struct{}{}
6202 }()
6203
6204 resp, saveErr := r.dispatchPaymentIntent(
6205 stream.getCtx(), payIntent,
6206 )
6207
6208 switch {
6209 // If we were unable to save the state of the
6210 // payment, then we'll return the error to the
6211 // user, and terminate.
6212 case saveErr != nil:
6213 rpcsLog.Errorf("Failed dispatching "+
6214 "payment intent: %v", saveErr)
6215
6216 select {
6217 case errChan <- saveErr:
6218 default:
6219 }
6220 return
6221
6222 // If we receive a payment error then, instead
6223 // of terminating the stream, we send an error
6224 // response to the user.
6225 case resp.Err != nil:
6226 err := stream.send(&lnrpc.SendResponse{
6227 PaymentError: resp.Err.Error(),
6228 PaymentHash: payIntent.rHash[:],
6229 })
6230 if err != nil {
6231 rpcsLog.Errorf("Failed "+
6232 "sending error "+
6233 "response: %v", err)
6234
6235 select {
6236 case errChan <- err:
6237 default:
6238 }
6239 }
6240 return
6241 }
6242
6243 backend := r.routerBackend
6244 marshalledRouted, err := backend.MarshallRoute(
6245 resp.Route,
6246 )
6247 if err != nil {
6248 errChan <- err
6249 return
6250 }
6251
6252 err = stream.send(&lnrpc.SendResponse{
6253 PaymentHash: payIntent.rHash[:],
6254 PaymentPreimage: resp.Preimage[:],
6255 PaymentRoute: marshalledRouted,
6256 })
6257 if err != nil {
6258 rpcsLog.Errorf("Failed sending "+
6259 "response: %v", err)
6260
6261 select {
6262 case errChan <- err:
6263 default:
6264 }
6265 return
6266 }
6267 }(payIntent)
6268 }
6269 }
6270
6271 // Wait for all goroutines to finish before closing the stream.
6272 wg.Wait()
6273 return nil
6274 }
6275
6276 // SendPaymentSync is the synchronous non-streaming version of SendPayment.
6277 // This RPC is intended to be consumed by clients of the REST proxy.
6278 // Additionally, this RPC expects the destination's public key and the payment
6279 // hash (if any) to be encoded as hex strings.
6280 func (r *rpcServer) SendPaymentSync(ctx context.Context,
6281 nextPayment *lnrpc.SendRequest) (*lnrpc.SendResponse, error) {
6282
6283 return r.sendPaymentSync(ctx, &rpcPaymentRequest{
6284 SendRequest: nextPayment,
6285 })
6286 }
6287
6288 // SendToRouteSync is the synchronous non-streaming version of SendToRoute.
6289 // This RPC is intended to be consumed by clients of the REST proxy.
6290 // Additionally, this RPC expects the payment hash (if any) to be encoded as
6291 // hex strings.
6292 func (r *rpcServer) SendToRouteSync(ctx context.Context,
6293 req *lnrpc.SendToRouteRequest) (*lnrpc.SendResponse, error) {
6294
6295 if req.Route == nil {
6296 return nil, fmt.Errorf("unable to send, no routes provided")
6297 }
6298
6299 paymentRequest, err := r.unmarshallSendToRouteRequest(req)
6300 if err != nil {
6301 return nil, err
6302 }
6303
6304 return r.sendPaymentSync(ctx, paymentRequest)
6305 }
6306
6307 // sendPaymentSync is the synchronous variant of sendPayment. It will block and
6308 // wait until the payment has been fully completed.
6309 func (r *rpcServer) sendPaymentSync(ctx context.Context,
6310 nextPayment *rpcPaymentRequest) (*lnrpc.SendResponse, error) {
6311
6312 // We don't allow payments to be sent while the daemon itself is still
6313 // syncing as we may be trying to send a payment over a "stale"
6314 // channel.
6315 if !r.server.Started() {
6316 return nil, ErrServerNotActive
6317 }
6318
6319 // First we'll attempt to map the proto describing the next payment to
6320 // an intent that we can pass to local sub-systems.
6321 payIntent, err := r.extractPaymentIntent(nextPayment)
6322 if err != nil {
6323 return nil, err
6324 }
6325
6326 // With the payment validated, we'll now attempt to dispatch the
6327 // payment.
6328 resp, saveErr := r.dispatchPaymentIntent(ctx, &payIntent)
6329 switch {
6330 case saveErr != nil:
6331 return nil, saveErr
6332
6333 case resp.Err != nil:
6334 return &lnrpc.SendResponse{
6335 PaymentError: resp.Err.Error(),
6336 PaymentHash: payIntent.rHash[:],
6337 }, nil
6338 }
6339
6340 rpcRoute, err := r.routerBackend.MarshallRoute(resp.Route)
6341 if err != nil {
6342 return nil, err
6343 }
6344
6345 return &lnrpc.SendResponse{
6346 PaymentHash: payIntent.rHash[:],
6347 PaymentPreimage: resp.Preimage[:],
6348 PaymentRoute: rpcRoute,
6349 }, nil
6350 }
6351
6352 // AddInvoice attempts to add a new invoice to the invoice database. Any
6353 // duplicated invoices are rejected; therefore, all invoices *must* have a
6354 // unique payment preimage.
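//
// A minimal invoice sketch (hypothetical: assumes a connected
// lnrpc.LightningClient named client; values are illustrative only):
//
//    resp, err := client.AddInvoice(ctx, &lnrpc.Invoice{
//        Memo:  "coffee",
//        Value: 1500, // amount in satoshis
//    })
//    // resp.PaymentRequest holds the BOLT 11 string to give the payer.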
6355 func (r *rpcServer) AddInvoice(ctx context.Context,
6356 invoice *lnrpc.Invoice) (*lnrpc.AddInvoiceResponse, error) {
6357
6358 var (
6359 defaultDelta = r.cfg.Bitcoin.TimeLockDelta
6360 blindCfg = invoice.BlindedPathConfig
6361 blind = invoice.IsBlinded
6362 )
6363
6364 globalBlindCfg := r.server.cfg.Routing.BlindedPaths
6365 blindingRestrictions := &routing.BlindedPathRestrictions{
6366 MinDistanceFromIntroNode: globalBlindCfg.MinNumRealHops,
6367 NumHops: globalBlindCfg.NumHops,
6368 MaxNumPaths: globalBlindCfg.MaxNumPaths,
6369 NodeOmissionSet: fn.NewSet[route.Vertex](),
6370 }
6371
6372 if blindCfg != nil && !blind {
6373 return nil, fmt.Errorf("blinded path config provided but " +
6374 "IsBlinded not set")
6375 }
6376
6377 if blind && blindCfg != nil {
6378 if blindCfg.MinNumRealHops != nil {
6379 blindingRestrictions.MinDistanceFromIntroNode =
6380 uint8(*blindCfg.MinNumRealHops)
6381 }
6382 if blindCfg.NumHops != nil {
6383 blindingRestrictions.NumHops = uint8(*blindCfg.NumHops)
6384 }
6385 if blindCfg.MaxNumPaths != nil {
6386 if *blindCfg.MaxNumPaths == 0 {
6387 return nil, fmt.Errorf("blinded max num " +
6388 "paths cannot be 0")
6389 }
6390 blindingRestrictions.MaxNumPaths =
6391 uint8(*blindCfg.MaxNumPaths)
6392 }
6393
6394 for _, nodeIDBytes := range blindCfg.NodeOmissionList {
6395 vertex, err := route.NewVertexFromBytes(nodeIDBytes)
6396 if err != nil {
6397 return nil, err
6398 }
6399
6400 blindingRestrictions.NodeOmissionSet.Add(vertex)
6401 }
6402
6403 blindingRestrictions.IncomingChainedChannels = append(
6404 blindingRestrictions.IncomingChainedChannels,
6405 blindCfg.IncomingChannelList...,
6406 )
6407
6408 numChainedChannels :=
6409 uint8(len(blindingRestrictions.IncomingChainedChannels))
6410
6411 // When the blinded incoming channel list parameter is
6412 // selected, the maximum number of hops is implicitly set.
6413 if numChainedChannels > blindingRestrictions.NumHops {
6414 rpcsLog.Warnf("Changing the num_blinded_hops "+
6415 "from (%d) to (%d)",
6416 blindingRestrictions.NumHops,
6417 numChainedChannels)
6418
6419 blindingRestrictions.NumHops =
6420 numChainedChannels
6421 }
6422
6423 // The MinDistanceFromIntroNode must be greater than or equal to
6424 // the number of hops specified on the chained channels.
6425 minNumHops := blindingRestrictions.MinDistanceFromIntroNode
6426 if minNumHops < numChainedChannels {
6427 // Ensure MinimumPath is at least the size of the
6428 // chained path to avoid shorter routes being returned
6429 // by the pathfinder.
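// For example (illustrative): with three channels in
// IncomingChannelList, min_num_real_hops must be at least 3,
// otherwise we reject the request below.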
6430 return nil, fmt.Errorf("minimum number of blinded "+ 6431 "path hops (%d) must be greater than or equal "+ 6432 "to the number of hops specified on the "+ 6433 "chained channels (%d)", minNumHops, 6434 numChainedChannels) 6435 } 6436 6437 } 6438 6439 if blindingRestrictions.MinDistanceFromIntroNode > 6440 blindingRestrictions.NumHops { 6441 6442 return nil, fmt.Errorf("the minimum number of real " + 6443 "hops in a blinded path must be smaller than " + 6444 "or equal to the number of hops expected to " + 6445 "be included in each path") 6446 } 6447 6448 addInvoiceCfg := &invoicesrpc.AddInvoiceConfig{ 6449 AddInvoice: r.server.invoices.AddInvoice, 6450 IsChannelActive: r.server.htlcSwitch.HasActiveLink, 6451 ChainParams: r.cfg.ActiveNetParams.Params, 6452 NodeSigner: r.server.nodeSigner, 6453 DefaultCLTVExpiry: defaultDelta, 6454 ChanDB: r.server.chanStateDB, 6455 Graph: r.server.graphDB, 6456 GenInvoiceFeatures: func() *lnwire.FeatureVector { 6457 v := r.server.featureMgr.Get(feature.SetInvoice) 6458 6459 if blind { 6460 // If an invoice includes blinded paths, then a 6461 // payment address is not required since we use 6462 // the PathID in the final hop's encrypted data 6463 // as equivalent to the payment address 6464 v.Unset(lnwire.PaymentAddrRequired) 6465 v.Set(lnwire.PaymentAddrOptional) 6466 6467 // The invoice payer will also need to 6468 // understand the new BOLT 11 tagged field 6469 // containing the blinded path, so we switch 6470 // the bit to required. 6471 v = feature.SetBit( 6472 v, lnwire.Bolt11BlindedPathsRequired, 6473 ) 6474 } 6475 6476 return v 6477 }, 6478 GenAmpInvoiceFeatures: func() *lnwire.FeatureVector { 6479 return r.server.featureMgr.Get(feature.SetInvoiceAmp) 6480 }, 6481 GetAlias: r.server.aliasMgr.GetPeerAlias, 6482 BestHeight: r.server.cc.BestBlockTracker.BestHeight, 6483 QueryBlindedRoutes: func(amt lnwire.MilliSatoshi) ( 6484 []*route.Route, error) { 6485 6486 return r.server.chanRouter.FindBlindedPaths( 6487 r.selfNode, amt, 6488 r.server.defaultMC.GetProbability, 6489 blindingRestrictions, 6490 ) 6491 }, 6492 } 6493 6494 value, err := lnrpc.UnmarshallAmt(invoice.Value, invoice.ValueMsat) 6495 if err != nil { 6496 return nil, err 6497 } 6498 6499 // Convert the passed routing hints to the required format. 6500 routeHints, err := invoicesrpc.CreateZpay32HopHints(invoice.RouteHints) 6501 if err != nil { 6502 return nil, err 6503 } 6504 6505 var blindedPathCfg *invoicesrpc.BlindedPathConfig 6506 if blind { 6507 bpConfig := r.server.cfg.Routing.BlindedPaths 6508 6509 blindedPathCfg = &invoicesrpc.BlindedPathConfig{ 6510 RoutePolicyIncrMultiplier: bpConfig. 6511 PolicyIncreaseMultiplier, 6512 RoutePolicyDecrMultiplier: bpConfig. 6513 PolicyDecreaseMultiplier, 6514 DefaultDummyHopPolicy: &blindedpath.BlindedHopPolicy{ 6515 CLTVExpiryDelta: uint16(defaultDelta), 6516 FeeRate: uint32( 6517 r.server.cfg.Bitcoin.FeeRate, 6518 ), 6519 BaseFee: r.server.cfg.Bitcoin.BaseFee, 6520 MinHTLCMsat: r.server.cfg.Bitcoin.MinHTLCIn, 6521 6522 // MaxHTLCMsat will be calculated on the fly by 6523 // using the introduction node's channel's 6524 // capacities. 
6525 MaxHTLCMsat: 0, 6526 }, 6527 MinNumPathHops: blindingRestrictions.NumHops, 6528 } 6529 } 6530 6531 addInvoiceData := &invoicesrpc.AddInvoiceData{ 6532 Memo: invoice.Memo, 6533 Value: value, 6534 DescriptionHash: invoice.DescriptionHash, 6535 Expiry: invoice.Expiry, 6536 FallbackAddr: invoice.FallbackAddr, 6537 CltvExpiry: invoice.CltvExpiry, 6538 Private: invoice.Private, 6539 RouteHints: routeHints, 6540 Amp: invoice.IsAmp, 6541 BlindedPathCfg: blindedPathCfg, 6542 } 6543 6544 if invoice.RPreimage != nil { 6545 preimage, err := lntypes.MakePreimage(invoice.RPreimage) 6546 if err != nil { 6547 return nil, err 6548 } 6549 addInvoiceData.Preimage = &preimage 6550 } 6551 6552 hash, dbInvoice, err := invoicesrpc.AddInvoice( 6553 ctx, addInvoiceCfg, addInvoiceData, 6554 ) 6555 if err != nil { 6556 return nil, err 6557 } 6558 6559 return &lnrpc.AddInvoiceResponse{ 6560 AddIndex: dbInvoice.AddIndex, 6561 PaymentRequest: string(dbInvoice.PaymentRequest), 6562 RHash: hash[:], 6563 PaymentAddr: dbInvoice.Terms.PaymentAddr[:], 6564 }, nil 6565 } 6566 6567 // LookupInvoice attempts to look up an invoice according to its payment hash. 6568 // The passed payment hash *must* be exactly 32 bytes, if not an error is 6569 // returned. 6570 func (r *rpcServer) LookupInvoice(ctx context.Context, 6571 req *lnrpc.PaymentHash) (*lnrpc.Invoice, error) { 6572 6573 var ( 6574 payHash [32]byte 6575 rHash []byte 6576 err error 6577 ) 6578 6579 // If the RHash as a raw string was provided, then decode that and use 6580 // that directly. Otherwise, we use the raw bytes provided. 6581 if req.RHashStr != "" { 6582 rHash, err = hex.DecodeString(req.RHashStr) 6583 if err != nil { 6584 return nil, err 6585 } 6586 } else { 6587 rHash = req.RHash 6588 } 6589 6590 // Ensure that the payment hash is *exactly* 32-bytes. 6591 if len(rHash) != 0 && len(rHash) != 32 { 6592 return nil, fmt.Errorf("payment hash must be exactly "+ 6593 "32 bytes, is instead %v", len(rHash)) 6594 } 6595 copy(payHash[:], rHash) 6596 6597 rpcsLog.Tracef("[lookupinvoice] searching for invoice %x", payHash[:]) 6598 6599 invoice, err := r.server.invoices.LookupInvoice(ctx, payHash) 6600 switch { 6601 case errors.Is(err, invoices.ErrInvoiceNotFound) || 6602 errors.Is(err, invoices.ErrNoInvoicesCreated): 6603 6604 return nil, status.Error(codes.NotFound, err.Error()) 6605 case err != nil: 6606 return nil, err 6607 } 6608 6609 rpcsLog.Tracef("[lookupinvoice] located invoice %v", 6610 lnutils.SpewLogClosure(invoice)) 6611 6612 rpcInvoice, err := invoicesrpc.CreateRPCInvoice( 6613 &invoice, r.cfg.ActiveNetParams.Params, 6614 ) 6615 if err != nil { 6616 return nil, err 6617 } 6618 6619 // Give the aux data parser a chance to format the custom data in the 6620 // invoice HTLCs. 6621 err = fn.MapOptionZ( 6622 r.server.implCfg.AuxDataParser, 6623 func(parser AuxDataParser) error { 6624 return parser.InlineParseCustomData(rpcInvoice) 6625 }, 6626 ) 6627 if err != nil { 6628 return nil, fmt.Errorf("error parsing custom data: %w", 6629 err) 6630 } 6631 6632 return rpcInvoice, nil 6633 } 6634 6635 // ListInvoices returns a list of all the invoices currently stored within the 6636 // database. Any active debug invoices are ignored. 6637 func (r *rpcServer) ListInvoices(ctx context.Context, 6638 req *lnrpc.ListInvoiceRequest) (*lnrpc.ListInvoiceResponse, error) { 6639 6640 // If the number of invoices was not specified, then we'll default to 6641 // returning the latest 100 invoices. 
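// Callers can page through results with IndexOffset: e.g.
// (hypothetical request values) Reversed=true with IndexOffset=0
// returns the most recent invoices, and passing the response's
// FirstIndexOffset as the next IndexOffset pages further back.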
6642 if req.NumMaxInvoices == 0 {
6643 req.NumMaxInvoices = 100
6644 }
6645
6646 // If both dates are set, we check that the start date is less than the
6647 // end date, otherwise we'll get an empty result.
6648 if req.CreationDateStart != 0 && req.CreationDateEnd != 0 {
6649 if req.CreationDateStart >= req.CreationDateEnd {
6650 return nil, fmt.Errorf("start date(%v) must be before "+
6651 "end date(%v)", req.CreationDateStart,
6652 req.CreationDateEnd)
6653 }
6654 }
6655
6656 // Next, we'll map the proto request into a format that is understood by
6657 // the database.
6658 q := invoices.InvoiceQuery{
6659 IndexOffset: req.IndexOffset,
6660 NumMaxInvoices: req.NumMaxInvoices,
6661 PendingOnly: req.PendingOnly,
6662 Reversed: req.Reversed,
6663 CreationDateStart: int64(req.CreationDateStart),
6664 CreationDateEnd: int64(req.CreationDateEnd),
6665 }
6666
6667 invoiceSlice, err := r.server.invoicesDB.QueryInvoices(ctx, q)
6668 if err != nil {
6669 return nil, fmt.Errorf("unable to query invoices: %w", err)
6670 }
6671
6672 // Before returning the response, we'll need to convert each invoice
6673 // into its proto representation.
6674 resp := &lnrpc.ListInvoiceResponse{
6675 Invoices: make([]*lnrpc.Invoice, len(invoiceSlice.Invoices)),
6676 FirstIndexOffset: invoiceSlice.FirstIndexOffset,
6677 LastIndexOffset: invoiceSlice.LastIndexOffset,
6678 }
6679 for i, invoice := range invoiceSlice.Invoices {
6680 invoice := invoice
6681 resp.Invoices[i], err = invoicesrpc.CreateRPCInvoice(
6682 &invoice, r.cfg.ActiveNetParams.Params,
6683 )
6684 if err != nil {
6685 return nil, err
6686 }
6687
6688 // Give the aux data parser a chance to format the custom data
6689 // in the invoice HTLCs.
6690 err = fn.MapOptionZ(
6691 r.server.implCfg.AuxDataParser,
6692 func(parser AuxDataParser) error {
6693 return parser.InlineParseCustomData(
6694 resp.Invoices[i],
6695 )
6696 },
6697 )
6698 if err != nil {
6699 return nil, fmt.Errorf("error parsing custom data: %w",
6700 err)
6701 }
6702 }
6703
6704 return resp, nil
6705 }
6706
6707 // SubscribeInvoices returns a uni-directional stream (server -> client) for
6708 // notifying the client of newly added/settled invoices.
6709 func (r *rpcServer) SubscribeInvoices(req *lnrpc.InvoiceSubscription,
6710 updateStream lnrpc.Lightning_SubscribeInvoicesServer) error {
6711
6712 invoiceClient, err := r.server.invoices.SubscribeNotifications(
6713 updateStream.Context(), req.AddIndex, req.SettleIndex,
6714 )
6715 if err != nil {
6716 return err
6717 }
6718 defer invoiceClient.Cancel()
6719
6720 for {
6721 select {
6722 case newInvoice := <-invoiceClient.NewInvoices:
6723 rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
6724 newInvoice, r.cfg.ActiveNetParams.Params,
6725 )
6726 if err != nil {
6727 return err
6728 }
6729
6730 // Give the aux data parser a chance to format the
6731 // custom data in the invoice HTLCs.
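// fn.MapOptionZ only invokes the closure when an AuxDataParser is
// actually configured; for fn.Option's None case it returns the
// zero value (here a nil error), making this a no-op by default.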
6732 err = fn.MapOptionZ(
6733 r.server.implCfg.AuxDataParser,
6734 func(parser AuxDataParser) error {
6735 return parser.InlineParseCustomData(
6736 rpcInvoice,
6737 )
6738 },
6739 )
6740 if err != nil {
6741 return fmt.Errorf("error parsing custom data: "+
6742 "%w", err)
6743 }
6744
6745 if err := updateStream.Send(rpcInvoice); err != nil {
6746 return err
6747 }
6748
6749 case settledInvoice := <-invoiceClient.SettledInvoices:
6750 rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
6751 settledInvoice, r.cfg.ActiveNetParams.Params,
6752 )
6753 if err != nil {
6754 return err
6755 }
6756
6757 // Give the aux data parser a chance to format the
6758 // custom data in the invoice HTLCs.
6759 err = fn.MapOptionZ(
6760 r.server.implCfg.AuxDataParser,
6761 func(parser AuxDataParser) error {
6762 return parser.InlineParseCustomData(
6763 rpcInvoice,
6764 )
6765 },
6766 )
6767 if err != nil {
6768 return fmt.Errorf("error parsing custom data: "+
6769 "%w", err)
6770 }
6771
6772 if err := updateStream.Send(rpcInvoice); err != nil {
6773 return err
6774 }
6775
6776 // The response stream's context for whatever reason has been
6777 // closed. If context is closed by an exceeded deadline we will
6778 // return an error.
6779 case <-updateStream.Context().Done():
6780 if errors.Is(updateStream.Context().Err(), context.Canceled) {
6781 return nil
6782 }
6783 return updateStream.Context().Err()
6784
6785 case <-r.quit:
6786 return nil
6787 }
6788 }
6789 }
6790
6791 // SubscribeTransactions creates a uni-directional stream (server -> client) in
6792 // which any newly discovered transactions relevant to the wallet are sent
6793 // over.
6794 func (r *rpcServer) SubscribeTransactions(req *lnrpc.GetTransactionsRequest,
6795 updateStream lnrpc.Lightning_SubscribeTransactionsServer) error {

6797 txClient, err := r.server.cc.Wallet.SubscribeTransactions()
6798 if err != nil {
6799 return err
6800 }
6801 defer txClient.Cancel()
6802 rpcsLog.Infof("New transaction subscription")
6803
6804 for {
6805 select {
6806 case tx := <-txClient.ConfirmedTransactions():
6807 detail := lnrpc.RPCTransaction(tx)
6808 if err := updateStream.Send(detail); err != nil {
6809 return err
6810 }
6811
6812 case tx := <-txClient.UnconfirmedTransactions():
6813 detail := lnrpc.RPCTransaction(tx)
6814 if err := updateStream.Send(detail); err != nil {
6815 return err
6816 }
6817
6818 // The response stream's context for whatever reason has been
6819 // closed. If context is closed by an exceeded deadline we will
6820 // return an error.
6821 case <-updateStream.Context().Done():
6822 rpcsLog.Infof("Canceling transaction subscription")
6823 if errors.Is(updateStream.Context().Err(), context.Canceled) {
6824 return nil
6825 }
6826 return updateStream.Context().Err()
6827
6828 case <-r.quit:
6829 return nil
6830 }
6831 }
6832 }
6833
6834 // GetTransactions returns a list describing all the known transactions
6835 // relevant to the wallet.
6836 func (r *rpcServer) GetTransactions(ctx context.Context,
6837 req *lnrpc.GetTransactionsRequest) (*lnrpc.TransactionDetails, error) {
6838
6839 // To remain backwards compatible with the old API, default to the
6840 // special case end height which will return transactions from the start
6841 // height until the chain tip, including unconfirmed transactions.
6842 var endHeight = btcwallet.UnconfirmedHeight
6843
6844 // If the user has provided an end height, we overwrite our default.
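// (btcwallet.UnconfirmedHeight is the sentinel end height referred to
// above; any non-zero EndHeight supplied by the caller replaces it and
// bounds the listing at that block height.)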
6845 if req.EndHeight != 0 {
6846 endHeight = req.EndHeight
6847 }
6848
6849 txns, firstIdx, lastIdx, err :=
6850 r.server.cc.Wallet.ListTransactionDetails(
6851 req.StartHeight, endHeight, req.Account,
6852 req.IndexOffset, req.MaxTransactions,
6853 )
6854 if err != nil {
6855 return nil, err
6856 }
6857
6858 return lnrpc.RPCTransactionDetails(txns, firstIdx, lastIdx), nil
6859 }
6860
6861 // DescribeGraph returns a description of the latest graph state from the PoV
6862 // of the node. The graph information is partitioned into two components: all
6863 // the nodes/vertexes, and all the edges that connect the vertexes themselves.
6864 // As this is a directed graph, the edges also contain the node directional
6865 // specific routing policy which includes: the time lock delta, fee
6866 // information, etc.
6867 func (r *rpcServer) DescribeGraph(ctx context.Context,
6868 req *lnrpc.ChannelGraphRequest) (*lnrpc.ChannelGraph, error) {
6869
6870 resp := &lnrpc.ChannelGraph{}
6871 includeUnannounced := req.IncludeUnannounced
6872
6873 // Check to see if the cache is already populated; if so, then we can
6874 // just return it directly.
6875 //
6876 // TODO(roasbeef): move this to an interceptor level feature?
6877 graphCacheActive := r.cfg.Caches.RPCGraphCacheDuration != 0
6878 if graphCacheActive {
6879 r.graphCache.Lock()
6880 defer r.graphCache.Unlock()
6881
6882 if r.describeGraphResp != nil {
6883 return r.describeGraphResp, nil
6884 }
6885 }
6886
6887 // Obtain the pointer to the V1 channel graph. This will provide a
6888 // consistent view of the graph due to bolt db's transactional model.
6889 //
6890 // TODO(elle): switch to a cross-version graph view when available.
6891 graph := r.server.v1Graph
6892
6893 // First iterate through all the known nodes (connected or unconnected
6894 // within the graph), collating their current state into the RPC
6895 // response.
6896 err := graph.ForEachNode(ctx, func(node *models.Node) error {
6897 lnNode := marshalNode(node)
6898
6899 resp.Nodes = append(resp.Nodes, lnNode)
6900
6901 return nil
6902 }, func() {
6903 resp.Nodes = nil
6904 })
6905 if err != nil {
6906 return nil, err
6907 }
6908
6909 // Next, for each active channel we know of within the graph, create a
6910 // similar response which details both the edge information as well as
6911 // the routing policies of the nodes connecting the two edges.
6912 err = graph.ForEachChannel(ctx, func(edgeInfo *models.ChannelEdgeInfo,
6913 c1, c2 *models.ChannelEdgePolicy) error {
6914
6915 // Do not include unannounced channels unless specifically
6916 // requested. Unannounced channels include both private channels as
6917 // well as public channels whose authentication proofs have not been
6918 // confirmed yet, hence were not announced.
6919 if !includeUnannounced && edgeInfo.AuthProof == nil {
6920 return nil
6921 }
6922
6923 edge := marshalDBEdge(edgeInfo, c1, c2, req.IncludeAuthProof)
6924 resp.Edges = append(resp.Edges, edge)
6925
6926 return nil
6927 }, func() {
6928 resp.Edges = nil
6929 })
6930 if err != nil && !errors.Is(err, graphdb.ErrGraphNoEdgesFound) {
6931 return nil, err
6932 }
6933
6934 // We still have the mutex held, so we can safely populate the cache
6935 // now to save on GC churn for this query, but only if the cache isn't
6936 // disabled.
6937 if graphCacheActive {
6938 r.describeGraphResp = resp
6939 }
6940
6941 return resp, nil
6942 }
6943
6944 // marshalExtraOpaqueData marshals the given tlv data. If the tlv stream is
6945 // malformed or empty, an empty map is returned.
This makes the method safe to 6946 // use on unvalidated data. 6947 func marshalExtraOpaqueData(data []byte) map[uint64][]byte { 6948 r := bytes.NewReader(data) 6949 6950 tlvStream, err := tlv.NewStream() 6951 if err != nil { 6952 return nil 6953 } 6954 6955 // Since ExtraOpaqueData is provided by a potentially malicious peer, 6956 // pass it into the P2P decoding variant. 6957 parsedTypes, err := tlvStream.DecodeWithParsedTypesP2P(r) 6958 if err != nil || len(parsedTypes) == 0 { 6959 return nil 6960 } 6961 6962 records := make(map[uint64][]byte) 6963 for k, v := range parsedTypes { 6964 records[uint64(k)] = v 6965 } 6966 6967 return records 6968 } 6969 6970 func marshalDBEdge(edgeInfo *models.ChannelEdgeInfo, 6971 c1, c2 *models.ChannelEdgePolicy, 6972 includeAuthProof bool) *lnrpc.ChannelEdge { 6973 6974 // Make sure the policies match the node they belong to. c1 should point 6975 // to the policy for NodeKey1, and c2 for NodeKey2. 6976 if c1 != nil && c1.ChannelFlags&lnwire.ChanUpdateDirection == 1 || 6977 c2 != nil && c2.ChannelFlags&lnwire.ChanUpdateDirection == 0 { 6978 6979 c2, c1 = c1, c2 6980 } 6981 6982 var lastUpdate int64 6983 if c1 != nil { 6984 lastUpdate = c1.LastUpdate.Unix() 6985 } 6986 if c2 != nil && c2.LastUpdate.Unix() > lastUpdate { 6987 lastUpdate = c2.LastUpdate.Unix() 6988 } 6989 6990 customRecords := marshalExtraOpaqueData(edgeInfo.ExtraOpaqueData) 6991 6992 edge := &lnrpc.ChannelEdge{ 6993 ChannelId: edgeInfo.ChannelID, 6994 ChanPoint: edgeInfo.ChannelPoint.String(), 6995 // TODO(roasbeef): update should be on edge info itself 6996 LastUpdate: uint32(lastUpdate), 6997 Node1Pub: hex.EncodeToString(edgeInfo.NodeKey1Bytes[:]), 6998 Node2Pub: hex.EncodeToString(edgeInfo.NodeKey2Bytes[:]), 6999 Capacity: int64(edgeInfo.Capacity), 7000 CustomRecords: customRecords, 7001 } 7002 7003 if c1 != nil { 7004 edge.Node1Policy = marshalDBRoutingPolicy(c1) 7005 } 7006 7007 if c2 != nil { 7008 edge.Node2Policy = marshalDBRoutingPolicy(c2) 7009 } 7010 7011 // We do not expect to have an AuthProof for private channels and for 7012 // our own public channels for the time between channel funding and 7013 // channel announcement. 7014 if includeAuthProof && edgeInfo.AuthProof != nil { 7015 edge.AuthProof = &lnrpc.ChannelAuthProof{ 7016 NodeSig1: edgeInfo.AuthProof.NodeSig1(), 7017 BitcoinSig1: edgeInfo.AuthProof.BitcoinSig1(), 7018 NodeSig2: edgeInfo.AuthProof.NodeSig2(), 7019 BitcoinSig2: edgeInfo.AuthProof.BitcoinSig2(), 7020 } 7021 } 7022 7023 return edge 7024 } 7025 7026 // marshalPolicyExtraOpaqueData marshals the given tlv data and filters out 7027 // inbound fee record. 7028 func marshalPolicyExtraOpaqueData(data []byte) map[uint64][]byte { 7029 records := marshalExtraOpaqueData(data) 7030 7031 // Remove the inbound fee record as we have dedicated fields for it. 
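// (lnwire.FeeRecordType is the TLV type carrying the inbound fee; it
// is dropped here because marshalDBRoutingPolicy below exposes it via
// the dedicated InboundFeeBaseMsat/InboundFeeRateMilliMsat fields.)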
7032 delete(records, uint64(lnwire.FeeRecordType)) 7033 7034 return records 7035 } 7036 7037 func marshalDBRoutingPolicy( 7038 policy *models.ChannelEdgePolicy) *lnrpc.RoutingPolicy { 7039 7040 disabled := policy.ChannelFlags&lnwire.ChanUpdateDisabled != 0 7041 7042 customRecords := marshalPolicyExtraOpaqueData(policy.ExtraOpaqueData) 7043 inboundFee := policy.InboundFee.UnwrapOr(lnwire.Fee{}) 7044 7045 return &lnrpc.RoutingPolicy{ 7046 TimeLockDelta: uint32(policy.TimeLockDelta), 7047 MinHtlc: int64(policy.MinHTLC), 7048 MaxHtlcMsat: uint64(policy.MaxHTLC), 7049 FeeBaseMsat: int64(policy.FeeBaseMSat), 7050 FeeRateMilliMsat: int64(policy.FeeProportionalMillionths), 7051 Disabled: disabled, 7052 LastUpdate: uint32(policy.LastUpdate.Unix()), 7053 CustomRecords: customRecords, 7054 7055 InboundFeeBaseMsat: inboundFee.BaseFee, 7056 InboundFeeRateMilliMsat: inboundFee.FeeRate, 7057 } 7058 } 7059 7060 // GetNodeMetrics returns all available node metrics calculated from the 7061 // current channel graph. 7062 func (r *rpcServer) GetNodeMetrics(ctx context.Context, 7063 req *lnrpc.NodeMetricsRequest) (*lnrpc.NodeMetricsResponse, error) { 7064 7065 // Get requested metric types. 7066 getCentrality := false 7067 for _, t := range req.Types { 7068 if t == lnrpc.NodeMetricType_BETWEENNESS_CENTRALITY { 7069 getCentrality = true 7070 } 7071 } 7072 7073 // Only centrality can be requested for now. 7074 if !getCentrality { 7075 return nil, nil 7076 } 7077 7078 resp := &lnrpc.NodeMetricsResponse{ 7079 BetweennessCentrality: make(map[string]*lnrpc.FloatMetric), 7080 } 7081 7082 // Obtain the pointer to the global singleton channel graph; this will 7083 // provide a consistent view of the graph due to bolt db's 7084 // transactional model. 7085 graph := r.server.graphDB 7086 7087 // Calculate betweenness centrality if requested. Note that depending on the 7088 // graph size, this may take up to a few minutes. 7089 channelGraph := autopilot.ChannelGraphFromDatabase(graph) 7090 centralityMetric, err := autopilot.NewBetweennessCentralityMetric( 7091 runtime.NumCPU(), 7092 ) 7093 if err != nil { 7094 return nil, err 7095 } 7096 if err := centralityMetric.Refresh(ctx, channelGraph); err != nil { 7097 return nil, err 7098 } 7099 7100 // Fill in both the normalized and non-normalized centrality values. 7101 centrality := centralityMetric.GetMetric(true) 7102 for nodeID, val := range centrality { 7103 resp.BetweennessCentrality[hex.EncodeToString(nodeID[:])] = 7104 &lnrpc.FloatMetric{ 7105 NormalizedValue: val, 7106 } 7107 } 7108 7109 centrality = centralityMetric.GetMetric(false) 7110 for nodeID, val := range centrality { 7111 resp.BetweennessCentrality[hex.EncodeToString(nodeID[:])].Value = val 7112 } 7113 7114 return resp, nil 7115 } 7116
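// A hedged, client-side sketch of this RPC (illustrative only; assumes
// "client" is an lnrpc.LightningClient and "ctx" a context.Context
// obtained elsewhere):
//
//	resp, err := client.GetNodeMetrics(ctx, &lnrpc.NodeMetricsRequest{
//		Types: []lnrpc.NodeMetricType{
//			lnrpc.NodeMetricType_BETWEENNESS_CENTRALITY,
//		},
//	})
//	if err != nil {
//		return err
//	}
//	for pubKey, metric := range resp.BetweennessCentrality {
//		fmt.Printf("%s: %f (normalized: %f)\n", pubKey,
//			metric.Value, metric.NormalizedValue)
//	}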
7117 // GetChanInfo returns the latest authenticated network announcement for the 7118 // given channel identified by either its channel ID or a channel outpoint. Both 7119 // uniquely identify the location of the transaction's funding output within the 7120 // blockchain. The former is an 8-byte integer, while the latter is a string 7121 // formatted as funding_txid:output_index. 7122 func (r *rpcServer) GetChanInfo(ctx context.Context, 7123 in *lnrpc.ChanInfoRequest) (*lnrpc.ChannelEdge, error) { 7124 7125 graph := r.server.graphDB 7126 7127 var ( 7128 edgeInfo *models.ChannelEdgeInfo 7129 edge1, edge2 *models.ChannelEdgePolicy 7130 err error 7131 ) 7132 7133 switch { 7134 case in.ChanId != 0: 7135 edgeInfo, edge1, edge2, err = graph.FetchChannelEdgesByID( 7136 ctx, in.ChanId, 7137 ) 7138 7139 case in.ChanPoint != "": 7140 var chanPoint *wire.OutPoint 7141 chanPoint, err = wire.NewOutPointFromString(in.ChanPoint) 7142 if err != nil { 7143 return nil, err 7144 } 7145 edgeInfo, edge1, edge2, err = graph.FetchChannelEdgesByOutpoint( 7146 ctx, chanPoint, 7147 ) 7148 7149 default: 7150 return nil, fmt.Errorf("specify either chan_id or chan_point") 7151 } 7152 switch { 7153 case errors.Is(err, graphdb.ErrEdgeNotFound): 7154 return nil, status.Error(codes.NotFound, err.Error()) 7155 case err != nil: 7156 return nil, err 7157 } 7158 7159 // Convert the database's edge format into the network/RPC edge format 7160 // which couples the edge itself along with the directional node 7161 // routing policies of each node involved within the channel. 7162 channelEdge := marshalDBEdge( 7163 edgeInfo, edge1, edge2, in.IncludeAuthProof, 7164 ) 7165 7166 return channelEdge, nil 7167 } 7168 7169 // GetNodeInfo returns the latest advertised and aggregate authenticated 7170 // channel information for the specified node identified by its public key. 7171 func (r *rpcServer) GetNodeInfo(ctx context.Context, 7172 in *lnrpc.NodeInfoRequest) (*lnrpc.NodeInfo, error) { 7173 7174 if in.IncludeAuthProof && !in.IncludeChannels { 7175 return nil, fmt.Errorf("include_auth_proof depends on " + 7176 "include_channels") 7177 } 7178 7179 graph := r.server.v1Graph 7180 7181 // First, parse the hex-encoded public key into a full in-memory public 7182 // key object we can work with for querying. 7183 pubKey, err := route.NewVertexFromStr(in.PubKey) 7184 if err != nil { 7185 return nil, err 7186 } 7187 7188 // With the public key decoded, attempt to fetch the node corresponding 7189 // to this public key. If the node cannot be found, then an error will 7190 // be returned. 7191 node, err := graph.FetchNode(ctx, pubKey) 7192 switch { 7193 case errors.Is(err, graphdb.ErrGraphNodeNotFound): 7194 return nil, status.Error(codes.NotFound, err.Error()) 7195 case err != nil: 7196 return nil, err 7197 } 7198 7199 // With the node obtained, we'll now iterate through all its outgoing 7200 // edges to gather some basic statistics about its outgoing channels. 7201 var ( 7202 numChannels uint32 7203 totalCapacity btcutil.Amount 7204 channels []*lnrpc.ChannelEdge 7205 ) 7206 7207 err = graph.ForEachNodeChannel( 7208 ctx, node.PubKeyBytes, 7209 func(edge *models.ChannelEdgeInfo, 7210 c1, c2 *models.ChannelEdgePolicy) error { 7211 7212 numChannels++ 7213 totalCapacity += edge.Capacity 7214 7215 // Only populate the node's channels if the user 7216 // requested them. 7217 if in.IncludeChannels { 7218 // Do not include unannounced channels - private 7219 // channels or public channels whose 7220 // authentication proofs have not yet been 7221 // confirmed. 7222 if edge.AuthProof == nil { 7223 return nil 7224 } 7225 7226 // Convert the database's edge format into the 7226 // network/RPC edge format.
7227 channelEdge := marshalDBEdge( 7228 edge, c1, c2, in.IncludeAuthProof, 7229 ) 7230 channels = append(channels, channelEdge) 7231 } 7232 7233 return nil 7234 }, func() { 7235 numChannels = 0 7236 totalCapacity = 0 7237 channels = nil 7238 }, 7239 ) 7240 if err != nil { 7241 return nil, err 7242 } 7243 7244 return &lnrpc.NodeInfo{ 7245 Node: marshalNode(node), 7246 NumChannels: numChannels, 7247 TotalCapacity: int64(totalCapacity), 7248 Channels: channels, 7249 }, nil 7250 } 7251 7252 func marshalNode(node *models.Node) *lnrpc.LightningNode { 7253 nodeAddrs := make([]*lnrpc.NodeAddress, len(node.Addresses)) 7254 for i, addr := range node.Addresses { 7255 nodeAddr := &lnrpc.NodeAddress{ 7256 Network: addr.Network(), 7257 Addr: addr.String(), 7258 } 7259 nodeAddrs[i] = nodeAddr 7260 } 7261 7262 features := invoicesrpc.CreateRPCFeatures(node.Features) 7263 7264 customRecords := marshalExtraOpaqueData(node.ExtraOpaqueData) 7265 7266 return &lnrpc.LightningNode{ 7267 LastUpdate: uint32(node.LastUpdate.Unix()), 7268 PubKey: hex.EncodeToString(node.PubKeyBytes[:]), 7269 Addresses: nodeAddrs, 7270 Alias: node.Alias.UnwrapOr(""), 7271 Color: graphdb.EncodeHexColor( 7272 node.Color.UnwrapOr(color.RGBA{}), 7273 ), 7274 Features: features, 7275 CustomRecords: customRecords, 7276 } 7277 } 7278 7279 // QueryRoutes attempts to query the daemon's Channel Router for a possible 7280 // route to a target destination capable of carrying a specific amount of 7281 // satoshis within the route's flow. The returned route contains the full 7282 // details required to craft and send an HTLC, also including the necessary 7283 // information that should be present within the Sphinx packet encapsulated 7284 // within the HTLC. 7285 // 7286 // TODO(roasbeef): should return a slice of routes in reality 7287 // - create separate PR to send based on well formatted route 7288 func (r *rpcServer) QueryRoutes(ctx context.Context, 7289 in *lnrpc.QueryRoutesRequest) (*lnrpc.QueryRoutesResponse, error) { 7290 7291 return r.routerBackend.QueryRoutes(ctx, in) 7292 } 7293 7294 // GetNetworkInfo returns some basic stats about the known channel graph from 7295 // the PoV of the node. 7296 func (r *rpcServer) GetNetworkInfo(ctx context.Context, 7297 _ *lnrpc.NetworkInfoRequest) (*lnrpc.NetworkInfo, error) { 7298 7299 graph := r.server.graphDB 7300 7301 var ( 7302 numNodes uint32 7303 numChannels uint32 7304 maxChanOut uint32 7305 totalNetworkCapacity btcutil.Amount 7306 minChannelSize btcutil.Amount = math.MaxInt64 7307 maxChannelSize btcutil.Amount 7308 medianChanSize btcutil.Amount 7309 ) 7310 7311 // We'll use this map to de-duplicate channels during our traversal. 7312 // This is needed since channels are directional, so there will be two 7313 // edges for each channel within the graph. 7314 seenChans := make(map[uint64]struct{}) 7315 7316 // We also keep a list of all encountered capacities, in order to 7317 // calculate the median channel size. 7318 var allChans []btcutil.Amount 7319 7320 // We'll run through all the known nodes within our view of the 7321 // network, tallying up the total number of nodes, and also gathering 7322 // each node so we can measure the graph diameter and degree stats 7323 // below. 7324 err := graph.ForEachNodeCached(ctx, false, func(ctx context.Context, 7325 node route.Vertex, _ []net.Addr, 7326 edges map[uint64]*graphdb.DirectedChannel) error { 7327 7328 // Increment the total number of nodes with each iteration.
7329 numNodes++ 7330 7331 // For each channel we'll compute the out degree of each node, 7332 // and also update our running tallies of the min/max channel 7333 // capacity, as well as the total channel capacity. We pass 7334 // through the db transaction from the outer view so we can 7335 // re-use it within this inner view. 7336 var outDegree uint32 7337 for _, edge := range edges { 7338 // Bump up the out degree for this node for each 7339 // channel encountered. 7340 outDegree++ 7341 7342 // If we've already seen this channel, then we'll 7343 // skip it to ensure that we don't double-count 7344 // stats. 7345 if _, ok := seenChans[edge.ChannelID]; ok { 7346 continue 7347 } 7348 7349 // Compare the capacity of this channel against the 7350 // running min/max to see if we should update the 7351 // extrema. 7352 chanCapacity := edge.Capacity 7353 if chanCapacity < minChannelSize { 7354 minChannelSize = chanCapacity 7355 } 7356 if chanCapacity > maxChannelSize { 7357 maxChannelSize = chanCapacity 7358 } 7359 7360 // Add the capacity of this channel to the total 7361 // network-wide capacity. 7362 totalNetworkCapacity += chanCapacity 7363 7364 numChannels++ 7365 7366 seenChans[edge.ChannelID] = struct{}{} 7367 allChans = append(allChans, edge.Capacity) 7368 } 7369 7370 // Finally, if the out degree of this node is greater than what 7371 // we've seen so far, update the maxChanOut variable. 7372 if outDegree > maxChanOut { 7373 maxChanOut = outDegree 7374 } 7375 7376 return nil 7377 }, func() { 7378 numChannels = 0 7379 numNodes = 0 7380 maxChanOut = 0 7381 totalNetworkCapacity = 0 7382 minChannelSize = math.MaxInt64 7383 maxChannelSize = 0 7384 allChans = nil 7385 clear(seenChans) 7386 }) 7387 if err != nil { 7388 return nil, err 7389 } 7390 7391 // Query the graph for the current number of zombie channels. 7392 numZombies, err := graph.NumZombies(ctx) 7393 if err != nil { 7394 return nil, err 7395 } 7396 7397 // Find the median. 7398 medianChanSize = autopilot.Median(allChans) 7399 7400 // If we don't have any channels, then reset the minChannelSize to zero 7401 // to avoid outputting the math.MaxInt64 sentinel in the encoded JSON. 7402 if numChannels == 0 { 7403 minChannelSize = 0 7404 } 7405 7406 // Graph diameter. 7407 channelGraph := autopilot.ChannelGraphFromCachedDatabase(graph) 7408 simpleGraph, err := autopilot.NewSimpleGraph(ctx, channelGraph) 7409 if err != nil { 7410 return nil, err 7411 } 7412 start := time.Now() 7413 diameter := simpleGraph.DiameterRadialCutoff() 7414 rpcsLog.Infof("elapsed time for diameter (%d) calculation: %v", diameter, 7415 time.Since(start)) 7416 7417 // TODO(roasbeef): also add oldest channel? 7418 netInfo := &lnrpc.NetworkInfo{ 7419 GraphDiameter: diameter, 7420 MaxOutDegree: maxChanOut, 7421 AvgOutDegree: float64(2*numChannels) / float64(numNodes), 7422 NumNodes: numNodes, 7423 NumChannels: numChannels, 7424 TotalNetworkCapacity: int64(totalNetworkCapacity), 7425 AvgChannelSize: float64(totalNetworkCapacity) / float64(numChannels), 7426 7427 MinChannelSize: int64(minChannelSize), 7428 MaxChannelSize: int64(maxChannelSize), 7429 MedianChannelSizeSat: int64(medianChanSize), 7430 NumZombieChans: numZombies, 7431 } 7432 7433 // Similarly, if we don't have any channels, then we'll also set the 7434 // average channel size to zero in order to avoid weird JSON encoding 7435 // outputs.
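// A concrete illustration of the JSON hazard guarded against here
// (hedged, standalone sketch): IEEE 754 float division by zero yields
// +Inf or NaN rather than panicking in Go, and neither value is
// representable in encoded JSON.
//
//	var zero float64
//	fmt.Println(1 / zero)    // +Inf
//	fmt.Println(zero / zero) // NaN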
7436 if numChannels == 0 { 7437 netInfo.AvgChannelSize = 0 7438 } 7439 7440 return netInfo, nil 7441 } 7442 7443 // StopDaemon will send a shutdown request to the interrupt handler, triggering 7444 // a graceful shutdown of the daemon. 7445 func (r *rpcServer) StopDaemon(_ context.Context, 7446 _ *lnrpc.StopRequest) (*lnrpc.StopResponse, error) { 7447 7448 // Before we even consider a shutdown, are we currently in recovery 7449 // mode? We don't want to allow shutting down during recovery because 7450 // that would mean the user would have to manually continue the rescan 7451 // process next time by using `lncli unlock --recovery_window X` 7452 // otherwise some funds wouldn't be picked up. 7453 isRecoveryMode, progress, err := r.server.cc.Wallet.GetRecoveryInfo() 7454 if err != nil { 7455 return nil, fmt.Errorf("unable to get wallet recovery info: %w", 7456 err) 7457 } 7458 if isRecoveryMode && progress < 1 { 7459 return nil, fmt.Errorf("wallet recovery in progress, cannot " + 7460 "shut down, please wait until rescan finishes") 7461 } 7462 7463 r.interceptor.RequestShutdown() 7464 7465 return &lnrpc.StopResponse{ 7466 Status: "shutdown initiated, check logs for progress", 7467 }, nil 7468 } 7469 7470 // SubscribeChannelGraph launches a streaming RPC that allows the caller to 7471 // receive notifications upon any changes to the channel graph topology from 7472 // the point of view of the responding node. Events notified include: new 7473 // nodes coming online, nodes updating their authenticated attributes, new 7474 // channels being advertised, updates in the routing policy for a directional 7475 // channel edge, and finally when prior channels are closed on-chain. 7476 func (r *rpcServer) SubscribeChannelGraph(req *lnrpc.GraphTopologySubscription, 7477 updateStream lnrpc.Lightning_SubscribeChannelGraphServer) error { 7478 7479 // First, we start by subscribing to a new intent to receive 7480 // notifications from the channel router. 7481 client, err := r.server.graphDB.SubscribeTopology() 7482 if err != nil { 7483 return err 7484 } 7485 7486 // Ensure that the resources for the topology update client are cleaned 7487 // up once either the server or the client exits. 7488 defer client.Cancel() 7489 7490 for { 7491 select { 7492 7493 // A new update has been sent by the channel router, we'll 7494 // marshal it into the form expected by the gRPC client, then 7495 // send it off. 7496 case topChange, ok := <-client.TopologyChanges: 7497 // If the second value from the channel read is false, 7498 // then this means that the channel router is exiting 7499 // or the notification client was canceled. So we'll 7500 // exit early. 7501 if !ok { 7502 return errors.New("server shutting down") 7503 } 7504 7505 // Convert the struct from the channel router into the 7506 // form expected by the gRPC service then send it off 7507 // to the client. 7508 graphUpdate := marshallTopologyChange(topChange) 7509 if err := updateStream.Send(graphUpdate); err != nil { 7510 return err 7511 } 7512 7513 // The response stream's context for whatever reason has been 7514 // closed. If context is closed by an exceeded deadline 7515 // we will return an error. 7516 case <-updateStream.Context().Done(): 7517 if errors.Is(updateStream.Context().Err(), context.Canceled) { 7518 return nil 7519 } 7520 return updateStream.Context().Err() 7521 7522 // The server is quitting, so we'll exit immediately. Returning 7523 // nil will close the client's read end of the stream.
7524 case <-r.quit: 7525 return nil 7526 } 7527 } 7528 } 7529 7530 // marshallTopologyChange performs a mapping from the topology change struct 7531 // returned by the router to the form of notifications expected by the current 7532 // gRPC service. 7533 func marshallTopologyChange( 7534 topChange *graphdb.TopologyChange) *lnrpc.GraphTopologyUpdate { 7535 7536 // encodeKey is a simple helper function that converts a live public 7537 // key into a hex-encoded version of the compressed serialization for 7538 // the public key. 7539 encodeKey := func(k *btcec.PublicKey) string { 7540 return hex.EncodeToString(k.SerializeCompressed()) 7541 } 7542 7543 nodeUpdates := make([]*lnrpc.NodeUpdate, len(topChange.NodeUpdates)) 7544 for i, nodeUpdate := range topChange.NodeUpdates { 7545 nodeAddrs := make( 7546 []*lnrpc.NodeAddress, 0, len(nodeUpdate.Addresses), 7547 ) 7548 for _, addr := range nodeUpdate.Addresses { 7549 nodeAddr := &lnrpc.NodeAddress{ 7550 Network: addr.Network(), 7551 Addr: addr.String(), 7552 } 7553 nodeAddrs = append(nodeAddrs, nodeAddr) 7554 } 7555 7556 addrs := make([]string, len(nodeUpdate.Addresses)) 7557 for i, addr := range nodeUpdate.Addresses { 7558 addrs[i] = addr.String() 7559 } 7560 7561 nodeUpdates[i] = &lnrpc.NodeUpdate{ 7562 Addresses: addrs, 7563 NodeAddresses: nodeAddrs, 7564 IdentityKey: encodeKey(nodeUpdate.IdentityKey), 7565 Alias: nodeUpdate.Alias, 7566 Color: nodeUpdate.Color, 7567 Features: invoicesrpc.CreateRPCFeatures( 7568 nodeUpdate.Features, 7569 ), 7570 } 7571 } 7572 7573 channelUpdates := make([]*lnrpc.ChannelEdgeUpdate, len(topChange.ChannelEdgeUpdates)) 7574 for i, channelUpdate := range topChange.ChannelEdgeUpdates { 7575 7576 customRecords := marshalPolicyExtraOpaqueData( 7577 channelUpdate.ExtraOpaqueData, 7578 ) 7579 inboundFee := channelUpdate.InboundFee.UnwrapOr(lnwire.Fee{}) 7580 7581 channelUpdates[i] = &lnrpc.ChannelEdgeUpdate{ 7582 ChanId: channelUpdate.ChanID, 7583 ChanPoint: &lnrpc.ChannelPoint{ 7584 FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ 7585 FundingTxidBytes: channelUpdate.ChanPoint.Hash[:], 7586 }, 7587 OutputIndex: channelUpdate.ChanPoint.Index, 7588 }, 7589 Capacity: int64(channelUpdate.Capacity), 7590 RoutingPolicy: &lnrpc.RoutingPolicy{ 7591 TimeLockDelta: uint32( 7592 channelUpdate.TimeLockDelta, 7593 ), 7594 MinHtlc: int64( 7595 channelUpdate.MinHTLC, 7596 ), 7597 MaxHtlcMsat: uint64( 7598 channelUpdate.MaxHTLC, 7599 ), 7600 FeeBaseMsat: int64( 7601 channelUpdate.BaseFee, 7602 ), 7603 FeeRateMilliMsat: int64( 7604 channelUpdate.FeeRate, 7605 ), 7606 Disabled: channelUpdate.Disabled, 7607 InboundFeeBaseMsat: inboundFee.BaseFee, 7608 InboundFeeRateMilliMsat: inboundFee.FeeRate, 7609 CustomRecords: customRecords, 7610 }, 7611 AdvertisingNode: encodeKey(channelUpdate.AdvertisingNode), 7612 ConnectingNode: encodeKey(channelUpdate.ConnectingNode), 7613 } 7614 } 7615 7616 closedChans := make([]*lnrpc.ClosedChannelUpdate, len(topChange.ClosedChannels)) 7617 for i, closedChan := range topChange.ClosedChannels { 7618 closedChans[i] = &lnrpc.ClosedChannelUpdate{ 7619 ChanId: closedChan.ChanID, 7620 Capacity: int64(closedChan.Capacity), 7621 ClosedHeight: closedChan.ClosedHeight, 7622 ChanPoint: &lnrpc.ChannelPoint{ 7623 FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ 7624 FundingTxidBytes: closedChan.ChanPoint.Hash[:], 7625 }, 7626 OutputIndex: closedChan.ChanPoint.Index, 7627 }, 7628 } 7629 } 7630 7631 return &lnrpc.GraphTopologyUpdate{ 7632 NodeUpdates: nodeUpdates, 7633 ChannelUpdates: channelUpdates, 7634 ClosedChans: 
closedChans, 7635 } 7636 } 7637 7638 // ListPayments returns a list of outgoing payments determined by a paginated 7639 // database query. 7640 func (r *rpcServer) ListPayments(ctx context.Context, 7641 req *lnrpc.ListPaymentsRequest) (*lnrpc.ListPaymentsResponse, error) { 7642 7643 // If both dates are set, we check that the start date is less than the 7644 // end date, otherwise we'll get an empty result. 7645 if req.CreationDateStart != 0 && req.CreationDateEnd != 0 { 7646 if req.CreationDateStart >= req.CreationDateEnd { 7647 return nil, fmt.Errorf("start date(%v) must be before "+ 7648 "end date(%v)", req.CreationDateStart, 7649 req.CreationDateEnd) 7650 } 7651 } 7652 7653 query := paymentsdb.Query{ 7654 IndexOffset: req.IndexOffset, 7655 MaxPayments: req.MaxPayments, 7656 Reversed: req.Reversed, 7657 IncludeIncomplete: req.IncludeIncomplete, 7658 CountTotal: req.CountTotalPayments, 7659 CreationDateStart: int64(req.CreationDateStart), 7660 CreationDateEnd: int64(req.CreationDateEnd), 7661 } 7662 7663 // If the maximum number of payments wasn't specified, we default to 7664 // a reasonable number to prevent resource exhaustion, since all of the 7665 // payments are fetched into memory. Moreover, we want our daemon to 7666 // remain stable and able to serve other requests, not just payments. 7667 // 7668 // TODO(ziggie): Choose a more specific default value when results of 7669 // performance testing are available. 7670 if req.MaxPayments == 0 { 7671 query.MaxPayments = paymentsdb.DefaultMaxPayments 7672 } 7673 7674 paymentsQuerySlice, err := r.server.paymentsDB.QueryPayments( 7675 ctx, query, 7676 ) 7677 if err != nil { 7678 return nil, err 7679 } 7680 7681 paymentsResp := &lnrpc.ListPaymentsResponse{ 7682 LastIndexOffset: paymentsQuerySlice.LastIndexOffset, 7683 FirstIndexOffset: paymentsQuerySlice.FirstIndexOffset, 7684 TotalNumPayments: paymentsQuerySlice.TotalCount, 7685 } 7686 7687 for _, payment := range paymentsQuerySlice.Payments { 7688 payment := payment 7689 7690 rpcPayment, err := r.routerBackend.MarshallPayment(payment) 7691 if err != nil { 7692 return nil, err 7693 } 7694 7695 paymentsResp.Payments = append( 7696 paymentsResp.Payments, rpcPayment, 7697 ) 7698 } 7699 7700 return paymentsResp, nil 7701 } 7702 7703 // DeleteCanceledInvoice removes a canceled invoice from the database. 7704 func (r *rpcServer) DeleteCanceledInvoice(ctx context.Context, 7705 req *lnrpc.DelCanceledInvoiceReq) (*lnrpc.DelCanceledInvoiceResp, 7706 error) { 7707 7708 if req.InvoiceHash == "" { 7709 return nil, invoices.ErrNoInvoiceHash 7710 } 7711 7712 hash, err := lntypes.MakeHashFromStr(req.InvoiceHash) 7713 if err != nil { 7714 return nil, err 7715 } 7716 7717 invoice, err := r.server.invoices.LookupInvoice(ctx, hash) 7718 if err != nil { 7719 return nil, err 7720 } 7721 7722 if invoice.State != invoices.ContractCanceled { 7723 return nil, invoices.ErrInvoiceNotCanceled 7724 } 7725 7726 err = r.server.invoicesDB.DeleteInvoice(ctx, 7727 []invoices.InvoiceDeleteRef{ 7728 { 7729 PayHash: hash, 7730 AddIndex: invoice.AddIndex, 7731 }, 7732 }, 7733 ) 7734 if err != nil { 7735 return nil, err 7736 } 7737 7738 return &lnrpc.DelCanceledInvoiceResp{Status: fmt.Sprintf("canceled "+ 7739 "invoice deleted successfully: invoice hash %v", hash)}, nil 7740 } 7741 7742 // DeletePayment deletes a payment from the DB given its payment hash. If 7743 // failedHtlcsOnly is set, only failed HTLC attempts of the payment will be 7744 // deleted.
7745 func (r *rpcServer) DeletePayment(ctx context.Context, 7746 req *lnrpc.DeletePaymentRequest) ( 7747 *lnrpc.DeletePaymentResponse, error) { 7748 7749 hash, err := lntypes.MakeHash(req.PaymentHash) 7750 if err != nil { 7751 return nil, err 7752 } 7753 7754 rpcsLog.Infof("[DeletePayment] payment_identifier=%v, "+ 7755 "failed_htlcs_only=%v", hash, req.FailedHtlcsOnly) 7756 7757 err = r.server.paymentsDB.DeletePayment(ctx, hash, req.FailedHtlcsOnly) 7758 if err != nil { 7759 return nil, err 7760 } 7761 7762 return &lnrpc.DeletePaymentResponse{ 7763 Status: "payment deleted", 7764 }, nil 7765 } 7766 7767 // DeleteAllPayments deletes all outgoing payments from DB. 7768 func (r *rpcServer) DeleteAllPayments(ctx context.Context, 7769 req *lnrpc.DeleteAllPaymentsRequest) ( 7770 *lnrpc.DeleteAllPaymentsResponse, error) { 7771 7772 switch { 7773 // Since this is a destructive operation, at least one of the options 7774 // must be set to true. 7775 case !req.AllPayments && !req.FailedPaymentsOnly && 7776 !req.FailedHtlcsOnly: 7777 7778 return nil, fmt.Errorf("at least one of the options " + 7779 "`all_payments`, `failed_payments_only`, or " + 7780 "`failed_htlcs_only` must be set to true") 7781 7782 // `all_payments` cannot be true with `failed_payments_only` or 7783 // `failed_htlcs_only`. `all_payments` includes all records, making 7784 // these options contradictory. 7785 case req.AllPayments && 7786 (req.FailedPaymentsOnly || req.FailedHtlcsOnly): 7787 7788 return nil, fmt.Errorf("`all_payments` cannot be set to true " + 7789 "while either `failed_payments_only` or " + 7790 "`failed_htlcs_only` is also set to true") 7791 } 7792 7793 rpcsLog.Infof("[DeleteAllPayments] failed_payments_only=%v, "+ 7794 "failed_htlcs_only=%v", req.FailedPaymentsOnly, 7795 req.FailedHtlcsOnly) 7796 7797 numDeletedPayments, err := r.server.paymentsDB.DeletePayments( 7798 ctx, req.FailedPaymentsOnly, req.FailedHtlcsOnly, 7799 ) 7800 if err != nil { 7801 return nil, err 7802 } 7803 7804 return &lnrpc.DeleteAllPaymentsResponse{ 7805 Status: fmt.Sprintf("%v payments deleted, failed_htlcs_only=%v", 7806 numDeletedPayments, req.FailedHtlcsOnly), 7807 }, nil 7808 } 7809 7810 // DebugLevel allows a caller to programmatically set the logging verbosity of 7811 // lnd. The logging can be targeted according to a coarse daemon-wide logging 7812 // level, or in a granular fashion to specify the logging for a target 7813 // sub-system. 7814 func (r *rpcServer) DebugLevel(ctx context.Context, 7815 req *lnrpc.DebugLevelRequest) (*lnrpc.DebugLevelResponse, error) { 7816 7817 // If show is set, then we simply print out the list of available 7818 // sub-systems. 7819 if req.Show { 7820 return &lnrpc.DebugLevelResponse{ 7821 SubSystems: strings.Join( 7822 r.cfg.SubLogMgr.SupportedSubsystems(), " ", 7823 ), 7824 }, nil 7825 } 7826 7827 rpcsLog.Infof("[debuglevel] changing debug level to: %v", req.LevelSpec) 7828 7829 // Otherwise, we'll attempt to set the logging level using the 7830 // specified level spec. 7831 err := build.ParseAndSetDebugLevels(req.LevelSpec, r.cfg.SubLogMgr) 7832 if err != nil { 7833 return nil, err 7834 } 7835 7836 subLoggers := r.cfg.SubLogMgr.SubLoggers() 7837 // Sort alphabetically by subsystem name. 7838 var tags []string 7839 for t := range subLoggers { 7840 tags = append(tags, t) 7841 } 7842 sort.Strings(tags) 7843 7844 // Create the log levels string. 
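// For reference, a hedged sketch of the level specs that
// build.ParseAndSetDebugLevels (used above) accepts: either one global
// level, or a comma-separated list of per-subsystem overrides, with
// subsystem names as printed by the show flag (values here are
// illustrative):
//
//	"debug"                      // every subsystem at debug
//	"info,PEER=trace"            // info globally, but trace for PEER
//	"warn,RPCS=debug,HSWC=debug" // multiple overrides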
7845 var logLevels []string 7846 for _, t := range tags { 7847 logLevels = append(logLevels, fmt.Sprintf("%s=%s", t, 7848 subLoggers[t].Level().String())) 7849 } 7850 logLevelsString := strings.Join(logLevels, ", ") 7851 7852 // Propagate the new config level to the main config struct. 7853 r.cfg.DebugLevel = logLevelsString 7854 7855 return &lnrpc.DebugLevelResponse{ 7856 SubSystems: logLevelsString, 7857 }, nil 7858 } 7859 7860 // DecodePayReq takes an encoded payment request string and attempts to decode 7861 // it, returning a full description of the conditions encoded within the 7862 // payment request. 7863 func (r *rpcServer) DecodePayReq(ctx context.Context, 7864 req *lnrpc.PayReqString) (*lnrpc.PayReq, error) { 7865 7866 rpcsLog.Tracef("[decodepayreq] decoding: %v", req.PayReq) 7867 7868 // First, we'll attempt to decode the payment request string; if the 7869 // request is invalid or the checksum doesn't match, then we'll exit 7870 // here with an error. 7871 payReq, err := zpay32.Decode(req.PayReq, r.cfg.ActiveNetParams.Params) 7872 if err != nil { 7873 return nil, err 7874 } 7875 7876 // Let the fields default to empty strings. 7877 desc := "" 7878 if payReq.Description != nil { 7879 desc = *payReq.Description 7880 } 7881 7882 descHash := []byte("") 7883 if payReq.DescriptionHash != nil { 7884 descHash = payReq.DescriptionHash[:] 7885 } 7886 7887 fallbackAddr := "" 7888 if payReq.FallbackAddr != nil { 7889 fallbackAddr = payReq.FallbackAddr.String() 7890 } 7891 7892 // Expiry time will default to 3600 seconds if not specified 7893 // explicitly. 7894 expiry := int64(payReq.Expiry().Seconds()) 7895 7896 // Convert between the `lnrpc` and `routing` types. 7897 routeHints := invoicesrpc.CreateRPCRouteHints(payReq.RouteHints) 7898 7899 blindedPaymentPaths, err := invoicesrpc.CreateRPCBlindedPayments( 7900 payReq.BlindedPaymentPaths, 7901 ) 7902 if err != nil { 7903 return nil, err 7904 } 7905 7906 var amtSat, amtMsat int64 7907 if payReq.MilliSat != nil { 7908 amtSat = int64(payReq.MilliSat.ToSatoshis()) 7909 amtMsat = int64(*payReq.MilliSat) 7910 } 7911 7912 // Extract the payment address from the payment request, if present. 7913 paymentAddr := payReq.PaymentAddr.UnwrapOr([32]byte{}) 7914 7915 dest := payReq.Destination.SerializeCompressed() 7916 return &lnrpc.PayReq{ 7917 Destination: hex.EncodeToString(dest), 7918 PaymentHash: hex.EncodeToString(payReq.PaymentHash[:]), 7919 NumSatoshis: amtSat, 7920 NumMsat: amtMsat, 7921 Timestamp: payReq.Timestamp.Unix(), 7922 Description: desc, 7923 DescriptionHash: hex.EncodeToString(descHash[:]), 7924 FallbackAddr: fallbackAddr, 7925 Expiry: expiry, 7926 CltvExpiry: int64(payReq.MinFinalCLTVExpiry()), 7927 RouteHints: routeHints, 7928 BlindedPaths: blindedPaymentPaths, 7929 PaymentAddr: paymentAddr[:], 7930 Features: invoicesrpc.CreateRPCFeatures(payReq.Features), 7931 }, nil 7932 } 7933 7934 // feeBase is the fixed point that fee rate computations are performed over. 7935 // Nodes on the network advertise their fee rate using this point as a base. 7936 // This means that the minimal possible fee rate is 1e-6, or 0.000001, or 7937 // 0.0001%. 7938 const feeBase float64 = 1000000 7939
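// A worked example of this fixed point (values illustrative): an
// advertised FeeProportionalMillionths of 2500 corresponds to a
// floating point rate of 2500 / feeBase = 0.0025, i.e. 0.25%, while a
// floating point rate of 0.0001 (0.01%) encodes to 100 parts per
// million:
//
//	ppm := uint32(math.Round(0.0001 * feeBase)) // 100
//	rate := float64(2500) / feeBase             // 0.0025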
7940 // FeeReport allows the caller to obtain a report detailing the current fee 7941 // schedule enforced by the node globally for each channel. 7942 func (r *rpcServer) FeeReport(ctx context.Context, 7943 _ *lnrpc.FeeReportRequest) (*lnrpc.FeeReportResponse, error) { 7944 7945 channelGraph := r.server.v1Graph 7946 selfNode, err := channelGraph.SourceNode(ctx) 7947 if err != nil { 7948 return nil, err 7949 } 7950 7951 var feeReports []*lnrpc.ChannelFeeReport 7952 err = channelGraph.ForEachNodeChannel( 7953 ctx, selfNode.PubKeyBytes, 7954 func(chanInfo *models.ChannelEdgeInfo, 7955 edgePolicy, _ *models.ChannelEdgePolicy) error { 7956 7957 // Self node should always have policies for its 7958 // channels. 7959 if edgePolicy == nil { 7960 return fmt.Errorf("no policy for outgoing "+ 7961 "channel %v ", chanInfo.ChannelID) 7962 } 7963 7964 // We'll compute the effective fee rate by converting 7965 // from a fixed point fee rate to a floating point fee 7966 // rate. The fee rate field in the database is the 7967 // amount of mSAT charged per 1 million mSAT sent, so 7968 // we'll divide by this to get the proper fee rate. 7969 feeRateFixedPoint := 7970 edgePolicy.FeeProportionalMillionths 7971 feeRate := float64(feeRateFixedPoint) / feeBase 7972 7973 inboundFee := edgePolicy.InboundFee.UnwrapOr( 7974 lnwire.Fee{}, 7975 ) 7976 7977 // TODO(roasbeef): also add stats for revenue for each 7978 // channel 7979 feeReports = append(feeReports, &lnrpc.ChannelFeeReport{ 7980 ChanId: chanInfo.ChannelID, 7981 ChannelPoint: chanInfo.ChannelPoint.String(), 7982 BaseFeeMsat: int64(edgePolicy.FeeBaseMSat), 7983 FeePerMil: int64(feeRateFixedPoint), 7984 FeeRate: feeRate, 7985 7986 InboundBaseFeeMsat: inboundFee.BaseFee, 7987 InboundFeePerMil: inboundFee.FeeRate, 7988 }) 7989 7990 return nil 7991 }, func() { 7992 feeReports = nil 7993 }, 7994 ) 7995 if err != nil { 7996 return nil, err 7997 } 7998 7999 fwdEventLog := r.server.miscDB.ForwardingLog() 8000 8001 // computeFeeSum is a helper function that computes the total fees for 8002 // a particular time slice described by a forwarding event query. 8003 computeFeeSum := func(query channeldb.ForwardingEventQuery) (lnwire.MilliSatoshi, error) { 8004 8005 var totalFees lnwire.MilliSatoshi 8006 8007 // We'll continue to fetch the next query and accumulate the 8008 // fees until the next query returns no events. 8009 for { 8010 timeSlice, err := fwdEventLog.Query(query) 8011 if err != nil { 8012 return 0, err 8013 } 8014 8015 // If the timeslice is empty, then we'll return as 8016 // we've retrieved all the entries in this range. 8017 if len(timeSlice.ForwardingEvents) == 0 { 8018 break 8019 } 8020 8021 // Otherwise, we'll tally up and accumulate the total 8022 // fees for this time slice. 8023 for _, event := range timeSlice.ForwardingEvents { 8024 fee := event.AmtIn - event.AmtOut 8025 totalFees += fee 8026 } 8027 8028 // We'll now take the last offset index returned as 8029 // part of this response, and modify our query to start 8030 // at this index. This has a pagination effect in the 8031 // case that our query bounds have more than 100k 8032 // entries. 8033 query.IndexOffset = timeSlice.LastIndexOffset 8034 } 8035 8036 return totalFees, nil 8037 } 8038 8039 now := time.Now() 8040 8041 // Before we perform the queries below, we'll instruct the switch to 8042 // flush any pending events to disk. This ensures we get a complete 8043 // snapshot at this particular time. 8044 if err := r.server.htlcSwitch.FlushForwardingEvents(); err != nil { 8045 return nil, fmt.Errorf("unable to flush forwarding "+ 8046 "events: %v", err) 8047 } 8048
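// A hedged sketch of the pagination contract the computeFeeSum helper
// above relies on: each Query call returns at most NumMaxEvents events
// together with a LastIndexOffset, which is fed back as the next
// IndexOffset until an empty slice signals the end of the range
// (standalone loop; "fwdLog", "start" and "end" are assumed to exist):
//
//	query := channeldb.ForwardingEventQuery{
//		StartTime:    start,
//		EndTime:      end,
//		NumMaxEvents: 1000,
//	}
//	for {
//		slice, err := fwdLog.Query(query)
//		if err != nil || len(slice.ForwardingEvents) == 0 {
//			break
//		}
//		// ...consume slice.ForwardingEvents...
//		query.IndexOffset = slice.LastIndexOffset
//	}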
8049 // In addition to returning the current fee schedule for each channel, 8050 // we'll also perform a series of queries to obtain the total fees 8051 // earned over the past day, week, and month. 8052 dayQuery := channeldb.ForwardingEventQuery{ 8053 StartTime: now.Add(-time.Hour * 24), 8054 EndTime: now, 8055 NumMaxEvents: 1000, 8056 } 8057 dayFees, err := computeFeeSum(dayQuery) 8058 if err != nil { 8059 return nil, fmt.Errorf("unable to retrieve day fees: %w", err) 8060 } 8061 8062 weekQuery := channeldb.ForwardingEventQuery{ 8063 StartTime: now.Add(-time.Hour * 24 * 7), 8064 EndTime: now, 8065 NumMaxEvents: 1000, 8066 } 8067 weekFees, err := computeFeeSum(weekQuery) 8068 if err != nil { 8069 return nil, fmt.Errorf("unable to retrieve week fees: %w", err) 8070 } 8071 8072 monthQuery := channeldb.ForwardingEventQuery{ 8073 StartTime: now.Add(-time.Hour * 24 * 30), 8074 EndTime: now, 8075 NumMaxEvents: 1000, 8076 } 8077 monthFees, err := computeFeeSum(monthQuery) 8078 if err != nil { 8079 return nil, fmt.Errorf("unable to retrieve month fees: %w", err) 8080 } 8081 8082 return &lnrpc.FeeReportResponse{ 8083 ChannelFees: feeReports, 8084 DayFeeSum: uint64(dayFees.ToSatoshis()), 8085 WeekFeeSum: uint64(weekFees.ToSatoshis()), 8086 MonthFeeSum: uint64(monthFees.ToSatoshis()), 8087 }, nil 8088 } 8089 8090 // minFeeRate is the smallest permitted fee rate within the network. This is 8091 // derived from the fact that fee rates are computed using a fixed point of 8092 // 1,000,000. As a result, the smallest representable fee rate is 1e-6, or 8093 // 0.000001, or 0.0001%. 8094 const minFeeRate = 1e-6 8095 8096 // UpdateChannelPolicy allows the caller to update the channel forwarding policy 8097 // for all channels globally, or a particular channel. 8098 func (r *rpcServer) UpdateChannelPolicy(ctx context.Context, 8099 req *lnrpc.PolicyUpdateRequest) (*lnrpc.PolicyUpdateResponse, error) { 8100 8101 var targetChans []wire.OutPoint 8102 switch scope := req.Scope.(type) { 8103 // If the request is targeting all active channels, then we don't need 8104 // to target any channels by their channel point. 8105 case *lnrpc.PolicyUpdateRequest_Global: 8106 8107 // Otherwise, we're targeting an individual channel by its channel 8108 // point. 8109 case *lnrpc.PolicyUpdateRequest_ChanPoint: 8110 txid, err := lnrpc.GetChanPointFundingTxid(scope.ChanPoint) 8111 if err != nil { 8112 return nil, err 8113 } 8114 targetChans = append(targetChans, wire.OutPoint{ 8115 Hash: *txid, 8116 Index: scope.ChanPoint.OutputIndex, 8117 }) 8118 default: 8119 return nil, fmt.Errorf("unknown scope: %v", scope) 8120 } 8121 8122 var feeRateFixed uint32 8123 8124 switch { 8125 // The request should use either the fee rate in percent, or the new 8126 // ppm rate, but not both. 8127 case req.FeeRate != 0 && req.FeeRatePpm != 0: 8128 errMsg := "cannot set both FeeRate and FeeRatePpm at the " + 8129 "same time" 8130 8131 return nil, status.Errorf(codes.InvalidArgument, "%v", errMsg) 8132 8133 // If the request is using fee_rate. 8134 case req.FeeRate != 0: 8135 // As a sanity check, if the fee isn't zero, we'll ensure that 8136 // the passed fee rate isn't below 1e-6, the lowest allowed 8137 // non-zero fee rate expressible within the protocol. 8138 if req.FeeRate != 0 && req.FeeRate < minFeeRate { 8139 return nil, fmt.Errorf("fee rate of %v is too "+ 8140 "small, min fee rate is %v", req.FeeRate, 8141 minFeeRate) 8142 } 8143
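// A hedged illustration of the failure mode the rounding below guards
// against (classic binary64 behavior; the same applies to the product
// req.FeeRate * feeBase, since most decimal inputs have no exact binary
// representation):
//
//	x := 4.35
//	fmt.Println(x * 100)                     // 434.99999999999994
//	fmt.Println(uint32(x * 100))             // 434: truncation loses a unit
//	fmt.Println(uint32(math.Round(x * 100))) // 435, as intended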
8144 // We'll also need to convert the floating point fee rate we 8145 // accept over RPC to the fixed point rate that we use within 8146 // the protocol. We do this by multiplying the passed fee rate 8147 // by the fee base. This gives us the fixed point, scaled by 1 8148 // million, that's used within the protocol. 8149 // 8150 // Because of the limited precision of IEEE 754 floating 8151 // point arithmetic, we need to round the product of the fee 8152 // rate and the fee base. 8153 feeRateFixed = uint32(math.Round(req.FeeRate * feeBase)) 8154 8155 // Otherwise, we use the fee_rate_ppm parameter. 8156 case req.FeeRatePpm != 0: 8157 feeRateFixed = req.FeeRatePpm 8158 } 8159 8160 // We'll also ensure that the user isn't setting a CLTV delta that 8161 // won't give outgoing HTLCs enough time to fully resolve if needed. 8162 if req.TimeLockDelta < minTimeLockDelta { 8163 return nil, fmt.Errorf("time lock delta of %v is too small, "+ 8164 "minimum supported is %v", req.TimeLockDelta, 8165 minTimeLockDelta) 8166 } else if req.TimeLockDelta > uint32(MaxTimeLockDelta) { 8167 return nil, fmt.Errorf("time lock delta of %v is too big, "+ 8168 "maximum supported is %v", req.TimeLockDelta, 8169 MaxTimeLockDelta) 8170 } 8171 8172 // By default, positive inbound fees are rejected. 8173 if !r.cfg.AcceptPositiveInboundFees && req.InboundFee != nil { 8174 if req.InboundFee.BaseFeeMsat > 0 { 8175 return nil, fmt.Errorf("positive values for inbound "+ 8176 "base fee msat are not supported: %v", 8177 req.InboundFee.BaseFeeMsat) 8178 } 8179 if req.InboundFee.FeeRatePpm > 0 { 8180 return nil, fmt.Errorf("positive values for inbound "+ 8181 "fee rate ppm are not supported: %v", 8182 req.InboundFee.FeeRatePpm) 8183 } 8184 } 8185 8186 // If no inbound fees have been specified, we indicate with an empty 8187 // option that the previous inbound fee should be retained during the 8188 // edge update. 8189 inboundFee := fn.None[models.InboundFee]() 8190 if req.InboundFee != nil { 8191 inboundFee = fn.Some(models.InboundFee{ 8192 Base: req.InboundFee.BaseFeeMsat, 8193 Rate: req.InboundFee.FeeRatePpm, 8194 }) 8195 } 8196 8197 baseFeeMsat := lnwire.MilliSatoshi(req.BaseFeeMsat) 8198 feeSchema := routing.FeeSchema{ 8199 BaseFee: baseFeeMsat, 8200 FeeRate: feeRateFixed, 8201 InboundFee: inboundFee, 8202 } 8203 8204 maxHtlc := lnwire.MilliSatoshi(req.MaxHtlcMsat) 8205 var minHtlc *lnwire.MilliSatoshi 8206 if req.MinHtlcMsatSpecified { 8207 min := lnwire.MilliSatoshi(req.MinHtlcMsat) 8208 minHtlc = &min 8209 } 8210 8211 chanPolicy := routing.ChannelPolicy{ 8212 FeeSchema: feeSchema, 8213 TimeLockDelta: req.TimeLockDelta, 8214 MaxHTLC: maxHtlc, 8215 MinHTLC: minHtlc, 8216 } 8217 8218 rpcsLog.Debugf("[updatechanpolicy] updating channel policy, "+ 8219 "targets=%v, req=%v", lnutils.SpewLogClosure(targetChans), 8220 lnutils.SpewLogClosure(req)) 8221 8222 // With the scope resolved, we'll now send this to the local channel 8223 // manager so it can propagate the new policy for our target channel(s). 8224 failedUpdates, err := r.server.localChanMgr.UpdatePolicy( 8225 ctx, chanPolicy, req.CreateMissingEdge, targetChans..., 8226 ) 8227 if err != nil { 8228 return nil, err 8229 } 8230 8231 return &lnrpc.PolicyUpdateResponse{ 8232 FailedUpdates: failedUpdates, 8233 }, nil 8234 } 8235
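// A hedged, client-side sketch of a global policy update (illustrative
// only; assumes "client" is an lnrpc.LightningClient and "ctx" a
// context.Context obtained elsewhere):
//
//	resp, err := client.UpdateChannelPolicy(ctx, &lnrpc.PolicyUpdateRequest{
//		Scope:         &lnrpc.PolicyUpdateRequest_Global{Global: true},
//		BaseFeeMsat:   1000,
//		FeeRatePpm:    500, // 0.05%; avoids the float conversion entirely
//		TimeLockDelta: 80,
//		MaxHtlcMsat:   100_000_000,
//	})
//	if err != nil {
//		return err
//	}
//	// resp.FailedUpdates lists any channels the update could not be
//	// applied to.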
8236 // ForwardingHistory allows the caller to query the htlcswitch for a record of 8237 // all HTLCs forwarded within the target time range, and integer offset within 8238 // that time range. If no time-range is specified, then the first chunk of the 8239 // past 24 hrs of forwarding history is returned. 8240 // 8241 // A list of forwarding events is returned. The size of each forwarding event 8242 // is 40 bytes, and the max message size able to be returned in gRPC is 4 MiB. 8243 // In order to safely stay under this max limit, we'll return 50k events per 8244 // response. Each response has the index offset of the last entry. The index 8245 // offset can be provided to the request to allow the caller to skip a series 8246 // of records. 8247 func (r *rpcServer) ForwardingHistory(ctx context.Context, 8248 req *lnrpc.ForwardingHistoryRequest) (*lnrpc.ForwardingHistoryResponse, 8249 error) { 8250 8251 // Before we perform the queries below, we'll instruct the switch to 8252 // flush any pending events to disk. This ensures we get a complete 8253 // snapshot at this particular time. 8254 if err := r.server.htlcSwitch.FlushForwardingEvents(); err != nil { 8255 return nil, fmt.Errorf("unable to flush forwarding "+ 8256 "events: %v", err) 8257 } 8258 8259 var ( 8260 startTime, endTime time.Time 8261 8262 numEvents uint32 8263 ) 8264 8265 // startTime defaults to the Unix epoch (0 unixtime, or 8266 // midnight 01-01-1970). 8267 startTime = time.Unix(int64(req.StartTime), 0) 8268 8269 // If the end time wasn't specified, assume a default end time of now. 8270 if req.EndTime == 0 { 8271 now := time.Now() 8272 endTime = now 8273 } else { 8274 endTime = time.Unix(int64(req.EndTime), 0) 8275 } 8276 8277 // If the number of events wasn't specified, then we'll default to 8278 // returning the last 100 events. 8279 numEvents = req.NumMaxEvents 8280 if numEvents == 0 { 8281 numEvents = 100 8282 } 8283 8284 // Create sets of incoming and outgoing channel IDs from the request 8285 // for faster lookups for filtering. 8286 incomingChanIDs := fn.NewSet(req.IncomingChanIds...) 8287 outgoingChanIDs := fn.NewSet(req.OutgoingChanIds...) 8288 8289 // Next, we'll map the proto request into a format that is understood by 8290 // the forwarding log. 8291 eventQuery := channeldb.ForwardingEventQuery{ 8292 StartTime: startTime, 8293 EndTime: endTime, 8294 IndexOffset: req.IndexOffset, 8295 NumMaxEvents: numEvents, 8296 IncomingChanIDs: incomingChanIDs, 8297 OutgoingChanIDs: outgoingChanIDs, 8298 } 8299 timeSlice, err := r.server.miscDB.ForwardingLog().Query(eventQuery) 8300 if err != nil { 8301 return nil, fmt.Errorf("unable to query forwarding log: %w", 8302 err) 8303 } 8304 8305 // chanToPeerAlias caches previously looked up channel information. 8306 chanToPeerAlias := make(map[lnwire.ShortChannelID]string) 8307 8308 // Helper function to extract a peer's node alias given its SCID. 8309 getRemoteAlias := func(chanID lnwire.ShortChannelID) (string, error) { 8310 // If we'd previously seen this chanID then return the cached 8311 // peer alias. 8312 if peerAlias, ok := chanToPeerAlias[chanID]; ok { 8313 return peerAlias, nil 8314 } 8315 8316 // Else call the server to look up the peer alias. 8317 edge, err := r.GetChanInfo(ctx, &lnrpc.ChanInfoRequest{ 8318 ChanId: chanID.ToUint64(), 8319 }) 8320 if err != nil { 8321 return "", err 8322 } 8323 8324 remotePub := edge.Node1Pub 8325 if r.selfNode.String() == edge.Node1Pub { 8326 remotePub = edge.Node2Pub 8327 } 8328 8329 vertex, err := route.NewVertexFromStr(remotePub) 8330 if err != nil { 8331 return "", err 8332 } 8333 8334 peer, err := r.server.v1Graph.FetchNode(ctx, vertex) 8335 if err != nil { 8336 return "", err 8337 } 8338 8339 // Cache the peer alias. 8340 chanToPeerAlias[chanID] = peer.Alias.UnwrapOr("") 8341 8342 return peer.Alias.UnwrapOr(""), nil 8343 } 8344 8345 // TODO(roasbeef): add settlement latency?
8346 // * use FPE on all records? 8347 8348 // With the events retrieved, we'll now map them into the proper proto 8349 // response. 8350 // 8351 // TODO(roasbeef): show in ns for the outside? 8352 fwdingEvents := make( 8353 []*lnrpc.ForwardingEvent, len(timeSlice.ForwardingEvents), 8354 ) 8355 resp := &lnrpc.ForwardingHistoryResponse{ 8356 ForwardingEvents: fwdingEvents, 8357 LastOffsetIndex: timeSlice.LastIndexOffset, 8358 } 8359 for i, event := range timeSlice.ForwardingEvents { 8360 amtInMsat := event.AmtIn 8361 amtOutMsat := event.AmtOut 8362 feeMsat := event.AmtIn - event.AmtOut 8363 8364 resp.ForwardingEvents[i] = &lnrpc.ForwardingEvent{ 8365 Timestamp: uint64(event.Timestamp.Unix()), 8366 TimestampNs: uint64(event.Timestamp.UnixNano()), 8367 ChanIdIn: event.IncomingChanID.ToUint64(), 8368 ChanIdOut: event.OutgoingChanID.ToUint64(), 8369 AmtIn: uint64(amtInMsat.ToSatoshis()), 8370 AmtOut: uint64(amtOutMsat.ToSatoshis()), 8371 Fee: uint64(feeMsat.ToSatoshis()), 8372 FeeMsat: uint64(feeMsat), 8373 AmtInMsat: uint64(amtInMsat), 8374 AmtOutMsat: uint64(amtOutMsat), 8375 } 8376 8377 // If the incoming htlc id is present, add it to the response. 8378 event.IncomingHtlcID.WhenSome(func(id uint64) { 8379 resp.ForwardingEvents[i].IncomingHtlcId = &id 8380 }) 8381 8382 // If the outgoing htlc id is present, add it to the response. 8383 event.OutgoingHtlcID.WhenSome(func(id uint64) { 8384 resp.ForwardingEvents[i].OutgoingHtlcId = &id 8385 }) 8386 8387 if req.PeerAliasLookup { 8388 aliasIn, err := getRemoteAlias(event.IncomingChanID) 8389 if err != nil { 8390 aliasIn = fmt.Sprintf("unable to lookup peer "+ 8391 "alias: %v", err) 8392 } 8393 aliasOut, err := getRemoteAlias(event.OutgoingChanID) 8394 if err != nil { 8395 aliasOut = fmt.Sprintf("unable to lookup peer "+ 8396 "alias: %v", err) 8397 } 8398 resp.ForwardingEvents[i].PeerAliasIn = aliasIn 8399 resp.ForwardingEvents[i].PeerAliasOut = aliasOut 8400 } 8401 } 8402 8403 return resp, nil 8404 } 8405 8406 // ExportChannelBackup attempts to return an encrypted static channel backup 8407 // for the target channel identified by its channel point. The backup is 8408 // encrypted with a key generated from the aezeed seed of the user. The 8409 // returned backup can either be restored using the RestoreChannelBackup method 8410 // once lnd is running, or via the InitWallet and UnlockWallet methods from the 8411 // WalletUnlocker service. 8412 func (r *rpcServer) ExportChannelBackup(ctx context.Context, 8413 in *lnrpc.ExportChannelBackupRequest) (*lnrpc.ChannelBackup, error) { 8414 8415 // First, we'll convert the lnrpc channel point into a wire.OutPoint 8416 // that we can manipulate. 8417 txid, err := lnrpc.GetChanPointFundingTxid(in.ChanPoint) 8418 if err != nil { 8419 return nil, err 8420 } 8421 chanPoint := wire.OutPoint{ 8422 Hash: *txid, 8423 Index: in.ChanPoint.OutputIndex, 8424 } 8425 8426 // Next, we'll attempt to fetch a channel backup for this channel from 8427 // the database. If this channel has been closed, or the outpoint is 8428 // unknown, then we'll return an error. 8429 unpackedBackup, err := chanbackup.FetchBackupForChan( 8430 ctx, chanPoint, r.server.chanStateDB, r.server.addrSource, 8431 ) 8432 if err != nil { 8433 return nil, err 8434 } 8435 8436 // At this point, we have an unpacked backup (plaintext) so we'll now 8437 // attempt to serialize and encrypt it in order to create a packed 8438 // backup.
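// (Hedged aside: the encryption key for packed backups is derived via
// the wallet's keychain, which itself comes from the aezeed, so only a
// wallet restored from the same seed can decrypt them.) A client-side
// sketch of exporting one channel's backup to disk, assuming "client"
// is an lnrpc.LightningClient and "chanPoint" an *lnrpc.ChannelPoint:
//
//	backup, err := client.ExportChannelBackup(ctx,
//		&lnrpc.ExportChannelBackupRequest{ChanPoint: chanPoint})
//	if err != nil {
//		return err
//	}
//	if err := os.WriteFile("channel.backup", backup.ChanBackup,
//		0600); err != nil {
//		return err
//	}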
8439 packedBackups, err := chanbackup.PackStaticChanBackups( 8440 []chanbackup.Single{*unpackedBackup}, 8441 r.server.cc.KeyRing, 8442 ) 8443 if err != nil { 8444 return nil, fmt.Errorf("packing of back ups failed: %w", err) 8445 } 8446 8447 // Before we proceed, we'll ensure that we received a backup for this 8448 // channel, otherwise, we'll bail out. 8449 packedBackup, ok := packedBackups[chanPoint] 8450 if !ok { 8451 return nil, fmt.Errorf("expected single backup for "+ 8452 "ChannelPoint(%v), got %v", chanPoint, 8453 len(packedBackups)) 8454 } 8455 8456 return &lnrpc.ChannelBackup{ 8457 ChanPoint: in.ChanPoint, 8458 ChanBackup: packedBackup, 8459 }, nil 8460 } 8461 8462 // VerifyChanBackup allows a caller to verify the integrity of a channel backup 8463 // snapshot. This method will accept either a packed Single or a packed 8464 // Multi. Specifying both will result in an error. 8465 func (r *rpcServer) VerifyChanBackup(ctx context.Context, 8466 in *lnrpc.ChanBackupSnapshot) (*lnrpc.VerifyChanBackupResponse, error) { 8467 8468 var ( 8469 channels []chanbackup.Single 8470 err error 8471 ) 8472 switch { 8473 // If neither a Single nor a Multi has been specified, then we have 8474 // nothing to verify. 8475 case in.GetSingleChanBackups() == nil && in.GetMultiChanBackup() == nil: 8476 return nil, errors.New("either a Single or Multi channel " + 8477 "backup must be specified") 8478 8479 // Either a Single or a Multi must be specified, but not both. 8480 case in.GetSingleChanBackups() != nil && in.GetMultiChanBackup() != nil: 8481 return nil, errors.New("either a Single or Multi channel " + 8482 "backup must be specified, but not both") 8483 8484 // If a Single is specified then we'll only accept one of them to allow 8485 // the caller to map the valid/invalid state for each individual Single. 8486 case in.GetSingleChanBackups() != nil: 8487 chanBackupsProtos := in.GetSingleChanBackups().ChanBackups 8488 if len(chanBackupsProtos) != 1 { 8489 return nil, errors.New("only one Single is accepted " + 8490 "at a time") 8491 } 8492 8493 // First, we'll convert the raw byte slice into a type we can 8494 // work with a bit better. 8495 chanBackup := chanbackup.PackedSingles( 8496 [][]byte{chanBackupsProtos[0].ChanBackup}, 8497 ) 8498 8499 // With our PackedSingles created, we'll attempt to unpack the 8500 // backup. If this fails, then we know the backup is invalid for 8501 // some reason. 8502 channels, err = chanBackup.Unpack(r.server.cc.KeyRing) 8503 if err != nil { 8504 return nil, fmt.Errorf("invalid single channel "+ 8505 "backup: %v", err) 8506 } 8507 8508 case in.GetMultiChanBackup() != nil: 8509 // We'll convert the raw byte slice into a PackedMulti that we 8510 // can easily work with. 8511 packedMultiBackup := in.GetMultiChanBackup().MultiChanBackup 8512 packedMulti := chanbackup.PackedMulti(packedMultiBackup) 8513 8514 // We'll now attempt to unpack the Multi. If this fails, then we 8515 // know it's invalid.
8516 multi, err := packedMulti.Unpack(r.server.cc.KeyRing) 8517 if err != nil { 8518 return nil, fmt.Errorf("invalid multi channel backup: "+ 8519 "%v", err) 8520 } 8521 8522 channels = multi.StaticBackups 8523 } 8524 8525 return &lnrpc.VerifyChanBackupResponse{ 8526 ChanPoints: fn.Map(channels, func(c chanbackup.Single) string { 8527 return c.FundingOutpoint.String() 8528 }), 8529 }, nil 8530 } 8531 8532 // createBackupSnapshot converts the passed set of Single backups into a 8533 // snapshot which contains individual packed single backups, as well as a 8534 // single packed multi backup. 8535 func (r *rpcServer) createBackupSnapshot(backups []chanbackup.Single) ( 8536 *lnrpc.ChanBackupSnapshot, error) { 8537 8538 // Once we have the set of back ups, we'll attempt to pack them all 8539 // into a series of single channel backups. 8540 singleChanPackedBackups, err := chanbackup.PackStaticChanBackups( 8541 backups, r.server.cc.KeyRing, 8542 ) 8543 if err != nil { 8544 return nil, fmt.Errorf("unable to pack set of chan "+ 8545 "backups: %v", err) 8546 } 8547 8548 // Now that we have our set of single packed backups, we'll morph that 8549 // into a form that the proto response requires. 8550 numBackups := len(singleChanPackedBackups) 8551 singleBackupResp := &lnrpc.ChannelBackups{ 8552 ChanBackups: make([]*lnrpc.ChannelBackup, 0, numBackups), 8553 } 8554 for chanPoint, singlePackedBackup := range singleChanPackedBackups { 8555 txid := chanPoint.Hash 8556 rpcChanPoint := &lnrpc.ChannelPoint{ 8557 FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ 8558 FundingTxidBytes: txid[:], 8559 }, 8560 OutputIndex: chanPoint.Index, 8561 } 8562 8563 singleBackupResp.ChanBackups = append( 8564 singleBackupResp.ChanBackups, 8565 &lnrpc.ChannelBackup{ 8566 ChanPoint: rpcChanPoint, 8567 ChanBackup: singlePackedBackup, 8568 }, 8569 ) 8570 } 8571 8572 // In addition to the set of single chan backups, we'll also create a 8573 // single multi-channel backup which can be serialized into a single 8574 // file for safe storage. 8575 var b bytes.Buffer 8576 unpackedMultiBackup := chanbackup.Multi{ 8577 StaticBackups: backups, 8578 } 8579 err = unpackedMultiBackup.PackToWriter(&b, r.server.cc.KeyRing) 8580 if err != nil { 8581 return nil, fmt.Errorf("unable to multi-pack backups: %w", err) 8582 } 8583 8584 multiBackupResp := &lnrpc.MultiChanBackup{ 8585 MultiChanBackup: b.Bytes(), 8586 } 8587 for _, singleBackup := range singleBackupResp.ChanBackups { 8588 multiBackupResp.ChanPoints = append( 8589 multiBackupResp.ChanPoints, singleBackup.ChanPoint, 8590 ) 8591 } 8592 8593 return &lnrpc.ChanBackupSnapshot{ 8594 SingleChanBackups: singleBackupResp, 8595 MultiChanBackup: multiBackupResp, 8596 }, nil 8597 } 8598 8599 // ExportAllChannelBackups returns static channel backups for all existing 8600 // channels known to lnd. A set of regular singular static channel backups for 8601 // each channel are returned. Additionally, a multi-channel backup is returned 8602 // as well, which contains a single encrypted blob containing the backups of 8603 // each channel. 8604 func (r *rpcServer) ExportAllChannelBackups(ctx context.Context, 8605 in *lnrpc.ChanBackupExportRequest) (*lnrpc.ChanBackupSnapshot, error) { 8606 8607 // First, we'll attempt to read back ups for ALL currently opened 8608 // channels from disk.
8609 allUnpackedBackups, err := chanbackup.FetchStaticChanBackups( 8610 ctx, r.server.chanStateDB, r.server.addrSource, 8611 ) 8612 if err != nil { 8613 return nil, fmt.Errorf("unable to fetch all static chan "+ 8614 "backups: %v", err) 8615 } 8616 8617 // With the backups assembled, we'll create a full snapshot. 8618 return r.createBackupSnapshot(allUnpackedBackups) 8619 } 8620 8621 // RestoreChannelBackups accepts a set of singular channel backups, or a single 8622 // encrypted multi-chan backup and attempts to recover any funds remaining 8623 // within the channel. If we're able to unpack the backup, then the new channel 8624 // will be shown under listchannels, as well as pending channels. 8625 func (r *rpcServer) RestoreChannelBackups(ctx context.Context, 8626 in *lnrpc.RestoreChanBackupRequest) (*lnrpc.RestoreBackupResponse, error) { 8627 8628 // The server hasn't yet started, so it won't be able to service any of 8629 // our requests, so we'll bail early here. 8630 if !r.server.Started() { 8631 return nil, ErrServerNotActive 8632 } 8633 8634 // First, we'll make our implementation of the 8635 // chanbackup.ChannelRestorer interface which we'll use to properly 8636 // restore either a set of chanbackup.Single or chanbackup.Multi 8637 // backups. 8638 chanRestorer := &chanDBRestorer{ 8639 db: r.server.chanStateDB, 8640 secretKeys: r.server.cc.KeyRing, 8641 chainArb: r.server.chainArb, 8642 } 8643 8644 // We'll accept either a list of Single backups, or a single Multi 8645 // backup which contains several single backups. 8646 var ( 8647 numRestored int 8648 err error 8649 ) 8650 switch { 8651 case in.GetChanBackups() != nil: 8652 chanBackupsProtos := in.GetChanBackups() 8653 8654 // Now that we know what type of backup we're working with, 8655 // we'll parse them all out into a more suitable format. 8656 packedBackups := make([][]byte, 0, len(chanBackupsProtos.ChanBackups)) 8657 for _, chanBackup := range chanBackupsProtos.ChanBackups { 8658 packedBackups = append( 8659 packedBackups, chanBackup.ChanBackup, 8660 ) 8661 } 8662 8663 // With our backups obtained, we'll now restore them which will 8664 // write the new backups to disk, and then attempt to connect 8665 // out to any peers that we know of which were our prior 8666 // channel peers. 8667 numRestored, err = chanbackup.UnpackAndRecoverSingles( 8668 chanbackup.PackedSingles(packedBackups), 8669 r.server.cc.KeyRing, chanRestorer, r.server, 8670 ) 8671 if err != nil { 8672 return nil, fmt.Errorf("unable to unpack single "+ 8673 "backups: %v", err) 8674 } 8675 8676 case in.GetMultiChanBackup() != nil: 8677 packedMultiBackup := in.GetMultiChanBackup() 8678 8679 // With our backups obtained, we'll now restore them which will 8680 // write the new backups to disk, and then attempt to connect 8681 // out to any peers that we know of which were our prior 8682 // channel peers. 8683 packedMulti := chanbackup.PackedMulti(packedMultiBackup) 8684 numRestored, err = chanbackup.UnpackAndRecoverMulti( 8685 packedMulti, r.server.cc.KeyRing, chanRestorer, 8686 r.server, 8687 ) 8688 if err != nil { 8689 return nil, fmt.Errorf("unable to unpack chan "+ 8690 "backup: %v", err) 8691 } 8692 } 8693 8694 return &lnrpc.RestoreBackupResponse{ 8695 NumRestored: uint32(numRestored), 8696 }, nil 8697 } 8698 8699 // SubscribeChannelBackups allows a client to subscribe to the most up to 8700 // date information concerning the state of all channel back ups. Each time a
// SubscribeChannelBackups allows a client to subscribe to the most up-to-date
// information concerning the state of all channel backups. Each time a new
// channel is added, we return the new set of channels, along with a
// multi-chan backup containing the backup info for all channels. Each time a
// channel is closed, we send a new update, which contains no new chan
// backups, but the updated set of encrypted multi-chan backups with the
// closed channel(s) removed.
func (r *rpcServer) SubscribeChannelBackups(req *lnrpc.ChannelBackupSubscription,
	updateStream lnrpc.Lightning_SubscribeChannelBackupsServer) error {

	// First, we'll subscribe to the primary channel notifier so we can
	// obtain events for new pending/opened/closed channels.
	chanSubscription, err := r.server.channelNotifier.SubscribeChannelEvents()
	if err != nil {
		return err
	}

	defer chanSubscription.Cancel()
	for {
		select {
		// A new event has been sent by the channel notifier, so we'll
		// assemble, then sling out a new event to the client.
		case e := <-chanSubscription.Updates():
			// TODO(roasbeef): batch dispatch ntnfs

			switch e.(type) {
			// We only care about new/closed channels, so we'll
			// skip any events for active/inactive channels.
			// To make the subscription behave the same way as the
			// synchronous call and the file based backup, we also
			// include pending channels in the update.
			case channelnotifier.ActiveChannelEvent:
				continue
			case channelnotifier.InactiveChannelEvent:
				continue
			case channelnotifier.ActiveLinkEvent:
				continue
			case channelnotifier.InactiveLinkEvent:
				continue
			}

			// Now that we know the channel state has changed,
			// we'll obtain the current set of single channel
			// backups from disk.
			chanBackups, err := chanbackup.FetchStaticChanBackups(
				updateStream.Context(), r.server.chanStateDB,
				r.server.addrSource,
			)
			if err != nil {
				return fmt.Errorf("unable to fetch all "+
					"static chan backups: %v", err)
			}

			// With our backups obtained, we'll pack them into a
			// snapshot and send them back to the client.
			backupSnapshot, err := r.createBackupSnapshot(
				chanBackups,
			)
			if err != nil {
				return err
			}
			err = updateStream.Send(backupSnapshot)
			if err != nil {
				return err
			}

		// The response stream's context has been closed for whatever
		// reason. If the context was closed due to an exceeded
		// deadline, we return an error.
		case <-updateStream.Context().Done():
			streamErr := updateStream.Context().Err()
			if errors.Is(streamErr, context.Canceled) {
				return nil
			}

			return streamErr

		case <-r.quit:
			return nil
		}
	}
}
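// consumeBackupStreamSketch is a hedged, client-side sketch (not part of the
// server logic above) of draining SubscribeChannelBackups updates; lnClient
// is assumed to be an already-connected lnrpc.LightningClient.
func consumeBackupStreamSketch(ctx context.Context,
	lnClient lnrpc.LightningClient) error {

	stream, err := lnClient.SubscribeChannelBackups(
		ctx, &lnrpc.ChannelBackupSubscription{},
	)
	if err != nil {
		return err
	}

	// Each received snapshot carries a fresh multi backup that supersedes
	// any previously stored one.
	for {
		snapshot, err := stream.Recv()
		if err != nil {
			return err
		}

		multi := snapshot.GetMultiChanBackup().GetMultiChanBackup()
		rpcsLog.Infof("Received new multi backup of %d bytes",
			len(multi))
	}
}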
// ChannelAcceptor dispatches a bi-directional streaming RPC in which
// OpenChannel requests are sent to the client and the client responds with
// a boolean that tells LND whether or not to accept the channel. This allows
// node operators to specify their own criteria for accepting inbound channels
// through a single persistent connection.
func (r *rpcServer) ChannelAcceptor(
	stream lnrpc.Lightning_ChannelAcceptorServer) error {

	chainedAcceptor := r.chanPredicate

	// Create a new RPCAcceptor which will send requests into the
	// newRequests channel when it receives them.
	rpcAcceptor := chanacceptor.NewRPCAcceptor(
		stream.Recv, stream.Send, r.cfg.AcceptorTimeout,
		r.cfg.ActiveNetParams.Params, r.quit,
	)

	// Add the RPCAcceptor to the ChainedAcceptor and defer its removal.
	id := chainedAcceptor.AddAcceptor(rpcAcceptor)
	defer chainedAcceptor.RemoveAcceptor(id)

	// Run the rpc acceptor, which will accept requests for channel
	// acceptance decisions from our chained acceptor, send them to the
	// channel acceptor and listen for and report responses. This function
	// blocks, and will exit if the rpcserver receives the instruction to
	// shutdown, or the client cancels.
	return rpcAcceptor.Run()
}

// BakeMacaroon allows the creation of a new macaroon with custom read and
// write permissions. No first-party caveats are added since this can be done
// offline. If the --allow-external-permissions flag is set, the RPC will
// allow external permissions that LND is not aware of.
func (r *rpcServer) BakeMacaroon(ctx context.Context,
	req *lnrpc.BakeMacaroonRequest) (*lnrpc.BakeMacaroonResponse, error) {

	// If the --no-macaroons flag is used to start lnd, the macaroon
	// service is not initialized. Therefore we can't bake new macaroons.
	if r.macService == nil {
		return nil, errMacaroonDisabled
	}

	helpMsg := fmt.Sprintf("supported actions are %v, supported entities "+
		"are %v", validActions, validEntities)

	// Don't allow an empty permission list, as it doesn't make sense to
	// have a macaroon that is not allowed to access any RPC.
	if len(req.Permissions) == 0 {
		return nil, fmt.Errorf("permission list cannot be empty. "+
			"specify at least one action/entity pair. %s", helpMsg)
	}

	// Validate and map the permission struct used by gRPC to the one used
	// by the bakery. If the --allow-external-permissions flag is set, we
	// skip the validation and only map.
	requestedPermissions := make([]bakery.Op, len(req.Permissions))
	for idx, op := range req.Permissions {
		if req.AllowExternalPermissions {
			requestedPermissions[idx] = bakery.Op{
				Entity: op.Entity,
				Action: op.Action,
			}
			continue
		}

		if !stringInSlice(op.Entity, validEntities) {
			return nil, fmt.Errorf("invalid permission entity. %s",
				helpMsg)
		}

		// Either we have the special entity "uri" which specifies a
		// full gRPC URI, or we have one of the pre-defined actions.
		if op.Entity == macaroons.PermissionEntityCustomURI {
			allPermissions := r.interceptorChain.Permissions()
			_, ok := allPermissions[op.Action]
			if !ok {
				return nil, fmt.Errorf("invalid permission " +
					"action, must be an existing URI in " +
					"the format /package.Service/" +
					"MethodName")
			}
		} else if !stringInSlice(op.Action, validActions) {
			return nil, fmt.Errorf("invalid permission action. %s",
				helpMsg)
		}

		requestedPermissions[idx] = bakery.Op{
			Entity: op.Entity,
			Action: op.Action,
		}
	}

	// Convert the root key ID from uint64 to bytes. Because the
	// DefaultRootKeyID is the digit 0 expressed as the byte slice of the
	// string "0", we keep all IDs in the same format: they must be
	// numeric, and are encoded as the byte slice of their decimal string
	// representation, e.g. uint64(123) becomes []byte("123").
	rootKeyID := []byte(strconv.FormatUint(req.RootKeyId, 10))

	// Bake a new macaroon with the given permissions and send it binary
	// serialized and hex encoded to the client.
	newMac, err := r.macService.NewMacaroon(
		ctx, rootKeyID, requestedPermissions...,
	)
	if err != nil {
		return nil, err
	}
	newMacBytes, err := newMac.M().MarshalBinary()
	if err != nil {
		return nil, err
	}
	resp := &lnrpc.BakeMacaroonResponse{}
	resp.Macaroon = hex.EncodeToString(newMacBytes)

	return resp, nil
}
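// bakeMacaroonReqSketch shows, as a hypothetical example only, the shape of a
// BakeMacaroon request combining a pre-defined entity/action pair with one
// URI-style permission; the chosen values are illustrative.
func bakeMacaroonReqSketch() *lnrpc.BakeMacaroonRequest {
	return &lnrpc.BakeMacaroonRequest{
		Permissions: []*lnrpc.MacaroonPermission{
			// One of the pre-defined entity/action pairs.
			{Entity: "invoices", Action: "read"},

			// The special "uri" entity takes a full gRPC URI as
			// its action.
			{Entity: "uri", Action: "/lnrpc.Lightning/GetInfo"},
		},
	}
}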
// ListMacaroonIDs returns a list of macaroon root key IDs in use.
func (r *rpcServer) ListMacaroonIDs(ctx context.Context,
	req *lnrpc.ListMacaroonIDsRequest) (
	*lnrpc.ListMacaroonIDsResponse, error) {

	// If the --no-macaroons flag is used to start lnd, the macaroon
	// service is not initialized. Therefore we can't show any IDs.
	if r.macService == nil {
		return nil, errMacaroonDisabled
	}

	rootKeyIDByteSlice, err := r.macService.ListMacaroonIDs(ctx)
	if err != nil {
		return nil, err
	}

	var rootKeyIDs []uint64
	for _, value := range rootKeyIDByteSlice {
		// Convert the bytes into a uint64.
		id, err := strconv.ParseUint(string(value), 10, 64)
		if err != nil {
			return nil, err
		}

		rootKeyIDs = append(rootKeyIDs, id)
	}

	return &lnrpc.ListMacaroonIDsResponse{RootKeyIds: rootKeyIDs}, nil
}

// DeleteMacaroonID removes a specific macaroon ID.
func (r *rpcServer) DeleteMacaroonID(ctx context.Context,
	req *lnrpc.DeleteMacaroonIDRequest) (
	*lnrpc.DeleteMacaroonIDResponse, error) {

	// If the --no-macaroons flag is used to start lnd, the macaroon
	// service is not initialized. Therefore we can't delete any IDs.
	if r.macService == nil {
		return nil, errMacaroonDisabled
	}

	// Convert the root key ID from uint64 to bytes. Because the
	// DefaultRootKeyID is the digit 0 expressed as the byte slice of the
	// string "0", we keep all IDs in the same format: the byte slice of
	// the ID's decimal string representation.
	rootKeyID := []byte(strconv.FormatUint(req.RootKeyId, 10))
	deletedIDBytes, err := r.macService.DeleteMacaroonID(ctx, rootKeyID)
	if err != nil {
		return nil, err
	}

	return &lnrpc.DeleteMacaroonIDResponse{
		// If the root key ID doesn't exist, it won't be deleted. We
		// will return a response with deleted = false, otherwise
		// true.
		Deleted: deletedIDBytes != nil,
	}, nil
}
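// rootKeyIDBytesSketch makes the numeric root key ID encoding used above
// explicit: the uint64 ID is stored as the byte slice of its decimal string
// representation, e.g. uint64(123) becomes []byte("123"). Minimal sketch,
// not used by lnd itself.
func rootKeyIDBytesSketch(id uint64) []byte {
	return []byte(strconv.FormatUint(id, 10))
}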
// ListPermissions lists all RPC method URIs and the macaroon permissions that
// are required to access them.
func (r *rpcServer) ListPermissions(_ context.Context,
	_ *lnrpc.ListPermissionsRequest) (*lnrpc.ListPermissionsResponse,
	error) {

	permissionMap := make(map[string]*lnrpc.MacaroonPermissionList)
	for uri, perms := range r.interceptorChain.Permissions() {
		rpcPerms := make([]*lnrpc.MacaroonPermission, len(perms))
		for idx, perm := range perms {
			rpcPerms[idx] = &lnrpc.MacaroonPermission{
				Entity: perm.Entity,
				Action: perm.Action,
			}
		}
		permissionMap[uri] = &lnrpc.MacaroonPermissionList{
			Permissions: rpcPerms,
		}
	}

	return &lnrpc.ListPermissionsResponse{
		MethodPermissions: permissionMap,
	}, nil
}

// CheckMacaroonPermissions checks whether the provided macaroon contains all
// the provided permissions. If the macaroon is valid (i.e. all caveats are
// satisfied), and all permissions provided in the request are met, then this
// RPC returns true.
func (r *rpcServer) CheckMacaroonPermissions(ctx context.Context,
	req *lnrpc.CheckMacPermRequest) (*lnrpc.CheckMacPermResponse, error) {

	// Sanity-check the input parameters to eliminate impossible
	// combinations.
	switch {
	case len(req.Permissions) > 0 && req.CheckDefaultPermsFromFullMethod:
		return nil, fmt.Errorf("cannot check default permissions " +
			"from full method and from provided permission list " +
			"at the same time")

	case len(req.FullMethod) == 0 && req.CheckDefaultPermsFromFullMethod:
		return nil, fmt.Errorf("cannot check default permissions " +
			"from full method without providing the full method " +
			"name")
	}

	// Turn the gRPC macaroon permissions into bakery.Op for the server to
	// process.
	permissions := make([]bakery.Op, len(req.Permissions))
	for idx, perm := range req.Permissions {
		permissions[idx] = bakery.Op{
			Entity: perm.Entity,
			Action: perm.Action,
		}
	}

	// If the user wants to check the default permissions for the full
	// method, then we'll use the interceptor chain to obtain the default
	// permissions for the full method. This overwrites the user-provided
	// permissions parsed above, but those are required to be empty anyway
	// if the flag is turned on.
	if req.CheckDefaultPermsFromFullMethod {
		allPerms := r.interceptorChain.Permissions()
		methodPerms, ok := allPerms[req.FullMethod]
		if !ok {
			return nil, fmt.Errorf("no permissions found for "+
				"full method %s", req.FullMethod)
		}

		permissions = methodPerms
	}

	err := r.macService.CheckMacAuth(
		ctx, req.Macaroon, permissions, req.FullMethod,
	)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	return &lnrpc.CheckMacPermResponse{
		Valid: true,
	}, nil
}
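// checkMacPermReqSketch is a hypothetical example (not used by lnd itself) of
// asking whether macBytes satisfies the default permissions of a full method
// URI such as "/lnrpc.Lightning/GetInfo"; per the sanity checks above, the
// explicit permission list must then stay empty.
func checkMacPermReqSketch(macBytes []byte,
	fullMethod string) *lnrpc.CheckMacPermRequest {

	return &lnrpc.CheckMacPermRequest{
		Macaroon:                        macBytes,
		FullMethod:                      fullMethod,
		CheckDefaultPermsFromFullMethod: true,
	}
}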
// FundingStateStep is an advanced funding related call that allows the caller
// to either execute some preparatory steps for a funding workflow, or
// manually progress a funding workflow. The primary way a funding flow is
// identified is via its pending channel ID. As an example, this method can be
// used to specify that we're expecting a funding flow for a particular
// pending channel ID, for which we need to use specific parameters.
// Alternatively, this can be used to interactively drive PSBT signing for
// partially complete funding transactions.
func (r *rpcServer) FundingStateStep(ctx context.Context,
	in *lnrpc.FundingTransitionMsg) (*lnrpc.FundingStateStepResp, error) {

	var pendingChanID [32]byte
	switch {
	// If this is a message to register a new shim that is an external
	// channel point, then we'll contact the wallet to register this new
	// shim. A user will use this method to register a new channel funding
	// workflow which has already been partially negotiated outside of the
	// core protocol.
	case in.GetShimRegister() != nil &&
		in.GetShimRegister().GetChanPointShim() != nil:

		rpcShimIntent := in.GetShimRegister().GetChanPointShim()

		// Using the rpc shim as a template, we'll construct a new
		// chanfunding.Assembler that is able to express the proper
		// formulation of this expected channel.
		shimAssembler, err := newFundingShimAssembler(
			rpcShimIntent, false, r.server.cc.KeyRing,
		)
		if err != nil {
			return nil, err
		}
		req := &chanfunding.Request{
			RemoteAmt: btcutil.Amount(rpcShimIntent.Amt),
		}
		shimIntent, err := shimAssembler.ProvisionChannel(req)
		if err != nil {
			return nil, err
		}

		// Once we have the intent, we'll register it with the wallet.
		// Once we receive an incoming funding request that uses this
		// pending channel ID, then this shim will be dispatched in
		// place of our regular funding workflow.
		copy(pendingChanID[:], rpcShimIntent.PendingChanId)
		err = r.server.cc.Wallet.RegisterFundingIntent(
			pendingChanID, shimIntent,
		)
		if err != nil {
			return nil, err
		}

	// There is no need to register a PSBT shim before opening the
	// channel, even though our RPC message structure allows for it.
	// Inform the user by returning a proper error instead of just doing
	// nothing.
	case in.GetShimRegister() != nil &&
		in.GetShimRegister().GetPsbtShim() != nil:

		return nil, fmt.Errorf("PSBT shim must only be sent when " +
			"opening a channel")

	// If this is a transition to cancel an existing shim, then we'll pass
	// this message along to the wallet, informing it that the intent no
	// longer needs to be considered and should be cleaned up.
	case in.GetShimCancel() != nil:
		rpcsLog.Debugf("Canceling funding shim for pending_id=%x",
			in.GetShimCancel().PendingChanId)

		copy(pendingChanID[:], in.GetShimCancel().PendingChanId)
		err := r.server.cc.Wallet.CancelFundingIntent(pendingChanID)
		if err != nil {
			return nil, err
		}

	// If this is a transition to verify the PSBT for an existing shim,
	// we'll do so and then store the verified PSBT for later so we can
	// compare it to the final, signed one.
	case in.GetPsbtVerify() != nil:
		rpcsLog.Debugf("Verifying PSBT for pending_id=%x",
			in.GetPsbtVerify().PendingChanId)

		copy(pendingChanID[:], in.GetPsbtVerify().PendingChanId)
		packet, err := psbt.NewFromRawBytes(
			bytes.NewReader(in.GetPsbtVerify().FundedPsbt), false,
		)
		if err != nil {
			return nil, fmt.Errorf("error parsing psbt: %w", err)
		}

		err = r.server.cc.Wallet.PsbtFundingVerify(
			pendingChanID, packet, in.GetPsbtVerify().SkipFinalize,
		)
		if err != nil {
			return nil, err
		}

	// If this is a transition to finalize the PSBT funding flow, we
	// compare the final PSBT to the previously verified one and, if
	// nothing unexpected was changed, continue the channel opening
	// process.
	case in.GetPsbtFinalize() != nil:
		msg := in.GetPsbtFinalize()
		rpcsLog.Debugf("Finalizing PSBT for pending_id=%x",
			msg.PendingChanId)

		copy(pendingChanID[:], msg.PendingChanId)

		var (
			packet *psbt.Packet
			rawTx  *wire.MsgTx
			err    error
		)

		// Either the signed PSBT or the raw transaction needs to be
		// set, but not both at the same time.
		switch {
		case len(msg.SignedPsbt) > 0 && len(msg.FinalRawTx) > 0:
			return nil, fmt.Errorf("cannot set both signed PSBT " +
				"and final raw TX at the same time")

		case len(msg.SignedPsbt) > 0:
			packet, err = psbt.NewFromRawBytes(
				bytes.NewReader(msg.SignedPsbt), false,
			)
			if err != nil {
				return nil, fmt.Errorf("error parsing psbt: %w",
					err)
			}

		case len(msg.FinalRawTx) > 0:
			rawTx = &wire.MsgTx{}
			err = rawTx.Deserialize(bytes.NewReader(msg.FinalRawTx))
			if err != nil {
				return nil, fmt.Errorf("error parsing final "+
					"raw TX: %v", err)
			}

		default:
			return nil, fmt.Errorf("PSBT or raw transaction to " +
				"finalize missing")
		}

		err = r.server.cc.Wallet.PsbtFundingFinalize(
			pendingChanID, packet, rawTx,
		)
		if err != nil {
			return nil, err
		}
	}

	// TODO(roasbeef): extend PendingChannels to also show shims

	// TODO(roasbeef): return resulting state? also add a method to query
	// current state?
	return &lnrpc.FundingStateStepResp{}, nil
}
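// shimCancelMsgSketch is a minimal illustration (not used by lnd itself) of
// the FundingStateStep transition message that cancels a previously
// registered funding shim; the oneof wrapper type name is assumed from the
// generated proto code.
func shimCancelMsgSketch(pendingChanID []byte) *lnrpc.FundingTransitionMsg {
	return &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_ShimCancel{
			ShimCancel: &lnrpc.FundingShimCancel{
				PendingChanId: pendingChanID,
			},
		},
	}
}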
// RegisterRPCMiddleware adds a new gRPC middleware to the interceptor chain.
// A gRPC middleware is a software component external to lnd that aims to add
// additional business logic to lnd by observing/intercepting/validating
// incoming gRPC client requests and (if needed) replacing/overwriting
// outgoing messages before they're sent to the client. When registering, the
// middleware must identify itself and indicate what custom macaroon caveats
// it wants to be responsible for. Only requests that contain a macaroon with
// that specific custom caveat are then sent to the middleware for inspection.
// As a security measure, _no_ middleware can intercept requests made with
// _unencumbered_ macaroons!
func (r *rpcServer) RegisterRPCMiddleware(
	stream lnrpc.Lightning_RegisterRPCMiddlewareServer) error {

	// This is security critical functionality and needs to be enabled
	// specifically by the user.
	if !r.cfg.RPCMiddleware.Enable {
		return fmt.Errorf("RPC middleware not enabled in config")
	}

	// When registering a middleware, the first message sent from the
	// middleware must be a registration message containing its name and
	// the custom caveat it wants to register for.
	var (
		registerChan     = make(chan *lnrpc.MiddlewareRegistration, 1)
		registerDoneChan = make(chan struct{})
		errChan          = make(chan error, 1)
	)
	ctxc, cancel := context.WithTimeout(
		stream.Context(), r.cfg.RPCMiddleware.InterceptTimeout,
	)
	defer cancel()

	// Read the first message in a goroutine because the Recv method
	// blocks until the message arrives.
	go func() {
		msg, err := stream.Recv()
		if err != nil {
			errChan <- err

			return
		}

		registerChan <- msg.GetRegister()
	}()

	// Wait for the initial message to arrive or time out if it takes too
	// long.
	var registerMsg *lnrpc.MiddlewareRegistration
	select {
	case registerMsg = <-registerChan:
		if registerMsg == nil {
			return fmt.Errorf("invalid initial middleware " +
				"registration message")
		}

	case err := <-errChan:
		return fmt.Errorf("error receiving initial middleware "+
			"registration message: %v", err)

	case <-ctxc.Done():
		return ctxc.Err()

	case <-r.quit:
		return ErrServerShuttingDown
	}

	// Make sure the registration is valid.
	const nameMinLength = 5
	if len(registerMsg.MiddlewareName) < nameMinLength {
		return fmt.Errorf("invalid middleware name, use a "+
			"descriptive name of at least %d characters",
			nameMinLength)
	}

	readOnly := registerMsg.ReadOnlyMode
	caveatName := registerMsg.CustomMacaroonCaveatName
	switch {
	case readOnly && len(caveatName) > 0:
		return fmt.Errorf("cannot set read-only and custom caveat " +
			"name at the same time")

	case !readOnly && len(caveatName) < nameMinLength:
		return fmt.Errorf("need to set either a custom caveat name "+
			"of at least %d characters or read-only mode",
			nameMinLength)
	}

	middleware := rpcperms.NewMiddlewareHandler(
		registerMsg.MiddlewareName,
		caveatName, readOnly, stream.Recv, stream.Send,
		r.cfg.RPCMiddleware.InterceptTimeout,
		r.cfg.ActiveNetParams.Params, r.quit,
	)

	// Add the RPC middleware to the interceptor chain and defer its
	// removal.
	err := r.interceptorChain.RegisterMiddleware(middleware)
	if err != nil {
		return fmt.Errorf("error registering middleware: %w", err)
	}
	defer r.interceptorChain.RemoveMiddleware(registerMsg.MiddlewareName)

	// Send a message to the client to indicate that the registration has
	// successfully completed.
	regCompleteMsg := &lnrpc.RPCMiddlewareRequest{
		InterceptType: &lnrpc.RPCMiddlewareRequest_RegComplete{
			RegComplete: true,
		},
	}

	// Send the message in a goroutine because the Send method blocks
	// until the message is read by the client.
	go func() {
		err := stream.Send(regCompleteMsg)
		if err != nil {
			errChan <- err

			return
		}

		close(registerDoneChan)
	}()

	select {
	case err := <-errChan:
		return fmt.Errorf("error sending middleware registration "+
			"complete message: %v", err)

	case <-ctxc.Done():
		return ctxc.Err()

	case <-r.quit:
		return ErrServerShuttingDown

	case <-registerDoneChan:
	}

	return middleware.Run()
}
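// middlewareRegistrationSketch is a hedged illustration (not used by lnd
// itself) of the initial message a middleware client would send on the
// RegisterRPCMiddleware stream; the oneof wrapper type name is assumed from
// the generated proto code, and the name/caveat values are examples only.
func middlewareRegistrationSketch() *lnrpc.RPCMiddlewareResponse {
	return &lnrpc.RPCMiddlewareResponse{
		MiddlewareMessage: &lnrpc.RPCMiddlewareResponse_Register{
			Register: &lnrpc.MiddlewareRegistration{
				MiddlewareName:           "example-middleware",
				CustomMacaroonCaveatName: "example-caveat",
			},
		},
	}
}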
// SendCustomMessage sends a custom peer message.
func (r *rpcServer) SendCustomMessage(ctx context.Context,
	req *lnrpc.SendCustomMessageRequest) (*lnrpc.SendCustomMessageResponse,
	error) {

	peer, err := route.NewVertexFromBytes(req.Peer)
	if err != nil {
		return nil, err
	}

	err = r.server.SendCustomMessage(
		ctx, peer, lnwire.MessageType(req.Type), req.Data,
	)
	switch {
	case errors.Is(err, ErrPeerNotConnected):
		return nil, status.Error(codes.NotFound, err.Error())
	case err != nil:
		return nil, err
	}

	return &lnrpc.SendCustomMessageResponse{
		Status: "message sent successfully",
	}, nil
}

// SubscribeCustomMessages subscribes to a stream of incoming custom peer
// messages.
func (r *rpcServer) SubscribeCustomMessages(
	_ *lnrpc.SubscribeCustomMessagesRequest,
	server lnrpc.Lightning_SubscribeCustomMessagesServer) error {

	client, err := r.server.SubscribeCustomMessages()
	if err != nil {
		return err
	}
	defer client.Cancel()

	for {
		select {
		case <-client.Quit():
			return errors.New("shutdown")

		case <-server.Context().Done():
			return server.Context().Err()

		case update := <-client.Updates():
			customMsg := update.(*CustomMessage)

			err := server.Send(&lnrpc.CustomMessage{
				Peer: customMsg.Peer[:],
				Data: customMsg.Msg.Data,
				Type: uint32(customMsg.Msg.Type),
			})
			if err != nil {
				return err
			}
		}
	}
}

// SendOnionMessage sends an onion message to the given peer.
func (r *rpcServer) SendOnionMessage(ctx context.Context,
	req *lnrpc.SendOnionMessageRequest) (*lnrpc.SendOnionMessageResponse,
	error) {

	// First, we'll validate the path key bytes passed in the request to
	// ensure that they encode a valid compressed public key.
	pathKey, err := btcec.ParsePubKey(req.PathKey)
	if err != nil {
		return nil, fmt.Errorf("unable to decode path key bytes: %w",
			err)
	}

	peer, err := route.NewVertexFromBytes(req.Peer)
	if err != nil {
		return nil, err
	}

	err = r.server.SendOnionMessage(ctx, peer, pathKey, req.Onion)
	switch {
	case errors.Is(err, ErrPeerNotConnected):
		return nil, status.Error(codes.NotFound, err.Error())
	case err != nil:
		return nil, err
	}

	return &lnrpc.SendOnionMessageResponse{
		Status: "onion message sent successfully",
	}, nil
}
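// sendOnionMsgReqSketch is a minimal, hypothetical example (not used by lnd
// itself) of the request consumed by SendOnionMessage above: peerPub is the
// 33-byte compressed public key of the first-hop peer, pathKey the serialized
// blinding point for the onion, and onionBlob the onion packet itself.
func sendOnionMsgReqSketch(peerPub, pathKey,
	onionBlob []byte) *lnrpc.SendOnionMessageRequest {

	return &lnrpc.SendOnionMessageRequest{
		Peer:    peerPub,
		PathKey: pathKey,
		Onion:   onionBlob,
	}
}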
// SubscribeOnionMessages subscribes to a stream of incoming onion messages.
func (r *rpcServer) SubscribeOnionMessages(
	_ *lnrpc.SubscribeOnionMessagesRequest,
	server lnrpc.Lightning_SubscribeOnionMessagesServer) error {

	client, err := r.server.SubscribeOnionMessages()
	if err != nil {
		return err
	}
	defer client.Cancel()

	for {
		select {
		case <-client.Quit():
			return errors.New("shutdown")

		case <-server.Context().Done():
			return server.Context().Err()

		case update := <-client.Updates():
			oMsg, ok := update.(*onionmessage.OnionMessageUpdate)
			if !ok {
				return fmt.Errorf("onion message update "+
					"failed type assertion: %T", update)
			}

			bp := &lnrpc.BlindedPath{}

			//nolint:ll
			if oMsg.ReplyPath != nil {
				bp.IntroductionNode = oMsg.ReplyPath.IntroductionPoint.SerializeCompressed()
				bp.BlindingPoint = oMsg.ReplyPath.BlindingPoint.SerializeCompressed()

				for _, hop := range oMsg.ReplyPath.BlindedHops {
					rpcHop := &lnrpc.BlindedHop{
						BlindedNode:   hop.BlindedNodePub.SerializeCompressed(),
						EncryptedData: hop.CipherText,
					}
					bp.BlindedHops = append(
						bp.BlindedHops, rpcHop,
					)
				}
			}

			//nolint:ll
			err := server.Send(&lnrpc.OnionMessageUpdate{
				Peer:                   oMsg.Peer[:],
				PathKey:                oMsg.PathKey[:],
				Onion:                  oMsg.OnionBlob,
				ReplyPath:              bp,
				EncryptedRecipientData: oMsg.EncryptedRecipientData,
				CustomRecords:          oMsg.CustomRecords,
			})
			if err != nil {
				return err
			}
		}
	}
}

// ListAliases returns the set of all aliases we have ever allocated, along
// with their base SCIDs and possibly a separate confirmed SCID in the case of
// zero-conf channels.
func (r *rpcServer) ListAliases(_ context.Context,
	_ *lnrpc.ListAliasesRequest) (*lnrpc.ListAliasesResponse, error) {

	// Fetch the map of all aliases.
	mapAliases := r.server.aliasMgr.ListAliases()

	// Fill out the response. This does not include the zero-conf
	// confirmed SCID. Doing so would require more database lookups, and
	// it can be cross-referenced with the output of
	// ListChannels/ClosedChannels.
	resp := &lnrpc.ListAliasesResponse{
		AliasMaps: make([]*lnrpc.AliasMap, 0),
	}

	// Now we need to parse the created mappings into an rpc response.
	resp.AliasMaps = lnrpc.MarshalAliasMap(mapAliases)

	return resp, nil
}

// rpcInitiator returns the correct lnrpc initiator for channels where we have
// a record of the opening channel.
func rpcInitiator(isInitiator bool) lnrpc.Initiator {
	if isInitiator {
		return lnrpc.Initiator_INITIATOR_LOCAL
	}

	return lnrpc.Initiator_INITIATOR_REMOTE
}
// chainSyncInfo wraps info about the best block and whether the system is
// synced to that block.
type chainSyncInfo struct {
	// isSynced specifies whether the whole system is considered synced.
	// When true, it means the following subsystems are at the best height
	// reported by the chain backend:
	// - the wallet.
	// - the channel graph.
	// - the blockbeat dispatcher.
	isSynced bool

	// isWalletSynced specifies whether the wallet is synced to our chain
	// view.
	isWalletSynced bool

	// bestHeight is the current height known to the chain backend.
	bestHeight int32

	// blockHash is the hash of the current block known to the chain
	// backend.
	blockHash chainhash.Hash

	// timestamp is the block's timestamp the wallet has synced to.
	timestamp int64
}

// getChainSyncInfo queries the chain backend, the wallet, the channel router
// and the blockbeat dispatcher to determine the best block and whether the
// system is considered synced.
func (r *rpcServer) getChainSyncInfo() (*chainSyncInfo, error) {
	bestHash, bestHeight, err := r.server.cc.ChainIO.GetBestBlock()
	if err != nil {
		return nil, fmt.Errorf("unable to get best block info: %w",
			err)
	}

	isWalletSynced, bestHeaderTimestamp, err :=
		r.server.cc.Wallet.IsSynced()
	if err != nil {
		return nil, fmt.Errorf("unable to sync PoV of the wallet "+
			"with current best block in the main chain: %v", err)
	}

	// Create the info to be returned.
	info := &chainSyncInfo{
		isWalletSynced: isWalletSynced,
		bestHeight:     bestHeight,
		blockHash:      *bestHash,
		timestamp:      bestHeaderTimestamp,
	}

	// Exit early if the wallet is not synced.
	if !isWalletSynced {
		rpcsLog.Debugf("Wallet is not synced to height %v yet",
			bestHeight)

		return info, nil
	}

	// If the router does full channel validation, it has a lot of work to
	// do for each block. So it might be possible that it isn't yet up to
	// date with the most recent block, even if the wallet is. This can
	// happen in environments with high CPU load (such as parallel itests).
	// Since the `synced_to_chain` flag in the response of this call is
	// used by many wallets (and also our itests) to make sure everything
	// is up to date, we add the router's state to it. So the flag will
	// only toggle to true once the router has also caught up.
	isSynced := isWalletSynced
	if !r.cfg.Routing.AssumeChannelValid {
		routerHeight := r.server.graphBuilder.SyncedHeight()
		isSynced = uint32(bestHeight) == routerHeight
	}

	// Exit early if the channel graph is not synced.
	if !isSynced {
		rpcsLog.Debugf("Graph is not synced to height %v yet",
			bestHeight)

		return info, nil
	}

	// Given that the wallet and the channel router are synced, we now
	// check whether the blockbeat dispatcher is synced.
	height := r.server.blockbeatDispatcher.CurrentHeight()

	// Overwrite isSynced and return.
	info.isSynced = height == bestHeight

	if !info.isSynced {
		rpcsLog.Debugf("Blockbeat is not synced to height %v yet",
			bestHeight)
	}

	return info, nil
}
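// syncedToChainSketch condenses the synced_to_chain semantics implemented in
// getChainSyncInfo into a single expression, as a standalone illustration
// only: the node reports synced exclusively when the wallet, the channel
// graph and the blockbeat dispatcher all agree on the chain backend's best
// height (the graph height is only compared when full channel validation is
// enabled).
func syncedToChainSketch(walletSynced, validateChannels bool, bestHeight,
	graphHeight, blockbeatHeight int32) bool {

	graphSynced := !validateChannels || graphHeight == bestHeight

	return walletSynced && graphSynced && blockbeatHeight == bestHeight
}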