/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

"use strict";

const DevToolsUtils = require("resource://devtools/shared/DevToolsUtils.js");
const {
  getStack,
  callFunctionWithAsyncStack,
} = require("resource://devtools/shared/platform/stack.js");
const EventEmitter = require("resource://devtools/shared/event-emitter.js");
const {
  UnsolicitedNotifications,
} = require("resource://devtools/client/constants.js");
const { AppConstants } = ChromeUtils.importESModule(
  "resource://gre/modules/AppConstants.sys.mjs"
);

loader.lazyRequireGetter(
  this,
  "Authentication",
  "resource://devtools/shared/security/auth.js"
);
loader.lazyRequireGetter(
  this,
  "DebuggerSocket",
  "resource://devtools/shared/security/socket.js",
  true
);
loader.lazyRequireGetter(
  this,
  "EventEmitter",
  "resource://devtools/shared/event-emitter.js"
);
loader.lazyRequireGetter(
  this,
  ["createRootFront", "Front"],
  "resource://devtools/shared/protocol.js",
  true
);
loader.lazyRequireGetter(
  this,
  "ObjectFront",
  "resource://devtools/client/fronts/object.js",
  true
);
/**
 * Creates a client for the remote debugging protocol server. This client
 * provides the means to communicate with the server and exchange the messages
 * required by the protocol in a traditional JavaScript API.
 */
function DevToolsClient(transport) {
  this._transport = transport;
  this._transport.hooks = this;

  this._pendingRequests = new Map();
  this._activeRequests = new Map();
  this._eventsEnabled = true;

  this.request = this.request.bind(this);

  /*
   * As the first thing on the connection, expect a greeting packet from
   * the connection's root actor.
   */
  this.expectReply("root", async packet => {
    if (packet.error) {
      console.error("Error when waiting for root actor", packet);
      return;
    }

    this.mainRoot = createRootFront(this, packet);

    // Once the root actor has been communicated by the server,
    // emit a request to it to also push information down to the server.
    //
    // This request has been added in Firefox 133.
    try {
      await this.mainRoot.connect({
        frontendVersion: AppConstants.MOZ_APP_VERSION,
      });
    } catch (e) {
      // Ignore errors of unsupported packet as the server may not yet support this request.
      // The request may also fail to complete in tests when closing DevTools quickly after opening.
      if (!e.message.includes("unrecognizedPacketType")) {
        throw e;
      }
    }

    this.emit("connected", packet.applicationType, packet.traits);
  });
}
// Expose these to save callers the trouble of importing DebuggerSocket
DevToolsClient.socketConnect = function (options) {
  // Defined here instead of just copying the function to allow lazy-load
  return DebuggerSocket.connect(options);
};
DevToolsUtils.defineLazyGetter(DevToolsClient, "Authenticators", () => {
  return Authentication.Authenticators;
});
DevToolsUtils.defineLazyGetter(DevToolsClient, "AuthenticationResult", () => {
  return Authentication.AuthenticationResult;
});

DevToolsClient.prototype = {
  /**
   * Connect to the server and start exchanging protocol messages.
   *
   * @return Promise
   *         Resolves once connected with an array whose first element
   *         is the application type, by default "browser", and the second
   *         element is the traits object (helps figure out the features
   *         and behaviors of the server we connect to. See RootActor).
   */
  connect() {
    return new Promise(resolve => {
      this.once("connected", (applicationType, traits) => {
        this.traits = traits;
        resolve([applicationType, traits]);
      });

      this._transport.ready();
    });
  },
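
  // Usage sketch (not part of the upstream file): connecting a client to the
  // current runtime over a local pipe, assuming an async context. DevToolsServer
  // and connectPipe() come from "resource://devtools/server/devtools-server.js".
  //
  //   const { DevToolsServer } = require("resource://devtools/server/devtools-server.js");
  //   DevToolsServer.init();
  //   DevToolsServer.registerAllActors();
  //   const client = new DevToolsClient(DevToolsServer.connectPipe());
  //   const [applicationType, traits] = await client.connect();
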
  /**
   * Shut down communication with the debugging server.
   *
   * @return Promise
   *         Resolves after the underlying transport is closed.
   */
  close() {
    if (this._transportClosed) {
      return Promise.resolve();
    }
    if (this._closePromise) {
      return this._closePromise;
    }
    // Immediately set the destroy promise,
    // as the following code is fully synchronous and can be reentrant.
    this._closePromise = this.once("closed");

    // Disable detach event notifications, because event handlers will be in a
    // cleared scope by the time they run.
    this._eventsEnabled = false;

    if (this._transport) {
      this._transport.close();
      this._transport = null;
    }

    return this._closePromise;
  },
  /**
   * Send a request to the debugging server.
   *
   * @param packet object
   *        A JSON packet to send to the debugging server.
   *
   * @return Request
   *         This object emits a number of events to allow you to respond to
   *         different parts of the request lifecycle.
   *         It is also a Promise object, with a `then` method, that is resolved
   *         whenever a JSON or a Bulk response is received; and is rejected
   *         if the response is an error.
   *
   *         Events emitted:
   *         * json-reply: The server replied with a JSON packet, which is
   *           passed as event data.
   *         * bulk-reply: The server replied with bulk data, which you can read
   *           using the event data object containing:
   *           * actor:  Name of actor that received the packet
   *           * type:   Name of actor's method that was called on receipt
   *           * length: Size of the data to be read
   *           * stream: This input stream should only be used directly if you
   *                     can ensure that you will read exactly |length| bytes
   *                     and will not close the stream when reading is complete
   *           * done:   If you use the stream directly (instead of |copyTo|
   *                     below), you must signal completion by resolving /
   *                     rejecting this promise. If it's rejected, the
   *                     transport will be closed. If an Error is supplied as a
   *                     rejection value, it will be logged via |dumpn|. If you
   *                     do use |copyTo|, resolving is taken care of for you
   *                     when copying completes.
   *           * copyTo: A helper function for getting your data out of the
   *                     stream that meets the stream handling requirements
   *                     above, and has the following signature:
   *             @param  output nsIAsyncOutputStream
   *                     The stream to copy to.
   *             @return Promise
   *                     The promise is resolved when copying completes or
   *                     rejected if any (unexpected) errors occur.
   *                     This object also emits "progress" events for each chunk
   *                     that is copied. See stream-utils.js.
   */
  request(packet) {
    if (!this.mainRoot) {
      throw Error("Have not yet received a hello packet from the server.");
    }
    const type = packet.type || "";
    if (!packet.to) {
      throw Error("'" + type + "' request packet has no destination.");
    }

    if (this._transportClosed) {
      const msg =
        "'" + type + "' request packet to '" + packet.to +
        "' can't be sent as the connection is closed.";
      return Promise.reject({ error: "connectionClosed", message: msg });
    }

    const request = new Request(packet);
    request.format = "json";
    request.stack = getStack();

    // Implement a Promise like API on the returned object
    // that resolves/rejects on request response
    const promise = new Promise((resolve, reject) => {
      function listenerJson(resp) {
        removeRequestListeners();
        if (resp.error) {
          reject(resp);
        } else {
          resolve(resp);
        }
      }
      function listenerBulk(resp) {
        removeRequestListeners();
        resolve(resp);
      }
      const removeRequestListeners = () => {
        request.off("json-reply", listenerJson);
        request.off("bulk-reply", listenerBulk);
      };
      request.on("json-reply", listenerJson);
      request.on("bulk-reply", listenerBulk);
    });

    this._sendOrQueueRequest(request);
    request.then = promise.then.bind(promise);
    request.catch = promise.catch.bind(promise);

    return request;
  },
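
  // Usage sketch (not part of the upstream file): sending a raw packet and
  // awaiting the JSON reply. `targetActorID` and the "echo" type are
  // hypothetical placeholders for a real actor and method.
  //
  //   try {
  //     const reply = await client.request({ to: targetActorID, type: "echo" });
  //   } catch (e) {
  //     // Error packets reject the request promise.
  //   }
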
  /**
   * Transmit streaming data via a bulk request.
   *
   * This method initiates the bulk send process by queuing up the header data.
   * The caller receives eventual access to a stream for writing.
   *
   * This opens up more options for how the server might respond (it could send
   * back either JSON or bulk data), and the returned Request object emits
   * events for different stages of the request process that you may want to
   * react to.
   *
   * @param request Object
   *        This is modeled after the format of JSON packets above, but does not
   *        actually contain the data; it is instead just a routing header:
   *        * actor:  Name of actor that will receive the packet
   *        * type:   Name of actor's method that should be called on receipt
   *        * length: Size of the data to be sent
   *
   * @return Request
   *         This object emits a number of events to allow you to respond to
   *         different parts of the request lifecycle.
   *
   *         Events emitted:
   *         * bulk-send-ready: Ready to send bulk data to the server, using the
   *           event data object containing:
   *           * stream:   This output stream should only be used directly if
   *                       you can ensure that you will write exactly |length|
   *                       bytes and will not close the stream when writing is
   *                       complete
   *           * done:     If you use the stream directly (instead of |copyFrom|
   *                       below), you must signal completion by resolving /
   *                       rejecting this promise. If it's rejected, the
   *                       transport will be closed. If an Error is supplied as
   *                       a rejection value, it will be logged via |dumpn|. If
   *                       you do use |copyFrom|, resolving is taken care of for
   *                       you when copying completes.
   *           * copyFrom: A helper function for getting your data onto the
   *                       stream that meets the stream handling requirements
   *                       above, and has the following signature:
   *             @param  input nsIAsyncInputStream
   *                     The stream to copy from.
   *             @return Promise
   *                     The promise is resolved when copying completes or
   *                     rejected if any (unexpected) errors occur.
   *                     This object also emits "progress" events for each chunk
   *                     that is copied. See stream-utils.js.
   *         * json-reply: The server replied with a JSON packet, which is
   *           passed as event data.
   *         * bulk-reply: The server replied with bulk data, which you can read
   *           using the event data object containing:
   *           * actor:  Name of actor that received the packet
   *           * type:   Name of actor's method that was called on receipt
   *           * length: Size of the data to be read
   *           * stream: This input stream should only be used directly if you
   *                     can ensure that you will read exactly |length| bytes
   *                     and will not close the stream when reading is complete
   *           * done:   If you use the stream directly (instead of |copyTo|
   *                     below), you must signal completion by resolving /
   *                     rejecting this promise. If it's rejected, the
   *                     transport will be closed. If an Error is supplied as a
   *                     rejection value, it will be logged via |dumpn|. If you
   *                     do use |copyTo|, resolving is taken care of for you
   *                     when copying completes.
   *           * copyTo: A helper function for getting your data out of the
   *                     stream that meets the stream handling requirements
   *                     above, and has the following signature:
   *             @param  output nsIAsyncOutputStream
   *                     The stream to copy to.
   *             @return Promise
   *                     The promise is resolved when copying completes or
   *                     rejected if any (unexpected) errors occur.
   *                     This object also emits "progress" events for each chunk
   *                     that is copied. See stream-utils.js.
   */
  startBulkRequest(request) {
    if (!this.mainRoot) {
      throw Error("Have not yet received a hello packet from the server.");
    }
    if (!request.type) {
      throw Error("Bulk packet is missing the required 'type' field.");
    }
    if (!request.actor) {
      throw Error("'" + request.type + "' bulk packet has no destination.");
    }
    if (!request.length) {
      throw Error("'" + request.type + "' bulk packet has no length.");
    }

    request = new Request(request);
    request.format = "bulk";

    this._sendOrQueueRequest(request);

    return request;
  },
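
  // Usage sketch (not part of the upstream file): streaming bytes to a
  // hypothetical "uploadData" method of an actor. `targetActorID`, `byteLength`
  // and `inputStream` are placeholders, not real API.
  //
  //   const bulkRequest = client.startBulkRequest({
  //     actor: targetActorID,
  //     type: "uploadData",
  //     length: byteLength,
  //   });
  //   bulkRequest.on("bulk-send-ready", ({ copyFrom }) => copyFrom(inputStream));
  //   bulkRequest.on("json-reply", reply => console.log("server replied", reply));
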
  /**
   * If a new request can be sent immediately, do so. Otherwise, queue it.
   */
  _sendOrQueueRequest(request) {
    const actor = request.actor;
    if (!this._activeRequests.has(actor)) {
      this._sendRequest(request);
    } else {
      this._queueRequest(request);
    }
  },
  /**
   * Send a request.
   *
   * @throws Error if there is already an active request in flight for the same
   *         actor.
   */
  _sendRequest(request) {
    const actor = request.actor;
    this.expectReply(actor, request);

    if (request.format === "json") {
      this._transport.send(request.request);
      return;
    }

    this._transport.startBulkSend(request.request).then((...args) => {
      request.emit("bulk-send-ready", ...args);
    });
  },
  /**
   * Queue a request to be sent later. Queues are only drained when an in
   * flight request to a given actor completes.
   */
  _queueRequest(request) {
    const actor = request.actor;
    const queue = this._pendingRequests.get(actor) || [];
    queue.push(request);
    this._pendingRequests.set(actor, queue);
  },
  /**
   * Attempt the next request to a given actor (if any).
   */
  _attemptNextRequest(actor) {
    if (this._activeRequests.has(actor)) {
      return;
    }
    const queue = this._pendingRequests.get(actor);
    if (!queue) {
      return;
    }
    const request = queue.shift();
    if (queue.length === 0) {
      this._pendingRequests.delete(actor);
    }
    this._sendRequest(request);
  },
  /**
   * Arrange to hand the next reply from |actor| to the handler bound to
   * |request|.
   *
   * DevToolsClient.prototype.request / startBulkRequest usually takes care of
   * establishing the handler for a given request, but in rare cases (well,
   * greetings from new root actors are the only case at the moment) we must be
   * prepared for a "reply" that doesn't correspond to any request we sent.
   */
  expectReply(actor, request) {
    if (this._activeRequests.has(actor)) {
      throw Error("clashing handlers for next reply from " + actor);
    }

    // If a handler is passed directly (as it is with the handler for the root
    // actor greeting), create a dummy request to bind this to.
    if (typeof request === "function") {
      const handler = request;
      request = new Request();
      request.on("json-reply", handler);
    }

    this._activeRequests.set(actor, request);
  },
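
  // Usage sketch (not part of the upstream file): the constructor relies on the
  // function-handler form to catch the root actor greeting before any Front or
  // Request exists:
  //
  //   this.expectReply("root", packet => { /* handle the greeting */ });
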
  /**
   * Called by DebuggerTransport to dispatch incoming packets as appropriate.
   *
   * @param packet object
   *        The incoming packet.
   */
  onPacket(packet) {
    if (!packet.from) {
      DevToolsUtils.reportException(
        "onPacket",
        new Error(
          "Server did not specify an actor, dropping packet: " +
            JSON.stringify(packet)
        )
      );
      return;
    }

    // Check for "forwardingCancelled" here instead of using a front to handle it.
    // This is necessary because we might receive this event while the client is closing,
    // and the fronts have already been removed by that point.
    if (
      this.mainRoot &&
      packet.from == this.mainRoot.actorID &&
      packet.type == "forwardingCancelled"
    ) {
      this.purgeRequests(packet.prefix);
      return;
    }

    // If we have a registered Front for this actor, let it handle the packet
    // and skip all the rest of this unpleasantness.
    const front = this.getFrontByID(packet.from);
    if (front) {
      front.onPacket(packet);
      return;
    }

    let activeRequest;
    // See if we have a handler function waiting for a reply from this
    // actor. (Don't count unsolicited notifications or pauses as
    // replies.)
    if (
      this._activeRequests.has(packet.from) &&
      !(packet.type in UnsolicitedNotifications)
    ) {
      activeRequest = this._activeRequests.get(packet.from);
      this._activeRequests.delete(packet.from);
    }

    // If there is a subsequent request for the same actor, hand it off to the
    // transport. Delivery of packets on the other end is always async, even
    // in the local transport case.
    this._attemptNextRequest(packet.from);

    // Only try to notify listeners on events, not responses to requests
    // that lack a packet type.
    if (packet.type) {
      this.emit(packet.type, packet);
    }

    if (activeRequest) {
      const emitReply = () => activeRequest.emit("json-reply", packet);
      if (activeRequest.stack) {
        callFunctionWithAsyncStack(
          emitReply,
          activeRequest.stack,
          "DevTools RDP"
        );
      } else {
        emitReply();
      }
    }
  },
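
  // Usage sketch (not part of the upstream file): packets that are not claimed
  // by a front or by an active request are re-emitted on the client under their
  // packet type, so callers can observe them directly. "someEventType" is a
  // hypothetical placeholder.
  //
  //   client.on("someEventType", packet => { /* react to the notification */ });
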
  /**
   * Called by the DebuggerTransport to dispatch incoming bulk packets as
   * appropriate.
   *
   * @param packet object
   *        The incoming packet, which contains:
   *        * actor:  Name of actor that will receive the packet
   *        * type:   Name of actor's method that should be called on receipt
   *        * length: Size of the data to be read
   *        * stream: This input stream should only be used directly if you can
   *                  ensure that you will read exactly |length| bytes and will
   *                  not close the stream when reading is complete
   *        * done:   If you use the stream directly (instead of |copyTo|
   *                  below), you must signal completion by resolving /
   *                  rejecting this promise. If it's rejected, the transport
   *                  will be closed. If an Error is supplied as a rejection
   *                  value, it will be logged via |dumpn|. If you do use
   *                  |copyTo|, resolving is taken care of for you when copying
   *                  completes.
   *        * copyTo: A helper function for getting your data out of the stream
   *                  that meets the stream handling requirements above, and has
   *                  the following signature:
   *          @param  output nsIAsyncOutputStream
   *                  The stream to copy to.
   *          @return Promise
   *                  The promise is resolved when copying completes or rejected
   *                  if any (unexpected) errors occur.
   *                  This object also emits "progress" events for each chunk
   *                  that is copied. See stream-utils.js.
   */
  onBulkPacket(packet) {
    const { actor } = packet;

    if (!actor) {
      DevToolsUtils.reportException(
        "onBulkPacket",
        new Error(
          "Server did not specify an actor, dropping bulk packet: " +
            JSON.stringify(packet)
        )
      );
      return;
    }

    // See if we have a handler function waiting for a reply from this
    // actor.
    if (!this._activeRequests.has(actor)) {
      return;
    }

    const activeRequest = this._activeRequests.get(actor);
    this._activeRequests.delete(actor);

    // If there is a subsequent request for the same actor, hand it off to the
    // transport. Delivery of packets on the other end is always async, even
    // in the local transport case.
    this._attemptNextRequest(actor);

    activeRequest.emit("bulk-reply", packet);
  },
  /**
   * Called by DebuggerTransport when the underlying stream is closed.
   *
   * @param status nsresult
   *        The status code that corresponds to the reason for closing
   *        the stream.
   */
  onTransportClosed() {
    if (this._transportClosed) {
      return;
    }
    this._transportClosed = true;

    this.emit("closed");

    this.purgeRequests();

    // The |_pools| array on the client-side currently is used only by
    // protocol.js to store active fronts, mirroring the actor pools found in
    // the server. So, read all usages of "pool" as "protocol.js front".
    //
    // In the normal case where we shut down cleanly, the toolbox tells each tool
    // to close, and they each call |destroy| on any fronts they were using.
    // When |destroy| is called on a protocol.js front, it also
    // removes itself from the |_pools| array. Once the toolbox has shut down,
    // the connection is closed, and we reach here. All fronts (should have
    // been) |destroy|ed, so |_pools| should be empty.
    //
    // If the connection instead aborts unexpectedly, we may end up here with
    // all fronts used during the life of the connection. So, we call |destroy|
    // on them to clear their state, reject pending requests, and remove them
    // from |_pools|. This saves the toolbox from hanging indefinitely, in case
    // it waits for some server response before shutdown that will now never
    // arrive.
    for (const pool of this._pools) {
      pool.destroy();
    }
  },
  /**
   * Purge pending and active requests in this client.
   *
   * @param prefix string (optional)
   *        If a prefix is given, only requests for actor IDs that start with the prefix
   *        will be cleaned up. This is useful when forwarding of a portion of requests
   *        is cancelled on the server.
   */
  purgeRequests(prefix = "") {
    const reject = function (type, request) {
      // Server can send packets on its own and client only passes a callback
      // to expectReply, so that there is no request object.
      let msg;
      if (request.request) {
        msg =
          "'" + request.request.type + "' " + type + " request packet to '" +
          request.actor +
          "' can't be sent as the connection just closed.";
      } else {
        msg =
          "server side packet can't be received as the connection just closed.";
      }
      const packet = { error: "connectionClosed", message: msg };
      request.emit("json-reply", packet);
    };

    let pendingRequestsToReject = [];
    this._pendingRequests.forEach((requests, actor) => {
      if (!actor.startsWith(prefix)) {
        return;
      }
      this._pendingRequests.delete(actor);
      pendingRequestsToReject = pendingRequestsToReject.concat(requests);
    });
    pendingRequestsToReject.forEach(request => reject("pending", request));

    let activeRequestsToReject = [];
    this._activeRequests.forEach((request, actor) => {
      if (!actor.startsWith(prefix)) {
        return;
      }
      this._activeRequests.delete(actor);
      activeRequestsToReject = activeRequestsToReject.concat(request);
    });
    activeRequestsToReject.forEach(request => reject("active", request));

    // Also purge protocol.js requests
    const fronts = this.getAllFronts();

    for (const front of fronts) {
      if (!front.isDestroyed() && front.actorID.startsWith(prefix)) {
        // Call Front.baseFrontClassDestroy instead of Front.destroy in order to flush requests
        // and nullify front.actorID immediately, even if Front.destroy is overloaded
        // by an async function which would otherwise be able to try emitting new requests.
        front.baseFrontClassDestroy();
      }
    }
  },
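
  // Usage sketch (not part of the upstream file): onPacket calls this with the
  // prefix carried by a "forwardingCancelled" packet, so that only requests for
  // actors behind the cancelled forwarding prefix are rejected:
  //
  //   this.purgeRequests(packet.prefix);
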
  /**
   * Search for all requests in process for this client, including those made via
   * protocol.js, and wait for all of them to complete. Since the requests seen when
   * this is first called may in turn trigger more requests, we keep recursing through
   * this function until there is no more activity.
   *
   * This is a fairly heavyweight process, so it's only meant to be used in tests.
   *
   * @param {object=} options
   * @param {boolean=} options.ignoreOrphanedFronts
   *        Allow ignoring fronts which can no longer be retrieved via
   *        getFrontByID, as their requests can never be completed now.
   *        Ideally we should rather investigate and address those cases, but
   *        since this is a test helper, allow bypassing them here. Defaults to
   *        false.
   * @return Promise
   *         Resolved when all requests have settled.
   */
  waitForRequestsToSettle({ ignoreOrphanedFronts = false } = {}) {
    let requests = [];

    // Gather all pending and active requests in this client
    // The request object supports a Promise API for completion (it has .then())
    this._pendingRequests.forEach(requestsForActor => {
      // Each value is an array of pending requests
      requests = requests.concat(requestsForActor);
    });
    this._activeRequests.forEach(requestForActor => {
      // Each value is a single active request
      requests = requests.concat(requestForActor);
    });

    const fronts = this.getAllFronts();

    // For each front, wait for its requests to settle
    for (const front of fronts) {
      if (front.hasRequests()) {
        if (ignoreOrphanedFronts && !this.getFrontByID(front.actorID)) {
          // If a front was stuck during its destroy but the pool managing it
          // has been already removed, ignore its pending requests, they can
          // never be completed.
          continue;
        }
        requests.push(front.waitForRequestsToSettle());
      }
    }

    // Abort early if there are no requests
    if (!requests.length) {
      return Promise.resolve();
    }

    return DevToolsUtils.settleAll(requests)
      .catch(() => {
        // One of the requests might have failed, but ignore that situation here and pipe
        // both success and failure through the same path. The important part is just that
        // we waited.
      })
      .then(() => {
        // Repeat, more requests may have started in response to those we just waited for
        return this.waitForRequestsToSettle({ ignoreOrphanedFronts });
      });
  },
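
  // Usage sketch (not part of the upstream file): tests typically await this
  // before closing the client so no request is left pending:
  //
  //   await client.waitForRequestsToSettle();
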
  getAllFronts() {
    // Use a Set because some fronts (like domwalker) seem to have multiple parents.
    const fronts = new Set();
    const poolsToVisit = [...this._pools];

    // With protocol.js, each front can potentially have its own pools containing child
    // fronts, forming a tree. Descend through all the pools to locate all child fronts.
    while (poolsToVisit.length) {
      const pool = poolsToVisit.shift();
      // `_pools` contains either Fronts or Pools, we only want to collect Fronts here.
      // Front inherits from Pool which exposes `poolChildren`.
      if (pool instanceof Front) {
        fronts.add(pool);
      }
      for (const child of pool.poolChildren()) {
        poolsToVisit.push(child);
      }
    }

    return fronts;
  },
  /**
   * Actor lifetime management, echoes the server's actor pools.
   */
  __pools: null,
  get _pools() {
    if (!this.__pools) {
      this.__pools = new Set();
    }
    return this.__pools;
  },

  addActorPool(pool) {
    this._pools.add(pool);
  },

  removeActorPool(pool) {
    this._pools.delete(pool);
  },

  /**
   * Return the Front for the Actor whose ID is the one passed in argument.
   *
   * @param {String} actorID: The actor ID to look for.
   */
  getFrontByID(actorID) {
    const pool = this.poolFor(actorID);
    return pool ? pool.getActorByID(actorID) : null;
  },

  poolFor(actorID) {
    for (const pool of this._pools) {
      if (pool.has(actorID)) {
        return pool;
      }
    }
    return null;
  },
  /**
   * Creates an object front for this DevToolsClient and the grip passed as parameter.
   *
   * @param {Object} grip: The grip to create the ObjectFront for.
   * @param {ThreadFront} threadFront
   * @param {Front} parentFront: Optional front that will manage the object front.
   *                             Defaults to threadFront.
   * @returns {ObjectFront}
   */
  createObjectFront(grip, threadFront, parentFront) {
    if (!parentFront) {
      parentFront = threadFront;
    }

    return new ObjectFront(this, threadFront.targetFront, parentFront, grip);
  },
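
  // Usage sketch (not part of the upstream file): `grip` stands for an object
  // grip received from the server and `threadFront` for the thread front that
  // produced it; both are placeholders here.
  //
  //   const objectFront = client.createObjectFront(grip, threadFront);
  //   const { ownProperties } = await objectFront.getPrototypeAndProperties();
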
  get transport() {
    return this._transport;
  },

  /**
   * Boolean flag to help identify a client connected to the current runtime
   * via a LocalDevToolsTransport pipe.
   */
  get isLocalClient() {
    return !!this._transport.isLocalTransport;
  },
  /**
   * Debugging helper: log each pool registered on this client along with the
   * actor IDs it currently holds.
   */
  dumpPools() {
    for (const pool of this._pools) {
      console.log(`%c${pool.actorID}`, "font-weight: bold;", [
        ...pool.__poolMap.keys(),
      ]);
    }
  },
};

EventEmitter.decorate(DevToolsClient.prototype);

class Request extends EventEmitter {
  constructor(request) {
    super();
    this.request = request;
  }

  get actor() {
    return this.request.to || this.request.actor;
  }
}