Try to mitigate DNS failures when downloading stuff asynchronously

* url-queue.el (url-queue-setup-runners): New function that uses
`run-with-idle-timer' for extra asynchronicity.
(url-queue-remove-jobs-from-host): New function.
(url-queue-callback-function): Remove jobs from the same host if
connection failed.

parent e7bc51d012
commit b6ea20f39c
2 changed files with 51 additions and 2 deletions
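
For context, a minimal usage sketch of the queue this commit touches
(the URL and the callback body are illustrative, not part of the change):

(require 'url-queue)

;; Queue an asynchronous fetch.  CALLBACK is applied to the status
;; plist (followed by CBARGS, here nil) in the retrieval buffer.
(url-queue-retrieve
 "http://example.com/feed.xml"
 (lambda (status)
   (if (plist-get status :error)
       (message "fetch failed: %S" (plist-get status :error))
     (message "fetched %d bytes" (buffer-size))))
 nil t)  ; CBARGS nil, SILENT t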

ChangeLog:

@@ -1,3 +1,11 @@
+2012-02-06  Lars Ingebrigtsen  <larsi@gnus.org>
+
+	* url-queue.el (url-queue-setup-runners): New function that uses
+	`run-with-idle-timer' for extra asynchronicity.
+	(url-queue-remove-jobs-from-host): New function.
+	(url-queue-callback-function): Remove jobs from the same host if
+	connection failed.
+
 2012-01-12  Glenn Morris  <rgm@gnu.org>
 
 	* url-auth.el (url-basic-auth, url-digest-auth):

url-queue.el:

@@ -30,6 +30,7 @@
 (eval-when-compile (require 'cl))
 (require 'browse-url)
+(require 'url-parse)
 
 (defcustom url-queue-parallel-processes 6
   "The number of concurrent processes."
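
The defcustom above caps the number of simultaneous fetches; a
hypothetical way to throttle it (the value 2 is illustrative):

(setq url-queue-parallel-processes 2)  ; at most two concurrent fetches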

@@ -49,7 +50,7 @@
 (defstruct url-queue
   url callback cbargs silentp
-  buffer start-time)
+  buffer start-time pre-triggered)
 
 ;;;###autoload
 (defun url-queue-retrieve (url callback &optional cbargs silent)
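
The new `pre-triggered' slot works because `defstruct' (from cl)
generates a reader and a `setf'-able place for each slot; a sketch,
with an illustrative URL:

(require 'url-queue)

(let ((job (make-url-queue :url "http://example.com/")))
  (setf (url-queue-pre-triggered job) t)  ; mark as claimed by a runner
  (url-queue-pre-triggered job))          ; => t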

@@ -63,7 +64,30 @@ controls the level of parallelism via the
 			:callback callback
 			:cbargs cbargs
 			:silentp silent))))
-  (url-queue-run-queue))
+  (url-queue-setup-runners))
+
+;; To ensure asynch behaviour, we start the required number of queue
+;; runners from `run-with-idle-timer'.  So we're basically going
+;; through the queue in two ways: 1) synchronously when a program
+;; calls `url-queue-retrieve' (which will then start the required
+;; number of queue runners), and 2) at the exit of each job, which
+;; will then not start any further threads, but just reuse the
+;; previous "slot".
+
+(defun url-queue-setup-runners ()
+  (let ((running 0)
+	waiting)
+    (dolist (entry url-queue)
+      (cond
+       ((or (url-queue-start-time entry)
+	    (url-queue-pre-triggered entry))
+	(incf running))
+       ((not waiting)
+	(setq waiting entry))))
+    (when (and waiting
+	       (< running url-queue-parallel-processes))
+      (setf (url-queue-pre-triggered waiting) t)
+      (run-with-idle-timer 0.01 nil 'url-queue-run-queue))))
 
 (defun url-queue-run-queue ()
   (url-queue-prune-old-entries)
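
The idle timer above schedules a single one-shot queue run as soon as
Emacs has been idle for 0.01 seconds; the same pattern in isolation
(the lambda body is illustrative):

;; REPEAT is nil, so the timer fires once and is then discarded.  The
;; pre-triggered flag keeps the waiting job from being handed to a
;; second runner before this timer fires.
(run-with-idle-timer 0.01 nil
                     (lambda () (message "queue runner woke up")))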

@@ -81,10 +105,27 @@ controls the level of parallelism via the
       (url-queue-start-retrieve waiting))))
 
 (defun url-queue-callback-function (status job)
+  (when (and (eq (car status) :error)
+	     (eq (cadr (cadr status)) 'connection-failed))
+    ;; If we get a connection error, then flush all other jobs from
+    ;; the host from the queue.  This particularly makes sense if the
+    ;; error really is a DNS resolver issue, which happens
+    ;; synchronously and totally halts Emacs.
+    (url-queue-remove-jobs-from-host
+     (plist-get (nthcdr 3 (cadr status)) :host)))
   (setq url-queue (delq job url-queue))
   (url-queue-run-queue)
   (apply (url-queue-callback job) (cons status (url-queue-cbargs job))))
 
+(defun url-queue-remove-jobs-from-host (host)
+  (let ((jobs nil))
+    (dolist (job url-queue)
+      (when (equal (url-host (url-generic-parse-url (url-queue-url job)))
+		   host)
+	(push job jobs)))
+    (dolist (job jobs)
+      (setq url-queue (delq job url-queue)))))
+
 (defun url-queue-start-retrieve (job)
   (setf (url-queue-buffer job)
 	(ignore-errors
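
The host extraction above assumes STATUS has the shape below on a
failed connection (the concrete host and service are illustrative):

;; status                   => (:error (error connection-failed "..."
;;                                      :host "example.com" :service 80))
;; (cadr (cadr status))     => connection-failed
;; (nthcdr 3 (cadr status)) => (:host "example.com" :service 80)
(let ((status '(:error (error connection-failed "failed"
                        :host "example.com" :service 80))))
  (plist-get (nthcdr 3 (cadr status)) :host))  ; => "example.com"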