@@ -20,7 +20,7 @@ use std::io::Read;
 use std::ops::Range;
 use std::os::raw::c_ulonglong;
 use std::ptr::{self, NonNull};
-use ureq::{Agent, Request, Response};
+use ureq::Agent;
 
 use crate::proj::{ProjError, _string};
 use libc::c_char;
@@ -63,10 +63,10 @@ impl Drop for HandleData {
     }
 }
 
-/// Return an exponential wait time based on the number of retries
+/// Return a quadratically-increasing wait time based on the number of retries
 ///
 /// Example: a value of 8 allows up to 6400 ms of retry delay, for a cumulative total of 25500 ms
-fn get_wait_time_exp(retrycount: i32) -> u64 {
+fn get_wait_time_quad(retrycount: i32) -> u64 {
     if retrycount == 0 {
         return 0;
     }
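The body of the renamed backoff helper falls outside this hunk. Purely as an illustration of what a quadratic backoff matching the doc comment could look like (the 100 ms base unit is an assumption, not taken from the diff):

// Illustrative sketch only, not the actual elided body: quadratic backoff
// with an assumed 100 ms base unit, so retrycount = 8 gives 8 * 8 * 100 = 6400 ms.
fn get_wait_time_quad_sketch(retrycount: i32) -> u64 {
    if retrycount == 0 {
        return 0;
    }
    (retrycount as u64).pow(2) * 100
}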
@@ -75,34 +75,46 @@ fn get_wait_time_exp(retrycount: i32) -> u64 {
 
 /// Process CDN response: handle retries in case of server error, or early return for client errors
 /// Successful retry data is stored into res
-fn error_handler(res: &mut Response, rb: Request) -> Result<&Response, ProjError> {
+fn error_handler<'a>(
+    res: &'a mut http::Response<ureq::Body>,
+    url: &str,
+    headers: &[(&str, &str)],
+    clt: Agent,
+) -> Result<&'a mut http::Response<ureq::Body>, ProjError> {
     let mut retries = 0;
     // Check whether something went wrong on the server, or if it's an S3 retry code
-    if SERVER_ERROR_CODES.contains(&res.status()) || RETRY_CODES.contains(&res.status()) {
+    if SERVER_ERROR_CODES.contains(&res.status().as_u16())
+        || RETRY_CODES.contains(&res.status().as_u16())
+    {
         // Start retrying: up to MAX_RETRIES
-        while (SERVER_ERROR_CODES.contains(&res.status()) || RETRY_CODES.contains(&res.status()))
+        while (SERVER_ERROR_CODES.contains(&res.status().as_u16())
+            || RETRY_CODES.contains(&res.status().as_u16()))
             && retries <= MAX_RETRIES
         {
             retries += 1;
-            let wait = time::Duration::from_millis(get_wait_time_exp(retries as i32));
+            let wait = time::Duration::from_millis(get_wait_time_quad(retries as i32));
             thread::sleep(wait);
-            let retry = rb.clone();
-            *res = retry.call()?;
+            let mut req = clt.get(url);
+            // Apply all headers
+            for (name, value) in headers {
+                req = req.header(*name, *value);
+            }
+            *res = req.call()?;
         }
     // Not a timeout or known S3 retry code: bail out
-    } else if CLIENT_ERROR_CODES.contains(&res.status()) {
+    } else if CLIENT_ERROR_CODES.contains(&res.status().as_u16()) {
         return Err(ProjError::DownloadError(
-            res.status_text().to_string(),
-            res.get_url().to_string(),
+            res.status().to_string(),
+            url.to_string(),
             retries,
         ));
     }
     // Retries have been exhausted OR
     // The loop ended prematurely due to a different error
-    if !SUCCESS_ERROR_CODES.contains(&res.status()) {
+    if !SUCCESS_ERROR_CODES.contains(&res.status().as_u16()) {
         return Err(ProjError::DownloadError(
-            res.status_text().to_string(),
-            res.get_url().to_string(),
+            res.status().to_string(),
+            url.to_string(),
             retries,
         ));
     }
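With ureq 3 the response type is http::Response<ureq::Body>, so status() yields an http::StatusCode rather than a bare u16, hence the .as_u16() conversions before the lookups in the numeric code arrays. A minimal sketch of that pattern, assuming the http crate (re-exported by ureq) and placeholder codes rather than the crate's real constants:

// Sketch: matching an http::StatusCode against plain u16 code lists.
// RETRYABLE is a placeholder, not the crate's SERVER_ERROR_CODES/RETRY_CODES.
use http::StatusCode;

fn is_retryable(status: StatusCode) -> bool {
    const RETRYABLE: [u16; 3] = [500, 502, 503];
    RETRYABLE.contains(&status.as_u16())
}

fn main() {
    let status = StatusCode::BAD_GATEWAY;
    assert!(is_retryable(status));
    // Display includes the canonical reason phrase, e.g. "502 Bad Gateway",
    // which is what res.status().to_string() feeds into DownloadError above.
    println!("{status}");
}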
@@ -148,7 +160,7 @@ pub(crate) unsafe extern "C" fn network_open(
             let err_string = e.to_string();
             out_error_string.copy_from_nonoverlapping(err_string.as_ptr().cast(), err_string.len());
             out_error_string.add(err_string.len()).write(0);
-            ptr::null_mut() as *mut PROJ_NETWORK_HANDLE
+            ptr::null_mut()
         }
     }
 }
@@ -172,44 +184,62 @@ unsafe fn _network_open(
     // RANGE header definition is "bytes=x-y"
     let hvalue = format!("bytes={offset}-{end}");
     // Create a new client that can be reused for subsequent queries
-    let clt = Agent::new();
-    let req = clt.get(&url);
-    let with_headers = req.set("Range", &hvalue).set("Client", CLIENT);
-    let in_case_of_error = with_headers.clone();
-    let mut res = with_headers.call()?;
+    let clt = Agent::new_with_defaults();
+    let req = clt
+        .get(&url)
+        .header("Range", &hvalue)
+        .header("Client", CLIENT);
+
+    let mut res = req.call()?;
+
+    // Define headers for potential retries
+    let headers = [("Range", hvalue.as_str()), ("Client", CLIENT)];
+
     // hand the response off to the error-handler, continue on success
-    error_handler(&mut res, in_case_of_error)?;
+    error_handler(&mut res, &url, &headers, clt.clone())?;
+
     // Write the initial read length value into the pointer
-    let Some(Ok(contentlength)) = res.header("Content-Length").map(str::parse::<usize>) else {
-        return Err(ProjError::ContentLength);
-    };
+    let contentlength = res
+        .headers()
+        .get("Content-Length")
+        .and_then(|val| val.to_str().ok())
+        .and_then(|s| s.parse::<usize>().ok())
+        .ok_or(ProjError::ContentLength)?;
+
     let headers = res
-        .headers_names()
-        .into_iter()
-        .filter_map(|h| {
-            Some({
-                let v = res.header(&h)?.to_string();
-                (h, v)
-            })
+        .headers()
+        .iter()
+        .filter_map(|(h, v)| {
+            let header_name = h.to_string();
+            let header_value = v.to_str().ok()?.to_string();
+            Some((header_name, header_value))
         })
         .collect();
+
     // Copy the downloaded bytes into the buffer so it can be passed around
     let capacity = contentlength.min(size_to_read);
-    let mut buf = Vec::with_capacity(capacity);
-    res.into_reader()
+    let mut buf = Vec::<u8>::with_capacity(capacity);
+
+    // Read from body into our buffer
+    let body_reader = res.body_mut().as_reader();
+    body_reader
         .take(size_to_read as u64)
         .read_to_end(&mut buf)?;
+
     out_size_read.write(buf.len());
     buf.as_ptr().copy_to_nonoverlapping(buffer.cast(), capacity);
+
     let hd = HandleData::new(url, headers, None);
     // heap-allocate the struct and cast it to a void pointer so it can be passed around to PROJ
     let hd_boxed = Box::new(hd);
     let void: *mut c_void = Box::into_raw(hd_boxed).cast::<libc::c_void>();
     let opaque: *mut PROJ_NETWORK_HANDLE = void.cast::<proj_sys::PROJ_NETWORK_HANDLE>();
+
     // If everything's OK, set the error string to empty
     let err_string = "";
     out_error_string.copy_from_nonoverlapping(err_string.as_ptr().cast(), err_string.len());
     out_error_string.add(err_string.len()).write(0);
+
     Ok(opaque)
 }
 
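Response headers now come from an http::HeaderMap, whose values are HeaderValue and must pass through to_str() before they can be parsed, which is why the Content-Length extraction above is an and_then chain. A self-contained sketch of the same chain, with a fabricated header map standing in for the real response:

// Sketch: extracting and parsing Content-Length from an http::HeaderMap,
// mirroring the and_then chain above. The map contents are made up.
use http::{HeaderMap, HeaderValue};

fn content_length(headers: &HeaderMap) -> Option<usize> {
    headers
        .get("Content-Length")
        .and_then(|val| val.to_str().ok())
        .and_then(|s| s.parse::<usize>().ok())
}

fn main() {
    let mut headers = HeaderMap::new();
    headers.insert("Content-Length", HeaderValue::from_static("1024"));
    assert_eq!(content_length(&headers), Some(1024));
}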
@@ -334,41 +364,58 @@ fn _network_read_range(
     let end = offset as usize + size_to_read - 1;
     let hvalue = format!("bytes={offset}-{end}");
     let hd = unsafe { &mut *(handle as *const c_void as *mut HandleData) };
-    let clt = Agent::new();
-    let initial = clt.get(&hd.url);
-    let in_case_of_error = initial.clone().set("Range", &hvalue).set("Client", CLIENT);
-    let req = in_case_of_error.clone();
+    let clt = Agent::new_with_defaults();
+    let req = clt
+        .get(&hd.url)
+        .header("Range", &hvalue)
+        .header("Client", CLIENT);
+
     let mut res = req.call()?;
-    // hand the response and retry instance off to the error-handler, continue on success
-    error_handler(&mut res, in_case_of_error)?;
+
+    // Define headers for potential retries
+    let headers = [("Range", hvalue.as_str()), ("Client", CLIENT)];
+
+    // hand the response off to the error-handler, continue on success
+    error_handler(&mut res, &hd.url, &headers, clt.clone())?;
+
     let headers = res
-        .headers_names()
-        .into_iter()
-        .filter_map(|h| {
-            Some({
-                let v = res.header(&h)?.to_string();
-                (h, v)
-            })
+        .headers()
+        .iter()
+        .filter_map(|(h, v)| {
+            let header_name = h.to_string();
+            let header_value = v.to_str().ok()?.to_string();
+            Some((header_name, header_value))
        })
         .collect();
-    let Some(Ok(contentlength)) = res.header("Content-Length").map(str::parse::<usize>) else {
-        return Err(ProjError::ContentLength);
-    };
+
+    let contentlength = res
+        .headers()
+        .get("Content-Length")
+        .and_then(|val| val.to_str().ok())
+        .and_then(|s| s.parse::<usize>().ok())
+        .ok_or(ProjError::ContentLength)?;
+
     // Copy the downloaded bytes into the buffer so it can be passed around
     let capacity = contentlength.min(size_to_read);
-    let mut buf = Vec::with_capacity(capacity);
-    res.into_reader()
+    let mut buf = Vec::<u8>::with_capacity(capacity);
+
+    // Read from body into our buffer
+    let body_reader = res.body_mut().as_reader();
+    body_reader
         .take(size_to_read as u64)
         .read_to_end(&mut buf)?;
+
     unsafe {
         buf.as_ptr()
             .copy_to_nonoverlapping(buffer.cast::<u8>(), capacity);
     }
+
     let err_string = "";
     unsafe {
         out_error_string.copy_from_nonoverlapping(err_string.as_ptr().cast(), err_string.len());
         out_error_string.add(err_string.len()).write(0);
     }
+
     hd.headers = headers;
     Ok(buf.len())
 }
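Both download paths cap the body read with Read::take before read_to_end, so at most size_to_read bytes are pulled into the buffer even if the server sends more. A std-only sketch of that capping behaviour, with an in-memory cursor standing in for res.body_mut().as_reader():

// Sketch: capping a read at size_to_read bytes with Read::take, as both
// functions above do; a Cursor stands in for the ureq body reader.
use std::io::{Cursor, Read};

fn main() -> std::io::Result<()> {
    let source = Cursor::new(vec![0u8; 4096]);
    let size_to_read = 1024usize;

    let mut buf = Vec::<u8>::with_capacity(size_to_read);
    source.take(size_to_read as u64).read_to_end(&mut buf)?;

    // No more than the requested number of bytes ends up in the buffer
    assert_eq!(buf.len(), size_to_read);
    Ok(())
}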