Merge Chromium src@r53293
Change-Id: Ia79acf8670f385cee48c45b0a75371d8e950af34
diff --git a/net/url_request/https_prober.cc b/net/url_request/https_prober.cc
new file mode 100644
index 0000000..a7163ba
--- /dev/null
+++ b/net/url_request/https_prober.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/https_prober.h"
+
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_context.h"
+
+namespace net {
+
+bool HTTPSProber::HaveProbed(const std::string& host) const {
+ return probed_.find(host) != probed_.end();
+}
+
+bool HTTPSProber::InFlight(const std::string& host) const {
+ return inflight_probes_.find(host) != inflight_probes_.end();
+}
+
+bool HTTPSProber::ProbeHost(const std::string& host, URLRequestContext* ctx,
+ HTTPSProberDelegate* delegate) {
+ if (HaveProbed(host) || InFlight(host)) {
+ return false;
+ }
+
+ inflight_probes_[host] = delegate;
+
+ GURL url("https://" + host);
+ DCHECK_EQ(url.host(), host);
+
+ URLRequest* req = new URLRequest(url, this);
+ req->set_context(ctx);
+ req->Start();
+ return true;
+}
+
+void HTTPSProber::Success(URLRequest* request) {
+ DoCallback(request, true);
+}
+
+void HTTPSProber::Failure(URLRequest* request) {
+ DoCallback(request, false);
+}
+
+void HTTPSProber::DoCallback(URLRequest* request, bool result) {
+ std::map<std::string, HTTPSProberDelegate*>::iterator i =
+ inflight_probes_.find(request->original_url().host());
+ DCHECK(i != inflight_probes_.end());
+
+ HTTPSProberDelegate* delegate = i->second;
+ inflight_probes_.erase(i);
+ probed_.insert(request->original_url().host());
+ delete request;
+ delegate->ProbeComplete(result);
+}
+
+void HTTPSProber::OnAuthRequired(URLRequest* request,
+ net::AuthChallengeInfo* auth_info) {
+ Success(request);
+}
+
+void HTTPSProber::OnSSLCertificateError(URLRequest* request,
+ int cert_error,
+ net::X509Certificate* cert) {
+ request->ContinueDespiteLastError();
+}
+
+void HTTPSProber::OnResponseStarted(URLRequest* request) {
+ if (request->status().status() == URLRequestStatus::SUCCESS) {
+ Success(request);
+ } else {
+ Failure(request);
+ }
+}
+
+void HTTPSProber::OnReadCompleted(URLRequest* request, int bytes_read) {
+ NOTREACHED();
+}
+
+} // namespace net
diff --git a/net/url_request/https_prober.h b/net/url_request/https_prober.h
new file mode 100644
index 0000000..c1c9941
--- /dev/null
+++ b/net/url_request/https_prober.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_HTTPS_PROBER_H_
+#define NET_URL_REQUEST_HTTPS_PROBER_H_
+
+#include <map>
+#include <set>
+#include <string>
+
+#include "base/singleton.h"
+#include "base/task.h"
+#include "net/url_request/url_request.h"
+
+class URLRequestContext;
+
+namespace net {
+
+// This should be scoped inside HTTPSProber, but VC cannot compile
+// HTTPSProber::Delegate when HTTPSProber also inherits from
+// URLRequest::Delegate.
+class HTTPSProberDelegate {
+ public:
+ virtual void ProbeComplete(bool result) = 0;
+ protected:
+ virtual ~HTTPSProberDelegate() {}
+};
+
+// HTTPSProber is a singleton object that manages HTTPS probes. An HTTPS probe
+// determines if we can connect to a given host over HTTPS. It's used when
+// transparently upgrading from HTTP to HTTPS (for example, for SPDY).
+class HTTPSProber : public URLRequest::Delegate {
+ public:
+ HTTPSProber() {}
+
+ // HaveProbed returns true if the given host is known to have been probed
+ // since the browser was last started.
+ bool HaveProbed(const std::string& host) const;
+
+ // InFlight returns true iff a probe for the given host is currently active.
+ bool InFlight(const std::string& host) const;
+
+ // ProbeHost starts a new probe for the given host. If the host is known to
+ // have been probed since the browser was started, false is returned and no
+ // other action is taken. If a probe to the given host is currently in flight,
+ // false will be returned, and no other action is taken. Otherwise, a new
+ // probe is started, true is returned and the Delegate will be called with the
+ // results (true means a successful handshake).
+ bool ProbeHost(const std::string& host, URLRequestContext* ctx,
+ HTTPSProberDelegate* delegate);
+
+ // Implementation of URLRequest::Delegate
+ void OnAuthRequired(URLRequest* request,
+ net::AuthChallengeInfo* auth_info);
+ void OnSSLCertificateError(URLRequest* request,
+ int cert_error,
+ net::X509Certificate* cert);
+ void OnResponseStarted(URLRequest* request);
+ void OnReadCompleted(URLRequest* request, int bytes_read);
+
+ private:
+ void Success(URLRequest* request);
+ void Failure(URLRequest* request);
+ void DoCallback(URLRequest* request, bool result);
+
+ std::map<std::string, HTTPSProberDelegate*> inflight_probes_;
+ std::set<std::string> probed_;
+
+ friend struct DefaultSingletonTraits<HTTPSProber>;
+ DISALLOW_COPY_AND_ASSIGN(HTTPSProber);
+};
+
+} // namespace net
+#endif // NET_URL_REQUEST_HTTPS_PROBER_H_
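A minimal sketch of the intended calling pattern for the new prober, using only the API declared above. The UpgradeObserver name is hypothetical, and Singleton<net::HTTPSProber>::get() is assumed from the DefaultSingletonTraits friendship:

  class UpgradeObserver : public net::HTTPSProberDelegate {
   public:
    // Invoked once the probe finishes; |result| is true on a successful
    // HTTPS handshake with the host.
    virtual void ProbeComplete(bool result) {
      // e.g. record that the host may be transparently upgraded.
    }
  };

  void MaybeProbe(const std::string& host, URLRequestContext* ctx,
                  UpgradeObserver* observer) {
    net::HTTPSProber* prober = Singleton<net::HTTPSProber>::get();
    if (prober->HaveProbed(host) || prober->InFlight(host))
      return;  // Already answered, or a probe is outstanding.
    // Kicks off an https://<host>/ request; |observer| must outlive the
    // probe, since HTTPSProber stores the raw pointer until completion.
    prober->ProbeHost(host, ctx, observer);
  }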
diff --git a/net/url_request/url_request.cc b/net/url_request/url_request.cc
index 7a09123..9f226c2 100644
--- a/net/url_request/url_request.cc
+++ b/net/url_request/url_request.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -9,8 +9,8 @@
#include "base/singleton.h"
#include "base/stats_counters.h"
#include "net/base/load_flags.h"
-#include "net/base/load_log.h"
#include "net/base/net_errors.h"
+#include "net/base/net_log.h"
#include "net/base/ssl_cert_request_info.h"
#include "net/base/upload_data.h"
#include "net/http/http_response_headers.h"
@@ -18,19 +18,33 @@
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_job.h"
#include "net/url_request/url_request_job_manager.h"
+#include "net/url_request/url_request_netlog_params.h"
using base::Time;
using net::UploadData;
using std::string;
using std::wstring;
-// Max number of http redirects to follow. Same number as gecko.
-static const int kMaxRedirects = 20;
+namespace {
-static URLRequestJobManager* GetJobManager() {
+// Max number of http redirects to follow. Same number as gecko.
+const int kMaxRedirects = 20;
+
+URLRequestJobManager* GetJobManager() {
return Singleton<URLRequestJobManager>::get();
}
+// Discard headers which have meaning in POST (Content-Length, Content-Type,
+// Origin).
+void StripPostSpecificHeaders(net::HttpRequestHeaders* headers) {
+ // These are headers that may be attached to a POST.
+ headers->RemoveHeader(net::HttpRequestHeaders::kContentLength);
+ headers->RemoveHeader(net::HttpRequestHeaders::kContentType);
+ headers->RemoveHeader(net::HttpRequestHeaders::kOrigin);
+}
+
+} // namespace
+
///////////////////////////////////////////////////////////////////////////////
// URLRequest
@@ -44,8 +58,7 @@
enable_profiling_(false),
redirect_limit_(kMaxRedirects),
final_upload_progress_(0),
- priority_(net::LOWEST),
- ALLOW_THIS_IN_INITIALIZER_LIST(request_tracker_node_(this)) {
+ priority_(net::LOWEST) {
SIMPLE_STATS_COUNTER("URLRequestCount");
// Sanity check our environment.
@@ -87,12 +100,16 @@
upload_->AppendBytes(bytes, bytes_len);
}
-void URLRequest::AppendFileRangeToUpload(const FilePath& file_path,
- uint64 offset, uint64 length) {
+void URLRequest::AppendFileRangeToUpload(
+ const FilePath& file_path,
+ uint64 offset,
+ uint64 length,
+ const base::Time& expected_modification_time) {
DCHECK(file_path.value().length() > 0 && length > 0);
if (!upload_)
upload_ = new UploadData();
- upload_->AppendFileRange(file_path, offset, length);
+ upload_->AppendFileRange(file_path, offset, length,
+ expected_modification_time);
}
void URLRequest::set_upload(net::UploadData* upload) {
@@ -121,17 +138,10 @@
NOTREACHED() << "implement me!";
}
-void URLRequest::SetExtraRequestHeaders(const string& headers) {
+void URLRequest::SetExtraRequestHeaders(
+ const net::HttpRequestHeaders& headers) {
DCHECK(!is_pending_);
- if (headers.empty()) {
- extra_request_headers_.clear();
- } else {
-#ifndef NDEBUG
- size_t crlf = headers.rfind("\r\n", headers.size() - 1);
- DCHECK(crlf != headers.size() - 2) << "headers must not end with CRLF";
-#endif
- extra_request_headers_ = headers + "\r\n";
- }
+ extra_request_headers_ = headers;
// NOTE: This method will likely become non-trivial once the other setters
// for request headers are implemented.
@@ -256,7 +266,10 @@
DCHECK(!is_pending_);
DCHECK(!job_);
- net::LoadLog::BeginEvent(load_log_, net::LoadLog::TYPE_URL_REQUEST_START);
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_URL_REQUEST_START_JOB,
+ new URLRequestStartEventParameters(
+ url_, method_, load_flags_, priority_));
job_ = job;
job_->SetExtraRequestHeaders(extra_request_headers_);
@@ -327,7 +340,7 @@
// about being called recursively.
}
-bool URLRequest::Read(net::IOBuffer* dest, int dest_size, int *bytes_read) {
+bool URLRequest::Read(net::IOBuffer* dest, int dest_size, int* bytes_read) {
DCHECK(job_);
DCHECK(bytes_read);
DCHECK(!job_->is_done());
@@ -347,6 +360,11 @@
return job_->Read(dest, dest_size, bytes_read);
}
+void URLRequest::StopCaching() {
+ DCHECK(job_);
+ job_->StopCaching();
+}
+
void URLRequest::ReceivedRedirect(const GURL& location, bool* defer_redirect) {
URLRequestJob* job = GetJobManager()->MaybeInterceptRedirect(this, location);
if (job) {
@@ -357,10 +375,10 @@
}
void URLRequest::ResponseStarted() {
+ scoped_refptr<net::NetLog::EventParameters> params;
if (!status_.is_success())
- net::LoadLog::AddErrorCode(load_log_, status_.os_error());
-
- net::LoadLog::EndEvent(load_log_, net::LoadLog::TYPE_URL_REQUEST_START);
+ params = new net::NetLogIntegerParameter("net_error", status_.os_error());
+ net_log_.EndEvent(net::NetLog::TYPE_URL_REQUEST_START_JOB, params);
URLRequestJob* job = GetJobManager()->MaybeInterceptResponse(this);
if (job) {
@@ -371,8 +389,8 @@
}
void URLRequest::FollowDeferredRedirect() {
- DCHECK(job_);
- DCHECK(status_.is_success());
+ CHECK(job_);
+ CHECK(status_.is_success());
job_->FollowDeferredRedirect();
}
@@ -406,6 +424,10 @@
void URLRequest::PrepareToRestart() {
DCHECK(job_);
+ // Close the current URL_REQUEST_START_JOB, since we will be starting a new
+ // one.
+ net_log_.EndEvent(net::NetLog::TYPE_URL_REQUEST_START_JOB, NULL);
+
job_->Kill();
OrphanJob();
@@ -420,22 +442,12 @@
job_ = NULL;
}
-// static
-std::string URLRequest::StripPostSpecificHeaders(const std::string& headers) {
- // These are headers that may be attached to a POST.
- static const char* const kPostHeaders[] = {
- "content-type",
- "content-length",
- "origin"
- };
- return net::HttpUtil::StripHeaders(
- headers, kPostHeaders, arraysize(kPostHeaders));
-}
-
int URLRequest::Redirect(const GURL& location, int http_status_code) {
- if (net::LoadLog::IsUnbounded(load_log_)) {
- net::LoadLog::AddString(load_log_, StringPrintf("Redirected (%d) to %s",
- http_status_code, location.spec().c_str()));
+ if (net_log_.HasListener()) {
+ net_log_.AddEvent(
+ net::NetLog::TYPE_URL_REQUEST_REDIRECTED,
+ new net::NetLogStringParameter(
+ "location", location.possibly_invalid_spec()));
}
if (redirect_limit_ <= 0) {
DLOG(INFO) << "disallowing redirect: exceeds limit";
@@ -476,10 +488,7 @@
// the inclusion of a multipart Content-Type header in GET can cause
// problems with some servers:
// http://code.google.com/p/chromium/issues/detail?id=843
- //
- // TODO(eroman): It would be better if this data was structured into
- // specific fields/flags, rather than a stew of extra headers.
- extra_request_headers_ = StripPostSpecificHeaders(extra_request_headers_);
+ StripPostSpecificHeaders(&extra_request_headers_);
}
if (!final_upload_progress_)
@@ -499,18 +508,15 @@
context_ = context;
- // If the context this request belongs to has changed, update the tracker(s).
+ // If the context this request belongs to has changed, update the tracker.
if (prev_context != context) {
- if (prev_context)
- prev_context->url_request_tracker()->Remove(this);
- if (context) {
- if (!load_log_) {
- // Create the LoadLog -- we waited until now to create it so we know
- // what constraints the URLRequestContext is enforcing on log levels.
- load_log_ = context->url_request_tracker()->CreateLoadLog();
- }
+ net_log_.EndEvent(net::NetLog::TYPE_REQUEST_ALIVE, NULL);
+ net_log_ = net::BoundNetLog();
- context->url_request_tracker()->Add(this);
+ if (context) {
+ net_log_ = net::BoundNetLog::Make(context->net_log(),
+ net::NetLog::SOURCE_URL_REQUEST);
+ net_log_.BeginEvent(net::NetLog::TYPE_REQUEST_ALIVE, NULL);
}
}
}
@@ -533,10 +539,3 @@
void URLRequest::SetUserData(const void* key, UserData* data) {
user_data_[key] = linked_ptr<UserData>(data);
}
-
-void URLRequest::GetInfoForTracker(
- RequestTracker<URLRequest>::RecentRequestInfo* info) const {
- DCHECK(info);
- info->original_url = original_url_;
- info->load_log = load_log_;
-}
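The SetExtraRequestHeaders() change above moves callers from a CRLF-delimited string to the structured net::HttpRequestHeaders type, so overwriting or stripping a header (as StripPostSpecificHeaders() now does) is an explicit call rather than string surgery. A rough before/after sketch; the X-Pinned-Header name is made up for illustration:

  // Before:
  //   request->SetExtraRequestHeaders("X-Pinned-Header: 1\r\nRange: bytes=0-99");
  // After:
  net::HttpRequestHeaders headers;
  headers.SetHeader("X-Pinned-Header", "1");
  headers.SetHeader(net::HttpRequestHeaders::kRange, "bytes=0-99");
  headers.SetHeader("X-Pinned-Header", "2");  // overwrite is explicit
  headers.RemoveHeader(net::HttpRequestHeaders::kContentType);
  request->SetExtraRequestHeaders(headers);   // must happen before Start()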
diff --git a/net/url_request/url_request.h b/net/url_request/url_request.h
index e586e28..7256ec0 100644
--- a/net/url_request/url_request.h
+++ b/net/url_request/url_request.h
@@ -10,17 +10,16 @@
#include <vector>
#include "base/leak_tracker.h"
-#include "base/linked_list.h"
#include "base/linked_ptr.h"
#include "base/logging.h"
+#include "base/non_thread_safe.h"
#include "base/ref_counted.h"
-#include "base/scoped_ptr.h"
#include "googleurl/src/gurl.h"
-#include "net/base/load_log.h"
#include "net/base/load_states.h"
+#include "net/base/net_log.h"
#include "net/base/request_priority.h"
+#include "net/http/http_request_headers.h"
#include "net/http/http_response_info.h"
-#include "net/url_request/request_tracker.h"
#include "net/url_request/url_request_status.h"
namespace base {
@@ -54,7 +53,7 @@
//
// NOTE: All usage of all instances of this class should be on the same thread.
//
-class URLRequest {
+class URLRequest : public NonThreadSafe {
public:
// Derive from this class and add your own data members to associate extra
// information with a URLRequest. Use GetUserData(key) and SetUserData()
@@ -187,6 +186,20 @@
request->Cancel();
}
+ // Called when reading cookies. |blocked_by_policy| is true if access to
+ // cookies was denied due to content settings. This method will never be
+ // invoked when LOAD_DO_NOT_SEND_COOKIES is specified.
+ virtual void OnGetCookies(URLRequest* request, bool blocked_by_policy) {
+ }
+
+ // Called when a cookie is set. |blocked_by_policy| is true if the cookie
+ // was rejected due to content settings. This method will never be invoked
+ // when LOAD_DO_NOT_SAVE_COOKIES is specified.
+ virtual void OnSetCookie(URLRequest* request,
+ const std::string& cookie_line,
+ bool blocked_by_policy) {
+ }
+
// After calling Start(), the delegate will receive an OnResponseStarted
// callback when the request has completed. If an error occurred, the
// request->status() will be set. On success, all redirects have been
@@ -292,12 +305,16 @@
//
// When uploading data, bytes_len must be non-zero.
// When uploading a file range, length must be non-zero. If length
- // exceeds the end-of-file, the upload is clipped at end-of-file.
+ // exceeds the end-of-file, the upload is clipped at end-of-file. If the
+ // expected modification time is provided (non-zero), it will be used to
+ // check whether the underlying file has been changed. The granularity of
+ // the time comparison is 1 second since time_t precision is used in WebKit.
void AppendBytesToUpload(const char* bytes, int bytes_len);
void AppendFileRangeToUpload(const FilePath& file_path,
- uint64 offset, uint64 length);
+ uint64 offset, uint64 length,
+ const base::Time& expected_modification_time);
void AppendFileToUpload(const FilePath& file_path) {
- AppendFileRangeToUpload(file_path, 0, kuint64max);
+ AppendFileRangeToUpload(file_path, 0, kuint64max, base::Time());
}
// Set the upload data directly.
@@ -316,17 +333,14 @@
void SetExtraRequestHeaderByName(const std::string& name,
const std::string& value, bool overwrite);
- // Sets all extra request headers, from a \r\n-delimited string. Any extra
- // request headers set by other methods are overwritten by this method. This
- // method may only be called before Start() is called. It is an error to
- // call it later.
- //
- // Note: \r\n is only used to separate the headers in the string if there
- // are multiple headers. The last header in the string must not be followed
- // by \r\n.
- void SetExtraRequestHeaders(const std::string& headers);
+ // Sets all extra request headers. Any extra request headers set by other
+ // methods are overwritten by this method. This method may only be called
+ // before Start() is called. It is an error to call it later.
+ void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers);
- const std::string& extra_request_headers() { return extra_request_headers_; }
+ const net::HttpRequestHeaders& extra_request_headers() const {
+ return extra_request_headers_;
+ }
// Returns the current load state for the request.
net::LoadState GetLoadState() const;
@@ -362,11 +376,29 @@
// Indicate if this response was fetched from disk cache.
bool was_cached() const { return response_info_.was_cached; }
- // Returns true if the URLRequest was delivered with SPDY.
+ // True if the response could use the alternate protocol. However, the
+ // browser will ignore the alternate protocol if SPDY is not enabled.
bool was_fetched_via_spdy() const {
return response_info_.was_fetched_via_spdy;
}
+ // Returns true if the URLRequest was delivered after NPN was negotiated,
+ // using either SPDY or HTTP.
+ bool was_npn_negotiated() const {
+ return response_info_.was_npn_negotiated;
+ }
+
+ // Returns true if the URLRequest was delivered when the alternate protocol
+ // is available.
+ bool was_alternate_protocol_available() const {
+ return response_info_.was_alternate_protocol_available;
+ }
+
+ // Returns true if the URLRequest was delivered through a proxy.
+ bool was_fetched_via_proxy() const {
+ return response_info_.was_fetched_via_proxy;
+ }
+
// Get all response headers, as a HttpResponseHeaders object. See comments
// in HttpResponseHeaders class as to the format of the data.
net::HttpResponseHeaders* response_headers() const;
@@ -454,7 +486,14 @@
//
// If a read error occurs, Read returns false and the request->status
// will be set to an error.
- bool Read(net::IOBuffer* buf, int max_bytes, int *bytes_read);
+ bool Read(net::IOBuffer* buf, int max_bytes, int* bytes_read);
+
+ // If this request is being cached by the HTTP cache, stop subsequent caching.
+ // Note that this method has no effect on other (simultaneous or not) requests
+ // for the same resource. The typical example is a request that results in
+ // the data being stored to disk (downloaded instead of rendered) so we don't
+ // want to store it twice.
+ void StopCaching();
// This method may be called to follow a redirect that was deferred in
// response to an OnReceivedRedirect call.
@@ -495,7 +534,7 @@
URLRequestContext* context();
void set_context(URLRequestContext* context);
- net::LoadLog* load_log() { return load_log_; }
+ const net::BoundNetLog& net_log() const { return net_log_; }
// Returns the expected content size if available
int64 GetExpectedContentSize() const;
@@ -536,13 +575,12 @@
private:
friend class URLRequestJob;
- friend class RequestTracker<URLRequest>;
void StartJob(URLRequestJob* job);
// Restarting involves replacing the current job with a new one such as what
// happens when following a HTTP redirect.
- void RestartWithJob(URLRequestJob *job);
+ void RestartWithJob(URLRequestJob* job);
void PrepareToRestart();
// Detaches the job from this request in preparation for this object going
@@ -554,22 +592,13 @@
// passed values.
void DoCancel(int os_error, const net::SSLInfo& ssl_info);
- // Discard headers which have meaning in POST (Content-Length, Content-Type,
- // Origin).
- static std::string StripPostSpecificHeaders(const std::string& headers);
-
- // Gets the goodies out of this that we want to show the user later on the
- // chrome://net-internals/ page.
- void GetInfoForTracker(
- RequestTracker<URLRequest>::RecentRequestInfo* info) const;
-
// Contextual information used for this request (can be NULL). This contains
// most of the dependencies which are shared between requests (disk cache,
// cookie store, socket pool, etc.)
scoped_refptr<URLRequestContext> context_;
// Tracks the time spent in various load states throughout this request.
- scoped_refptr<net::LoadLog> load_log_;
+ net::BoundNetLog net_log_;
scoped_refptr<URLRequestJob> job_;
scoped_refptr<net::UploadData> upload_;
@@ -578,7 +607,7 @@
GURL first_party_for_cookies_;
std::string method_; // "GET", "POST", etc. Should be all uppercase.
std::string referrer_;
- std::string extra_request_headers_;
+ net::HttpRequestHeaders extra_request_headers_;
int load_flags_; // Flags indicating the request type for the load;
// expected values are LOAD_* enums above.
@@ -616,7 +645,6 @@
// this to determine which URLRequest to allocate sockets to first.
net::RequestPriority priority_;
- RequestTracker<URLRequest>::Node request_tracker_node_;
base::LeakTracker<URLRequest> leak_tracker_;
DISALLOW_COPY_AND_ASSIGN(URLRequest);
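The new OnGetCookies()/OnSetCookie() hooks above are notification-only; they report policy decisions but cannot veto them. A sketch of a delegate that surfaces blocked cookies (the CookieAuditDelegate name and the logging are illustrative; the other Delegate overrides are elided):

  class CookieAuditDelegate : public URLRequest::Delegate {
   public:
    virtual void OnGetCookies(URLRequest* request, bool blocked_by_policy) {
      if (blocked_by_policy)
        DLOG(INFO) << "cookie read blocked for " << request->url().spec();
    }
    virtual void OnSetCookie(URLRequest* request,
                             const std::string& cookie_line,
                             bool blocked_by_policy) {
      if (blocked_by_policy)
        DLOG(INFO) << "cookie write blocked: " << cookie_line;
    }
    // OnResponseStarted(), OnReadCompleted(), etc. as before.
  };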
diff --git a/net/url_request/url_request_context.h b/net/url_request/url_request_context.h
index d72f84d..0f7fd51 100644
--- a/net/url_request/url_request_context.h
+++ b/net/url_request/url_request_context.h
@@ -10,35 +10,46 @@
#ifndef NET_URL_REQUEST_URL_REQUEST_CONTEXT_H_
#define NET_URL_REQUEST_URL_REQUEST_CONTEXT_H_
+#include "base/non_thread_safe.h"
#include "base/ref_counted.h"
#include "base/string_util.h"
#include "net/base/cookie_store.h"
#include "net/base/host_resolver.h"
+#include "net/base/net_log.h"
#include "net/base/ssl_config_service.h"
#include "net/base/transport_security_state.h"
#include "net/ftp/ftp_auth_cache.h"
#include "net/proxy/proxy_service.h"
-#include "net/url_request/request_tracker.h"
namespace net {
class CookiePolicy;
class FtpTransactionFactory;
+class HttpAuthHandlerFactory;
+class HttpNetworkDelegate;
class HttpTransactionFactory;
class SocketStream;
}
class URLRequest;
// Subclass to provide application-specific context for URLRequest instances.
-class URLRequestContext :
- public base::RefCountedThreadSafe<URLRequestContext> {
+class URLRequestContext
+ : public base::RefCountedThreadSafe<URLRequestContext>,
+ public NonThreadSafe {
public:
URLRequestContext()
- : http_transaction_factory_(NULL),
+ : net_log_(NULL),
+ http_transaction_factory_(NULL),
ftp_transaction_factory_(NULL),
+ http_auth_handler_factory_(NULL),
+ network_delegate_(NULL),
cookie_policy_(NULL),
transport_security_state_(NULL) {
}
+ net::NetLog* net_log() const {
+ return net_log_;
+ }
+
net::HostResolver* host_resolver() const {
return host_resolver_;
}
@@ -77,22 +88,18 @@
// Gets the FTP authentication cache for this context.
net::FtpAuthCache* ftp_auth_cache() { return &ftp_auth_cache_; }
+ // Gets the HTTP Authentication Handler Factory for this context.
+ // The factory is only valid for the lifetime of this URLRequestContext.
+ net::HttpAuthHandlerFactory* http_auth_handler_factory() {
+ return http_auth_handler_factory_;
+ }
+
// Gets the value of 'Accept-Charset' header field.
const std::string& accept_charset() const { return accept_charset_; }
// Gets the value of 'Accept-Language' header field.
const std::string& accept_language() const { return accept_language_; }
- // Gets the tracker for URLRequests associated with this context.
- RequestTracker<URLRequest>* url_request_tracker() {
- return &url_request_tracker_;
- }
-
- // Gets the tracker for SocketStreams associated with this context.
- RequestTracker<net::SocketStream>* socket_stream_tracker() {
- return &socket_stream_tracker_;
- }
-
// Gets the UA string to use for the given URL. Pass an invalid URL (such as
// GURL()) to get the default UA string. Subclasses should override this
// method to provide a UA string.
@@ -107,20 +114,6 @@
referrer_charset_ = charset;
}
- // Called before adding cookies to requests. Returns true if cookie can
- // be added to the request. The cookie might still be modified though.
- virtual bool InterceptRequestCookies(const URLRequest* request,
- const std::string& cookies) const {
- return true;
- }
-
- // Called before adding cookies from respones to the cookie monster. Returns
- // true if the cookie can be added. The cookie might still be modified though.
- virtual bool InterceptResponseCookie(const URLRequest* request,
- const std::string& cookie) const {
- return true;
- }
-
protected:
friend class base::RefCountedThreadSafe<URLRequestContext>;
@@ -128,11 +121,14 @@
// The following members are expected to be initialized and owned by
// subclasses.
+ net::NetLog* net_log_;
scoped_refptr<net::HostResolver> host_resolver_;
scoped_refptr<net::ProxyService> proxy_service_;
scoped_refptr<net::SSLConfigService> ssl_config_service_;
net::HttpTransactionFactory* http_transaction_factory_;
net::FtpTransactionFactory* ftp_transaction_factory_;
+ net::HttpAuthHandlerFactory* http_auth_handler_factory_;
+ net::HttpNetworkDelegate* network_delegate_;
scoped_refptr<net::CookieStore> cookie_store_;
net::CookiePolicy* cookie_policy_;
scoped_refptr<net::TransportSecurityState> transport_security_state_;
@@ -144,12 +140,6 @@
// filename for file download.
std::string referrer_charset_;
- // Tracks the requests associated with this context.
- RequestTracker<URLRequest> url_request_tracker_;
-
- // Trakcs the socket streams associated with this context.
- RequestTracker<net::SocketStream> socket_stream_tracker_;
-
private:
DISALLOW_COPY_AND_ASSIGN(URLRequestContext);
};
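With net_log_ and http_auth_handler_factory_ now among the protected members that subclasses initialize and own, a subclass constructor is expected to wire them in. A minimal sketch, with a hypothetical subclass name and constructor arguments:

  class MyRequestContext : public URLRequestContext {
   public:
    MyRequestContext(net::NetLog* net_log,
                     net::HttpAuthHandlerFactory* auth_factory) {
      net_log_ = net_log;                        // exposed via net_log()
      http_auth_handler_factory_ = auth_factory; // via http_auth_handler_factory()
      // host_resolver_, proxy_service_, cookie_store_, etc. as before.
    }
  };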
diff --git a/net/url_request/url_request_file_dir_job.cc b/net/url_request/url_request_file_dir_job.cc
index 07977b5..19a1aaf 100644
--- a/net/url_request/url_request_file_dir_job.cc
+++ b/net/url_request/url_request_file_dir_job.cc
@@ -6,8 +6,8 @@
#include "base/file_util.h"
#include "base/message_loop.h"
-#include "base/string_util.h"
#include "base/sys_string_conversions.h"
+#include "base/utf_string_conversions.h"
#include "base/time.h"
#include "googleurl/src/gurl.h"
#include "net/base/io_buffer.h"
@@ -211,24 +211,3 @@
}
}
}
-
-bool URLRequestFileDirJob::IsRedirectResponse(
- GURL* location, int* http_status_code) {
- // If the URL did not have a trailing slash, treat the response as a redirect
- // to the URL with a trailing slash appended.
- std::string path = request_->url().path();
- if (path.empty() || (path[path.size() - 1] != '/')) {
- // This happens when we discovered the file is a directory, so needs a
- // slash at the end of the path.
- std::string new_path = path;
- new_path.push_back('/');
- GURL::Replacements replacements;
- replacements.SetPathStr(new_path);
-
- *location = request_->url().ReplaceComponents(replacements);
- *http_status_code = 301; // simulate a permanent redirect
- return true;
- }
-
- return false;
-}
diff --git a/net/url_request/url_request_file_dir_job.h b/net/url_request/url_request_file_dir_job.h
index 24a6c72..0322f10 100644
--- a/net/url_request/url_request_file_dir_job.h
+++ b/net/url_request/url_request_file_dir_job.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -25,7 +25,6 @@
virtual bool ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read);
virtual bool GetMimeType(std::string* mime_type) const;
virtual bool GetCharset(std::string* charset);
- virtual bool IsRedirectResponse(GURL* location, int* http_status_code);
// DirectoryLister::DirectoryListerDelegate methods:
virtual void OnListFile(const file_util::FileEnumerator::FindInfo& data);
@@ -63,7 +62,7 @@
scoped_refptr<net::IOBuffer> read_buffer_;
int read_buffer_length_;
- DISALLOW_EVIL_CONSTRUCTORS(URLRequestFileDirJob);
+ DISALLOW_COPY_AND_ASSIGN(URLRequestFileDirJob);
};
#endif // NET_URL_REQUEST_URL_REQUEST_FILE_DIR_JOB_H__
diff --git a/net/url_request/url_request_file_job.cc b/net/url_request/url_request_file_job.cc
index 8e1b7dc..003a29d 100644
--- a/net/url_request/url_request_file_job.cc
+++ b/net/url_request/url_request_file_job.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -80,8 +80,15 @@
URLRequestJob* URLRequestFileJob::Factory(
URLRequest* request, const std::string& scheme) {
FilePath file_path;
+
+ // We need to decide whether to create URLRequestFileJob for file access or
+ // URLRequestFileDirJob for directory access. To avoid accessing the
+ // filesystem, we only look at the path string here.
+ // The code in URLRequestFileJob::Start() discovers that a path that doesn't
+ // end with a slash should really be treated as a directory, and it then
+ // redirects to URLRequestFileDirJob.
if (net::FileURLToFilePath(request->url(), &file_path) &&
- file_util::EnsureEndsWithSeparator(&file_path) &&
+ file_util::EndsWithSeparator(file_path) &&
file_path.IsAbsolute())
return new URLRequestFileDirJob(request, file_path);
@@ -188,18 +195,23 @@
return net::GetMimeTypeFromFile(file_path_, mime_type);
}
-void URLRequestFileJob::SetExtraRequestHeaders(const std::string& headers) {
- // We only care about "Range" header here.
- std::vector<net::HttpByteRange> ranges;
- if (net::HttpUtil::ParseRanges(headers, &ranges)) {
- if (ranges.size() == 1) {
- byte_range_ = ranges[0];
- } else {
- // We don't support multiple range requests in one single URL request,
- // because we need to do multipart encoding here.
- // TODO(hclam): decide whether we want to support multiple range requests.
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
- net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+void URLRequestFileJob::SetExtraRequestHeaders(
+ const net::HttpRequestHeaders& headers) {
+ std::string range_header;
+ if (headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header)) {
+ // We only care about "Range" header here.
+ std::vector<net::HttpByteRange> ranges;
+ if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) {
+ if (ranges.size() == 1) {
+ byte_range_ = ranges[0];
+ } else {
+ // We don't support multiple range requests in one single URL request,
+ // because we need to do multipart encoding here.
+ // TODO(hclam): decide whether we want to support multiple range
+ // requests.
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ net::ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+ }
}
}
}
@@ -214,15 +226,20 @@
if (!request_)
return;
+ is_directory_ = file_info.is_directory;
+
int rv = net::OK;
- // We use URLRequestFileJob to handle valid and invalid files as well as
- // invalid directories. For a directory to be invalid, it must either not
- // exist, or be "\" on Windows. (Windows resolves "\" to "C:\", thus
- // reporting it as existent.) On POSIX, we don't count any existent
- // directory as invalid.
- if (!exists || file_info.is_directory) {
+ // We use URLRequestFileJob to handle files as well as directories without a
+ // trailing slash.
+ // If a directory does not exist, we return ERR_FILE_NOT_FOUND. Otherwise,
+ // we append a trailing slash and redirect to FileDirJob.
+ // A special case is "\" on Windows, which should be resolved as invalid.
+ // However, Windows resolves "\" to "C:\" and thus reports it as existent.
+ // So what happens is that we append a trailing slash and redirect it to
+ // FileDirJob, where it is resolved as invalid.
+ if (!exists) {
rv = net::ERR_FILE_NOT_FOUND;
- } else {
+ } else if (!is_directory_) {
int flags = base::PLATFORM_FILE_OPEN |
base::PLATFORM_FILE_READ |
base::PLATFORM_FILE_ASYNC;
@@ -273,15 +290,25 @@
NotifyReadComplete(result);
}
-bool URLRequestFileJob::IsRedirectResponse(
- GURL* location, int* http_status_code) {
-#if defined(OS_WIN)
- std::wstring extension =
- file_util::GetFileExtensionFromPath(file_path_.value());
+bool URLRequestFileJob::IsRedirectResponse(GURL* location,
+ int* http_status_code) {
+ if (is_directory_) {
+ // This happens when we discovered the file is a directory, so it needs a
+ // slash at the end of the path.
+ std::string new_path = request_->url().path();
+ new_path.push_back('/');
+ GURL::Replacements replacements;
+ replacements.SetPathStr(new_path);
+ *location = request_->url().ReplaceComponents(replacements);
+ *http_status_code = 301; // simulate a permanent redirect
+ return true;
+ }
+
+#if defined(OS_WIN)
// Follow a Windows shortcut.
// We only resolve .lnk files and ignore others.
- if (!LowerCaseEqualsASCII(extension, "lnk"))
+ if (!LowerCaseEqualsASCII(file_path_.Extension(), ".lnk"))
return false;
FilePath new_path = file_path_;
diff --git a/net/url_request/url_request_file_job.h b/net/url_request/url_request_file_job.h
index 8cb28bd..aed6859 100644
--- a/net/url_request/url_request_file_job.h
+++ b/net/url_request/url_request_file_job.h
@@ -30,7 +30,7 @@
virtual bool GetContentEncodings(
std::vector<Filter::FilterType>* encoding_type);
virtual bool GetMimeType(std::string* mime_type) const;
- virtual void SetExtraRequestHeaders(const std::string& headers);
+ virtual void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers);
static URLRequest::ProtocolFactory Factory;
diff --git a/net/url_request/url_request_filter.h b/net/url_request/url_request_filter.h
index 3f255b8..d81e68c 100644
--- a/net/url_request/url_request_filter.h
+++ b/net/url_request/url_request_filter.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
@@ -79,7 +79,7 @@
// Singleton instance.
static URLRequestFilter* shared_instance_;
- DISALLOW_EVIL_CONSTRUCTORS(URLRequestFilter);
+ DISALLOW_COPY_AND_ASSIGN(URLRequestFilter);
};
#endif // NET_URL_REQUEST_URL_REQUEST_FILTER_H_
diff --git a/net/url_request/url_request_ftp_job.cc b/net/url_request/url_request_ftp_job.cc
new file mode 100644
index 0000000..f8746fc
--- /dev/null
+++ b/net/url_request/url_request_ftp_job.cc
@@ -0,0 +1,244 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_ftp_job.h"
+
+#include "base/compiler_specific.h"
+#include "base/message_loop.h"
+#include "net/base/auth.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_util.h"
+#include "net/ftp/ftp_response_info.h"
+#include "net/ftp/ftp_transaction_factory.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_error_job.h"
+
+URLRequestFtpJob::URLRequestFtpJob(URLRequest* request)
+ : URLRequestJob(request),
+ ALLOW_THIS_IN_INITIALIZER_LIST(
+ start_callback_(this, &URLRequestFtpJob::OnStartCompleted)),
+ ALLOW_THIS_IN_INITIALIZER_LIST(
+ read_callback_(this, &URLRequestFtpJob::OnReadCompleted)),
+ read_in_progress_(false),
+ context_(request->context()) {
+}
+
+URLRequestFtpJob::~URLRequestFtpJob() {
+}
+
+// static
+URLRequestJob* URLRequestFtpJob::Factory(URLRequest* request,
+ const std::string& scheme) {
+ DCHECK_EQ(scheme, "ftp");
+
+ int port = request->url().IntPort();
+ if (request->url().has_port() &&
+ !net::IsPortAllowedByFtp(port) && !net::IsPortAllowedByOverride(port))
+ return new URLRequestErrorJob(request, net::ERR_UNSAFE_PORT);
+
+ DCHECK(request->context());
+ DCHECK(request->context()->ftp_transaction_factory());
+ return new URLRequestFtpJob(request);
+}
+
+bool URLRequestFtpJob::GetMimeType(std::string* mime_type) const {
+ if (transaction_->GetResponseInfo()->is_directory_listing) {
+ *mime_type = "text/vnd.chromium.ftp-dir";
+ return true;
+ }
+ return false;
+}
+
+void URLRequestFtpJob::Start() {
+ DCHECK(!transaction_.get());
+ request_info_.url = request_->url();
+ StartTransaction();
+}
+
+void URLRequestFtpJob::Kill() {
+ if (!transaction_.get())
+ return;
+ DestroyTransaction();
+ URLRequestJob::Kill();
+}
+
+net::LoadState URLRequestFtpJob::GetLoadState() const {
+ return transaction_.get() ?
+ transaction_->GetLoadState() : net::LOAD_STATE_IDLE;
+}
+
+bool URLRequestFtpJob::NeedsAuth() {
+ // Note that we only have to worry about cases where an actual FTP server
+ // requires auth (and not a proxy), because connecting to FTP via proxy
+ // effectively means the browser communicates via HTTP, and uses HTTP's
+ // Proxy-Authenticate protocol when proxy servers require auth.
+ return server_auth_ && server_auth_->state == net::AUTH_STATE_NEED_AUTH;
+}
+
+void URLRequestFtpJob::GetAuthChallengeInfo(
+ scoped_refptr<net::AuthChallengeInfo>* result) {
+ DCHECK((server_auth_ != NULL) &&
+ (server_auth_->state == net::AUTH_STATE_NEED_AUTH));
+ scoped_refptr<net::AuthChallengeInfo> auth_info = new net::AuthChallengeInfo;
+ auth_info->is_proxy = false;
+ auth_info->host_and_port = ASCIIToWide(
+ net::GetHostAndPort(request_->url()));
+ auth_info->scheme = L"";
+ auth_info->realm = L"";
+ result->swap(auth_info);
+}
+
+void URLRequestFtpJob::SetAuth(const std::wstring& username,
+ const std::wstring& password) {
+ DCHECK(NeedsAuth());
+ server_auth_->state = net::AUTH_STATE_HAVE_AUTH;
+ server_auth_->username = username;
+ server_auth_->password = password;
+
+ request_->context()->ftp_auth_cache()->Add(request_->url().GetOrigin(),
+ username, password);
+
+ RestartTransactionWithAuth();
+}
+
+void URLRequestFtpJob::CancelAuth() {
+ DCHECK(NeedsAuth());
+ server_auth_->state = net::AUTH_STATE_CANCELED;
+
+ // Once the auth is cancelled, we proceed with the request as though
+ // there were no auth. Schedule this for later so that we don't cause
+ // any recursion into the caller as a result of this call.
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ this, &URLRequestFtpJob::OnStartCompleted, net::OK));
+}
+
+bool URLRequestFtpJob::ReadRawData(net::IOBuffer* buf,
+ int buf_size,
+ int *bytes_read) {
+ DCHECK_NE(buf_size, 0);
+ DCHECK(bytes_read);
+ DCHECK(!read_in_progress_);
+
+ int rv = transaction_->Read(buf, buf_size, &read_callback_);
+ if (rv >= 0) {
+ *bytes_read = rv;
+ return true;
+ }
+
+ if (rv == net::ERR_IO_PENDING) {
+ read_in_progress_ = true;
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ } else {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+ }
+ return false;
+}
+
+void URLRequestFtpJob::OnStartCompleted(int result) {
+ // If the request was destroyed, then there is no more work to do.
+ if (!request_ || !request_->delegate())
+ return;
+ // If the transaction was destroyed, then the job was cancelled, and
+ // we can just ignore this notification.
+ if (!transaction_.get())
+ return;
+ // Clear the IO_PENDING status
+ SetStatus(URLRequestStatus());
+
+ // FTP obviously doesn't have an HTTP Content-Length header. We have to
+ // pass the content size information manually.
+ set_expected_content_size(
+ transaction_->GetResponseInfo()->expected_content_size);
+
+ if (result == net::OK) {
+ NotifyHeadersComplete();
+ } else if (transaction_->GetResponseInfo()->needs_auth) {
+ GURL origin = request_->url().GetOrigin();
+ if (server_auth_ && server_auth_->state == net::AUTH_STATE_HAVE_AUTH) {
+ request_->context()->ftp_auth_cache()->Remove(origin,
+ server_auth_->username,
+ server_auth_->password);
+ } else if (!server_auth_) {
+ server_auth_ = new net::AuthData();
+ }
+ server_auth_->state = net::AUTH_STATE_NEED_AUTH;
+
+ net::FtpAuthCache::Entry* cached_auth =
+ request_->context()->ftp_auth_cache()->Lookup(origin);
+
+ if (cached_auth) {
+ // Retry using cached auth data.
+ SetAuth(cached_auth->username, cached_auth->password);
+ } else {
+ // Prompt for a username/password.
+ NotifyHeadersComplete();
+ }
+ } else {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+ }
+}
+
+void URLRequestFtpJob::OnReadCompleted(int result) {
+ read_in_progress_ = false;
+ if (result == 0) {
+ NotifyDone(URLRequestStatus());
+ } else if (result < 0) {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+ } else {
+ // Clear the IO_PENDING status
+ SetStatus(URLRequestStatus());
+ }
+ NotifyReadComplete(result);
+}
+
+void URLRequestFtpJob::RestartTransactionWithAuth() {
+ DCHECK(server_auth_ && server_auth_->state == net::AUTH_STATE_HAVE_AUTH);
+
+ // No matter what, we want to report our status as IO pending since we will
+ // be notifying our consumer asynchronously via OnStartCompleted.
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+
+ int rv = transaction_->RestartWithAuth(server_auth_->username,
+ server_auth_->password,
+ &start_callback_);
+ if (rv == net::ERR_IO_PENDING)
+ return;
+
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ this, &URLRequestFtpJob::OnStartCompleted, rv));
+}
+
+void URLRequestFtpJob::StartTransaction() {
+ // Create a transaction.
+ DCHECK(!transaction_.get());
+ DCHECK(request_->context());
+ DCHECK(request_->context()->ftp_transaction_factory());
+
+ transaction_.reset(
+ request_->context()->ftp_transaction_factory()->CreateTransaction());
+
+ // No matter what, we want to report our status as IO pending since we will
+ // be notifying our consumer asynchronously via OnStartCompleted.
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ int rv;
+ if (transaction_.get()) {
+ rv = transaction_->Start(
+ &request_info_, &start_callback_, request_->net_log());
+ if (rv == net::ERR_IO_PENDING)
+ return;
+ } else {
+ rv = net::ERR_FAILED;
+ }
+ // The transaction started synchronously, but we need to notify the
+ // URLRequest delegate via the message loop.
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ this, &URLRequestFtpJob::OnStartCompleted, rv));
+}
+
+void URLRequestFtpJob::DestroyTransaction() {
+ DCHECK(transaction_.get());
+
+ transaction_.reset();
+}
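For review convenience, the FtpAuthCache round trip implemented across OnStartCompleted() and SetAuth() above, condensed into one sketch (a summary of existing behavior in this file, not new code):

  GURL origin = request_->url().GetOrigin();
  net::FtpAuthCache* cache = request_->context()->ftp_auth_cache();

  // A failure while in AUTH_STATE_HAVE_AUTH evicts the stale entry:
  //   cache->Remove(origin, server_auth_->username, server_auth_->password);

  // On a fresh challenge, cached credentials are retried before prompting:
  net::FtpAuthCache::Entry* cached_auth = cache->Lookup(origin);
  if (cached_auth) {
    SetAuth(cached_auth->username, cached_auth->password);  // also re-Adds
  } else {
    NotifyHeadersComplete();  // surface the challenge to the user
  }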
diff --git a/net/url_request/url_request_ftp_job.h b/net/url_request/url_request_ftp_job.h
new file mode 100644
index 0000000..453543f
--- /dev/null
+++ b/net/url_request/url_request_ftp_job.h
@@ -0,0 +1,79 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_FTP_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_FTP_JOB_H_
+
+#include <string>
+
+#include "net/base/auth.h"
+#include "net/base/completion_callback.h"
+#include "net/ftp/ftp_request_info.h"
+#include "net/ftp/ftp_transaction.h"
+#include "net/url_request/url_request_job.h"
+
+class URLRequestContext;
+
+namespace net {
+struct list_state;
+}
+
+// A URLRequestJob subclass that is built on top of FtpTransaction. It
+// provides an implementation for FTP.
+class URLRequestFtpJob : public URLRequestJob {
+ public:
+
+ explicit URLRequestFtpJob(URLRequest* request);
+
+ static URLRequestJob* Factory(URLRequest* request, const std::string& scheme);
+
+ // URLRequestJob methods:
+ virtual bool GetMimeType(std::string* mime_type) const;
+
+ private:
+ virtual ~URLRequestFtpJob();
+
+ // URLRequestJob methods:
+ virtual void Start();
+ virtual void Kill();
+ virtual net::LoadState GetLoadState() const;
+ virtual bool NeedsAuth();
+ virtual void GetAuthChallengeInfo(
+ scoped_refptr<net::AuthChallengeInfo>* auth_info);
+ virtual void SetAuth(const std::wstring& username,
+ const std::wstring& password);
+ virtual void CancelAuth();
+
+ // TODO(ibrar): Take another look at this function.
+ virtual uint64 GetUploadProgress() const { return 0; }
+ virtual bool ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read);
+
+ void DestroyTransaction();
+ void StartTransaction();
+
+ void OnStartCompleted(int result);
+ void OnReadCompleted(int result);
+
+ void RestartTransactionWithAuth();
+
+ void LogFtpServerType(char server_type);
+
+ net::FtpRequestInfo request_info_;
+ scoped_ptr<net::FtpTransaction> transaction_;
+
+ net::CompletionCallbackImpl<URLRequestFtpJob> start_callback_;
+ net::CompletionCallbackImpl<URLRequestFtpJob> read_callback_;
+
+ bool read_in_progress_;
+
+ scoped_refptr<net::AuthData> server_auth_;
+
+ // Keep a reference to the url request context to be sure it's not deleted
+ // before us.
+ scoped_refptr<URLRequestContext> context_;
+
+ DISALLOW_COPY_AND_ASSIGN(URLRequestFtpJob);
+};
+
+#endif // NET_URL_REQUEST_URL_REQUEST_FTP_JOB_H_
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc
index 25b0f33..9a03213 100644
--- a/net/url_request/url_request_http_job.cc
+++ b/net/url_request/url_request_http_job.cc
@@ -15,23 +15,26 @@
#include "net/base/cert_status_flags.h"
#include "net/base/cookie_policy.h"
#include "net/base/filter.h"
-#include "net/base/https_prober.h"
#include "net/base/transport_security_state.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/sdch_manager.h"
#include "net/base/ssl_cert_request_info.h"
+#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
+#include "net/url_request/https_prober.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_redirect_job.h"
+static const char kAvailDictionaryHeader[] = "Avail-Dictionary";
+
// TODO(darin): make sure the port blocking code is not lost
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
@@ -128,9 +131,9 @@
}
void URLRequestHttpJob::SetExtraRequestHeaders(
- const std::string& headers) {
+ const net::HttpRequestHeaders& headers) {
DCHECK(!transaction_.get()) << "cannot change once started";
- request_info_.extra_headers = headers;
+ request_info_.extra_headers.CopyFrom(headers);
}
void URLRequestHttpJob::Start() {
@@ -146,8 +149,9 @@
request_info_.priority = request_->priority();
if (request_->context()) {
- request_info_.user_agent =
- request_->context()->GetUserAgent(request_->url());
+ request_info_.extra_headers.SetHeader(
+ net::HttpRequestHeaders::kUserAgent,
+ request_->context()->GetUserAgent(request_->url()));
}
AddExtraHeaders();
@@ -334,9 +338,8 @@
// Update the cookies, since the cookie store may have been updated from the
// headers in the 401/407. Since cookies were already appended to
// extra_headers, we need to strip them out before adding them again.
- static const char* const cookie_name[] = { "cookie" };
- request_info_.extra_headers = net::HttpUtil::StripHeaders(
- request_info_.extra_headers, cookie_name, arraysize(cookie_name));
+ request_info_.extra_headers.RemoveHeader(
+ net::HttpRequestHeaders::kCookie);
AddCookieHeaderAndStart();
}
@@ -429,20 +432,34 @@
return false;
}
+void URLRequestHttpJob::StopCaching() {
+ if (transaction_.get())
+ transaction_->StopCaching();
+}
+
void URLRequestHttpJob::OnCanGetCookiesCompleted(int policy) {
// If the request was destroyed, then there is no more work to do.
if (request_ && request_->delegate()) {
- if (policy == net::OK && request_->context()->cookie_store()) {
+ if (policy == net::ERR_ACCESS_DENIED) {
+ request_->delegate()->OnGetCookies(request_, true);
+ } else if (policy == net::OK && request_->context()->cookie_store()) {
+ request_->delegate()->OnGetCookies(request_, false);
net::CookieOptions options;
options.set_include_httponly();
std::string cookies =
request_->context()->cookie_store()->GetCookiesWithOptions(
request_->url(), options);
- if (request_->context()->InterceptRequestCookies(request_, cookies) &&
- !cookies.empty())
- request_info_.extra_headers += "Cookie: " + cookies + "\r\n";
+ if (!cookies.empty()) {
+ request_info_.extra_headers.SetHeader(
+ net::HttpRequestHeaders::kCookie, cookies);
+ }
}
- StartTransaction();
+ // We may have been canceled within OnGetCookies.
+ if (GetStatus().is_success()) {
+ StartTransaction();
+ } else {
+ NotifyCanceled();
+ }
}
Release(); // Balance AddRef taken in AddCookieHeaderAndStart
}
@@ -450,16 +467,33 @@
void URLRequestHttpJob::OnCanSetCookieCompleted(int policy) {
// If the request was destroyed, then there is no more work to do.
if (request_ && request_->delegate()) {
- if (policy == net::OK && request_->context()->cookie_store()) {
+ if (policy == net::ERR_ACCESS_DENIED) {
+ request_->delegate()->OnSetCookie(
+ request_,
+ response_cookies_[response_cookies_save_index_],
+ true);
+ } else if ((policy == net::OK || policy == net::OK_FOR_SESSION_ONLY) &&
+ request_->context()->cookie_store()) {
// OK to save the current response cookie now.
net::CookieOptions options;
options.set_include_httponly();
+ if (policy == net::OK_FOR_SESSION_ONLY)
+ options.set_force_session();
request_->context()->cookie_store()->SetCookieWithOptions(
request_->url(), response_cookies_[response_cookies_save_index_],
options);
+ request_->delegate()->OnSetCookie(
+ request_,
+ response_cookies_[response_cookies_save_index_],
+ false);
}
response_cookies_save_index_++;
- SaveNextCookie();
+ // We may have been canceled within OnSetCookie.
+ if (GetStatus().is_success()) {
+ SaveNextCookie();
+ } else {
+ NotifyCanceled();
+ }
}
Release(); // Balance AddRef taken in SaveNextCookie
}
@@ -569,23 +603,12 @@
URLRequestJob::NotifyHeadersComplete();
}
-#if defined(OS_WIN)
-#pragma optimize("", off)
-#pragma warning(disable:4748)
-#endif
void URLRequestHttpJob::DestroyTransaction() {
- CHECK(transaction_.get());
- // TODO(rvargas): remove this after finding the cause for bug 31723.
- char local_obj[sizeof(*this)];
- memcpy(local_obj, this, sizeof(local_obj));
+ DCHECK(transaction_.get());
transaction_.reset();
response_info_ = NULL;
}
-#if defined(OS_WIN)
-#pragma warning(default:4748)
-#pragma optimize("", on)
-#endif
void URLRequestHttpJob::StartTransaction() {
// NOTE: This method assumes that request_info_ is already setup properly.
@@ -606,7 +629,7 @@
&transaction_);
if (rv == net::OK) {
rv = transaction_->Start(
- &request_info_, &start_callback_, request_->load_log());
+ &request_info_, &start_callback_, request_->net_log());
}
}
@@ -654,14 +677,16 @@
// these headers. Some proxies deliberately corrupt Accept-Encoding headers.
if (!advertise_sdch) {
// Tell the server what compression formats we support (other than SDCH).
- request_info_.extra_headers += "Accept-Encoding: gzip,deflate\r\n";
+ request_info_.extra_headers.SetHeader(
+ net::HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
} else {
// Include SDCH in acceptable list.
- request_info_.extra_headers += "Accept-Encoding: "
- "gzip,deflate,sdch\r\n";
+ request_info_.extra_headers.SetHeader(
+ net::HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
if (!avail_dictionaries.empty()) {
- request_info_.extra_headers += "Avail-Dictionary: "
- + avail_dictionaries + "\r\n";
+ request_info_.extra_headers.SetHeader(
+ kAvailDictionaryHeader,
+ avail_dictionaries);
sdch_dictionary_advertised_ = true;
// Since we're tagging this transaction as advertising a dictionary, we'll
// definitely employ an SDCH filter (or tentative sdch filter) when we get
@@ -675,12 +700,18 @@
if (context) {
// Only add default Accept-Language and Accept-Charset if the request
// didn't have them specified.
- net::HttpUtil::AppendHeaderIfMissing("Accept-Language",
- context->accept_language(),
- &request_info_.extra_headers);
- net::HttpUtil::AppendHeaderIfMissing("Accept-Charset",
- context->accept_charset(),
- &request_info_.extra_headers);
+ if (!request_info_.extra_headers.HasHeader(
+ net::HttpRequestHeaders::kAcceptLanguage)) {
+ request_info_.extra_headers.SetHeader(
+ net::HttpRequestHeaders::kAcceptLanguage,
+ context->accept_language());
+ }
+ if (!request_info_.extra_headers.HasHeader(
+ net::HttpRequestHeaders::kAcceptCharset)) {
+ request_info_.extra_headers.SetHeader(
+ net::HttpRequestHeaders::kAcceptCharset,
+ context->accept_charset());
+ }
}
}
@@ -694,7 +725,7 @@
int policy = net::OK;
if (request_info_.load_flags & net::LOAD_DO_NOT_SEND_COOKIES) {
- policy = net::ERR_ACCESS_DENIED;
+ policy = net::ERR_FAILED;
} else if (request_->context()->cookie_policy()) {
policy = request_->context()->cookie_policy()->CanGetCookies(
request_->url(),
@@ -740,7 +771,7 @@
int policy = net::OK;
if (request_info_.load_flags & net::LOAD_DO_NOT_SAVE_COOKIES) {
- policy = net::ERR_ACCESS_DENIED;
+ policy = net::ERR_FAILED;
} else if (request_->context()->cookie_policy()) {
policy = request_->context()->cookie_policy()->CanSetCookie(
request_->url(),
@@ -761,10 +792,8 @@
std::string value;
void* iter = NULL;
- while (response_info->headers->EnumerateHeader(&iter, name, &value)) {
- if (request_->context()->InterceptResponseCookie(request_, value))
- cookies->push_back(value);
- }
+ while (response_info->headers->EnumerateHeader(&iter, name, &value))
+ cookies->push_back(value);
}
class HTTPSProberDelegate : public net::HTTPSProberDelegate {
@@ -810,8 +839,7 @@
const bool https = response_info_->ssl_info.is_valid();
const bool valid_https =
- https &&
- !(response_info_->ssl_info.cert_status & net::CERT_STATUS_ALL_ERRORS);
+ https && !net::IsCertStatusError(response_info_->ssl_info.cert_status);
std::string name = "Strict-Transport-Security";
std::string value;
@@ -879,7 +907,7 @@
continue;
}
- net::HTTPSProberDelegate* delegate =
+ HTTPSProberDelegate* delegate =
new HTTPSProberDelegate(request_info_.url.host(), max_age,
include_subdomains,
ctx->transport_security_state());
diff --git a/net/url_request/url_request_http_job.h b/net/url_request/url_request_http_job.h
index e00e3c4..279cdd4 100644
--- a/net/url_request/url_request_http_job.h
+++ b/net/url_request/url_request_http_job.h
@@ -32,7 +32,7 @@
// URLRequestJob methods:
virtual void SetUpload(net::UploadData* upload);
- virtual void SetExtraRequestHeaders(const std::string& headers);
+ virtual void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers);
virtual void Start();
virtual void Kill();
virtual net::LoadState GetLoadState() const;
@@ -55,6 +55,7 @@
virtual void ContinueWithCertificate(net::X509Certificate* client_cert);
virtual void ContinueDespiteLastError();
virtual bool ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read);
+ virtual void StopCaching();
// Shadows URLRequestJob's version of this method so we can grab cookies.
void NotifyHeadersComplete();
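StopCaching() threads from URLRequest through URLRequestJob down to the HttpTransaction (the contract is documented in url_request.h above). A sketch of the intended call site in a delegate, where ShouldHandOffToDownload() is a hypothetical predicate:

  virtual void OnResponseStarted(URLRequest* request) {
    if (request->status().is_success() && ShouldHandOffToDownload(request)) {
      // The body will be written to disk by the download code, so avoid
      // storing a second copy in the HTTP cache.
      request->StopCaching();
    }
    // ... continue with Read() as usual ...
  }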
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 63611fd..23a7d85 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -1,14 +1,16 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/url_request/url_request_job.h"
+#include "base/histogram.h"
#include "base/message_loop.h"
#include "base/string_util.h"
#include "net/base/auth.h"
#include "net/base/io_buffer.h"
#include "net/base/load_flags.h"
+#include "net/base/mime_util.h"
#include "net/base/net_errors.h"
#include "net/http/http_response_headers.h"
#include "net/url_request/url_request.h"
@@ -24,10 +26,13 @@
URLRequestJob::URLRequestJob(URLRequest* request)
: request_(request),
+ prefilter_bytes_read_(0),
+ postfilter_bytes_read_(0),
+ is_compressible_content_(false),
+ is_compressed_(false),
done_(false),
filter_needs_more_output_space_(false),
- read_buffer_(NULL),
- read_buffer_len_(0),
+ filtered_read_buffer_len_(0),
has_handled_response_(false),
expected_content_size_(-1),
deferred_redirect_status_code_(-1),
@@ -49,6 +54,13 @@
g_url_request_job_tracker.RemoveJob(this);
}
+void URLRequestJob::SetUpload(net::UploadData* upload) {
+}
+
+void URLRequestJob::SetExtraRequestHeaders(
+ const net::HttpRequestHeaders& headers) {
+}
+
void URLRequestJob::Kill() {
// Make sure the request is notified that we are done. We assume that the
// request took care of setting its error status before calling Kill.
@@ -60,10 +72,6 @@
request_ = NULL;
}
-bool URLRequestJob::IsDownload() const {
- return (load_flags_ & net::LOAD_IS_DOWNLOAD) != 0;
-}
-
void URLRequestJob::SetupFilter() {
std::vector<Filter::FilterType> encoding_types;
if (GetContentEncodings(&encoding_types)) {
@@ -87,6 +95,14 @@
return true;
}
+bool URLRequestJob::IsSafeRedirect(const GURL& location) {
+ return true;
+}
+
+bool URLRequestJob::NeedsAuth() {
+ return false;
+}
+
void URLRequestJob::GetAuthChallengeInfo(
scoped_refptr<net::AuthChallengeInfo>* auth_info) {
// This will only be called if NeedsAuth() returns true, in which
@@ -143,6 +159,10 @@
return filter_input_byte_count_;
}
+bool URLRequestJob::GetMimeType(std::string* mime_type) const {
+ return false;
+}
+
bool URLRequestJob::GetURL(GURL* gurl) const {
if (!request_)
return false;
@@ -156,6 +176,18 @@
return request_->request_time();
};
+bool URLRequestJob::IsCachedContent() const {
+ return false;
+}
+
+int URLRequestJob::GetResponseCode() const {
+ return -1;
+}
+
+int URLRequestJob::GetInputStreamBufferSize() const {
+ return kFilterBufSize;
+}
+
// This function calls ReadData to get stream data. If a filter exists, passes
// the data to the attached filter. Then returns the output from filter back to
// the caller.
@@ -165,19 +197,19 @@
DCHECK_LT(buf_size, 1000000); // sanity check
DCHECK(buf);
DCHECK(bytes_read);
+ DCHECK(filtered_read_buffer_ == NULL);
+ DCHECK_EQ(0, filtered_read_buffer_len_);
*bytes_read = 0;
// Skip Filter if not present
if (!filter_.get()) {
- rv = ReadRawData(buf, buf_size, bytes_read);
- if (rv && *bytes_read > 0)
- RecordBytesRead(*bytes_read);
+ rv = ReadRawDataHelper(buf, buf_size, bytes_read);
} else {
// Save the caller's buffers while we do IO
// in the filter's buffers.
- read_buffer_ = buf;
- read_buffer_len_ = buf_size;
+ filtered_read_buffer_ = buf;
+ filtered_read_buffer_len_ = buf_size;
if (ReadFilteredData(bytes_read)) {
rv = true; // we have data to return
@@ -190,7 +222,43 @@
return rv;
}
-bool URLRequestJob::ReadRawDataForFilter(int *bytes_read) {
+void URLRequestJob::StopCaching() {
+ // Nothing to do here.
+}
+
+net::LoadState URLRequestJob::GetLoadState() const {
+ return net::LOAD_STATE_IDLE;
+}
+
+uint64 URLRequestJob::GetUploadProgress() const {
+ return 0;
+}
+
+bool URLRequestJob::GetCharset(std::string* charset) {
+ return false;
+}
+
+void URLRequestJob::GetResponseInfo(net::HttpResponseInfo* info) {
+}
+
+bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
+ return false;
+}
+
+bool URLRequestJob::GetContentEncodings(
+ std::vector<Filter::FilterType>* encoding_types) {
+ return false;
+}
+
+bool URLRequestJob::IsDownload() const {
+ return (load_flags_ & net::LOAD_IS_DOWNLOAD) != 0;
+}
+
+bool URLRequestJob::IsSdchResponse() const {
+ return false;
+}
+
+bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
bool rv = false;
DCHECK(bytes_read);
@@ -204,9 +272,7 @@
if (!filter_->stream_data_len() && !is_done()) {
net::IOBuffer* stream_buffer = filter_->stream_buffer();
int stream_buffer_size = filter_->stream_buffer_size();
- rv = ReadRawData(stream_buffer, stream_buffer_size, bytes_read);
- if (rv && *bytes_read > 0)
- RecordBytesRead(*bytes_read);
+ rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
}
return rv;
}
@@ -224,11 +290,12 @@
filter_->FlushStreamBuffer(bytes_read);
}
-bool URLRequestJob::ReadFilteredData(int *bytes_read) {
+bool URLRequestJob::ReadFilteredData(int* bytes_read) {
DCHECK(filter_.get()); // don't add data if there is no filter
- DCHECK(read_buffer_ != NULL); // we need to have a buffer to fill
- DCHECK_GT(read_buffer_len_, 0); // sanity check
- DCHECK_LT(read_buffer_len_, 1000000); // sanity check
+ DCHECK(filtered_read_buffer_ != NULL); // we need to have a buffer to fill
+ DCHECK_GT(filtered_read_buffer_len_, 0); // sanity check
+ DCHECK_LT(filtered_read_buffer_len_, 1000000); // sanity check
+ DCHECK(raw_read_buffer_ == NULL); // there should be no raw read buffer yet
bool rv = false;
*bytes_read = 0;
@@ -254,10 +321,11 @@
if ((filter_->stream_data_len() || filter_needs_more_output_space_)
&& !is_done()) {
// Get filtered data.
- int filtered_data_len = read_buffer_len_;
+ int filtered_data_len = filtered_read_buffer_len_;
Filter::FilterStatus status;
int output_buffer_size = filtered_data_len;
- status = filter_->ReadData(read_buffer_->data(), &filtered_data_len);
+ status = filter_->ReadData(filtered_read_buffer_->data(),
+ &filtered_data_len);
if (filter_needs_more_output_space_ && 0 == filtered_data_len) {
// filter_needs_more_output_space_ was mistaken... there are no more bytes
@@ -321,10 +389,29 @@
if (rv) {
// When we successfully finished a read, we no longer need to
- // save the caller's buffers. For debugging purposes, we clear
- // them out.
- read_buffer_ = NULL;
- read_buffer_len_ = 0;
+ // save the caller's buffers. Release our reference.
+ filtered_read_buffer_ = NULL;
+ filtered_read_buffer_len_ = 0;
+ }
+ return rv;
+}
+
+bool URLRequestJob::ReadRawDataHelper(net::IOBuffer* buf, int buf_size,
+ int* bytes_read) {
+ DCHECK(!request_->status().is_io_pending());
+ DCHECK(raw_read_buffer_ == NULL);
+
+ // Keep a pointer to the read buffer, so we have access to it in the
+ // OnRawReadComplete() callback in the event that the read completes
+ // asynchronously.
+ raw_read_buffer_ = buf;
+ bool rv = ReadRawData(buf, buf_size, bytes_read);
+
+ if (!request_->status().is_io_pending()) {
+ // If the read completes synchronously, whether success or failure,
+ // invoke the OnRawReadComplete callback so we can account for the
+ // completed read.
+ OnRawReadComplete(*bytes_read);
}
return rv;
}
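Note: ReadRawDataHelper centralizes read accounting: the raw buffer is pinned in raw_read_buffer_ for the lifetime of the read, and OnRawReadComplete runs exactly once per read, either immediately (synchronous completion) or later from NotifyReadComplete. A standalone sketch of that pattern, with Source and OnReadDone as hypothetical stand-ins for the request machinery:

#include <algorithm>
#include <cstdio>
#include <cstring>
#include <string>

// Hypothetical data source; always completes synchronously here.
struct Source {
  std::string data;
  bool io_pending;
  Source() : io_pending(false) {}
  bool Read(char* buf, int size, int* bytes_read) {
    size_t n = std::min(data.size(), static_cast<size_t>(size));
    memcpy(buf, data.data(), n);
    data.erase(0, n);
    *bytes_read = static_cast<int>(n);
    return true;
  }
};

void OnReadDone(int bytes_read) {  // accounting hook, cf. OnRawReadComplete
  std::printf("accounted %d bytes\n", bytes_read);
}

bool ReadWithAccounting(Source* src, char* buf, int size, int* bytes_read) {
  bool rv = src->Read(buf, size, bytes_read);
  if (!src->io_pending)
    OnReadDone(*bytes_read);  // sync completion: account immediately
  return rv;                  // async completion would account later
}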
@@ -412,14 +499,28 @@
}
has_handled_response_ = true;
- if (request_->status().is_success())
+ if (request_->status().is_success()) {
SetupFilter();
+ // Check if this content appears to be compressible.
+ std::string mime_type;
+ if (GetMimeType(&mime_type) &&
+ (net::IsSupportedJavascriptMimeType(mime_type.c_str()) ||
+ net::IsSupportedNonImageMimeType(mime_type.c_str()))) {
+ is_compressible_content_ = true;
+ }
+ }
+
if (!filter_.get()) {
std::string content_length;
request_->GetResponseHeaderByName("content-length", &content_length);
if (!content_length.empty())
expected_content_size_ = StringToInt64(content_length);
+ } else {
+ // Chrome today only sends "Accept-Encoding" for compression schemes.
+ // So, if there is a filter on the response, we know that the content
+ // was compressed.
+ is_compressed_ = true;
}
request_->ResponseStarted();
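Note: two flags set here feed the histograms recorded at completion: is_compressible_content_ (the MIME type is one that should travel compressed) and is_compressed_ (a filter was attached, and since Accept-Encoding is only sent for compression schemes, a filter implies compressed content). A toy restatement of that inference, with LooksCompressible standing in for the net::IsSupported*MimeType() checks:

#include <string>

// Hypothetical stand-in for net::IsSupportedJavascriptMimeType() and
// net::IsSupportedNonImageMimeType(); illustration only.
bool LooksCompressible(const std::string& mime_type) {
  return mime_type == "text/html" ||
         mime_type == "text/css" ||
         mime_type == "application/x-javascript";
}

// A response counts as compressed when a content-encoding filter was
// attached; as compressible when its MIME type qualifies.
void ClassifyResponse(const std::string& mime_type, bool has_filter,
                      bool* is_compressible, bool* is_compressed) {
  *is_compressible = LooksCompressible(mime_type);
  *is_compressed = has_filter;
}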
@@ -445,8 +546,7 @@
// The headers should be complete before reads complete
DCHECK(has_handled_response_);
- if (bytes_read > 0)
- RecordBytesRead(bytes_read);
+ OnRawReadComplete(bytes_read);
// Don't notify if we had an error.
if (!request_->status().is_success())
@@ -459,15 +559,19 @@
// survival until we can get out of this method.
scoped_refptr<URLRequestJob> self_preservation = this;
+ prefilter_bytes_read_ += bytes_read;
if (filter_.get()) {
// Tell the filter that it has more data
FilteredDataRead(bytes_read);
// Filter the data.
int filter_bytes_read = 0;
- if (ReadFilteredData(&filter_bytes_read))
+ if (ReadFilteredData(&filter_bytes_read)) {
+ postfilter_bytes_read_ += filter_bytes_read;
request_->delegate()->OnReadCompleted(request_, filter_bytes_read);
+ }
} else {
+ postfilter_bytes_read_ += bytes_read;
request_->delegate()->OnReadCompleted(request_, bytes_read);
}
}
@@ -478,6 +582,8 @@
return;
done_ = true;
+ RecordCompressionHistograms();
+
if (is_profiling() && metrics_->total_bytes_read_ > 0) {
// There are valid IO statistics. Fill in other fields of metrics for
// profiling consumers to retrieve information.
@@ -553,6 +659,14 @@
return filter_.get() && filter_->stream_data_len();
}
+void URLRequestJob::OnRawReadComplete(int bytes_read) {
+ DCHECK(raw_read_buffer_);
+ if (bytes_read > 0) {
+ RecordBytesRead(bytes_read);
+ }
+ raw_read_buffer_ = NULL;
+}
+
void URLRequestJob::RecordBytesRead(int bytes_read) {
if (is_profiling()) {
++(metrics_->number_of_read_IO_);
@@ -560,7 +674,8 @@
}
filter_input_byte_count_ += bytes_read;
UpdatePacketReadTimes(); // Facilitate stats recording if it is active.
- g_url_request_job_tracker.OnBytesRead(this, bytes_read);
+ g_url_request_job_tracker.OnBytesRead(this, raw_read_buffer_->data(),
+ bytes_read);
}
const URLRequestStatus URLRequestJob::GetStatus() {
@@ -743,3 +858,74 @@
return;
}
}
+
+// The histogram type shared by all compression-tracking histograms.
+#define COMPRESSION_HISTOGRAM(name, sample) \
+ do { \
+ UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
+ 500, 1000000, 100); \
+ } while (0)
+
+void URLRequestJob::RecordCompressionHistograms() {
+ if (IsCachedContent() || // Don't record cached content
+ !GetStatus().is_success() || // Don't record failed content
+ !is_compressible_content_ || // Only record compressible content
+ !prefilter_bytes_read_) // Zero-byte responses aren't useful.
+ return;
+
+ // Miniature requests aren't really compressible. Don't count them.
+ const int kMinSize = 16;
+ if (prefilter_bytes_read_ < kMinSize)
+ return;
+
+ // Only record for http or https urls.
+ bool is_http = request_->url().SchemeIs("http");
+ bool is_https = request_->url().SchemeIs("https");
+ if (!is_http && !is_https)
+ return;
+
+ const net::HttpResponseInfo& response = request_->response_info_;
+ int compressed_B = prefilter_bytes_read_;
+ int decompressed_B = postfilter_bytes_read_;
+
+ // We want to record how often downloaded resources are compressed.
+ // But, we recognize that different protocols may have different
+ // properties. So, for each request, we'll put it into one of 3
+ // groups:
+ // a) SSL resources
+ // Proxies cannot tamper with compression headers with SSL.
+ // b) Non-SSL, loaded-via-proxy resources
+ // In this case, we know a proxy might have interfered.
+ // c) Non-SSL, loaded-without-proxy resources
+ // In this case, we know there was no explicit proxy. However,
+ // it is possible that a transparent proxy was still interfering.
+ //
+ // For each group, we record the same 3 histograms.
+
+ if (is_https) {
+ if (is_compressed_) {
+ COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
+ COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
+ } else {
+ COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
+ }
+ return;
+ }
+
+ if (response.was_fetched_via_proxy) {
+ if (is_compressed_) {
+ COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
+ COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
+ } else {
+ COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
+ }
+ return;
+ }
+
+ if (is_compressed_) {
+ COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
+ COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
+ } else {
+ COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
+ }
+}
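Note: the function above records one of nine histogram names in total: three groups (SSL, explicit proxy, no proxy) times three metrics each. A compact restatement of the dispatch, with the UMA macro replaced by printf so the sketch stands alone:

#include <cstdio>

enum Group { GROUP_SSL, GROUP_PROXY, GROUP_NO_PROXY };

Group Classify(bool is_https, bool fetched_via_proxy) {
  if (is_https)
    return GROUP_SSL;     // proxies cannot tamper with SSL responses
  if (fetched_via_proxy)
    return GROUP_PROXY;   // an explicit proxy may have interfered
  return GROUP_NO_PROXY;  // only a transparent proxy remains possible
}

void Record(Group group, bool is_compressed,
            int compressed_bytes, int decompressed_bytes) {
  static const char* kPrefix[] = { "SSL", "Proxy", "NoProxy" };
  if (is_compressed) {
    std::printf("Net.Compress.%s.BytesBeforeCompression = %d\n",
                kPrefix[group], compressed_bytes);
    std::printf("Net.Compress.%s.BytesAfterCompression = %d\n",
                kPrefix[group], decompressed_bytes);
  } else {
    std::printf("Net.Compress.%s.ShouldHaveBeenCompressed = %d\n",
                kPrefix[group], decompressed_bytes);
  }
}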
diff --git a/net/url_request/url_request_job.h b/net/url_request/url_request_job.h
index 2aeb8ff..da6fc16 100644
--- a/net/url_request/url_request_job.h
+++ b/net/url_request/url_request_job.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -17,6 +17,7 @@
namespace net {
class AuthChallengeInfo;
+class HttpRequestHeaders;
class HttpResponseInfo;
class IOBuffer;
class UploadData;
@@ -50,10 +51,10 @@
// Sets the upload data. Most requests have no upload data, so this is a NOP.
// Job types supporting upload data will override this.
- virtual void SetUpload(net::UploadData* upload) { }
+ virtual void SetUpload(net::UploadData* upload);
// Sets extra request headers for Job types that support request headers.
- virtual void SetExtraRequestHeaders(const std::string& headers) { }
+ virtual void SetExtraRequestHeaders(const net::HttpRequestHeaders& headers);
// If any error occurs while starting the Job, NotifyStartError should be
// called.
@@ -90,29 +91,31 @@
// bytes read, 0 when there is no more data, or -1 if there was an error.
// This is just the backend for URLRequest::Read, see that function for more
// info.
- bool Read(net::IOBuffer* buf, int buf_size, int *bytes_read);
+ bool Read(net::IOBuffer* buf, int buf_size, int* bytes_read);
+
+ // Stops further caching of this request, if any. For more info, see
+ // URLRequest::StopCaching().
+ virtual void StopCaching();
// Called to fetch the current load state for the job.
- virtual net::LoadState GetLoadState() const { return net::LOAD_STATE_IDLE; }
+ virtual net::LoadState GetLoadState() const;
// Called to get the upload progress in bytes.
- virtual uint64 GetUploadProgress() const { return 0; }
+ virtual uint64 GetUploadProgress() const;
// Called to fetch the charset for this request. Only makes sense for some
// types of requests. Returns true on success. Calling this on a type that
// doesn't have a charset will return false.
- virtual bool GetCharset(std::string* charset) { return false; }
+ virtual bool GetCharset(std::string* charset);
// Called to get response info.
- virtual void GetResponseInfo(net::HttpResponseInfo* info) {}
+ virtual void GetResponseInfo(net::HttpResponseInfo* info);
// Returns the cookie values included in the response, if applicable.
// Returns true if applicable.
// NOTE: This removes the cookies from the job, so it will only return
// useful results once per job.
- virtual bool GetResponseCookies(std::vector<std::string>* cookies) {
- return false;
- }
+ virtual bool GetResponseCookies(std::vector<std::string>* cookies);
// Called to fetch the encoding types for this request. Only makes sense for
// some types of requests. Returns true on success. Calling this on a request
@@ -125,16 +128,14 @@
// in the reverse order (in the above example, ungzip first, and then sdch
// expand).
virtual bool GetContentEncodings(
- std::vector<Filter::FilterType>* encoding_types) {
- return false;
- }
+ std::vector<Filter::FilterType>* encoding_types);
// Find out if this is a download.
virtual bool IsDownload() const;
// Find out if this is a response to a request that advertised an SDCH
// dictionary. Only makes sense for some types of requests.
- virtual bool IsSdchResponse() const { return false; }
+ virtual bool IsSdchResponse() const;
// Called to setup stream filter for this request. An example of filter is
// content encoding/decoding.
@@ -157,14 +158,12 @@
// location. This may be used to implement protocol-specific restrictions.
// If this function returns false, then the URLRequest will fail reporting
// net::ERR_UNSAFE_REDIRECT.
- virtual bool IsSafeRedirect(const GURL& location) {
- return true;
- }
+ virtual bool IsSafeRedirect(const GURL& location);
// Called to determine if this response is asking for authentication. Only
// makes sense for some types of requests. The caller is responsible for
// obtaining the credentials passing them to SetAuth.
- virtual bool NeedsAuth() { return false; }
+ virtual bool NeedsAuth();
// Fills the authentication info with the server's response.
virtual void GetAuthChallengeInfo(
@@ -207,13 +206,13 @@
// FilterContext methods:
// These methods are not applicable to all connections.
- virtual bool GetMimeType(std::string* mime_type) const { return false; }
+ virtual bool GetMimeType(std::string* mime_type) const;
virtual bool GetURL(GURL* gurl) const;
virtual base::Time GetRequestTime() const;
- virtual bool IsCachedContent() const { return false; }
+ virtual bool IsCachedContent() const;
virtual int64 GetByteReadCount() const;
- virtual int GetResponseCode() const { return -1; }
- virtual int GetInputStreamBufferSize() const { return kFilterBufSize; }
+ virtual int GetResponseCode() const;
+ virtual int GetInputStreamBufferSize() const;
virtual void RecordPacketStats(StatisticSelector statistic) const;
protected:
@@ -294,6 +293,15 @@
// Contains IO performance measurement when profiling is enabled.
scoped_ptr<URLRequestJobMetrics> metrics_;
+ // The number of bytes read before passing to the filter.
+ int prefilter_bytes_read_;
+ // The number of bytes read after passing through the filter.
+ int postfilter_bytes_read_;
+ // True when (we believe) the content in this URLRequest was compressible.
+ bool is_compressible_content_;
+ // True when the content in this URLRequest was compressed.
+ bool is_compressed_;
+
private:
// Size of filter input buffers used by this class.
static const int kFilterBufSize;
@@ -303,13 +311,23 @@
// an error occurred (or we are waiting for IO to complete).
bool ReadRawDataForFilter(int *bytes_read);
+ // Invokes ReadRawData and records bytes read if the read completes
+ // synchronously.
+ bool ReadRawDataHelper(net::IOBuffer* buf, int buf_size, int* bytes_read);
+
// Called in response to a redirect that was not canceled to follow the
// redirect. The current job will be replaced with a new job loading the
// given redirect destination.
void FollowRedirect(const GURL& location, int http_status_code);
- // Updates the profiling info and notifies observers that bytes_read bytes
- // have been read.
+ // Called after every raw read. If |bytes_read| is > 0, this indicates
+ // a successful read of |bytes_read| unfiltered bytes. If |bytes_read|
+ // is 0, this indicates that there is no additional data to read. If
+ // |bytes_read| is < 0, an error occurred and no bytes were read.
+ void OnRawReadComplete(int bytes_read);
+
+ // Updates the profiling info and notifies observers that an additional
+ // |bytes_read| unfiltered bytes have been read for this job.
void RecordBytesRead(int bytes_read);
// Called to query whether there is data available in the filter to be read
@@ -319,6 +337,8 @@
// Record packet arrival times for possible use in histograms.
void UpdatePacketReadTimes();
+ void RecordCompressionHistograms();
+
// Indicates that the job is done producing data, either it has completed
// all the data or an error has been encountered. Set exclusively by
// NotifyDone so that it is kept in sync with the request.
@@ -338,8 +358,12 @@
// processing the filtered data, we return the data in the caller's buffer.
// While the async IO is in progress, we save the user buffer here, and
// when the IO completes, we fill this in.
- net::IOBuffer *read_buffer_;
- int read_buffer_len_;
+ scoped_refptr<net::IOBuffer> filtered_read_buffer_;
+ int filtered_read_buffer_len_;
+
+ // We keep a pointer to the read buffer while asynchronous reads are
+ // in progress, so we are able to pass those bytes to job observers.
+ scoped_refptr<net::IOBuffer> raw_read_buffer_;
// Used by HandleResponseIfNecessary to track whether we've sent the
// OnResponseStarted callback and potentially redirect callbacks as well.
@@ -366,7 +390,7 @@
// as gathered here is post-SSL, and post-cache-fetch, and does not reflect
// true packet arrival times in such cases.
- // Total number of bytes read from network (or cache) and and typically handed
+ // Total number of bytes read from network (or cache) and typically handed
// to filter to process. Used to histogram compression ratios, and error
// recovery scenarios in filters.
int64 filter_input_byte_count_;
diff --git a/net/url_request/url_request_job_manager.cc b/net/url_request/url_request_job_manager.cc
index 7cd934e..0615280 100644
--- a/net/url_request/url_request_job_manager.cc
+++ b/net/url_request/url_request_job_manager.cc
@@ -14,7 +14,7 @@
#include "net/url_request/url_request_data_job.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_file_job.h"
-#include "net/url_request/url_request_new_ftp_job.h"
+#include "net/url_request/url_request_ftp_job.h"
#include "net/url_request/url_request_http_job.h"
// The built-in set of protocol factories
@@ -31,7 +31,7 @@
{ "http", URLRequestHttpJob::Factory },
{ "https", URLRequestHttpJob::Factory },
{ "file", URLRequestFileJob::Factory },
- { "ftp", URLRequestNewFtpJob::Factory },
+ { "ftp", URLRequestFtpJob::Factory },
{ "about", URLRequestAboutJob::Factory },
{ "data", URLRequestDataJob::Factory },
};
diff --git a/net/url_request/url_request_job_manager.h b/net/url_request/url_request_job_manager.h
index 9930e17..d8934e6 100644
--- a/net/url_request/url_request_job_manager.h
+++ b/net/url_request/url_request_job_manager.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -97,7 +97,7 @@
}
#endif
- DISALLOW_EVIL_CONSTRUCTORS(URLRequestJobManager);
+ DISALLOW_COPY_AND_ASSIGN(URLRequestJobManager);
};
#endif // NET_URL_REQUEST_URL_REQUEST_JOB_MANAGER_H__
diff --git a/net/url_request/url_request_job_metrics.cc b/net/url_request/url_request_job_metrics.cc
index eea21fa..e0726da 100644
--- a/net/url_request/url_request_job_metrics.cc
+++ b/net/url_request/url_request_job_metrics.cc
@@ -6,6 +6,7 @@
#include "base/basictypes.h"
#include "base/string_util.h"
+#include "base/utf_string_conversions.h"
using base::TimeDelta;
diff --git a/net/url_request/url_request_job_tracker.cc b/net/url_request/url_request_job_tracker.cc
index 1f5b33c..e3e5d36 100644
--- a/net/url_request/url_request_job_tracker.cc
+++ b/net/url_request/url_request_job_tracker.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -51,7 +51,8 @@
}
void URLRequestJobTracker::OnBytesRead(URLRequestJob* job,
+ const char* buf,
int byte_count) {
FOR_EACH_OBSERVER(JobObserver, observers_,
- OnBytesRead(job, byte_count));
+ OnBytesRead(job, buf, byte_count));
}
diff --git a/net/url_request/url_request_job_tracker.h b/net/url_request/url_request_job_tracker.h
index 5f12fc7..e03b71f 100644
--- a/net/url_request/url_request_job_tracker.h
+++ b/net/url_request/url_request_job_tracker.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -43,9 +43,13 @@
virtual void OnJobRedirect(URLRequestJob* job, const GURL& location,
int status_code) = 0;
- // Called when a new chunk of bytes has been read for the given job. The
- // byte count is the number of bytes for that read event only.
- virtual void OnBytesRead(URLRequestJob* job, int byte_count) = 0;
+ // Called when a new chunk of unfiltered bytes has been read for
+ // the given job. |byte_count| is the number of bytes for that
+ // read event only. |buf| is a pointer to the data buffer that
+ // contains those bytes. The data in |buf| is only valid for the
+ // duration of the OnBytesRead callback.
+ virtual void OnBytesRead(URLRequestJob* job, const char* buf,
+ int byte_count) = 0;
virtual ~JobObserver() {}
};
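Note: because |buf| is only valid while the callback runs, an observer that wants the bytes must copy them before returning. A minimal conforming observer (hypothetical, matching the interface above; the empty overrides are required because the methods are pure virtual):

#include <string>
#include "net/url_request/url_request_job_tracker.h"

class ByteCapturingObserver : public URLRequestJobTracker::JobObserver {
 public:
  virtual void OnJobAdded(URLRequestJob* job) {}
  virtual void OnJobRemoved(URLRequestJob* job) {}
  virtual void OnJobDone(URLRequestJob* job,
                         const URLRequestStatus& status) {}
  virtual void OnJobRedirect(URLRequestJob* job, const GURL& location,
                             int status_code) {}
  virtual void OnBytesRead(URLRequestJob* job, const char* buf,
                           int byte_count) {
    // Copy immediately; |buf| may be reused once this callback returns.
    captured_.append(buf, byte_count);
  }

 private:
  std::string captured_;
};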
@@ -74,7 +78,7 @@
int status_code);
// Bytes read notifications.
- void OnBytesRead(URLRequestJob* job, int byte_count);
+ void OnBytesRead(URLRequestJob* job, const char* buf, int byte_count);
// allows iteration over all active jobs
JobIterator begin() const {
diff --git a/net/url_request/url_request_job_tracker_unittest.cc b/net/url_request/url_request_job_tracker_unittest.cc
new file mode 100644
index 0000000..3ddbcc2
--- /dev/null
+++ b/net/url_request/url_request_job_tracker_unittest.cc
@@ -0,0 +1,232 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string.h>
+#include <algorithm>
+#include <string>
+#include <vector>
+#include "base/message_loop.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/filter.h"
+#include "net/base/io_buffer.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+#include "net/url_request/url_request_job_tracker.h"
+#include "net/url_request/url_request_status.h"
+#include "net/url_request/url_request_unittest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+using testing::Eq;
+using testing::InSequence;
+using testing::NotNull;
+using testing::StrictMock;
+
+namespace {
+
+const char kBasic[] = "Hello\n";
+
+// The above string "Hello\n", gzip compressed.
+const unsigned char kCompressed[] = {
+ 0x1f, 0x8b, 0x08, 0x08, 0x38, 0x18, 0x2e, 0x4c, 0x00, 0x03, 0x63,
+ 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x2e, 0x68,
+ 0x74, 0x6d, 0x6c, 0x00, 0xf3, 0x48, 0xcd, 0xc9, 0xc9, 0xe7, 0x02,
+ 0x00, 0x16, 0x35, 0x96, 0x31, 0x06, 0x00, 0x00, 0x00
+};
+
+bool GetResponseBody(const GURL& url, std::string* out_body) {
+ if (url.spec() == "test:basic") {
+ *out_body = kBasic;
+ } else if (url.spec() == "test:compressed") {
+ out_body->assign(reinterpret_cast<const char*>(kCompressed),
+ sizeof(kCompressed));
+ } else {
+ return false;
+ }
+
+ return true;
+}
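Note: the kCompressed literal can be regenerated with zlib: windowBits of 15 + 16 selects the gzip wrapper rather than the raw zlib one. A standalone sketch (assumes zlib is available; the gzip header embeds a timestamp and original filename, so the emitted bytes will not match the literal above exactly):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main() {
  static const char kInput[] = "Hello\n";
  unsigned char out[128];

  z_stream strm;
  memset(&strm, 0, sizeof(strm));
  // windowBits = 15 + 16 requests gzip framing from deflate.
  deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 + 16, 8,
               Z_DEFAULT_STRATEGY);

  strm.next_in = (Bytef*)kInput;
  strm.avail_in = sizeof(kInput) - 1;  // exclude the trailing NUL
  strm.next_out = out;
  strm.avail_out = sizeof(out);
  deflate(&strm, Z_FINISH);

  for (size_t i = 0; i < strm.total_out; ++i)
    printf("0x%02x, ", out[i]);
  printf("\n");
  deflateEnd(&strm);
  return 0;
}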
+
+class MockJobObserver : public URLRequestJobTracker::JobObserver {
+ public:
+ MOCK_METHOD1(OnJobAdded, void(URLRequestJob* job));
+ MOCK_METHOD1(OnJobRemoved, void(URLRequestJob* job));
+ MOCK_METHOD2(OnJobDone, void(URLRequestJob* job,
+ const URLRequestStatus& status));
+ MOCK_METHOD3(OnJobRedirect, void(URLRequestJob* job,
+ const GURL& location,
+ int status_code));
+ MOCK_METHOD3(OnBytesRead, void(URLRequestJob* job,
+ const char* buf,
+ int byte_count));
+};
+
+// A URLRequestJob that returns static content for given URLs. We do
+// not use URLRequestTestJob here because URLRequestTestJob fakes
+// async operations by calling ReadRawData synchronously in an async
+// callback. This test requires a URLRequestJob that returns false for
+// async reads, in order to exercise the real async read codepath.
+class URLRequestJobTrackerTestJob : public URLRequestJob {
+ public:
+ URLRequestJobTrackerTestJob(URLRequest* request, bool async_reads)
+ : URLRequestJob(request), async_reads_(async_reads) {}
+
+ void Start() {
+ ASSERT_TRUE(GetResponseBody(request_->url(), &response_data_));
+
+ // Start reading asynchronously so that all error reporting and data
+ // callbacks happen as they would for network requests.
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ this, &URLRequestJobTrackerTestJob::NotifyHeadersComplete));
+ }
+
+ bool ReadRawData(net::IOBuffer* buf, int buf_size,
+ int *bytes_read) {
+ const size_t bytes_to_read = std::min(
+ response_data_.size(), static_cast<size_t>(buf_size));
+
+ // Regardless of whether we're performing a sync or async read,
+ // copy the data into the caller's buffer now. That way we don't
+ // have to hold on to the buffers in the async case.
+ memcpy(buf->data(), response_data_.data(), bytes_to_read);
+ response_data_.erase(0, bytes_to_read);
+
+ if (async_reads_) {
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+ this, &URLRequestJobTrackerTestJob::OnReadCompleted,
+ bytes_to_read));
+ } else {
+ SetStatus(URLRequestStatus());
+ *bytes_read = bytes_to_read;
+ }
+ return !async_reads_;
+ }
+
+ void OnReadCompleted(int status) {
+ if (status == 0) {
+ NotifyDone(URLRequestStatus());
+ } else if (status > 0) {
+ SetStatus(URLRequestStatus());
+ } else {
+ FAIL() << "Unexpected OnReadCompleted callback.";
+ }
+
+ NotifyReadComplete(status);
+ }
+
+ bool GetContentEncodings(
+ std::vector<Filter::FilterType>* encoding_types) {
+ if (request_->url().spec() == "test:basic") {
+ return false;
+ } else if (request_->url().spec() == "test:compressed") {
+ encoding_types->push_back(Filter::FILTER_TYPE_GZIP);
+ return true;
+ } else {
+ return URLRequestJob::GetContentEncodings(encoding_types);
+ }
+ }
+
+ // The data to send, will be set in Start().
+ std::string response_data_;
+
+ // Should reads be synchronous or asynchronous?
+ const bool async_reads_;
+};
+
+// Google Mock Matcher to check two URLRequestStatus instances for
+// equality.
+MATCHER_P(StatusEq, other, "") {
+ return (arg.status() == other.status() &&
+ arg.os_error() == other.os_error());
+}
+
+// Google Mock Matcher to check that two blocks of memory are equal.
+MATCHER_P2(MemEq, other, len, "") {
+ return memcmp(arg, other, len) == 0;
+}
+
+class URLRequestJobTrackerTest : public PlatformTest {
+ protected:
+ static void SetUpTestCase() {
+ URLRequest::RegisterProtocolFactory("test", &Factory);
+ }
+
+ virtual void SetUp() {
+ g_async_reads = true;
+ }
+
+ void AssertJobTrackerCallbacks(const char* url) {
+ InSequence seq;
+ testing::StrictMock<MockJobObserver> observer;
+
+ const GURL gurl(url);
+ std::string body;
+ ASSERT_TRUE(GetResponseBody(gurl, &body));
+
+ // We expect one call to each of the observer methods below, in the
+ // following order (OnJobRedirect is not expected for these fetches):
+ EXPECT_CALL(observer, OnJobAdded(NotNull()));
+ EXPECT_CALL(observer, OnBytesRead(NotNull(),
+ MemEq(body.data(), body.size()),
+ Eq(static_cast<int>(body.size()))));
+ EXPECT_CALL(observer, OnJobDone(NotNull(), StatusEq(URLRequestStatus())));
+ EXPECT_CALL(observer, OnJobRemoved(NotNull()));
+
+ // Attach our observer and perform the resource fetch.
+ g_url_request_job_tracker.AddObserver(&observer);
+ Fetch(gurl);
+ g_url_request_job_tracker.RemoveObserver(&observer);
+ }
+
+ void Fetch(const GURL& url) {
+ TestDelegate d;
+ {
+ URLRequest request(url, &d);
+ request.Start();
+ MessageLoop::current()->RunAllPending();
+ }
+
+ // A few sanity checks to make sure that the delegate also
+ // receives the expected callbacks.
+ EXPECT_EQ(1, d.response_started_count());
+ EXPECT_FALSE(d.received_data_before_response());
+ EXPECT_STREQ(kBasic, d.data_received().c_str());
+ }
+
+ static URLRequest::ProtocolFactory Factory;
+ static bool g_async_reads;
+};
+
+// static
+URLRequestJob* URLRequestJobTrackerTest::Factory(URLRequest* request,
+ const std::string& scheme) {
+ return new URLRequestJobTrackerTestJob(request, g_async_reads);
+}
+
+// static
+bool URLRequestJobTrackerTest::g_async_reads = true;
+
+TEST_F(URLRequestJobTrackerTest, BasicAsync) {
+ g_async_reads = true;
+ AssertJobTrackerCallbacks("test:basic");
+}
+
+TEST_F(URLRequestJobTrackerTest, BasicSync) {
+ g_async_reads = false;
+ AssertJobTrackerCallbacks("test:basic");
+}
+
+TEST_F(URLRequestJobTrackerTest, CompressedAsync) {
+ g_async_reads = true;
+ AssertJobTrackerCallbacks("test:compressed");
+}
+
+TEST_F(URLRequestJobTrackerTest, CompressedSync) {
+ g_async_reads = false;
+ AssertJobTrackerCallbacks("test:compressed");
+}
+
+} // namespace
diff --git a/net/url_request/url_request_netlog_params.cc b/net/url_request/url_request_netlog_params.cc
new file mode 100644
index 0000000..3693ee9
--- /dev/null
+++ b/net/url_request/url_request_netlog_params.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_netlog_params.h"
+
+#include "base/values.h"
+
+URLRequestStartEventParameters::URLRequestStartEventParameters(
+ const GURL& url,
+ const std::string& method,
+ int load_flags,
+ net::RequestPriority priority)
+ : url_(url),
+ method_(method),
+ load_flags_(load_flags),
+ priority_(priority) {
+}
+
+Value* URLRequestStartEventParameters::ToValue() const {
+ DictionaryValue* dict = new DictionaryValue();
+ dict->SetString(L"url", url_.possibly_invalid_spec());
+ dict->SetString(L"method", method_);
+ dict->SetInteger(L"load_flags", load_flags_);
+ dict->SetInteger(L"priority", static_cast<int>(priority_));
+ return dict;
+}
diff --git a/net/url_request/url_request_netlog_params.h b/net/url_request/url_request_netlog_params.h
new file mode 100644
index 0000000..d84052a
--- /dev/null
+++ b/net/url_request/url_request_netlog_params.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_NETLOG_PARAMS_H_
+#define NET_URL_REQUEST_URL_REQUEST_NETLOG_PARAMS_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/net_log.h"
+#include "net/base/request_priority.h"
+
+// Holds the parameters to emit to the NetLog when starting a URLRequest.
+class URLRequestStartEventParameters : public net::NetLog::EventParameters {
+ public:
+ URLRequestStartEventParameters(const GURL& url,
+ const std::string& method,
+ int load_flags,
+ net::RequestPriority priority);
+
+ const GURL& url() const {
+ return url_;
+ }
+
+ int load_flags() const {
+ return load_flags_;
+ }
+
+ virtual Value* ToValue() const;
+
+ private:
+ const GURL url_;
+ const std::string method_;
+ const int load_flags_;
+ const net::RequestPriority priority_;
+
+ DISALLOW_COPY_AND_ASSIGN(URLRequestStartEventParameters);
+};
+
+#endif // NET_URL_REQUEST_URL_REQUEST_NETLOG_PARAMS_H_
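Note: a quick usage sketch for the class above (hypothetical driver, not part of the patch; assumes net::LOW is a valid RequestPriority value in this tree):

#include "base/scoped_ptr.h"
#include "base/values.h"
#include "net/url_request/url_request_netlog_params.h"

void DumpStartParams() {
  URLRequestStartEventParameters params(
      GURL("http://example.com/"), "GET", 0 /* load_flags */, net::LOW);
  scoped_ptr<Value> value(params.ToValue());
  // |value| is a DictionaryValue with keys "url", "method",
  // "load_flags", and "priority".
}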
diff --git a/net/url_request/url_request_simple_job.h b/net/url_request/url_request_simple_job.h
index 786d2e4..4ea856c 100644
--- a/net/url_request/url_request_simple_job.h
+++ b/net/url_request/url_request_simple_job.h
@@ -28,9 +28,10 @@
std::string* charset,
std::string* data) const = 0;
- private:
+ protected:
void StartAsync();
+ private:
std::string mime_type_;
std::string charset_;
std::string data_;
diff --git a/net/url_request/url_request_test_job.cc b/net/url_request/url_request_test_job.cc
index b1e3b5c..363d178 100644
--- a/net/url_request/url_request_test_job.cc
+++ b/net/url_request/url_request_test_job.cc
@@ -183,6 +183,12 @@
info->headers = response_headers_;
}
+int URLRequestTestJob::GetResponseCode() const {
+ if (response_headers_)
+ return response_headers_->response_code();
+ return -1;
+}
+
bool URLRequestTestJob::IsRedirectResponse(GURL* location,
int* http_status_code) {
if (!response_headers_)
diff --git a/net/url_request/url_request_test_job.h b/net/url_request/url_request_test_job.h
index 4daabf6..f618c07 100644
--- a/net/url_request/url_request_test_job.h
+++ b/net/url_request/url_request_test_job.h
@@ -93,6 +93,7 @@
virtual void Kill();
virtual bool GetMimeType(std::string* mime_type) const;
virtual void GetResponseInfo(net::HttpResponseInfo* info);
+ virtual int GetResponseCode() const;
virtual bool IsRedirectResponse(GURL* location, int* http_status_code);
protected:
diff --git a/net/url_request/url_request_unittest.cc b/net/url_request/url_request_unittest.cc
index a94158a..bd4e56a 100644
--- a/net/url_request/url_request_unittest.cc
+++ b/net/url_request/url_request_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -26,8 +26,8 @@
#include "net/base/cookie_monster.h"
#include "net/base/cookie_policy.h"
#include "net/base/load_flags.h"
-#include "net/base/load_log.h"
-#include "net/base/load_log_unittest.h"
+#include "net/base/net_log.h"
+#include "net/base/net_log_unittest.h"
#include "net/base/net_errors.h"
#include "net/base/net_module.h"
#include "net/base/net_util.h"
@@ -36,9 +36,10 @@
#include "net/ftp/ftp_network_layer.h"
#include "net/http/http_cache.h"
#include "net/http/http_network_layer.h"
+#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/proxy/proxy_service.h"
-#include "net/socket/ssl_test_util.h"
+#include "net/test/test_server.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_file_dir_job.h"
#include "net/url_request/url_request_http_job.h"
@@ -122,7 +123,7 @@
}
uploadBytes[kMsgSize] = '\0';
- scoped_refptr<URLRequestContext> context = new URLRequestTestContext();
+ scoped_refptr<URLRequestContext> context = new TestURLRequestContext();
for (int i = 0; i < kIterations; ++i) {
TestDelegate d;
@@ -217,16 +218,7 @@
EXPECT_FALSE(d.received_data_before_response());
EXPECT_NE(0, d.bytes_received());
- // The first part of the log will be for URL_REQUEST_START.
- // After that, there should be an HTTP_TRANSACTION_READ_BODY
- EXPECT_TRUE(net::LogContainsBeginEvent(
- *r.load_log(), 0, net::LoadLog::TYPE_URL_REQUEST_START));
- EXPECT_TRUE(net::LogContainsEndEvent(
- *r.load_log(), -3, net::LoadLog::TYPE_URL_REQUEST_START));
- EXPECT_TRUE(net::LogContainsBeginEvent(
- *r.load_log(), -2, net::LoadLog::TYPE_HTTP_TRANSACTION_READ_BODY));
- EXPECT_TRUE(net::LogContainsEndEvent(
- *r.load_log(), -1, net::LoadLog::TYPE_HTTP_TRANSACTION_READ_BODY));
+ // TODO(eroman): Add back the NetLog tests...
}
}
@@ -283,42 +275,8 @@
class HTTPSRequestTest : public testing::Test {
};
-#if defined(OS_MACOSX)
-// Status 6/19/09:
-//
-// If these tests are enabled on OSX, the first one (HTTPSGetTest)
-// will fail. I didn't track it down explicitly, but did observe that
-// the testserver.py kills itself (e.g. "process_util_posix.cc(84)]
-// Unable to terminate process."). tlslite and testserver.py are hard
-// to debug (redirection of stdout/stderr to a file so you can't see
-// errors; lots of naked "except:" statements, etc), but I did track
-// down an SSL auth failure as one cause of it deciding to die
-// silently.
-//
-// The next test, HTTPSMismatchedTest, will look like it hangs by
-// looping over calls to SSLHandshake() (Security framework call) as
-// called from SSLClientSocketMac::DoHandshake(). Return values are a
-// repeating pattern of -9803 (come back later) and -9812 (cert valid
-// but root not trusted). If you don't have the cert in your keychain
-// as documented on http://dev.chromium.org/developers/testing, the
-// -9812 becomes a -9813 (no root cert). Interestingly, the handshake
-// also appears to be a failure point for other disabled tests, such
-// as (SSLClientSocketTest,Connect) in
-// net/base/ssl_client_socket_unittest.cc.
-//
-// Old comment (not sure if obsolete):
-// ssl_client_socket_mac.cc crashes currently in GetSSLInfo
-// when called on a connection with an unrecognized certificate
-#define MAYBE_HTTPSGetTest DISABLED_HTTPSGetTest
-#define MAYBE_HTTPSMismatchedTest DISABLED_HTTPSMismatchedTest
-#define MAYBE_HTTPSExpiredTest DISABLED_HTTPSExpiredTest
-#else
-#define MAYBE_HTTPSGetTest HTTPSGetTest
-#define MAYBE_HTTPSMismatchedTest HTTPSMismatchedTest
-#define MAYBE_HTTPSExpiredTest HTTPSExpiredTest
-#endif
-TEST_F(HTTPSRequestTest, MAYBE_HTTPSGetTest) {
+TEST_F(HTTPSRequestTest, HTTPSGetTest) {
// Note: tools/testserver/testserver.py does not need
// a working document root to serve the pages / and /hello.html,
// so this test doesn't really need to specify a document root.
@@ -342,7 +300,7 @@
}
}
-TEST_F(HTTPSRequestTest, MAYBE_HTTPSMismatchedTest) {
+TEST_F(HTTPSRequestTest, HTTPSMismatchedTest) {
scoped_refptr<HTTPSTestServer> server =
HTTPSTestServer::CreateMismatchedServer(L"net/data/ssl");
ASSERT_TRUE(NULL != server.get());
@@ -370,7 +328,7 @@
}
}
-TEST_F(HTTPSRequestTest, MAYBE_HTTPSExpiredTest) {
+TEST_F(HTTPSRequestTest, HTTPSExpiredTest) {
scoped_refptr<HTTPSTestServer> server =
HTTPSTestServer::CreateExpiredServer(L"net/data/ssl");
ASSERT_TRUE(NULL != server.get());
@@ -493,7 +451,7 @@
TEST_F(URLRequestTestHTTP, CancelTest5) {
ASSERT_TRUE(NULL != server_.get());
- scoped_refptr<URLRequestContext> context = new URLRequestTestContext();
+ scoped_refptr<URLRequestContext> context = new TestURLRequestContext();
// populate cache
{
@@ -696,9 +654,11 @@
{
TestURLRequest r(temp_url, &d);
- r.SetExtraRequestHeaders(
- StringPrintf("Range: bytes=%" PRIuS "-%" PRIuS "\n",
- first_byte_position, last_byte_position));
+ net::HttpRequestHeaders headers;
+ headers.SetHeader(net::HttpRequestHeaders::kRange,
+ StringPrintf("bytes=%" PRIuS "-%" PRIuS,
+ first_byte_position, last_byte_position));
+ r.SetExtraRequestHeaders(headers);
r.Start();
EXPECT_TRUE(r.is_pending());
@@ -737,8 +697,11 @@
{
TestURLRequest r(temp_url, &d);
- r.SetExtraRequestHeaders(StringPrintf("Range: bytes=%" PRIuS "-\n",
- first_byte_position));
+ net::HttpRequestHeaders headers;
+ headers.SetHeader(net::HttpRequestHeaders::kRange,
+ StringPrintf("bytes=%" PRIuS "-",
+ first_byte_position));
+ r.SetExtraRequestHeaders(headers);
r.Start();
EXPECT_TRUE(r.is_pending());
@@ -771,7 +734,10 @@
{
TestURLRequest r(temp_url, &d);
- r.SetExtraRequestHeaders(StringPrintf("Range: bytes=0-0,10-200,200-300\n"));
+ net::HttpRequestHeaders headers;
+ headers.SetHeader(net::HttpRequestHeaders::kRange,
+ "bytes=0-0,10-200,200-300");
+ r.SetExtraRequestHeaders(headers);
r.Start();
EXPECT_TRUE(r.is_pending());
@@ -795,21 +761,6 @@
}
}
-// This test is disabled because it fails on some computers due to proxies
-// returning a page in response to this request rather than reporting failure.
-TEST_F(URLRequestTest, DISABLED_DnsFailureTest) {
- TestDelegate d;
- {
- URLRequest r(GURL("http://thisisnotavalidurl0123456789foo.com/"), &d);
-
- r.Start();
- EXPECT_TRUE(r.is_pending());
-
- MessageLoop::current()->Run();
- EXPECT_TRUE(d.request_failed());
- }
-}
-
TEST_F(URLRequestTestHTTP, ResponseHeadersTest) {
ASSERT_TRUE(NULL != server_.get());
TestDelegate d;
@@ -962,26 +913,29 @@
path = path.Append(FILE_PATH_LITERAL("url_request_unittest"));
TestDelegate d;
- d.set_quit_on_redirect(true);
TestURLRequest req(net::FilePathToFileURL(path), &d);
req.Start();
MessageLoop::current()->Run();
- // Let the directory lister have time to finish its work, which will
- // cause the URLRequestFileDirJob's ref count to drop to 1.
- URLRequestFileDirJob* job = static_cast<URLRequestFileDirJob*>(req.job());
- while (!job->list_complete()) {
- PlatformThread::Sleep(10);
- MessageLoop::current()->RunAllPending();
- }
-
- // Should not crash during this call!
- req.FollowDeferredRedirect();
-
- // Flush event queue.
- MessageLoop::current()->RunAllPending();
+ ASSERT_EQ(1, d.received_redirect_count());
+ ASSERT_LT(0, d.bytes_received());
+ ASSERT_FALSE(d.request_failed());
+ ASSERT_TRUE(req.status().is_success());
}
+#if defined(OS_WIN)
+// Don't accept the url "file:///" on windows. See http://crbug.com/1474.
+TEST_F(URLRequestTest, FileDirRedirectSingleSlash) {
+ TestDelegate d;
+ TestURLRequest req(GURL("file:///"), &d);
+ req.Start();
+ MessageLoop::current()->Run();
+
+ ASSERT_EQ(1, d.received_redirect_count());
+ ASSERT_FALSE(req.status().is_success());
+}
+#endif
+
TEST_F(URLRequestTestHTTP, RestrictRedirects) {
ASSERT_TRUE(NULL != server_.get());
@@ -1094,14 +1048,16 @@
TEST_F(URLRequestTestHTTP, VaryHeader) {
ASSERT_TRUE(NULL != server_.get());
- scoped_refptr<URLRequestContext> context = new URLRequestTestContext();
+ scoped_refptr<URLRequestContext> context = new TestURLRequestContext();
// populate the cache
{
TestDelegate d;
URLRequest req(server_->TestServerPage("echoheader?foo"), &d);
req.set_context(context);
- req.SetExtraRequestHeaders("foo:1");
+ net::HttpRequestHeaders headers;
+ headers.SetHeader("foo", "1");
+ req.SetExtraRequestHeaders(headers);
req.Start();
MessageLoop::current()->Run();
}
@@ -1111,7 +1067,9 @@
TestDelegate d;
URLRequest req(server_->TestServerPage("echoheader?foo"), &d);
req.set_context(context);
- req.SetExtraRequestHeaders("foo:1");
+ net::HttpRequestHeaders headers;
+ headers.SetHeader("foo", "1");
+ req.SetExtraRequestHeaders(headers);
req.Start();
MessageLoop::current()->Run();
@@ -1123,7 +1081,9 @@
TestDelegate d;
URLRequest req(server_->TestServerPage("echoheader?foo"), &d);
req.set_context(context);
- req.SetExtraRequestHeaders("foo:2");
+ net::HttpRequestHeaders headers;
+ headers.SetHeader("foo", "2");
+ req.SetExtraRequestHeaders(headers);
req.Start();
MessageLoop::current()->Run();
@@ -1132,7 +1092,7 @@
}
TEST_F(URLRequestTestHTTP, BasicAuth) {
- scoped_refptr<URLRequestContext> context = new URLRequestTestContext();
+ scoped_refptr<URLRequestContext> context = new TestURLRequestContext();
ASSERT_TRUE(NULL != server_.get());
// populate the cache
@@ -1183,7 +1143,7 @@
// Request a page that will give a 401 containing a Set-Cookie header.
// Verify that when the transaction is restarted, it includes the new cookie.
{
- scoped_refptr<URLRequestContext> context = new URLRequestTestContext();
+ scoped_refptr<URLRequestContext> context = new TestURLRequestContext();
TestDelegate d;
d.set_username(L"user");
d.set_password(L"secret");
@@ -1204,7 +1164,7 @@
// Same test as above, except this time the restart is initiated earlier
// (without user intervention since identity is embedded in the URL).
{
- scoped_refptr<URLRequestContext> context = new URLRequestTestContext();
+ scoped_refptr<URLRequestContext> context = new TestURLRequestContext();
TestDelegate d;
GURL::Replacements replacements;
@@ -1232,7 +1192,7 @@
scoped_refptr<HTTPTestServer> server =
HTTPTestServer::CreateServer(L"", NULL);
ASSERT_TRUE(NULL != server.get());
- scoped_refptr<URLRequestContext> context = new URLRequestTestContext();
+ scoped_refptr<URLRequestContext> context = new TestURLRequestContext();
// Set up a cookie.
{
@@ -1241,6 +1201,8 @@
req.set_context(context);
req.Start();
MessageLoop::current()->Run();
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
// Verify that the cookie is set.
@@ -1253,6 +1215,8 @@
EXPECT_TRUE(d.data_received().find("CookieToNotSend=1")
!= std::string::npos);
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
// Verify that the cookie isn't sent when LOAD_DO_NOT_SEND_COOKIES is set.
@@ -1266,6 +1230,10 @@
EXPECT_TRUE(d.data_received().find("Cookie: CookieToNotSend=1")
== std::string::npos);
+
+ // LOAD_DO_NOT_SEND_COOKIES does not trigger OnGetCookies.
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
}
@@ -1273,7 +1241,7 @@
scoped_refptr<HTTPTestServer> server =
HTTPTestServer::CreateServer(L"", NULL);
ASSERT_TRUE(NULL != server.get());
- scoped_refptr<URLRequestContext> context = new URLRequestTestContext();
+ scoped_refptr<URLRequestContext> context = new TestURLRequestContext();
// Set up a cookie.
{
@@ -1283,6 +1251,10 @@
req.set_context(context);
req.Start();
MessageLoop::current()->Run();
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
+ EXPECT_EQ(1, d.set_cookie_count());
}
// Try to set up another cookie and update the previous cookie.
@@ -1295,6 +1267,11 @@
req.Start();
MessageLoop::current()->Run();
+
+ // LOAD_DO_NOT_SAVE_COOKIES does not trigger OnSetCookie.
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
+ EXPECT_EQ(0, d.set_cookie_count());
}
// Verify the cookies weren't saved or updated.
@@ -1309,6 +1286,10 @@
== std::string::npos);
EXPECT_TRUE(d.data_received().find("CookieToNotUpdate=2")
!= std::string::npos);
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
+ EXPECT_EQ(0, d.set_cookie_count());
}
}
@@ -1316,7 +1297,7 @@
scoped_refptr<HTTPTestServer> server =
HTTPTestServer::CreateServer(L"", NULL);
ASSERT_TRUE(NULL != server.get());
- scoped_refptr<URLRequestTestContext> context = new URLRequestTestContext();
+ scoped_refptr<TestURLRequestContext> context = new TestURLRequestContext();
// Set up a cookie.
{
@@ -1325,6 +1306,9 @@
req.set_context(context);
req.Start();
MessageLoop::current()->Run();
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
// Verify that the cookie is set.
@@ -1337,6 +1321,9 @@
EXPECT_TRUE(d.data_received().find("CookieToNotSend=1")
!= std::string::npos);
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
// Verify that the cookie isn't sent.
@@ -1354,6 +1341,9 @@
== std::string::npos);
context->set_cookie_policy(NULL);
+
+ EXPECT_EQ(1, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
}
@@ -1361,7 +1351,7 @@
scoped_refptr<HTTPTestServer> server =
HTTPTestServer::CreateServer(L"", NULL);
ASSERT_TRUE(NULL != server.get());
- scoped_refptr<URLRequestTestContext> context = new URLRequestTestContext();
+ scoped_refptr<TestURLRequestContext> context = new TestURLRequestContext();
// Set up a cookie.
{
@@ -1371,6 +1361,9 @@
req.set_context(context);
req.Start();
MessageLoop::current()->Run();
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
// Try to set up another cookie and update the previous cookie.
@@ -1387,6 +1380,9 @@
MessageLoop::current()->Run();
context->set_cookie_policy(NULL);
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(2, d.blocked_set_cookie_count());
}
@@ -1402,6 +1398,9 @@
== std::string::npos);
EXPECT_TRUE(d.data_received().find("CookieToNotUpdate=2")
!= std::string::npos);
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
}
@@ -1409,7 +1408,7 @@
scoped_refptr<HTTPTestServer> server =
HTTPTestServer::CreateServer(L"", NULL);
ASSERT_TRUE(NULL != server.get());
- scoped_refptr<URLRequestTestContext> context = new URLRequestTestContext();
+ scoped_refptr<TestURLRequestContext> context = new TestURLRequestContext();
// Set up a cookie.
{
@@ -1418,6 +1417,9 @@
req.set_context(context);
req.Start();
MessageLoop::current()->Run();
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
// Verify that the cookie is set.
@@ -1430,6 +1432,9 @@
EXPECT_TRUE(d.data_received().find("CookieToNotSend=1")
!= std::string::npos);
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
// Verify that the cookie isn't sent.
@@ -1448,6 +1453,9 @@
== std::string::npos);
context->set_cookie_policy(NULL);
+
+ EXPECT_EQ(1, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
}
@@ -1455,7 +1463,7 @@
scoped_refptr<HTTPTestServer> server =
HTTPTestServer::CreateServer(L"", NULL);
ASSERT_TRUE(NULL != server.get());
- scoped_refptr<URLRequestTestContext> context = new URLRequestTestContext();
+ scoped_refptr<TestURLRequestContext> context = new TestURLRequestContext();
// Set up a cookie.
{
@@ -1465,6 +1473,9 @@
req.set_context(context);
req.Start();
MessageLoop::current()->Run();
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
// Try to set up another cookie and update the previous cookie.
@@ -1482,6 +1493,9 @@
MessageLoop::current()->Run();
context->set_cookie_policy(NULL);
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(2, d.blocked_set_cookie_count());
}
// Verify the cookies weren't saved or updated.
@@ -1496,14 +1510,17 @@
== std::string::npos);
EXPECT_TRUE(d.data_received().find("CookieToNotUpdate=2")
!= std::string::npos);
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
}
-TEST_F(URLRequestTest, CancelTest_DuringCookiePolicy) {
+TEST_F(URLRequestTest, CancelTest_During_CookiePolicy) {
scoped_refptr<HTTPTestServer> server =
HTTPTestServer::CreateServer(L"", NULL);
ASSERT_TRUE(NULL != server.get());
- scoped_refptr<URLRequestTestContext> context = new URLRequestTestContext();
+ scoped_refptr<TestURLRequestContext> context = new TestURLRequestContext();
TestCookiePolicy cookie_policy(TestCookiePolicy::ASYNC);
context->set_cookie_policy(&cookie_policy);
@@ -1516,10 +1533,113 @@
req.set_context(context);
req.Start(); // Triggers an asynchronous cookie policy check.
- // But, now we cancel the request. This should not cause a crash.
+ // But, now we cancel the request by letting it go out of scope. This
+ // should not cause a crash.
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
}
context->set_cookie_policy(NULL);
+
+ // Let the cookie policy complete. Make sure it handles the destruction of
+ // the URLRequest properly.
+ MessageLoop::current()->RunAllPending();
+}
+
+TEST_F(URLRequestTest, CancelTest_During_OnGetCookies) {
+ scoped_refptr<HTTPTestServer> server =
+ HTTPTestServer::CreateServer(L"", NULL);
+ ASSERT_TRUE(NULL != server.get());
+ scoped_refptr<TestURLRequestContext> context = new TestURLRequestContext();
+
+ TestCookiePolicy cookie_policy(TestCookiePolicy::NO_GET_COOKIES);
+ context->set_cookie_policy(&cookie_policy);
+
+ // Set up a cookie.
+ {
+ TestDelegate d;
+ d.set_cancel_in_get_cookies_blocked(true);
+ URLRequest req(server->TestServerPage("set-cookie?A=1&B=2&C=3"),
+ &d);
+ req.set_context(context);
+ req.Start(); // Triggers an asynchronous cookie policy check.
+
+ MessageLoop::current()->Run();
+
+ EXPECT_EQ(URLRequestStatus::CANCELED, req.status().status());
+
+ EXPECT_EQ(1, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
+ }
+
+ context->set_cookie_policy(NULL);
+}
+
+TEST_F(URLRequestTest, CancelTest_During_OnSetCookie) {
+ scoped_refptr<HTTPTestServer> server =
+ HTTPTestServer::CreateServer(L"", NULL);
+ ASSERT_TRUE(NULL != server.get());
+ scoped_refptr<TestURLRequestContext> context = new TestURLRequestContext();
+
+ TestCookiePolicy cookie_policy(TestCookiePolicy::NO_SET_COOKIE);
+ context->set_cookie_policy(&cookie_policy);
+
+ // Set up a cookie.
+ {
+ TestDelegate d;
+ d.set_cancel_in_set_cookie_blocked(true);
+ URLRequest req(server->TestServerPage("set-cookie?A=1&B=2&C=3"),
+ &d);
+ req.set_context(context);
+ req.Start(); // Triggers an asynchronous cookie policy check.
+
+ MessageLoop::current()->Run();
+
+ EXPECT_EQ(URLRequestStatus::CANCELED, req.status().status());
+
+ // Even though the response will contain 3 set-cookie headers, we expect
+ // only one to be blocked as that first one will cause OnSetCookie to be
+ // called, which will cancel the request. Once canceled, it should not
+ // attempt to set further cookies.
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(1, d.blocked_set_cookie_count());
+ }
+
+ context->set_cookie_policy(NULL);
+}
+
+TEST_F(URLRequestTest, CookiePolicy_ForceSession) {
+ scoped_refptr<HTTPTestServer> server =
+ HTTPTestServer::CreateServer(L"", NULL);
+ ASSERT_TRUE(NULL != server.get());
+ scoped_refptr<TestURLRequestContext> context = new TestURLRequestContext();
+
+ TestCookiePolicy cookie_policy(TestCookiePolicy::FORCE_SESSION);
+ context->set_cookie_policy(&cookie_policy);
+
+ // Set up a cookie.
+ {
+ TestDelegate d;
+ URLRequest req(server->TestServerPage(
+ "set-cookie?A=1;expires=\"Fri, 05 Feb 2010 23:42:01 GMT\""), &d);
+ req.set_context(context);
+ req.Start(); // Triggers an asynchronous cookie policy check.
+
+ MessageLoop::current()->Run();
+
+ EXPECT_EQ(0, d.blocked_get_cookies_count());
+ EXPECT_EQ(0, d.blocked_set_cookie_count());
+ }
+
+ // Now, check the cookie store.
+ net::CookieMonster::CookieList cookies =
+ context->cookie_store()->GetCookieMonster()->GetAllCookies();
+ EXPECT_EQ(1U, cookies.size());
+ EXPECT_FALSE(cookies[0].IsPersistent());
+
+ context->set_cookie_policy(NULL);
}
// In this test, we do a POST which the server will 302 redirect.
@@ -1535,7 +1655,8 @@
req.set_upload(CreateSimpleUploadData(kData));
// Set headers (some of which are specific to the POST).
- req.SetExtraRequestHeaders(
+ net::HttpRequestHeaders headers;
+ headers.AddHeadersFromString(
"Content-Type: multipart/form-data; "
"boundary=----WebKitFormBoundaryAADeAA+NAAWMAAwZ\r\n"
"Accept: text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,"
@@ -1544,6 +1665,7 @@
"Accept-Charset: ISO-8859-1,*,utf-8\r\n"
"Content-Length: 11\r\n"
"Origin: http://localhost:1337/");
+ req.SetExtraRequestHeaders(headers);
req.Start();
MessageLoop::current()->Run();
@@ -1572,8 +1694,10 @@
&d);
req.set_method("POST");
req.set_upload(CreateSimpleUploadData(kData).get());
- req.SetExtraRequestHeaders(
- "Content-Length: " + UintToString(sizeof(kData) - 1));
+ net::HttpRequestHeaders headers;
+ headers.SetHeader(net::HttpRequestHeaders::kContentLength,
+ UintToString(arraysize(kData) - 1));
+ req.SetExtraRequestHeaders(headers);
req.Start();
MessageLoop::current()->Run();
EXPECT_EQ("POST", req.method());
@@ -2330,7 +2454,7 @@
ASSERT_TRUE(NULL != server_.get());
TestDelegate d;
TestURLRequest req(server_->TestServerPage("echoheader?Accept-Language"), &d);
- req.set_context(new URLRequestTestContext());
+ req.set_context(new TestURLRequestContext());
req.Start();
MessageLoop::current()->Run();
EXPECT_EQ(req.context()->accept_language(), d.data_received());
@@ -2343,8 +2467,10 @@
TestDelegate d;
TestURLRequest
req(server_->TestServerPage("echoheaderoverride?Accept-Language"), &d);
- req.set_context(new URLRequestTestContext());
- req.SetExtraRequestHeaders("Accept-Language: ru");
+ req.set_context(new TestURLRequestContext());
+ net::HttpRequestHeaders headers;
+ headers.SetHeader(net::HttpRequestHeaders::kAcceptLanguage, "ru");
+ req.SetExtraRequestHeaders(headers);
req.Start();
MessageLoop::current()->Run();
EXPECT_EQ(std::string("ru"), d.data_received());
@@ -2355,7 +2481,7 @@
ASSERT_TRUE(NULL != server_.get());
TestDelegate d;
TestURLRequest req(server_->TestServerPage("echoheader?Accept-Charset"), &d);
- req.set_context(new URLRequestTestContext());
+ req.set_context(new TestURLRequestContext());
req.Start();
MessageLoop::current()->Run();
EXPECT_EQ(req.context()->accept_charset(), d.data_received());
@@ -2368,8 +2494,10 @@
TestDelegate d;
TestURLRequest
req(server_->TestServerPage("echoheaderoverride?Accept-Charset"), &d);
- req.set_context(new URLRequestTestContext());
- req.SetExtraRequestHeaders("Accept-Charset: koi-8r");
+ req.set_context(new TestURLRequestContext());
+ net::HttpRequestHeaders headers;
+ headers.SetHeader(net::HttpRequestHeaders::kAcceptCharset, "koi-8r");
+ req.SetExtraRequestHeaders(headers);
req.Start();
MessageLoop::current()->Run();
EXPECT_EQ(std::string("koi-8r"), d.data_received());
diff --git a/net/url_request/url_request_unittest.h b/net/url_request/url_request_unittest.h
index 6c1be78..8f090ef 100644
--- a/net/url_request/url_request_unittest.h
+++ b/net/url_request/url_request_unittest.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -17,9 +17,9 @@
#include "base/message_loop.h"
#include "base/path_service.h"
#include "base/process_util.h"
-#include "base/string_util.h"
#include "base/thread.h"
#include "base/time.h"
+#include "base/utf_string_conversions.h"
#include "base/waitable_event.h"
#include "net/base/cookie_monster.h"
#include "net/base/cookie_policy.h"
@@ -30,9 +30,10 @@
#include "net/base/ssl_config_service_defaults.h"
#include "net/disk_cache/disk_cache.h"
#include "net/ftp/ftp_network_layer.h"
+#include "net/http/http_auth_handler_factory.h"
#include "net/http/http_cache.h"
#include "net/http/http_network_layer.h"
-#include "net/socket/ssl_test_util.h"
+#include "net/test/test_server.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/proxy/proxy_service.h"
@@ -53,7 +54,8 @@
enum Options {
NO_GET_COOKIES = 1 << 0,
NO_SET_COOKIE = 1 << 1,
- ASYNC = 1 << 2
+ ASYNC = 1 << 2,
+ FORCE_SESSION = 1 << 3,
};
explicit TestCookiePolicy(int options_bit_mask)
@@ -93,6 +95,9 @@
if (options_ & NO_SET_COOKIE)
return net::ERR_ACCESS_DENIED;
+ if (options_ & FORCE_SESSION)
+ return net::OK_FOR_SESSION_ONLY;
+
return net::OK;
}
@@ -126,15 +131,17 @@
class TestURLRequestContext : public URLRequestContext {
public:
TestURLRequestContext() {
- host_resolver_ = net::CreateSystemHostResolver(NULL);
+ host_resolver_ =
+ net::CreateSystemHostResolver(net::HostResolver::kDefaultParallelism);
proxy_service_ = net::ProxyService::CreateNull();
Init();
}
explicit TestURLRequestContext(const std::string& proxy) {
- host_resolver_ = net::CreateSystemHostResolver(NULL);
+ host_resolver_ =
+ net::CreateSystemHostResolver(net::HostResolver::kDefaultParallelism);
net::ProxyConfig proxy_config;
- proxy_config.proxy_rules.ParseFromString(proxy);
+ proxy_config.proxy_rules().ParseFromString(proxy);
proxy_service_ = net::ProxyService::CreateFixed(proxy_config);
Init();
}
@@ -147,28 +154,29 @@
virtual ~TestURLRequestContext() {
delete ftp_transaction_factory_;
delete http_transaction_factory_;
+ delete http_auth_handler_factory_;
}
private:
void Init() {
ftp_transaction_factory_ = new net::FtpNetworkLayer(host_resolver_);
ssl_config_service_ = new net::SSLConfigServiceDefaults;
- http_transaction_factory_ =
- new net::HttpCache(
- net::HttpNetworkLayer::CreateFactory(NULL, host_resolver_,
- proxy_service_,
- ssl_config_service_),
- disk_cache::CreateInMemoryCacheBackend(0));
+ http_auth_handler_factory_ = net::HttpAuthHandlerFactory::CreateDefault();
+ http_transaction_factory_ = new net::HttpCache(
+ net::HttpNetworkLayer::CreateFactory(host_resolver_,
+ proxy_service_,
+ ssl_config_service_,
+ http_auth_handler_factory_,
+ network_delegate_,
+ NULL),
+ net::HttpCache::DefaultBackend::InMemory(0));
// In-memory cookie store.
- cookie_store_ = new net::CookieMonster();
+ cookie_store_ = new net::CookieMonster(NULL, NULL);
accept_language_ = "en-us,fr";
accept_charset_ = "iso-8859-1,*,utf-8";
}
};
-// TODO(phajdan.jr): Migrate callers to the new name and remove the typedef.
-typedef TestURLRequestContext URLRequestTestContext;
-
//-----------------------------------------------------------------------------
class TestURLRequest : public URLRequest {
@@ -188,12 +196,17 @@
cancel_in_rs_(false),
cancel_in_rd_(false),
cancel_in_rd_pending_(false),
+ cancel_in_getcookiesblocked_(false),
+ cancel_in_setcookieblocked_(false),
quit_on_complete_(true),
quit_on_redirect_(false),
allow_certificate_errors_(false),
response_started_count_(0),
received_bytes_count_(0),
received_redirect_count_(0),
+ blocked_get_cookies_count_(0),
+ blocked_set_cookie_count_(0),
+ set_cookie_count_(0),
received_data_before_response_(false),
request_failed_(false),
have_certificate_errors_(false),
@@ -296,12 +309,38 @@
request->Cancel();
}
+ virtual void OnGetCookies(URLRequest* request, bool blocked_by_policy) {
+ if (blocked_by_policy) {
+ blocked_get_cookies_count_++;
+ if (cancel_in_getcookiesblocked_)
+ request->Cancel();
+ }
+ }
+
+ virtual void OnSetCookie(URLRequest* request,
+ const std::string& cookie_line,
+ bool blocked_by_policy) {
+ if (blocked_by_policy) {
+ blocked_set_cookie_count_++;
+ if (cancel_in_setcookieblocked_)
+ request->Cancel();
+ } else {
+ set_cookie_count_++;
+ }
+ }
+
void set_cancel_in_received_redirect(bool val) { cancel_in_rr_ = val; }
void set_cancel_in_response_started(bool val) { cancel_in_rs_ = val; }
void set_cancel_in_received_data(bool val) { cancel_in_rd_ = val; }
void set_cancel_in_received_data_pending(bool val) {
cancel_in_rd_pending_ = val;
}
+ void set_cancel_in_get_cookies_blocked(bool val) {
+ cancel_in_getcookiesblocked_ = val;
+ }
+ void set_cancel_in_set_cookie_blocked(bool val) {
+ cancel_in_setcookieblocked_ = val;
+ }
void set_quit_on_complete(bool val) { quit_on_complete_ = val; }
void set_quit_on_redirect(bool val) { quit_on_redirect_ = val; }
void set_allow_certificate_errors(bool val) {
@@ -315,6 +354,9 @@
int bytes_received() const { return static_cast<int>(data_received_.size()); }
int response_started_count() const { return response_started_count_; }
int received_redirect_count() const { return received_redirect_count_; }
+ int blocked_get_cookies_count() const { return blocked_get_cookies_count_; }
+ int blocked_set_cookie_count() const { return blocked_set_cookie_count_; }
+ int set_cookie_count() const { return set_cookie_count_; }
bool received_data_before_response() const {
return received_data_before_response_;
}
@@ -328,6 +370,8 @@
bool cancel_in_rs_;
bool cancel_in_rd_;
bool cancel_in_rd_pending_;
+ bool cancel_in_getcookiesblocked_;
+ bool cancel_in_setcookieblocked_;
bool quit_on_complete_;
bool quit_on_redirect_;
bool allow_certificate_errors_;
@@ -339,6 +383,9 @@
int response_started_count_;
int received_bytes_count_;
int received_redirect_count_;
+ int blocked_get_cookies_count_;
+ int blocked_set_cookie_count_;
+ int set_cookie_count_;
bool received_data_before_response_;
bool request_failed_;
bool have_certificate_errors_;
@@ -395,12 +442,6 @@
"@" + host_name_ + ":" + port_str_ + "/" + path);
}
- // Deprecated in favor of TestServerPage.
- // TODO(phajdan.jr): Remove TestServerPageW.
- GURL TestServerPageW(const std::wstring& path) {
- return TestServerPage(WideToUTF8(path));
- }
-
virtual bool MakeGETRequest(const std::string& page_name) = 0;
FilePath GetDataDirectory() {
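
The new FORCE_SESSION option exercises net::OK_FOR_SESSION_ONLY: the cookie is accepted, but its expiration is dropped so IsPersistent() is false, which is exactly what CookiePolicy_ForceSession asserts. A stripped-down synchronous policy illustrating the contract (a sketch against the net::CookiePolicy interface that TestCookiePolicy implements; not part of this patch):

    class ForceSessionCookiePolicy : public net::CookiePolicy {
     public:
      // Returning any final error code (not ERR_IO_PENDING) makes the check
      // synchronous; |callback| is then never invoked.
      virtual int CanGetCookies(const GURL& url, const GURL& first_party,
                                net::CompletionCallback* callback) {
        return net::OK;
      }
      virtual int CanSetCookie(const GURL& url, const GURL& first_party,
                               const std::string& cookie_line,
                               net::CompletionCallback* callback) {
        // Store the cookie but strip its expiration date.
        return net::OK_FOR_SESSION_ONLY;
      }
    };
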
diff --git a/net/url_request/view_cache_helper.cc b/net/url_request/view_cache_helper.cc
index b818ba5..92701b7 100644
--- a/net/url_request/view_cache_helper.cc
+++ b/net/url_request/view_cache_helper.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,6 +7,7 @@
#include "base/string_util.h"
#include "net/base/escape.h"
#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
#include "net/disk_cache/disk_cache.h"
#include "net/http/http_cache.h"
#include "net/http/http_response_headers.h"
@@ -19,7 +20,9 @@
#define VIEW_CACHE_TAIL \
"</table></body></html>"
-static void HexDump(const char *buf, size_t buf_len, std::string* result) {
+namespace {
+
+void HexDump(const char *buf, size_t buf_len, std::string* result) {
const size_t kMaxRows = 16;
int offset = 0;
@@ -56,8 +59,8 @@
}
}
-static std::string FormatEntryInfo(disk_cache::Entry* entry,
- const std::string& url_prefix) {
+std::string FormatEntryInfo(disk_cache::Entry* entry,
+ const std::string& url_prefix) {
std::string key = entry->GetKey();
GURL url = GURL(url_prefix + key);
std::string row =
@@ -66,115 +69,271 @@
return row;
}
-static std::string FormatEntryDetails(disk_cache::Entry* entry) {
- std::string result = EscapeForHTML(entry->GetKey());
+}  // namespace
- net::HttpResponseInfo response;
- bool truncated;
- if (net::HttpCache::ReadResponseInfo(entry, &response, &truncated) &&
- response.headers) {
- if (truncated)
- result.append("<pre>RESPONSE_INFO_TRUNCATED</pre>");
+namespace net {
- result.append("<hr><pre>");
- result.append(EscapeForHTML(response.headers->GetStatusLine()));
- result.push_back('\n');
+ViewCacheHelper::~ViewCacheHelper() {
+ if (entry_)
+ entry_->Close();
- void* iter = NULL;
- std::string name, value;
- while (response.headers->EnumerateHeaderLines(&iter, &name, &value)) {
- result.append(EscapeForHTML(name));
- result.append(": ");
- result.append(EscapeForHTML(value));
- result.push_back('\n');
- }
- result.append("</pre>");
- }
-
- for (int i = 0; i < 2; ++i) {
- result.append("<hr><pre>");
-
- int data_size = entry->GetDataSize(i);
-
- if (data_size) {
- scoped_refptr<net::IOBuffer> buffer = new net::IOBuffer(data_size);
- if (entry->ReadData(i, 0, buffer, data_size, NULL) == data_size)
- HexDump(buffer->data(), data_size, &result);
- }
-
- result.append("</pre>");
- }
-
- return result;
+ // Cancel any pending entry callback.
+ entry_callback_->Cancel();
}
-static disk_cache::Backend* GetDiskCache(URLRequestContext* context) {
- if (!context)
- return NULL;
+int ViewCacheHelper::GetEntryInfoHTML(const std::string& key,
+ URLRequestContext* context,
+ std::string* out,
+ CompletionCallback* callback) {
+ return GetInfoHTML(key, context, std::string(), out, callback);
+}
- if (!context->http_transaction_factory())
- return NULL;
+int ViewCacheHelper::GetContentsHTML(URLRequestContext* context,
+ const std::string& url_prefix,
+ std::string* out,
+ CompletionCallback* callback) {
+ return GetInfoHTML(std::string(), context, url_prefix, out, callback);
+}
- net::HttpCache* http_cache = context->http_transaction_factory()->GetCache();
+//-----------------------------------------------------------------------------
+
+int ViewCacheHelper::GetInfoHTML(const std::string& key,
+ URLRequestContext* context,
+ const std::string& url_prefix,
+ std::string* out,
+ CompletionCallback* callback) {
+ DCHECK(!callback_);
+ DCHECK(context);
+ key_ = key;
+ context_ = context;
+ url_prefix_ = url_prefix;
+ data_ = out;
+ next_state_ = STATE_GET_BACKEND;
+ int rv = DoLoop(OK);
+
+ if (rv == ERR_IO_PENDING)
+ callback_ = callback;
+
+ return rv;
+}
+
+void ViewCacheHelper::DoCallback(int rv) {
+ DCHECK_NE(ERR_IO_PENDING, rv);
+ DCHECK(callback_);
+
+ CompletionCallback* c = callback_;
+ callback_ = NULL;
+ c->Run(rv);
+}
+
+void ViewCacheHelper::HandleResult(int rv) {
+ DCHECK_NE(ERR_IO_PENDING, rv);
+ DCHECK_NE(ERR_FAILED, rv);
+ context_ = NULL;
+ if (callback_)
+ DoCallback(rv);
+}
+
+int ViewCacheHelper::DoLoop(int result) {
+ DCHECK(next_state_ != STATE_NONE);
+
+ int rv = result;
+ do {
+ State state = next_state_;
+ next_state_ = STATE_NONE;
+ switch (state) {
+ case STATE_GET_BACKEND:
+ DCHECK_EQ(OK, rv);
+ rv = DoGetBackend();
+ break;
+ case STATE_GET_BACKEND_COMPLETE:
+ rv = DoGetBackendComplete(rv);
+ break;
+ case STATE_OPEN_NEXT_ENTRY:
+ DCHECK_EQ(OK, rv);
+ rv = DoOpenNextEntry();
+ break;
+ case STATE_OPEN_NEXT_ENTRY_COMPLETE:
+ rv = DoOpenNextEntryComplete(rv);
+ break;
+ case STATE_OPEN_ENTRY:
+ DCHECK_EQ(OK, rv);
+ rv = DoOpenEntry();
+ break;
+ case STATE_OPEN_ENTRY_COMPLETE:
+ rv = DoOpenEntryComplete(rv);
+ break;
+ case STATE_READ_RESPONSE:
+ DCHECK_EQ(OK, rv);
+ rv = DoReadResponse();
+ break;
+ case STATE_READ_RESPONSE_COMPLETE:
+ rv = DoReadResponseComplete(rv);
+ break;
+ case STATE_READ_DATA:
+ DCHECK_EQ(OK, rv);
+ rv = DoReadData();
+ break;
+ case STATE_READ_DATA_COMPLETE:
+ rv = DoReadDataComplete(rv);
+ break;
+
+ default:
+ NOTREACHED() << "bad state";
+ rv = ERR_FAILED;
+ break;
+ }
+ } while (rv != ERR_IO_PENDING && next_state_ != STATE_NONE);
+
+ if (rv != ERR_IO_PENDING)
+ HandleResult(rv);
+
+ return rv;
+}
+
+int ViewCacheHelper::DoGetBackend() {
+ next_state_ = STATE_GET_BACKEND_COMPLETE;
+
+ if (!context_->http_transaction_factory())
+ return ERR_FAILED;
+
+ net::HttpCache* http_cache =
+ context_->http_transaction_factory()->GetCache();
if (!http_cache)
- return NULL;
+ return ERR_FAILED;
- return http_cache->GetBackend();
+ return http_cache->GetBackend(&disk_cache_, &cache_callback_);
}
-static std::string FormatStatistics(disk_cache::Backend* disk_cache) {
- std::vector<std::pair<std::string, std::string> > stats;
- disk_cache->GetStats(&stats);
- std::string result;
-
- for (size_t index = 0; index < stats.size(); index++) {
- result.append(stats[index].first);
- result.append(": ");
- result.append(stats[index].second);
- result.append("<br/>\n");
+int ViewCacheHelper::DoGetBackendComplete(int result) {
+ if (result == ERR_FAILED) {
+ data_->append("no disk cache");
+ return OK;
}
- return result;
+ DCHECK_EQ(OK, result);
+ if (key_.empty()) {
+ data_->assign(VIEW_CACHE_HEAD);
+ DCHECK(!iter_);
+ next_state_ = STATE_OPEN_NEXT_ENTRY;
+ return OK;
+ }
+
+ next_state_ = STATE_OPEN_ENTRY;
+ return OK;
}
-// static
-void ViewCacheHelper::GetEntryInfoHTML(const std::string& key,
- URLRequestContext* context,
- const std::string& url_prefix,
- std::string* data) {
- disk_cache::Backend* disk_cache = GetDiskCache(context);
- if (!disk_cache) {
- data->assign("no disk cache");
- return;
+int ViewCacheHelper::DoOpenNextEntry() {
+ next_state_ = STATE_OPEN_NEXT_ENTRY_COMPLETE;
+ return disk_cache_->OpenNextEntry(&iter_, &entry_, &cache_callback_);
+}
+
+int ViewCacheHelper::DoOpenNextEntryComplete(int result) {
+ if (result == ERR_FAILED) {
+ data_->append(VIEW_CACHE_TAIL);
+ return OK;
}
- if (key.empty()) {
- data->assign(VIEW_CACHE_HEAD);
- void* iter = NULL;
- disk_cache::Entry* entry;
- while (disk_cache->OpenNextEntry(&iter, &entry)) {
- data->append(FormatEntryInfo(entry, url_prefix));
- entry->Close();
+ DCHECK_EQ(OK, result);
+ data_->append(FormatEntryInfo(entry_, url_prefix_));
+ entry_->Close();
+ entry_ = NULL;
+
+ next_state_ = STATE_OPEN_NEXT_ENTRY;
+ return OK;
+}
+
+int ViewCacheHelper::DoOpenEntry() {
+ next_state_ = STATE_OPEN_ENTRY_COMPLETE;
+ return disk_cache_->OpenEntry(key_, &entry_, &cache_callback_);
+}
+
+int ViewCacheHelper::DoOpenEntryComplete(int result) {
+ if (result == ERR_FAILED) {
+ data_->append("no matching cache entry for: " + EscapeForHTML(key_));
+ return OK;
+ }
+
+ data_->assign(VIEW_CACHE_HEAD);
+ data_->append(EscapeForHTML(entry_->GetKey()));
+ next_state_ = STATE_READ_RESPONSE;
+ return OK;
+}
+
+int ViewCacheHelper::DoReadResponse() {
+ next_state_ = STATE_READ_RESPONSE_COMPLETE;
+ buf_len_ = entry_->GetDataSize(0);
+ // Balanced by the Release() in DoReadResponseComplete().
+ entry_callback_->AddRef();
+ if (!buf_len_)
+ return buf_len_;
+
+ buf_ = new net::IOBuffer(buf_len_);
+ return entry_->ReadData(0, 0, buf_, buf_len_, entry_callback_);
+}
+
+int ViewCacheHelper::DoReadResponseComplete(int result) {
+ entry_callback_->Release();
+ if (result && result == buf_len_) {
+ net::HttpResponseInfo response;
+ bool truncated;
+ if (net::HttpCache::ParseResponseInfo(buf_->data(), buf_len_, &response,
+ &truncated) &&
+ response.headers) {
+ if (truncated)
+ data_->append("<pre>RESPONSE_INFO_TRUNCATED</pre>");
+
+ data_->append("<hr><pre>");
+ data_->append(EscapeForHTML(response.headers->GetStatusLine()));
+ data_->push_back('\n');
+
+ void* iter = NULL;
+ std::string name, value;
+ while (response.headers->EnumerateHeaderLines(&iter, &name, &value)) {
+ data_->append(EscapeForHTML(name));
+ data_->append(": ");
+ data_->append(EscapeForHTML(value));
+ data_->push_back('\n');
+ }
+ data_->append("</pre>");
}
- data->append(VIEW_CACHE_TAIL);
+ }
+
+ index_ = 0;
+ next_state_ = STATE_READ_DATA;
+ return OK;
+}
+
+int ViewCacheHelper::DoReadData() {
+ data_->append("<hr><pre>");
+
+ next_state_ = STATE_READ_DATA_COMPLETE;
+ buf_len_ = entry_->GetDataSize(index_);
+ // Balanced by the Release() in DoReadDataComplete().
+ entry_callback_->AddRef();
+ if (!buf_len_)
+ return buf_len_;
+
+ buf_ = new net::IOBuffer(buf_len_);
+ return entry_->ReadData(index_, 0, buf_, buf_len_, entry_callback_);
+}
+
+int ViewCacheHelper::DoReadDataComplete(int result) {
+ entry_callback_->Release();
+ if (result && result == buf_len_) {
+ HexDump(buf_->data(), buf_len_, data_);
+ }
+ data_->append("</pre>");
+ index_++;
+ if (index_ < net::HttpCache::kNumCacheEntryDataIndices) {
+ next_state_ = STATE_READ_DATA;
} else {
- disk_cache::Entry* entry;
- if (disk_cache->OpenEntry(key, &entry)) {
- data->assign(FormatEntryDetails(entry));
- entry->Close();
- } else {
- data->assign("no matching cache entry for: " + key);
- }
+ data_->append(VIEW_CACHE_TAIL);
+ entry_->Close();
+ entry_ = NULL;
}
+ return OK;
}
-// static
-void ViewCacheHelper::GetStatisticsHTML(URLRequestContext* context,
- std::string* data) {
- disk_cache::Backend* disk_cache = GetDiskCache(context);
- if (!disk_cache) {
- data->append("no disk cache");
- return;
- }
- data->append(FormatStatistics(disk_cache));
+void ViewCacheHelper::OnIOComplete(int result) {
+ DoLoop(result);
}
+
+}  // namespace net
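
The rewrite above moves ViewCacheHelper onto the state-machine convention used throughout net/: each asynchronous step is a STATE_FOO / STATE_FOO_COMPLETE pair, DoLoop() dispatches states until one returns ERR_IO_PENDING, and the I/O callback simply re-enters the loop with the result. A stripped-down sketch of the convention (a hypothetical single-step helper, not part of this patch):

    int Helper::DoLoop(int result) {
      int rv = result;
      do {
        State state = next_state_;
        next_state_ = STATE_NONE;
        switch (state) {
          case STATE_WORK:
            // Starts the operation; returns a result synchronously or
            // ERR_IO_PENDING if OnIOComplete() will deliver it later.
            rv = DoWork();
            break;
          case STATE_WORK_COMPLETE:
            // Consumes the result (synchronous or asynchronous) and may set
            // next_state_ to keep the machine running.
            rv = DoWorkComplete(rv);
            break;
          default:
            NOTREACHED();
            rv = ERR_FAILED;
            break;
        }
      } while (rv != ERR_IO_PENDING && next_state_ != STATE_NONE);
      return rv;
    }

    // Asynchronous completions feed the result back into the same loop, so
    // the synchronous and asynchronous paths share all of their code.
    void Helper::OnIOComplete(int result) {
      DoLoop(result);
    }
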
diff --git a/net/url_request/view_cache_helper.h b/net/url_request/view_cache_helper.h
index 2648699..777775a 100644
--- a/net/url_request/view_cache_helper.h
+++ b/net/url_request/view_cache_helper.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,18 +7,113 @@
#include <string>
+#include "net/base/completion_callback.h"
+#include "net/base/io_buffer.h"
+
class URLRequestContext;
+namespace disk_cache {
+class Backend;
+class Entry;
+}
+
+namespace net {
+
class ViewCacheHelper {
public:
- // Formats the cache information for |key| as HTML.
- static void GetEntryInfoHTML(const std::string& key,
- URLRequestContext* context,
- const std::string& url_prefix,
- std::string* out);
+ ViewCacheHelper()
+ : disk_cache_(NULL), entry_(NULL), iter_(NULL), buf_len_(0), index_(0),
+ data_(NULL), callback_(NULL), next_state_(STATE_NONE),
+ ALLOW_THIS_IN_INITIALIZER_LIST(
+ cache_callback_(this, &ViewCacheHelper::OnIOComplete)),
+ ALLOW_THIS_IN_INITIALIZER_LIST(
+ entry_callback_(new CancelableCompletionCallback<ViewCacheHelper>(
+ this, &ViewCacheHelper::OnIOComplete))) {}
+ ~ViewCacheHelper();
- static void GetStatisticsHTML(URLRequestContext* context,
- std::string* out);
+ // Formats the cache information for |key| as HTML. Returns a net error code.
+ // If this method returns ERR_IO_PENDING, |callback| will be notified when the
+ // operation completes. |out| must remain valid until this operation completes
+ // or the object is destroyed.
+ int GetEntryInfoHTML(const std::string& key, URLRequestContext* context,
+ std::string* out, CompletionCallback* callback);
+
+ // Formats the cache contents as HTML. Returns a net error code.
+ // If this method returns ERR_IO_PENDING, |callback| will be notified when the
+ // operation completes. |out| must remain valid until this operation completes
+ // or the object is destroyed. |url_prefix| will be prepended to each entry
+ // key as a link to the entry.
+ int GetContentsHTML(URLRequestContext* context, const std::string& url_prefix,
+ std::string* out, CompletionCallback* callback);
+
+ private:
+ enum State {
+ STATE_NONE,
+ STATE_GET_BACKEND,
+ STATE_GET_BACKEND_COMPLETE,
+ STATE_OPEN_NEXT_ENTRY,
+ STATE_OPEN_NEXT_ENTRY_COMPLETE,
+ STATE_OPEN_ENTRY,
+ STATE_OPEN_ENTRY_COMPLETE,
+ STATE_READ_RESPONSE,
+ STATE_READ_RESPONSE_COMPLETE,
+ STATE_READ_DATA,
+ STATE_READ_DATA_COMPLETE
+ };
+
+ // Implements GetEntryInfoHTML and GetContentsHTML.
+ int GetInfoHTML(const std::string& key, URLRequestContext* context,
+ const std::string& url_prefix, std::string* out,
+ CompletionCallback* callback);
+
+ // Triggers the completion callback. Must only be called while callback_ is
+ // non-null.
+ void DoCallback(int rv);
+
+ // This will trigger the completion callback if appropriate.
+ void HandleResult(int rv);
+
+ // Runs the state transition loop.
+ int DoLoop(int result);
+
+ // Each of these methods corresponds to a State value. Where a method takes
+ // an argument, it receives the result of the previous state's operation,
+ // either returned directly or delivered via the completion callback.
+ int DoGetBackend();
+ int DoGetBackendComplete(int result);
+ int DoOpenNextEntry();
+ int DoOpenNextEntryComplete(int result);
+ int DoOpenEntry();
+ int DoOpenEntryComplete(int result);
+ int DoReadResponse();
+ int DoReadResponseComplete(int result);
+ int DoReadData();
+ int DoReadDataComplete(int result);
+
+ // Called to signal completion of asynchronous IO.
+ void OnIOComplete(int result);
+
+ scoped_refptr<URLRequestContext> context_;
+ disk_cache::Backend* disk_cache_;
+ disk_cache::Entry* entry_;
+ void* iter_;
+ scoped_refptr<net::IOBuffer> buf_;
+ int buf_len_;
+ int index_;
+
+ std::string key_;
+ std::string url_prefix_;
+ std::string* data_;
+ CompletionCallback* callback_;
+
+ State next_state_;
+
+ CompletionCallbackImpl<ViewCacheHelper> cache_callback_;
+ scoped_refptr<CancelableCompletionCallback<ViewCacheHelper> > entry_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(ViewCacheHelper);
};
+}  // namespace net
+
#endif // NET_URL_REQUEST_VIEW_CACHE_HELPER_H_
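
The interface now follows the standard net/ asynchronous contract: a synchronous result, or ERR_IO_PENDING plus a later callback. A sketch of a typical caller, assuming an existing URLRequestContext* |context| and the TestCompletionCallback waiter used by the unit tests below (the URL prefix is illustrative):

    net::ViewCacheHelper helper;
    std::string html;
    TestCompletionCallback callback;
    int rv = helper.GetContentsHTML(context, "chrome://view-http-cache/",
                                    &html, &callback);
    if (rv == net::ERR_IO_PENDING)
      rv = callback.WaitForResult();  // |html| is fully written on return.
    // rv is now a final net error code; net::OK on success.
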
diff --git a/net/url_request/view_cache_helper_unittest.cc b/net/url_request/view_cache_helper_unittest.cc
new file mode 100644
index 0000000..e82ff15
--- /dev/null
+++ b/net/url_request/view_cache_helper_unittest.cc
@@ -0,0 +1,201 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/view_cache_helper.h"
+
+#include "base/pickle.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/http/http_cache.h"
+#include "net/url_request/url_request_context.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+class TestURLRequestContext : public URLRequestContext {
+ public:
+ TestURLRequestContext();
+
+ // Gets a pointer to the cache backend.
+ disk_cache::Backend* GetBackend();
+
+ private:
+ net::HttpCache cache_;
+};
+
+TestURLRequestContext::TestURLRequestContext()
+ : cache_(reinterpret_cast<net::HttpTransactionFactory*>(NULL),
+ net::HttpCache::DefaultBackend::InMemory(0)) {
+ http_transaction_factory_ = &cache_;
+}
+
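+// Writes a serialized response-info block into stream 0 of |entry|. The
+// pickle layout (version/flags int, two 64-bit timestamps, then the header
+// string) is intended to match what HttpCache::ParseResponseInfo() reads
+// back in ViewCacheHelper::DoReadResponseComplete().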
+void WriteHeaders(disk_cache::Entry* entry, int flags,
+ const std::string& data) {
+ if (data.empty())
+ return;
+
+ Pickle pickle;
+ pickle.WriteInt(flags | 1); // Version 1.
+ pickle.WriteInt64(0);
+ pickle.WriteInt64(0);
+ pickle.WriteString(data);
+
+ scoped_refptr<net::WrappedIOBuffer> buf = new net::WrappedIOBuffer(
+ reinterpret_cast<const char*>(pickle.data()));
+ int len = static_cast<int>(pickle.size());
+
+ TestCompletionCallback cb;
+ int rv = entry->WriteData(0, 0, buf, len, &cb, true);
+ ASSERT_EQ(len, cb.GetResult(rv));
+}
+
+void WriteData(disk_cache::Entry* entry, int index, const std::string& data) {
+ if (data.empty())
+ return;
+
+ int len = static_cast<int>(data.length());
+ scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(len));
+ memcpy(buf->data(), data.data(), data.length());
+
+ TestCompletionCallback cb;
+ int rv = entry->WriteData(index, 0, buf, len, &cb, true);
+ ASSERT_EQ(len, cb.GetResult(rv));
+}
+
+void WriteToEntry(disk_cache::Backend* cache, const std::string& key,
+ const std::string& data0, const std::string& data1,
+ const std::string& data2) {
+ TestCompletionCallback cb;
+ disk_cache::Entry* entry;
+ int rv = cache->CreateEntry(key, &entry, &cb);
+ rv = cb.GetResult(rv);
+ if (rv != net::OK) {
+ rv = cache->OpenEntry(key, &entry, &cb);
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+ }
+
+ WriteHeaders(entry, 0, data0);
+ WriteData(entry, 1, data1);
+ WriteData(entry, 2, data2);
+
+ entry->Close();
+}
+
+void FillCache(URLRequestContext* context) {
+ TestCompletionCallback cb;
+ disk_cache::Backend* cache;
+ int rv =
+ context->http_transaction_factory()->GetCache()->GetBackend(&cache, &cb);
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+
+ std::string empty;
+ WriteToEntry(cache, "first", "some", empty, empty);
+ WriteToEntry(cache, "second", "only hex_dumped", "same", "kind");
+ WriteToEntry(cache, "third", empty, "another", "thing");
+}
+
+}  // namespace
+
+TEST(ViewCacheHelper, EmptyCache) {
+ scoped_refptr<TestURLRequestContext> context(new TestURLRequestContext());
+ net::ViewCacheHelper helper;
+
+ TestCompletionCallback cb;
+ std::string prefix, data;
+ int rv = helper.GetContentsHTML(context, prefix, &data, &cb);
+ EXPECT_EQ(net::OK, cb.GetResult(rv));
+ EXPECT_FALSE(data.empty());
+}
+
+TEST(ViewCacheHelper, ListContents) {
+ scoped_refptr<TestURLRequestContext> context(new TestURLRequestContext());
+ net::ViewCacheHelper helper;
+
+ FillCache(context);
+
+ std::string prefix, data;
+ TestCompletionCallback cb;
+ int rv = helper.GetContentsHTML(context, prefix, &data, &cb);
+ EXPECT_EQ(net::OK, cb.GetResult(rv));
+
+ EXPECT_EQ(0U, data.find("<html>"));
+ EXPECT_NE(std::string::npos, data.find("</html>"));
+ EXPECT_NE(std::string::npos, data.find("first"));
+ EXPECT_NE(std::string::npos, data.find("second"));
+ EXPECT_NE(std::string::npos, data.find("third"));
+
+ EXPECT_EQ(std::string::npos, data.find("some"));
+ EXPECT_EQ(std::string::npos, data.find("same"));
+ EXPECT_EQ(std::string::npos, data.find("thing"));
+}
+
+TEST(ViewCacheHelper, DumpEntry) {
+ scoped_refptr<TestURLRequestContext> context(new TestURLRequestContext());
+ net::ViewCacheHelper helper;
+
+ FillCache(context);
+
+ std::string data;
+ TestCompletionCallback cb;
+ int rv = helper.GetEntryInfoHTML("second", context, &data, &cb);
+ EXPECT_EQ(net::OK, cb.GetResult(rv));
+
+ EXPECT_EQ(0U, data.find("<html>"));
+ EXPECT_NE(std::string::npos, data.find("</html>"));
+
+ EXPECT_NE(std::string::npos, data.find("hex_dumped"));
+ EXPECT_NE(std::string::npos, data.find("same"));
+ EXPECT_NE(std::string::npos, data.find("kind"));
+
+ EXPECT_EQ(std::string::npos, data.find("first"));
+ EXPECT_EQ(std::string::npos, data.find("third"));
+ EXPECT_EQ(std::string::npos, data.find("some"));
+ EXPECT_EQ(std::string::npos, data.find("another"));
+}
+
+// Makes sure the links are correct.
+TEST(ViewCacheHelper, Prefix) {
+ scoped_refptr<TestURLRequestContext> context(new TestURLRequestContext());
+ net::ViewCacheHelper helper;
+
+ FillCache(context);
+
+ std::string key, data;
+ std::string prefix("prefix:");
+ TestCompletionCallback cb;
+ int rv = helper.GetContentsHTML(context, prefix, &data, &cb);
+ EXPECT_EQ(net::OK, cb.GetResult(rv));
+
+ EXPECT_EQ(0U, data.find("<html>"));
+ EXPECT_NE(std::string::npos, data.find("</html>"));
+ EXPECT_NE(std::string::npos, data.find("<a href=\"prefix:first\">"));
+ EXPECT_NE(std::string::npos, data.find("<a href=\"prefix:second\">"));
+ EXPECT_NE(std::string::npos, data.find("<a href=\"prefix:third\">"));
+}
+
+TEST(ViewCacheHelper, TruncatedFlag) {
+ scoped_refptr<TestURLRequestContext> context(new TestURLRequestContext());
+ net::ViewCacheHelper helper;
+
+ TestCompletionCallback cb;
+ disk_cache::Backend* cache;
+ int rv =
+ context->http_transaction_factory()->GetCache()->GetBackend(&cache, &cb);
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+
+ std::string key("the key");
+ disk_cache::Entry* entry;
+ rv = cache->CreateEntry(key, &entry, &cb);
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+
+ // RESPONSE_INFO_TRUNCATED is defined in net/http/http_response_info.cc.
+ int flags = 1 << 12;
+ WriteHeaders(entry, flags, "something");
+ entry->Close();
+
+ std::string data;
+ rv = helper.GetEntryInfoHTML(key, context, &data, &cb);
+ EXPECT_EQ(net::OK, cb.GetResult(rv));
+
+ EXPECT_NE(std::string::npos, data.find("RESPONSE_INFO_TRUNCATED"));
+}