// content/browser/speech/google_streaming_remote_engine.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/speech/google_streaming_remote_engine.h"

#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
#include "content/browser/speech/audio_buffer.h"
#include "content/browser/speech/proto/google_streaming_api.pb.h"
#include "content/public/common/content_switches.h"
#include "content/public/common/speech_recognition_error.h"
#include "content/public/common/speech_recognition_result.h"
#include "google_apis/google_api_keys.h"
#include "net/base/escape.h"
#include "net/base/load_flags.h"
#include "net/url_request/http_user_agent_settings.h"
#include "net/url_request/url_fetcher.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_context_getter.h"
#include "net/url_request/url_request_status.h"

using net::URLFetcher;

namespace content {
namespace {

const char kWebServiceBaseUrl[] =
    "https://www.google.com/speech-api/full-duplex/v1";
const char kDownstreamUrl[] = "/down?";
const char kUpstreamUrl[] = "/up?";
const AudioEncoder::Codec kDefaultAudioCodec = AudioEncoder::CODEC_FLAC;

// This matches the maximum maxAlternatives value supported by the server.
const uint32 kMaxMaxAlternatives = 30;

// TODO(hans): Remove this and other logging when we don't need it anymore.
void DumpResponse(const std::string& response) {
  DVLOG(1) << "------------";
  proto::SpeechRecognitionEvent event;
  if (!event.ParseFromString(response)) {
    DVLOG(1) << "Parse failed!";
    return;
  }
  if (event.has_status())
    DVLOG(1) << "STATUS\t" << event.status();
  for (int i = 0; i < event.result_size(); ++i) {
    DVLOG(1) << "RESULT #" << i << ":";
    const proto::SpeechRecognitionResult& res = event.result(i);
    if (res.has_final())
      DVLOG(1) << " FINAL:\t" << res.final();
    if (res.has_stability())
      DVLOG(1) << " STABILITY:\t" << res.stability();
    for (int j = 0; j < res.alternative_size(); ++j) {
      const proto::SpeechRecognitionAlternative& alt =
          res.alternative(j);
      if (alt.has_confidence())
        DVLOG(1) << " CONFIDENCE:\t" << alt.confidence();
      if (alt.has_transcript())
        DVLOG(1) << " TRANSCRIPT:\t" << alt.transcript();
    }
  }
}

std::string GetAPIKey() {
  const CommandLine& command_line = *CommandLine::ForCurrentProcess();
  if (command_line.HasSwitch(switches::kSpeechRecognitionWebserviceKey)) {
    DVLOG(1) << "GetAPIKey() used key from command-line.";
    return command_line.GetSwitchValueASCII(
        switches::kSpeechRecognitionWebserviceKey);
  }

  std::string api_key = google_apis::GetAPIKey();
  if (api_key.empty())
    DVLOG(1) << "GetAPIKey() returned empty string!";

  return api_key;
}

}  // namespace

const int GoogleStreamingRemoteEngine::kAudioPacketIntervalMs = 100;
const int GoogleStreamingRemoteEngine::kUpstreamUrlFetcherIdForTesting = 0;
const int GoogleStreamingRemoteEngine::kDownstreamUrlFetcherIdForTesting = 1;
const int GoogleStreamingRemoteEngine::kWebserviceStatusNoError = 0;
const int GoogleStreamingRemoteEngine::kWebserviceStatusErrorNoMatch = 5;

GoogleStreamingRemoteEngine::GoogleStreamingRemoteEngine(
    net::URLRequestContextGetter* context)
    : url_context_(context),
      previous_response_length_(0),
      got_last_definitive_result_(false),
      is_dispatching_event_(false),
      state_(STATE_IDLE) {}

GoogleStreamingRemoteEngine::~GoogleStreamingRemoteEngine() {}

void GoogleStreamingRemoteEngine::SetConfig(
    const SpeechRecognitionEngineConfig& config) {
  config_ = config;
}

void GoogleStreamingRemoteEngine::StartRecognition() {
  FSMEventArgs event_args(EVENT_START_RECOGNITION);
  DispatchEvent(event_args);
}

void GoogleStreamingRemoteEngine::EndRecognition() {
  FSMEventArgs event_args(EVENT_END_RECOGNITION);
  DispatchEvent(event_args);
}

void GoogleStreamingRemoteEngine::TakeAudioChunk(const AudioChunk& data) {
  FSMEventArgs event_args(EVENT_AUDIO_CHUNK);
  event_args.audio_data = &data;
  DispatchEvent(event_args);
}

void GoogleStreamingRemoteEngine::AudioChunksEnded() {
  FSMEventArgs event_args(EVENT_AUDIO_CHUNKS_ENDED);
  DispatchEvent(event_args);
}

void GoogleStreamingRemoteEngine::OnURLFetchComplete(const URLFetcher* source) {
  const bool kResponseComplete = true;
  DispatchHTTPResponse(source, kResponseComplete);
}

void GoogleStreamingRemoteEngine::OnURLFetchDownloadProgress(
    const URLFetcher* source, int64 current, int64 total) {
  const bool kPartialResponse = false;
  DispatchHTTPResponse(source, kPartialResponse);
}

void GoogleStreamingRemoteEngine::DispatchHTTPResponse(const URLFetcher* source,
                                                       bool end_of_response) {
  DCHECK(CalledOnValidThread());
  DCHECK(source);
  const bool response_is_good = source->GetStatus().is_success() &&
                                source->GetResponseCode() == 200;
  std::string response;
  if (response_is_good)
    source->GetResponseAsString(&response);
  const size_t current_response_length = response.size();

  DVLOG(1) << (source == downstream_fetcher_.get() ? "Downstream" : "Upstream")
           << "HTTP, code: " << source->GetResponseCode()
           << " length: " << current_response_length
           << " eor: " << end_of_response;

  // URLFetcher always provides the entire response buffer, but we are only
  // interested in the fresh data introduced by the last chunk. Therefore, we
  // drop the previous content we have already processed.
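  // For example, if the fetcher has accumulated 120 bytes so far and 80 of
  // them were handled by a previous call, only the trailing 40 bytes are kept
  // for processing below.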
  if (current_response_length != 0) {
    DCHECK_GE(current_response_length, previous_response_length_);
    response.erase(0, previous_response_length_);
    previous_response_length_ = current_response_length;
  }

  if (!response_is_good && source == downstream_fetcher_.get()) {
    DVLOG(1) << "Downstream error " << source->GetResponseCode();
    FSMEventArgs event_args(EVENT_DOWNSTREAM_ERROR);
    DispatchEvent(event_args);
    return;
  }
  if (!response_is_good && source == upstream_fetcher_.get()) {
    DVLOG(1) << "Upstream error " << source->GetResponseCode()
             << " EOR " << end_of_response;
    FSMEventArgs event_args(EVENT_UPSTREAM_ERROR);
    DispatchEvent(event_args);
    return;
  }

  // Ignore incoming data on the upstream connection.
  if (source == upstream_fetcher_.get())
    return;

  DCHECK(response_is_good && source == downstream_fetcher_.get());

  // The downstream response is organized in chunks, whose size is determined
  // by a 4-byte prefix, transparently handled by the ChunkedByteBuffer class.
  // Such chunks are sent by the speech recognition webservice over the HTTP
  // downstream channel using HTTP chunked transfer (unrelated to our chunks).
  // This function is called every time an HTTP chunk is received by the
  // URLFetcher. However, there is no particular correspondence between our
  // protocol chunks and HTTP chunks: a single HTTP chunk can carry a portion
  // of one protocol chunk or several protocol chunks together.
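  // Illustrative framing (the exact prefix handling lives in
  // ChunkedByteBuffer): each protocol chunk arrives as
  //   [4-byte length N][N bytes of a serialized proto::SpeechRecognitionEvent]
  // and may be split across, or coalesced with, HTTP chunks arbitrarily.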
  chunked_byte_buffer_.Append(response);

  // A single HTTP chunk can contain more than one data chunk, thus the while.
  while (chunked_byte_buffer_.HasChunks()) {
    FSMEventArgs event_args(EVENT_DOWNSTREAM_RESPONSE);
    event_args.response = chunked_byte_buffer_.PopChunk();
    DCHECK(event_args.response.get());
    DumpResponse(std::string(event_args.response->begin(),
                             event_args.response->end()));
    DispatchEvent(event_args);
  }
  if (end_of_response) {
    FSMEventArgs event_args(EVENT_DOWNSTREAM_CLOSED);
    DispatchEvent(event_args);
  }
}

bool GoogleStreamingRemoteEngine::IsRecognitionPending() const {
  DCHECK(CalledOnValidThread());
  return state_ != STATE_IDLE;
}

int GoogleStreamingRemoteEngine::GetDesiredAudioChunkDurationMs() const {
  return kAudioPacketIntervalMs;
}

// ----------------------- Core FSM implementation ---------------------------
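//
// Simplified view of the transitions implemented below (errors and
// EVENT_END_RECOGNITION abort back to STATE_IDLE from either connected state):
//   STATE_IDLE --EVENT_START_RECOGNITION--> STATE_BOTH_STREAMS_CONNECTED
//   STATE_BOTH_STREAMS_CONNECTED --EVENT_AUDIO_CHUNKS_ENDED-->
//       STATE_WAITING_DOWNSTREAM_RESULTS
//   STATE_WAITING_DOWNSTREAM_RESULTS --EVENT_DOWNSTREAM_CLOSED--> STATE_IDLE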

void GoogleStreamingRemoteEngine::DispatchEvent(
    const FSMEventArgs& event_args) {
  DCHECK(CalledOnValidThread());
  DCHECK_LE(event_args.event, EVENT_MAX_VALUE);
  DCHECK_LE(state_, STATE_MAX_VALUE);

  // Event dispatching must be sequential, otherwise it will break all the rules
  // and the assumptions of the finite state automata model.
  DCHECK(!is_dispatching_event_);
  is_dispatching_event_ = true;

  state_ = ExecuteTransitionAndGetNextState(event_args);

  is_dispatching_event_ = false;
}

GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::ExecuteTransitionAndGetNextState(
    const FSMEventArgs& event_args) {
  const FSMEvent event = event_args.event;
  switch (state_) {
    case STATE_IDLE:
      switch (event) {
        case EVENT_START_RECOGNITION:
          return ConnectBothStreams(event_args);
        case EVENT_END_RECOGNITION:
        // Note AUDIO_CHUNK and AUDIO_END events can remain enqueued in case of
        // abort, so we just silently drop them here.
        case EVENT_AUDIO_CHUNK:
        case EVENT_AUDIO_CHUNKS_ENDED:
        // DOWNSTREAM_CLOSED can be received if we end up here due to an error.
        case EVENT_DOWNSTREAM_CLOSED:
          return DoNothing(event_args);
        case EVENT_UPSTREAM_ERROR:
        case EVENT_DOWNSTREAM_ERROR:
        case EVENT_DOWNSTREAM_RESPONSE:
          return NotFeasible(event_args);
      }
      break;
    case STATE_BOTH_STREAMS_CONNECTED:
      switch (event) {
        case EVENT_AUDIO_CHUNK:
          return TransmitAudioUpstream(event_args);
        case EVENT_DOWNSTREAM_RESPONSE:
          return ProcessDownstreamResponse(event_args);
        case EVENT_AUDIO_CHUNKS_ENDED:
          return CloseUpstreamAndWaitForResults(event_args);
        case EVENT_END_RECOGNITION:
          return AbortSilently(event_args);
        case EVENT_UPSTREAM_ERROR:
        case EVENT_DOWNSTREAM_ERROR:
        case EVENT_DOWNSTREAM_CLOSED:
          return AbortWithError(event_args);
        case EVENT_START_RECOGNITION:
          return NotFeasible(event_args);
      }
      break;
    case STATE_WAITING_DOWNSTREAM_RESULTS:
      switch (event) {
        case EVENT_DOWNSTREAM_RESPONSE:
          return ProcessDownstreamResponse(event_args);
        case EVENT_DOWNSTREAM_CLOSED:
          return RaiseNoMatchErrorIfGotNoResults(event_args);
        case EVENT_END_RECOGNITION:
          return AbortSilently(event_args);
        case EVENT_UPSTREAM_ERROR:
        case EVENT_DOWNSTREAM_ERROR:
          return AbortWithError(event_args);
        case EVENT_START_RECOGNITION:
        case EVENT_AUDIO_CHUNK:
        case EVENT_AUDIO_CHUNKS_ENDED:
          return NotFeasible(event_args);
      }
      break;
  }
  return NotFeasible(event_args);
}

// ----------- Contract for all the FSM evolution functions below -------------
//  - Are guaranteed to be executed in the same thread (IO, except for tests);
//  - Are guaranteed to be not reentrant (themselves and each other);
//  - event_args members are guaranteed to be stable during the call;

GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::ConnectBothStreams(const FSMEventArgs&) {
  DCHECK(!upstream_fetcher_.get());
  DCHECK(!downstream_fetcher_.get());

  encoder_.reset(AudioEncoder::Create(kDefaultAudioCodec,
                                      config_.audio_sample_rate,
                                      config_.audio_num_bits_per_sample));
  DCHECK(encoder_.get());
  const std::string request_key = GenerateRequestKey();

  // Setup downstream fetcher.
  std::vector<std::string> downstream_args;
  downstream_args.push_back(
      "key=" + net::EscapeQueryParamValue(GetAPIKey(), true));
  downstream_args.push_back("pair=" + request_key);
  downstream_args.push_back("output=pb");
  GURL downstream_url(std::string(kWebServiceBaseUrl) +
                      std::string(kDownstreamUrl) +
                      JoinString(downstream_args, '&'));

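  // The resulting URL has the form (parameter values elided):
  //   https://www.google.com/speech-api/full-duplex/v1/down?
  //       key=...&pair=...&output=pb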
  downstream_fetcher_.reset(URLFetcher::Create(
      kDownstreamUrlFetcherIdForTesting, downstream_url, URLFetcher::GET,
      this));
  downstream_fetcher_->SetRequestContext(url_context_.get());
  downstream_fetcher_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
                                    net::LOAD_DO_NOT_SEND_COOKIES |
                                    net::LOAD_DO_NOT_SEND_AUTH_DATA);
  downstream_fetcher_->Start();

  // Setup upstream fetcher.
  // TODO(hans): Support for user-selected grammars.
  std::vector<std::string> upstream_args;
  upstream_args.push_back("key=" +
      net::EscapeQueryParamValue(GetAPIKey(), true));
  upstream_args.push_back("pair=" + request_key);
  upstream_args.push_back("output=pb");
  upstream_args.push_back(
      "lang=" + net::EscapeQueryParamValue(GetAcceptedLanguages(), true));
  upstream_args.push_back(
      config_.filter_profanities ? "pFilter=2" : "pFilter=0");
  if (config_.max_hypotheses > 0U) {
    int max_alternatives = std::min(kMaxMaxAlternatives,
                                    config_.max_hypotheses);
    upstream_args.push_back("maxAlternatives=" +
                            base::UintToString(max_alternatives));
  }
  upstream_args.push_back("client=chromium");
  if (!config_.hardware_info.empty()) {
    upstream_args.push_back(
        "xhw=" + net::EscapeQueryParamValue(config_.hardware_info, true));
  }
  if (config_.continuous)
    upstream_args.push_back("continuous");
  if (config_.interim_results)
    upstream_args.push_back("interim");

  GURL upstream_url(std::string(kWebServiceBaseUrl) +
                    std::string(kUpstreamUrl) +
                    JoinString(upstream_args, '&'));

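  // Analogously, the upstream URL has the form (values elided; bracketed
  // parameters are added only when |config_| requests them):
  //   https://www.google.com/speech-api/full-duplex/v1/up?
  //       key=...&pair=...&output=pb&lang=...&pFilter=...
  //       [&maxAlternatives=...]&client=chromium[&xhw=...]
  //       [&continuous][&interim]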
  upstream_fetcher_.reset(URLFetcher::Create(
      kUpstreamUrlFetcherIdForTesting, upstream_url, URLFetcher::POST, this));
  upstream_fetcher_->SetChunkedUpload(encoder_->mime_type());
  upstream_fetcher_->SetRequestContext(url_context_.get());
  upstream_fetcher_->SetReferrer(config_.origin_url);
  upstream_fetcher_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
                                  net::LOAD_DO_NOT_SEND_COOKIES |
                                  net::LOAD_DO_NOT_SEND_AUTH_DATA);
  upstream_fetcher_->Start();
  previous_response_length_ = 0;
  return STATE_BOTH_STREAMS_CONNECTED;
}

GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::TransmitAudioUpstream(
    const FSMEventArgs& event_args) {
  DCHECK(upstream_fetcher_.get());
  DCHECK(event_args.audio_data.get());
  const AudioChunk& audio = *(event_args.audio_data.get());

  DCHECK_EQ(audio.bytes_per_sample(), config_.audio_num_bits_per_sample / 8);
  encoder_->Encode(audio);
  scoped_refptr<AudioChunk> encoded_data(encoder_->GetEncodedDataAndClear());
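  // |false| marks this as a non-final chunk: more encoded audio will follow.
  // The last chunk is appended in CloseUpstreamAndWaitForResults().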
  upstream_fetcher_->AppendChunkToUpload(encoded_data->AsString(), false);
  return state_;
}

GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::ProcessDownstreamResponse(
    const FSMEventArgs& event_args) {
  DCHECK(event_args.response.get());

  proto::SpeechRecognitionEvent ws_event;
  if (!ws_event.ParseFromString(std::string(event_args.response->begin(),
                                            event_args.response->end())))
    return AbortWithError(event_args);

  // An empty (default) event is used to notify us that the upstream has
  // been connected. Ignore.
  if (!ws_event.result_size() && (!ws_event.has_status() ||
      ws_event.status() == proto::SpeechRecognitionEvent::STATUS_SUCCESS)) {
    DVLOG(1) << "Received empty response";
    return state_;
  }

  if (ws_event.has_status()) {
    switch (ws_event.status()) {
      case proto::SpeechRecognitionEvent::STATUS_SUCCESS:
        break;
      case proto::SpeechRecognitionEvent::STATUS_NO_SPEECH:
        return Abort(SPEECH_RECOGNITION_ERROR_NO_SPEECH);
      case proto::SpeechRecognitionEvent::STATUS_ABORTED:
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
      case proto::SpeechRecognitionEvent::STATUS_AUDIO_CAPTURE:
        return Abort(SPEECH_RECOGNITION_ERROR_AUDIO);
      case proto::SpeechRecognitionEvent::STATUS_NETWORK:
        return Abort(SPEECH_RECOGNITION_ERROR_NETWORK);
      case proto::SpeechRecognitionEvent::STATUS_NOT_ALLOWED:
        // TODO(hans): We need a better error code for this.
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
      case proto::SpeechRecognitionEvent::STATUS_SERVICE_NOT_ALLOWED:
        // TODO(hans): We need a better error code for this.
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
      case proto::SpeechRecognitionEvent::STATUS_BAD_GRAMMAR:
        return Abort(SPEECH_RECOGNITION_ERROR_BAD_GRAMMAR);
      case proto::SpeechRecognitionEvent::STATUS_LANGUAGE_NOT_SUPPORTED:
        // TODO(hans): We need a better error code for this.
        return Abort(SPEECH_RECOGNITION_ERROR_ABORTED);
    }
  }

  SpeechRecognitionResults results;
  for (int i = 0; i < ws_event.result_size(); ++i) {
    const proto::SpeechRecognitionResult& ws_result = ws_event.result(i);
    results.push_back(SpeechRecognitionResult());
    SpeechRecognitionResult& result = results.back();
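    // A result that is not flagged as final is a provisional (interim)
    // hypothesis that the server may still revise in a later event.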
    result.is_provisional = !(ws_result.has_final() && ws_result.final());

    if (!result.is_provisional)
      got_last_definitive_result_ = true;

    for (int j = 0; j < ws_result.alternative_size(); ++j) {
      const proto::SpeechRecognitionAlternative& ws_alternative =
          ws_result.alternative(j);
      SpeechRecognitionHypothesis hypothesis;
      if (ws_alternative.has_confidence())
        hypothesis.confidence = ws_alternative.confidence();
      else if (ws_result.has_stability())
        hypothesis.confidence = ws_result.stability();
      DCHECK(ws_alternative.has_transcript());
      // TODO(hans): Perhaps the transcript should be required in the proto?
      if (ws_alternative.has_transcript())
        hypothesis.utterance = base::UTF8ToUTF16(ws_alternative.transcript());

      result.hypotheses.push_back(hypothesis);
    }
  }

  delegate()->OnSpeechRecognitionEngineResults(results);

  return state_;
}

GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::RaiseNoMatchErrorIfGotNoResults(
    const FSMEventArgs& event_args) {
  if (!got_last_definitive_result_) {
    // Provide an empty result to notify that recognition ended with no
    // errors, but also without any further results.
    delegate()->OnSpeechRecognitionEngineResults(SpeechRecognitionResults());
  }
  return AbortSilently(event_args);
}

GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::CloseUpstreamAndWaitForResults(
    const FSMEventArgs&) {
  DCHECK(upstream_fetcher_.get());
  DCHECK(encoder_.get());

  DVLOG(1) << "Closing upstream.";

  // The encoder requires a non-empty final buffer, so we encode a packet
  // of silence in case the encoder has no data already.
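  // For example, with a 16000 Hz sample rate and kAudioPacketIntervalMs = 100,
  // this allocates 16000 * 100 / 1000 = 1600 16-bit samples (3200 bytes).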
  std::vector<short> samples(
      config_.audio_sample_rate * kAudioPacketIntervalMs / 1000);
  scoped_refptr<AudioChunk> dummy_chunk =
      new AudioChunk(reinterpret_cast<uint8*>(&samples[0]),
                     samples.size() * sizeof(short),
                     encoder_->bits_per_sample() / 8);
  encoder_->Encode(*dummy_chunk.get());
  encoder_->Flush();
  scoped_refptr<AudioChunk> encoded_dummy_data =
      encoder_->GetEncodedDataAndClear();
  DCHECK(!encoded_dummy_data->IsEmpty());
  encoder_.reset();

  upstream_fetcher_->AppendChunkToUpload(encoded_dummy_data->AsString(), true);
  got_last_definitive_result_ = false;
  return STATE_WAITING_DOWNSTREAM_RESULTS;
}

GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::CloseDownstream(const FSMEventArgs&) {
  DCHECK(!upstream_fetcher_.get());
  DCHECK(downstream_fetcher_.get());

  DVLOG(1) << "Closing downstream.";
  downstream_fetcher_.reset();
  return STATE_IDLE;
}

GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::AbortSilently(const FSMEventArgs&) {
  return Abort(SPEECH_RECOGNITION_ERROR_NONE);
}

GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::AbortWithError(const FSMEventArgs&) {
  return Abort(SPEECH_RECOGNITION_ERROR_NETWORK);
}

GoogleStreamingRemoteEngine::FSMState GoogleStreamingRemoteEngine::Abort(
    SpeechRecognitionErrorCode error_code) {
  DVLOG(1) << "Aborting with error " << error_code;

  if (error_code != SPEECH_RECOGNITION_ERROR_NONE) {
    delegate()->OnSpeechRecognitionEngineError(
        SpeechRecognitionError(error_code));
  }
  downstream_fetcher_.reset();
  upstream_fetcher_.reset();
  encoder_.reset();
  return STATE_IDLE;
}

GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::DoNothing(const FSMEventArgs&) {
  return state_;
}

GoogleStreamingRemoteEngine::FSMState
GoogleStreamingRemoteEngine::NotFeasible(const FSMEventArgs& event_args) {
  NOTREACHED() << "Unfeasible event " << event_args.event
               << " in state " << state_;
  return state_;
}

std::string GoogleStreamingRemoteEngine::GetAcceptedLanguages() const {
  std::string langs = config_.language;
  if (langs.empty() && url_context_.get()) {
    // If no language is provided then we use the first from the accepted
    // language list. If this list is empty then it defaults to "en-US".
    // Example of the contents of this list: "es,en-GB;q=0.8", ""
    net::URLRequestContext* request_context =
        url_context_->GetURLRequestContext();
    DCHECK(request_context);
    // TODO(pauljensen): GoogleStreamingRemoteEngine should be constructed with
    // a reference to the HttpUserAgentSettings rather than accessing the
    // accept language through the URLRequestContext.
    if (request_context->http_user_agent_settings()) {
      std::string accepted_language_list =
          request_context->http_user_agent_settings()->GetAcceptLanguage();
      size_t separator = accepted_language_list.find_first_of(",;");
      if (separator != std::string::npos)
        langs = accepted_language_list.substr(0, separator);
    }
  }
  if (langs.empty())
    langs = "en-US";
  return langs;
}

// TODO(primiano): Is there any utility in the codebase that already does this?
std::string GoogleStreamingRemoteEngine::GenerateRequestKey() const {
  const int64 kKeepLowBytes = GG_LONGLONG(0x00000000FFFFFFFF);
  const int64 kKeepHighBytes = GG_LONGLONG(0xFFFFFFFF00000000);

  // Just keep the least significant bits of the timestamp, in order to reduce
  // the probability of collisions.
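  // The resulting 64-bit key therefore has its high 32 bits taken from a
  // random number and its low 32 bits from the timestamp; HexEncode() renders
  // it as a 16-character hexadecimal string.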
  int64 key = (base::Time::Now().ToInternalValue() & kKeepLowBytes) |
              (base::RandUint64() & kKeepHighBytes);
  return base::HexEncode(reinterpret_cast<void*>(&key), sizeof(key));
}

GoogleStreamingRemoteEngine::FSMEventArgs::FSMEventArgs(FSMEvent event_value)
    : event(event_value) {
}

GoogleStreamingRemoteEngine::FSMEventArgs::~FSMEventArgs() {
}

}  // namespace content