Import Cobalt 25.master.0.1032706
diff --git a/content/browser/speech/audio_buffer.cc b/content/browser/speech/audio_buffer.cc
index 1038ac9..b109523 100644
--- a/content/browser/speech/audio_buffer.cc
+++ b/content/browser/speech/audio_buffer.cc
@@ -9,8 +9,7 @@
 namespace content {
 
 AudioChunk::AudioChunk(int bytes_per_sample)
-    : bytes_per_sample_(bytes_per_sample) {
-}
+    : bytes_per_sample_(bytes_per_sample) {}
 
 AudioChunk::AudioChunk(size_t length, int bytes_per_sample)
     : data_string_(length, '\0'), bytes_per_sample_(bytes_per_sample) {
@@ -46,8 +45,7 @@
 
 AudioBuffer::AudioBuffer(int bytes_per_sample)
     : bytes_per_sample_(bytes_per_sample) {
-  DCHECK(bytes_per_sample == 1 ||
-         bytes_per_sample == 2 ||
+  DCHECK(bytes_per_sample == 1 || bytes_per_sample == 2 ||
          bytes_per_sample == 4);
 }
 
@@ -69,7 +67,7 @@
 scoped_refptr<AudioChunk> AudioBuffer::DequeueAll() {
   size_t resulting_length = 0;
   ChunksContainer::const_iterator it;
-  // In order to improve performance, calulate in advance the total length
+  // In order to improve performance, calculate in advance the total length
   // and then copy the chunks.
   for (it = chunks_.begin(); it != chunks_.end(); ++it) {
     resulting_length += (*it)->AsString().length();
diff --git a/content/browser/speech/audio_buffer.h b/content/browser/speech/audio_buffer.h
index 1231fa4..0c43c0e 100644
--- a/content/browser/speech/audio_buffer.h
+++ b/content/browser/speech/audio_buffer.h
@@ -18,8 +18,8 @@
 namespace content {
 
 // Models a chunk derived from an AudioBuffer.
-class CONTENT_EXPORT AudioChunk :
-    public base::RefCountedThreadSafe<AudioChunk> {
+class CONTENT_EXPORT AudioChunk
+    : public base::RefCountedThreadSafe<AudioChunk> {
  public:
   explicit AudioChunk(int bytes_per_sample);
   // Creates a chunk of |length| bytes, initialized to zeros.
diff --git a/content/browser/speech/chunked_byte_buffer.cc b/content/browser/speech/chunked_byte_buffer.cc
index 5e3f70c..032d3f8 100644
--- a/content/browser/speech/chunked_byte_buffer.cc
+++ b/content/browser/speech/chunked_byte_buffer.cc
@@ -23,9 +23,7 @@
 namespace content {
 
 ChunkedByteBuffer::ChunkedByteBuffer()
-    : partial_chunk_(new Chunk()),
-      total_bytes_stored_(0) {
-}
+    : partial_chunk_(new Chunk()), total_bytes_stored_(0) {}
 
 ChunkedByteBuffer::~ChunkedByteBuffer() {
   Clear();
@@ -62,8 +60,7 @@
     DCHECK_GT(insert_length, 0U);
     DCHECK_LE(insert_length, remaining_bytes);
     DCHECK_LE(next_data + insert_length, start + length);
-    insert_target->insert(insert_target->end(),
-                          next_data,
+    insert_target->insert(insert_target->end(), next_data,
                           next_data + insert_length);
     next_data += insert_length;
     remaining_bytes -= insert_length;
@@ -117,8 +114,7 @@
 
 ChunkedByteBuffer::Chunk::Chunk() : content(new std::vector<uint8_t>()) {}
 
-ChunkedByteBuffer::Chunk::~Chunk() {
-}
+ChunkedByteBuffer::Chunk::~Chunk() {}
 
 size_t ChunkedByteBuffer::Chunk::ExpectedContentLength() const {
   DCHECK_EQ(header.size(), kHeaderLength);
diff --git a/content/browser/speech/chunked_byte_buffer.h b/content/browser/speech/chunked_byte_buffer.h
index 98bdd92..93bae9e 100644
--- a/content/browser/speech/chunked_byte_buffer.h
+++ b/content/browser/speech/chunked_byte_buffer.h
@@ -19,7 +19,7 @@
 namespace content {
 
 // Models a chunk-oriented byte buffer. The term chunk is herein defined as an
-// arbitrary sequence of bytes that is preceeded by N header bytes, indicating
+// arbitrary sequence of bytes that is preceded by N header bytes, indicating
 // its size. Data may be appended to the buffer with no particular respect of
 // chunks boundaries. However, chunks can be extracted (FIFO) only when their
 // content (according to their header) is fully available in the buffer.
@@ -72,7 +72,6 @@
   DISALLOW_COPY_AND_ASSIGN(ChunkedByteBuffer);
 };
 
-
 }  // namespace content
 
 #endif  // CONTENT_BROWSER_SPEECH_CHUNKED_BYTE_BUFFER_H_
diff --git a/content/browser/speech/endpointer/endpointer.cc b/content/browser/speech/endpointer/endpointer.cc
index 96d8e6c..f45ac12 100644
--- a/content/browser/speech/endpointer/endpointer.cc
+++ b/content/browser/speech/endpointer/endpointer.cc
@@ -10,7 +10,7 @@
 namespace {
 const int64_t kMicrosecondsPerSecond = base::Time::kMicrosecondsPerSecond;
 const int kFrameRate = 50;  // 1 frame = 20ms of audio.
-}
+}  // namespace
 
 namespace content {
 
@@ -61,7 +61,7 @@
   waiting_for_speech_complete_timeout_ = false;
   speech_previously_detected_ = false;
   speech_input_complete_ = false;
-  audio_frame_time_us_ = 0; // Reset time for packets sent to endpointer.
+  audio_frame_time_us_ = 0;  // Reset time for packets sent to endpointer.
   speech_end_time_us_ = -1;
   speech_start_time_us_ = -1;
 }
@@ -107,8 +107,7 @@
         reinterpret_cast<const int16_t*>(int16_audio_bus.interleaved_data());
   } else {
     DCHECK_EQ(audio_bus.sample_type(), AudioBus::kInt16);
-    audio_data =
-        reinterpret_cast<const int16_t*>(audio_bus.interleaved_data());
+    audio_data = reinterpret_cast<const int16_t*>(audio_bus.interleaved_data());
   }
 #else
 EpStatus Endpointer::ProcessAudio(const AudioChunk& raw_audio, float* rms_out) {
@@ -123,10 +122,8 @@
   int sample_index = 0;
   while (sample_index + frame_size_ <= num_samples) {
     // Have the endpointer process the frame.
-    energy_endpointer_.ProcessAudioFrame(audio_frame_time_us_,
-                                         audio_data + sample_index,
-                                         frame_size_,
-                                         rms_out);
+    energy_endpointer_.ProcessAudioFrame(
+        audio_frame_time_us_, audio_data + sample_index, frame_size_, rms_out);
     sample_index += frame_size_;
     audio_frame_time_us_ +=
         (frame_size_ * kMicrosecondsPerSecond) / sample_rate_;
@@ -157,7 +154,7 @@
       // Speech possibly complete timeout.
       if ((waiting_for_speech_possibly_complete_timeout_) &&
           (ep_time - speech_end_time_us_ >
-              speech_input_possibly_complete_silence_length_us_)) {
+           speech_input_possibly_complete_silence_length_us_)) {
         waiting_for_speech_possibly_complete_timeout_ = false;
       }
       if (waiting_for_speech_complete_timeout_) {
@@ -173,8 +170,7 @@
           requested_silence_length =
               long_speech_input_complete_silence_length_us_;
         } else {
-          requested_silence_length =
-              speech_input_complete_silence_length_us_;
+          requested_silence_length = speech_input_complete_silence_length_us_;
         }
 
         // Speech complete timeout.
diff --git a/content/browser/speech/endpointer/endpointer.h b/content/browser/speech/endpointer/endpointer.h
index 2264842..a0d450b 100644
--- a/content/browser/speech/endpointer/endpointer.h
+++ b/content/browser/speech/endpointer/endpointer.h
@@ -78,9 +78,7 @@
 
   // Returns true if the endpointer detected reasonable audio levels above
   // background noise which could be user speech, false if not.
-  bool DidStartReceivingSpeech() const {
-    return speech_previously_detected_;
-  }
+  bool DidStartReceivingSpeech() const { return speech_previously_detected_; }
 
   bool IsEstimatingEnvironment() const {
     return energy_endpointer_.estimating_environment();
@@ -102,9 +100,7 @@
     long_speech_length_us_ = time_us;
   }
 
-  bool speech_input_complete() const {
-    return speech_input_complete_;
-  }
+  bool speech_input_complete() const { return speech_input_complete_; }
 
 #if defined(STARBOARD)
   int sample_rate() const { return sample_rate_; }
@@ -123,7 +119,7 @@
 
   // The speechInputPossiblyComplete event signals that silence/noise has been
   // detected for a *short* amount of time after some speech has been detected.
-  // This proporty specifies the time period.
+  // This property specifies the time period.
   int64_t speech_input_possibly_complete_silence_length_us_;
 
   // The speechInputComplete event signals that silence/noise has been
diff --git a/content/browser/speech/endpointer/endpointer_unittest.cc b/content/browser/speech/endpointer/endpointer_unittest.cc
index 4b3cbb5..5578202 100644
--- a/content/browser/speech/endpointer/endpointer_unittest.cc
+++ b/content/browser/speech/endpointer/endpointer_unittest.cc
@@ -3,20 +3,22 @@
 // found in the LICENSE file.
 
 #include <stdint.h>
+#include <memory>
+#include <utility>
 
 #include "content/browser/speech/audio_buffer.h"
 #include "content/browser/speech/endpointer/endpointer.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace {
-const int kFrameRate = 50;  // 20 ms long frames for AMR encoding.
+const int kFrameRate = 50;     // 20 ms long frames for AMR encoding.
 const int kSampleRate = 8000;  // 8 k samples per second for AMR encoding.
 
-// At 8 sample per second a 20 ms frame is 160 samples, which corrsponds
+// At 8000 samples per second a 20 ms frame is 160 samples, which corresponds
 // to the AMR codec.
 const int kFrameSize = kSampleRate / kFrameRate;  // 160 samples.
 static_assert(kFrameSize == 160, "invalid frame size");
-}
+}  // namespace
 
 namespace content {
 
@@ -50,7 +52,7 @@
     // Create random samples.
     for (int i = 0; i < kFrameSize; ++i) {
       float randNum = static_cast<float>(rand() - (RAND_MAX / 2)) /
-          static_cast<float>(RAND_MAX);
+                      static_cast<float>(RAND_MAX);
       samples[i] = static_cast<int16_t>(gain * randNum);
     }
 
@@ -91,7 +93,7 @@
 
 TEST(EndpointerTest, TestEnergyEndpointerEvents) {
   // Initialize endpointer and configure it. We specify the parameters
-  // here for a 20ms window, and a 20ms step size, which corrsponds to
+  // here for a 20ms window, and a 20ms step size, which corresponds to
   // the narrow band AMR codec.
   EnergyEndpointerParams ep_config;
   ep_config.set_frame_period(1.0f / static_cast<float>(kFrameRate));
@@ -114,7 +116,7 @@
   RunEndpointerEventsTest(&frame_processor);
 
   endpointer.EndSession();
-};
+}
 
 // Test endpointer wrapper class.
 class EndpointerFrameProcessor : public FrameProcessor {
diff --git a/content/browser/speech/endpointer/energy_endpointer.cc b/content/browser/speech/endpointer/energy_endpointer.cc
index eb0b565..79be5d5 100644
--- a/content/browser/speech/endpointer/energy_endpointer.cc
+++ b/content/browser/speech/endpointer/energy_endpointer.cc
@@ -76,7 +76,7 @@
 void EnergyEndpointer::HistoryRing::SetRing(int size, bool initial_state) {
   insertion_index_ = 0;
   decision_points_.clear();
-  DecisionPoint init = { -1, initial_state };
+  DecisionPoint init = {-1, initial_state};
   decision_points_.resize(size, init);
 }
 
@@ -138,11 +138,9 @@
       rms_adapt_(0),
       start_lag_(0),
       end_lag_(0),
-      user_input_start_time_us_(0) {
-}
+      user_input_start_time_us_(0) {}
 
-EnergyEndpointer::~EnergyEndpointer() {
-}
+EnergyEndpointer::~EnergyEndpointer() {}
 
 int EnergyEndpointer::TimeToFrame(float time) const {
   return static_cast<int32_t>(0.5 + (time / params_.frame_period()));
@@ -183,8 +181,8 @@
     max_window_dur_ = params_.offset_window();
   Restart(true);
 
-  offset_confirm_dur_sec_ = params_.offset_window() -
-                            params_.offset_confirm_dur();
+  offset_confirm_dur_sec_ =
+      params_.offset_window() - params_.offset_confirm_dur();
   if (offset_confirm_dur_sec_ < 0.0)
     offset_confirm_dur_sec_ = 0.0;
 
@@ -204,10 +202,10 @@
   frame_counter_ = 0;  // Used for rapid initial update of levels.
 
   sample_rate_ = params_.sample_rate();
-  start_lag_ = static_cast<int>(sample_rate_ /
-                                params_.max_fundamental_frequency());
-  end_lag_ = static_cast<int>(sample_rate_ /
-                              params_.min_fundamental_frequency());
+  start_lag_ =
+      static_cast<int>(sample_rate_ / params_.max_fundamental_frequency());
+  end_lag_ =
+      static_cast<int>(sample_rate_ / params_.min_fundamental_frequency());
 }
 
 void EnergyEndpointer::StartSession() {
@@ -316,9 +314,9 @@
         } else {
           rms_adapt_ = (0.95f * rms_adapt_) + (0.05f * rms);
         }
-        float target_threshold = 0.3f * rms_adapt_ +  noise_level_;
-        decision_threshold_ = (.90f * decision_threshold_) +
-                              (0.10f * target_threshold);
+        float target_threshold = 0.3f * rms_adapt_ + noise_level_;
+        decision_threshold_ =
+            (.90f * decision_threshold_) + (0.10f * target_threshold);
       }
     }
 
@@ -346,7 +344,7 @@
     // Alpha increases from 0 to (k-1)/k where k is the number of time
     // steps in the initial adaptation period.
     float alpha = static_cast<float>(frame_counter_) /
-        static_cast<float>(fast_update_frames_);
+                  static_cast<float>(fast_update_frames_);
     noise_level_ = (alpha * noise_level_) + ((1 - alpha) * rms);
     DVLOG(1) << "FAST UPDATE, frame_counter_ " << frame_counter_
              << ", fast_update_frames_ " << fast_update_frames_;
@@ -360,7 +358,7 @@
       noise_level_ = (0.95f * noise_level_) + (0.05f * rms);
   }
   if (estimating_environment_ || (frame_counter_ < fast_update_frames_)) {
-    decision_threshold_ = noise_level_ * 2; // 6dB above noise level.
+    decision_threshold_ = noise_level_ * 2;  // 6dB above noise level.
     // Set a floor
     if (decision_threshold_ < params_.min_decision_threshold())
       decision_threshold_ = params_.min_decision_threshold();
diff --git a/content/browser/speech/endpointer/energy_endpointer.h b/content/browser/speech/endpointer/energy_endpointer.h
index 7b0b292..bf880e6 100644
--- a/content/browser/speech/endpointer/energy_endpointer.h
+++ b/content/browser/speech/endpointer/energy_endpointer.h
@@ -13,7 +13,7 @@
 // defers decisions re onset and offset times until these
 // specifications have been met.  Three basic intervals are tested: an
 // onset window, a speech-on window, and an offset window.  We require
-// super-threshold to exceed some mimimum total durations in the onset
+// super-threshold to exceed some minimum total durations in the onset
 // and speech-on windows before declaring the speech onset time, and
 // we specify a required sub-threshold residency in the offset window
 // before declaring speech offset. As the various residency requirements are
@@ -24,7 +24,7 @@
 // important that the background noise level be estimated initially for
 // robustness in noisy conditions. The first frames are assumed to be background
 // noise and a fast update rate is used for the noise level. The duration for
-// fast update is controlled by the fast_update_dur_ paramter.
+// fast update is controlled by the fast_update_dur_ parameter.
 //
 // If used in noisy conditions, the endpointer should be started and run in the
 // EnvironmentEstimation mode, for at least 200ms, before switching to
@@ -91,9 +91,7 @@
   // corresponding to the most recently computed frame.
   EpStatus Status(int64_t* status_time_us) const;
 
-  bool estimating_environment() const {
-    return estimating_environment_;
-  }
+  bool estimating_environment() const { return estimating_environment_; }
 
   // Returns estimated noise level in dB.
   float GetNoiseLevelDb() const;
@@ -113,7 +111,7 @@
   // the 'time' (in seconds).
   int TimeToFrame(float time) const;
 
-  EpStatus status_;  // The current state of this instance.
+  EpStatus status_;               // The current state of this instance.
   float offset_confirm_dur_sec_;  // max on time allowed to confirm POST_SPEECH
   int64_t
       endpointer_time_us_;  // Time of the most recently received audio frame.
@@ -122,7 +120,7 @@
   int64_t
       frame_counter_;     // Number of frames seen. Used for initial adaptation.
   float max_window_dur_;  // Largest search window size (seconds)
-  float sample_rate_;  // Sampling rate.
+  float sample_rate_;     // Sampling rate.
 
   // Ring buffers to hold the speech activity history.
   std::unique_ptr<HistoryRing> history_;
diff --git a/content/browser/speech/endpointer/energy_endpointer_params.h b/content/browser/speech/endpointer/energy_endpointer_params.h
index 1510435..1ce3bb6 100644
--- a/content/browser/speech/endpointer/energy_endpointer_params.h
+++ b/content/browser/speech/endpointer/energy_endpointer_params.h
@@ -20,9 +20,7 @@
 
   // Accessors and mutators
   float frame_period() const { return frame_period_; }
-  void set_frame_period(float frame_period) {
-    frame_period_ = frame_period;
-  }
+  void set_frame_period(float frame_period) { frame_period_ = frame_period; }
 
   float frame_duration() const { return frame_duration_; }
   void set_frame_duration(float frame_duration) {
@@ -104,16 +102,16 @@
   }
 
  private:
-  float frame_period_;  // Frame period
-  float frame_duration_;  // Window size
-  float onset_window_;  // Interval scanned for onset activity
+  float speech_on_window_;        // Interval scanned for ongoing speech
-  float offset_window_;  // Interval scanned for offset evidence
-  float offset_confirm_dur_;  // Silence duration required to confirm offset
-  float decision_threshold_;  // Initial rms detection threshold
+  float frame_period_;            // Frame period
+  float frame_duration_;          // Window size
+  float onset_window_;            // Interval scanned for onset activity
+  float speech_on_window_;        // Interval scanned for ongoing speech
+  float offset_window_;           // Interval scanned for offset evidence
+  float offset_confirm_dur_;      // Silence duration required to confirm offset
+  float decision_threshold_;      // Initial rms detection threshold
   float min_decision_threshold_;  // Minimum rms detection threshold
-  float fast_update_dur_;  // Period for initial estimation of levels.
-  float sample_rate_;  // Expected sample rate.
+  float fast_update_dur_;         // Period for initial estimation of levels.
+  float sample_rate_;             // Expected sample rate.
 
   // Time to add on either side of endpoint threshold crossings
   float endpoint_margin_;